xref: /netbsd-src/external/bsd/jemalloc/dist/src/arena.c (revision 3117ece4fc4a4ca4489ba793710b60b0d26bab6c)
1 #include "jemalloc/internal/jemalloc_preamble.h"
2 #include "jemalloc/internal/jemalloc_internal_includes.h"
3 
4 #include "jemalloc/internal/assert.h"
5 #include "jemalloc/internal/decay.h"
6 #include "jemalloc/internal/ehooks.h"
7 #include "jemalloc/internal/extent_dss.h"
8 #include "jemalloc/internal/extent_mmap.h"
9 #include "jemalloc/internal/san.h"
10 #include "jemalloc/internal/mutex.h"
11 #include "jemalloc/internal/rtree.h"
12 #include "jemalloc/internal/safety_check.h"
13 #include "jemalloc/internal/util.h"
14 
15 JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
16 
17 /******************************************************************************/
18 /* Data. */
19 
20 /*
21  * Define names for both uninitialized and initialized phases, so that
22  * options and mallctl processing are straightforward.
23  */
24 const char *percpu_arena_mode_names[] = {
25 	"percpu",
26 	"phycpu",
27 	"disabled",
28 	"percpu",
29 	"phycpu"
30 };
31 percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;
32 
33 ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
34 ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
35 
36 static atomic_zd_t dirty_decay_ms_default;
37 static atomic_zd_t muzzy_decay_ms_default;
38 
39 emap_t arena_emap_global;
40 pa_central_t arena_pa_central_global;
41 
42 div_info_t arena_binind_div_info[SC_NBINS];
43 
44 size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
45 size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
46 
47 uint32_t arena_bin_offsets[SC_NBINS];
48 static unsigned nbins_total;
49 
50 static unsigned huge_arena_ind;
51 
52 const arena_config_t arena_config_default = {
53 	/* .extent_hooks = */ (extent_hooks_t *)__UNCONST(&ehooks_default_extent_hooks),
54 	/* .metadata_use_hooks = */ true,
55 };
56 
57 /******************************************************************************/
58 /*
59  * Function prototypes for static functions that are referenced prior to
60  * definition.
61  */
62 
63 static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
64     bool is_background_thread, bool all);
65 static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
66     bin_t *bin);
67 static void
68 arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
69     size_t npages_new);
70 
71 /******************************************************************************/
72 
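/*
 * Merge this arena's basic stats (thread count, dss precedence, decay
 * settings, and active/dirty/muzzy page counts) into the caller-provided
 * outputs.  Note that *nthreads is added to rather than overwritten.
 */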
73 void
74 arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
75     const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
76     size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
77 	*nthreads += arena_nthreads_get(arena, false);
78 	*dss = dss_prec_names[arena_dss_prec_get(arena)];
79 	*dirty_decay_ms = arena_decay_ms_get(arena, extent_state_dirty);
80 	*muzzy_decay_ms = arena_decay_ms_get(arena, extent_state_muzzy);
81 	pa_shard_basic_stats_merge(&arena->pa_shard, nactive, ndirty, nmuzzy);
82 }
83 
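/*
 * Full stats merge: basic stats plus base allocator, large-allocation,
 * tcache, mutex-profiling, pa_shard, and per-bin statistics.  Requires
 * statistics support to be compiled in (cassert(config_stats)).
 */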
84 void
85 arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
86     const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
87     size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
88     bin_stats_data_t *bstats, arena_stats_large_t *lstats,
89     pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats) {
90 	cassert(config_stats);
91 
92 	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
93 	    muzzy_decay_ms, nactive, ndirty, nmuzzy);
94 
95 	size_t base_allocated, base_resident, base_mapped, metadata_thp;
96 	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
97 	    &base_mapped, &metadata_thp);
98 	size_t pac_mapped_sz = pac_mapped(&arena->pa_shard.pac);
99 	astats->mapped += base_mapped + pac_mapped_sz;
100 	astats->resident += base_resident;
101 
102 	LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
103 
104 	astats->base += base_allocated;
105 	atomic_load_add_store_zu(&astats->internal, arena_internal_get(arena));
106 	astats->metadata_thp += metadata_thp;
107 
108 	for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
109 		uint64_t nmalloc = locked_read_u64(tsdn,
110 		    LOCKEDINT_MTX(arena->stats.mtx),
111 		    &arena->stats.lstats[i].nmalloc);
112 		locked_inc_u64_unsynchronized(&lstats[i].nmalloc, nmalloc);
113 		astats->nmalloc_large += nmalloc;
114 
115 		uint64_t ndalloc = locked_read_u64(tsdn,
116 		    LOCKEDINT_MTX(arena->stats.mtx),
117 		    &arena->stats.lstats[i].ndalloc);
118 		locked_inc_u64_unsynchronized(&lstats[i].ndalloc, ndalloc);
119 		astats->ndalloc_large += ndalloc;
120 
121 		uint64_t nrequests = locked_read_u64(tsdn,
122 		    LOCKEDINT_MTX(arena->stats.mtx),
123 		    &arena->stats.lstats[i].nrequests);
124 		locked_inc_u64_unsynchronized(&lstats[i].nrequests,
125 		    nmalloc + nrequests);
126 		astats->nrequests_large += nmalloc + nrequests;
127 
128 		/* nfill == nmalloc for large currently. */
129 		locked_inc_u64_unsynchronized(&lstats[i].nfills, nmalloc);
130 		astats->nfills_large += nmalloc;
131 
132 		uint64_t nflush = locked_read_u64(tsdn,
133 		    LOCKEDINT_MTX(arena->stats.mtx),
134 		    &arena->stats.lstats[i].nflushes);
135 		locked_inc_u64_unsynchronized(&lstats[i].nflushes, nflush);
136 		astats->nflushes_large += nflush;
137 
138 		assert(nmalloc >= ndalloc);
139 		assert(nmalloc - ndalloc <= SIZE_T_MAX);
140 		size_t curlextents = (size_t)(nmalloc - ndalloc);
141 		lstats[i].curlextents += curlextents;
142 		astats->allocated_large +=
143 		    curlextents * sz_index2size(SC_NBINS + i);
144 	}
145 
146 	pa_shard_stats_merge(tsdn, &arena->pa_shard, &astats->pa_shard_stats,
147 	    estats, hpastats, secstats, &astats->resident);
148 
149 	LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
150 
151 	/* Currently cached bytes and sanitizer-stashed bytes in tcache. */
152 	astats->tcache_bytes = 0;
153 	astats->tcache_stashed_bytes = 0;
154 	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
155 	cache_bin_array_descriptor_t *descriptor;
156 	ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
157 		for (szind_t i = 0; i < nhbins; i++) {
158 			cache_bin_t *cache_bin = &descriptor->bins[i];
159 			cache_bin_sz_t ncached, nstashed;
160 			cache_bin_nitems_get_remote(cache_bin,
161 			    &tcache_bin_info[i], &ncached, &nstashed);
162 
163 			astats->tcache_bytes += ncached * sz_index2size(i);
164 			astats->tcache_stashed_bytes += nstashed *
165 			    sz_index2size(i);
166 		}
167 	}
168 	malloc_mutex_prof_read(tsdn,
169 	    &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
170 	    &arena->tcache_ql_mtx);
171 	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
172 
173 #define READ_ARENA_MUTEX_PROF_DATA(mtx, ind)				\
174     malloc_mutex_lock(tsdn, &arena->mtx);				\
175     malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind],		\
176         &arena->mtx);							\
177     malloc_mutex_unlock(tsdn, &arena->mtx);
178 
179 	/* Gather per arena mutex profiling data. */
180 	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
181 	READ_ARENA_MUTEX_PROF_DATA(base->mtx,
182 	    arena_prof_mutex_base);
183 #undef READ_ARENA_MUTEX_PROF_DATA
184 	pa_shard_mtx_stats_read(tsdn, &arena->pa_shard,
185 	    astats->mutex_prof_data);
186 
187 	nstime_copy(&astats->uptime, &arena->create_time);
188 	nstime_update(&astats->uptime);
189 	nstime_subtract(&astats->uptime, &arena->create_time);
190 
191 	for (szind_t i = 0; i < SC_NBINS; i++) {
192 		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
193 			bin_stats_merge(tsdn, &bstats[i],
194 			    arena_get_bin(arena, i, j));
195 		}
196 	}
197 }
198 
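/*
 * If a background thread is enabled but sleeping indefinitely, check whether
 * newly generated deferred work warrants waking it up.  No-op when called
 * from a background thread itself.
 */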
199 static void
200 arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
201     bool is_background_thread) {
202 	if (!background_thread_enabled() || is_background_thread) {
203 		return;
204 	}
205 	background_thread_info_t *info =
206 	    arena_background_thread_info_get(arena);
207 	if (background_thread_indefinite_sleep(info)) {
208 		arena_maybe_do_deferred_work(tsdn, arena,
209 		    &arena->pa_shard.pac.decay_dirty, 0);
210 	}
211 }
212 
213 /*
214  * React to deferred work generated by a PAI function.
215  */
216 void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena) {
217 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
218 	    WITNESS_RANK_CORE, 0);
219 
220 	if (decay_immediately(&arena->pa_shard.pac.decay_dirty)) {
221 		arena_decay_dirty(tsdn, arena, false, true);
222 	}
223 	arena_background_thread_inactivity_check(tsdn, arena, false);
224 }
225 
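/* Allocate a single region from the given slab, using the slab's bitmap. */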
226 static void *
227 arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) {
228 	void *ret;
229 	slab_data_t *slab_data = edata_slab_data_get(slab);
230 	size_t regind;
231 
232 	assert(edata_nfree_get(slab) > 0);
233 	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
234 
235 	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
236 	ret = (void *)((uintptr_t)edata_addr_get(slab) +
237 	    (uintptr_t)(bin_info->reg_size * regind));
238 	edata_nfree_dec(slab);
239 	return ret;
240 }
241 
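/*
 * Allocate cnt regions from the slab into ptrs.  When popcount is available
 * and tree bitmaps are not in use, regions are pulled out a bitmap group at
 * a time rather than one bit at a time.
 */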
242 static void
243 arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info,
244 			   unsigned cnt, void** ptrs) {
245 	slab_data_t *slab_data = edata_slab_data_get(slab);
246 
247 	assert(edata_nfree_get(slab) >= cnt);
248 	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
249 
250 #if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
251 	for (unsigned i = 0; i < cnt; i++) {
252 		size_t regind = bitmap_sfu(slab_data->bitmap,
253 					   &bin_info->bitmap_info);
254 		*(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) +
255 		    (uintptr_t)(bin_info->reg_size * regind));
256 	}
257 #else
258 	unsigned group = 0;
259 	bitmap_t g = slab_data->bitmap[group];
260 	unsigned i = 0;
261 	while (i < cnt) {
262 		while (g == 0) {
263 			g = slab_data->bitmap[++group];
264 		}
265 		size_t shift = group << LG_BITMAP_GROUP_NBITS;
266 		size_t pop = popcount_lu(g);
267 		if (pop > (cnt - i)) {
268 			pop = cnt - i;
269 		}
270 
271 		/*
272 		 * Load from memory locations only once, outside the
273 		 * hot loop below.
274 		 */
275 		uintptr_t base = (uintptr_t)edata_addr_get(slab);
276 		uintptr_t regsize = (uintptr_t)bin_info->reg_size;
277 		while (pop--) {
278 			size_t bit = cfs_lu(&g);
279 			size_t regind = shift + bit;
280 			*(ptrs + i) = (void *)(base + regsize * regind);
281 
282 			i++;
283 		}
284 		slab_data->bitmap[group] = g;
285 	}
286 #endif
287 	edata_nfree_sub(slab, cnt);
288 }
289 
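/* Bump the large nmalloc counter for usize's size class. */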
290 static void
291 arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
292 	szind_t index, hindex;
293 
294 	cassert(config_stats);
295 
296 	if (usize < SC_LARGE_MINCLASS) {
297 		usize = SC_LARGE_MINCLASS;
298 	}
299 	index = sz_size2index(usize);
300 	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
301 
302 	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
303 	    &arena->stats.lstats[hindex].nmalloc, 1);
304 }
305 
306 static void
307 arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
308 	szind_t index, hindex;
309 
310 	cassert(config_stats);
311 
312 	if (usize < SC_LARGE_MINCLASS) {
313 		usize = SC_LARGE_MINCLASS;
314 	}
315 	index = sz_size2index(usize);
316 	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
317 
318 	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
319 	    &arena->stats.lstats[hindex].ndalloc, 1);
320 }
321 
322 static void
323 arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
324     size_t usize) {
325 	arena_large_malloc_stats_update(tsdn, arena, usize);
326 	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
327 }
328 
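/*
 * Allocate a large extent of usize bytes (plus any large padding) from the
 * page allocator, optionally guarded, and update large-allocation stats.
 */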
329 edata_t *
330 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
331     size_t alignment, bool zero) {
332 	bool deferred_work_generated = false;
333 	szind_t szind = sz_size2index(usize);
334 	size_t esize = usize + sz_large_pad;
335 
336 	bool guarded = san_large_extent_decide_guard(tsdn,
337 	    arena_get_ehooks(arena), esize, alignment);
338 	edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
339 	    /* slab */ false, szind, zero, guarded, &deferred_work_generated);
340 	assert(deferred_work_generated == false);
341 
342 	if (edata != NULL) {
343 		if (config_stats) {
344 			LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
345 			arena_large_malloc_stats_update(tsdn, arena, usize);
346 			LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
347 		}
348 	}
349 
350 	if (edata != NULL && sz_large_pad != 0) {
351 		arena_cache_oblivious_randomize(tsdn, arena, edata, alignment);
352 	}
353 
354 	return edata;
355 }
356 
357 void
358 arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
359 	if (config_stats) {
360 		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
361 		arena_large_dalloc_stats_update(tsdn, arena,
362 		    edata_usize_get(edata));
363 		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
364 	}
365 }
366 
367 void
368 arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
369     size_t oldusize) {
370 	size_t usize = edata_usize_get(edata);
371 
372 	if (config_stats) {
373 		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
374 		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
375 		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
376 	}
377 }
378 
379 void
380 arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
381     size_t oldusize) {
382 	size_t usize = edata_usize_get(edata);
383 
384 	if (config_stats) {
385 		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
386 		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
387 		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
388 	}
389 }
390 
391 /*
392  * In situations where we're not forcing a decay (i.e. the user did not
393  * specifically request it), should we purge ourselves, or wait for the
394  * background thread to get to it?
395  */
396 static pac_purge_eagerness_t
397 arena_decide_unforced_purge_eagerness(bool is_background_thread) {
398 	if (is_background_thread) {
399 		return PAC_PURGE_ALWAYS;
400 	} else if (!is_background_thread && background_thread_enabled()) {
401 		return PAC_PURGE_NEVER;
402 	} else {
403 		return PAC_PURGE_ON_EPOCH_ADVANCE;
404 	}
405 }
406 
407 bool
408 arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
409     ssize_t decay_ms) {
410 	pac_purge_eagerness_t eagerness = arena_decide_unforced_purge_eagerness(
411 	    /* is_background_thread */ false);
412 	return pa_decay_ms_set(tsdn, &arena->pa_shard, state, decay_ms,
413 	    eagerness);
414 }
415 
416 ssize_t
417 arena_decay_ms_get(arena_t *arena, extent_state_t state) {
418 	return pa_decay_ms_get(&arena->pa_shard, state);
419 }
420 
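/*
 * Run decay-based purging for one ecache.  With "all", purge everything
 * under the decay mutex.  Otherwise, try to advance the decay epoch without
 * blocking; returns true if another thread already holds the decay mutex.
 */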
421 static bool
422 arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
423     pac_decay_stats_t *decay_stats, ecache_t *ecache,
424     bool is_background_thread, bool all) {
425 	if (all) {
426 		malloc_mutex_lock(tsdn, &decay->mtx);
427 		pac_decay_all(tsdn, &arena->pa_shard.pac, decay, decay_stats,
428 		    ecache, /* fully_decay */ all);
429 		malloc_mutex_unlock(tsdn, &decay->mtx);
430 		return false;
431 	}
432 
433 	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
434 		/* No need to wait if another thread is in progress. */
435 		return true;
436 	}
437 	pac_purge_eagerness_t eagerness =
438 	    arena_decide_unforced_purge_eagerness(is_background_thread);
439 	bool epoch_advanced = pac_maybe_decay_purge(tsdn, &arena->pa_shard.pac,
440 	    decay, decay_stats, ecache, eagerness);
441 	size_t npages_new;
442 	if (epoch_advanced) {
443 		/* Backlog is updated on epoch advance. */
444 		npages_new = decay_epoch_npages_delta(decay);
445 	}
446 	malloc_mutex_unlock(tsdn, &decay->mtx);
447 
448 	if (have_background_thread && background_thread_enabled() &&
449 	    epoch_advanced && !is_background_thread) {
450 		arena_maybe_do_deferred_work(tsdn, arena, decay, npages_new);
451 	}
452 
453 	return false;
454 }
455 
456 static bool
457 arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
458     bool all) {
459 	return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
460 	    &arena->pa_shard.pac.stats->decay_dirty,
461 	    &arena->pa_shard.pac.ecache_dirty, is_background_thread, all);
462 }
463 
464 static bool
465 arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
466     bool all) {
467 	if (pa_shard_dont_decay_muzzy(&arena->pa_shard)) {
468 		return false;
469 	}
470 	return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
471 	    &arena->pa_shard.pac.stats->decay_muzzy,
472 	    &arena->pa_shard.pac.ecache_muzzy, is_background_thread, all);
473 }
474 
475 void
476 arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
477 	if (all) {
478 		/*
479 		 * We should take a purge of "all" to mean "save as much memory
480 		 * as possible", including flushing any caches (for situations
481 		 * like thread death, or manual purge calls).
482 		 */
483 		sec_flush(tsdn, &arena->pa_shard.hpa_sec);
484 	}
485 	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
486 		return;
487 	}
488 	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
489 }
490 
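/*
 * Estimate how many of the newly created dirty pages would become due for
 * purging before the background thread's scheduled wakeup; returns true once
 * that accumulated backlog exceeds ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD.
 */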
491 static bool
492 arena_should_decay_early(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
493     background_thread_info_t *info, nstime_t *remaining_sleep,
494     size_t npages_new) {
495 	malloc_mutex_assert_owner(tsdn, &info->mtx);
496 
497 	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
498 		return false;
499 	}
500 
501 	if (!decay_gradually(decay)) {
502 		malloc_mutex_unlock(tsdn, &decay->mtx);
503 		return false;
504 	}
505 
506 	nstime_init(remaining_sleep, background_thread_wakeup_time_get(info));
507 	if (nstime_compare(remaining_sleep, &decay->epoch) <= 0) {
508 		malloc_mutex_unlock(tsdn, &decay->mtx);
509 		return false;
510 	}
511 	nstime_subtract(remaining_sleep, &decay->epoch);
512 	if (npages_new > 0) {
513 		uint64_t npurge_new = decay_npages_purge_in(decay,
514 		    remaining_sleep, npages_new);
515 		info->npages_to_purge_new += npurge_new;
516 	}
517 	malloc_mutex_unlock(tsdn, &decay->mtx);
518 	return info->npages_to_purge_new >
519 	    ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD;
520 }
521 
522 /*
523  * Check if deferred work needs to be done sooner than planned.
524  * For decay we might want to wake up earlier because of an influx of dirty
525  * pages.  Rather than waiting for the previously estimated time, we proactively
526  * purge those pages.
527  * If the background thread sleeps indefinitely, always wake up because some
528  * deferred work has been generated.
529  */
530 static void
531 arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
532     size_t npages_new) {
533 	background_thread_info_t *info = arena_background_thread_info_get(
534 	    arena);
535 	if (malloc_mutex_trylock(tsdn, &info->mtx)) {
536 		/*
537 		 * The background thread may hold the mutex for a long period of
538 		 * time.  We'd like to avoid the variance on application
539 		 * threads.  So keep this non-blocking, and leave the work to a
540 		 * future epoch.
541 		 */
542 		return;
543 	}
544 	if (!background_thread_is_started(info)) {
545 		goto label_done;
546 	}
547 
548 	nstime_t remaining_sleep;
549 	if (background_thread_indefinite_sleep(info)) {
550 		background_thread_wakeup_early(info, NULL);
551 	} else if (arena_should_decay_early(tsdn, arena, decay, info,
552 	    &remaining_sleep, npages_new)) {
553 		info->npages_to_purge_new = 0;
554 		background_thread_wakeup_early(info, &remaining_sleep);
555 	}
556 label_done:
557 	malloc_mutex_unlock(tsdn, &info->mtx);
558 }
559 
560 /* Called from background threads. */
561 void
562 arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena) {
563 	arena_decay(tsdn, arena, true, false);
564 	pa_shard_do_deferred_work(tsdn, &arena->pa_shard);
565 }
566 
567 void
568 arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
569 	bool deferred_work_generated = false;
570 	pa_dalloc(tsdn, &arena->pa_shard, slab, &deferred_work_generated);
571 	if (deferred_work_generated) {
572 		arena_handle_deferred_work(tsdn, arena);
573 	}
574 }
575 
576 static void
577 arena_bin_slabs_nonfull_insert(bin_t *bin, edata_t *slab) {
578 	assert(edata_nfree_get(slab) > 0);
579 	edata_heap_insert(&bin->slabs_nonfull, slab);
580 	if (config_stats) {
581 		bin->stats.nonfull_slabs++;
582 	}
583 }
584 
585 static void
586 arena_bin_slabs_nonfull_remove(bin_t *bin, edata_t *slab) {
587 	edata_heap_remove(&bin->slabs_nonfull, slab);
588 	if (config_stats) {
589 		bin->stats.nonfull_slabs--;
590 	}
591 }
592 
593 static edata_t *
594 arena_bin_slabs_nonfull_tryget(bin_t *bin) {
595 	edata_t *slab = edata_heap_remove_first(&bin->slabs_nonfull);
596 	if (slab == NULL) {
597 		return NULL;
598 	}
599 	if (config_stats) {
600 		bin->stats.reslabs++;
601 		bin->stats.nonfull_slabs--;
602 	}
603 	return slab;
604 }
605 
606 static void
607 arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, edata_t *slab) {
608 	assert(edata_nfree_get(slab) == 0);
609 	/*
610 	 * Tracking extents is required by arena_reset, which is not allowed
611 	 * for auto arenas.  Bypass this step to avoid touching the edata
612 	 * linkage (which often results in cache misses) for auto arenas.
613 	 */
614 	if (arena_is_auto(arena)) {
615 		return;
616 	}
617 	edata_list_active_append(&bin->slabs_full, slab);
618 }
619 
620 static void
621 arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, edata_t *slab) {
622 	if (arena_is_auto(arena)) {
623 		return;
624 	}
625 	edata_list_active_remove(&bin->slabs_full, slab);
626 }
627 
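/*
 * Release all of a bin's slabs (current, nonfull, and full) back to the page
 * allocator and zero its region/slab counters.  Used by arena_reset.
 */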
628 static void
629 arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
630 	edata_t *slab;
631 
632 	malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
633 	if (bin->slabcur != NULL) {
634 		slab = bin->slabcur;
635 		bin->slabcur = NULL;
636 		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
637 		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
638 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
639 	}
640 	while ((slab = edata_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
641 		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
642 		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
643 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
644 	}
645 	for (slab = edata_list_active_first(&bin->slabs_full); slab != NULL;
646 	     slab = edata_list_active_first(&bin->slabs_full)) {
647 		arena_bin_slabs_full_remove(arena, bin, slab);
648 		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
649 		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
650 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
651 	}
652 	if (config_stats) {
653 		bin->stats.curregs = 0;
654 		bin->stats.curslabs = 0;
655 	}
656 	malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
657 }
658 
659 void
660 arena_reset(tsd_t *tsd, arena_t *arena) {
661 	/*
662 	 * Locking in this function is unintuitive.  The caller guarantees that
663 	 * no concurrent operations are happening in this arena, but there are
664 	 * still reasons that some locking is necessary:
665 	 *
666 	 * - Some of the functions in the transitive closure of calls assume
667 	 *   appropriate locks are held, and in some cases these locks are
668 	 *   temporarily dropped to avoid lock order reversal or deadlock due to
669 	 *   reentry.
670 	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
671 	 *   strictly speaking this is a "concurrent operation", disallowing
672 	 *   stats refreshes would impose an inconvenient burden.
673 	 */
674 
675 	/* Large allocations. */
676 	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
677 
678 	for (edata_t *edata = edata_list_active_first(&arena->large);
679 	    edata != NULL; edata = edata_list_active_first(&arena->large)) {
680 		void *ptr = edata_base_get(edata);
681 		size_t usize;
682 
683 		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
684 		emap_alloc_ctx_t alloc_ctx;
685 		emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
686 		    &alloc_ctx);
687 		assert(alloc_ctx.szind != SC_NSIZES);
688 
689 		if (config_stats || (config_prof && opt_prof)) {
690 			usize = sz_index2size(alloc_ctx.szind);
691 			assert(usize == isalloc(tsd_tsdn(tsd), ptr));
692 		}
693 		/* Remove large allocation from prof sample set. */
694 		if (config_prof && opt_prof) {
695 			prof_free(tsd, ptr, usize, &alloc_ctx);
696 		}
697 		large_dalloc(tsd_tsdn(tsd), edata);
698 		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
699 	}
700 	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
701 
702 	/* Bins. */
703 	for (unsigned i = 0; i < SC_NBINS; i++) {
704 		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
705 			arena_bin_reset(tsd, arena, arena_get_bin(arena, i, j));
706 		}
707 	}
708 	pa_shard_reset(tsd_tsdn(tsd), &arena->pa_shard);
709 }
710 
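/*
 * Lock and immediately unlock each delayed mutex, so that any thread that
 * was holding it (and hence possibly reading this arena's metadata) is known
 * to have released it.
 */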
711 static void
712 arena_prepare_base_deletion_sync_finish(tsd_t *tsd, malloc_mutex_t **mutexes,
713     unsigned n_mtx) {
714 	for (unsigned i = 0; i < n_mtx; i++) {
715 		malloc_mutex_lock(tsd_tsdn(tsd), mutexes[i]);
716 		malloc_mutex_unlock(tsd_tsdn(tsd), mutexes[i]);
717 	}
718 }
719 
720 #define ARENA_DESTROY_MAX_DELAYED_MTX 32
721 static void
722 arena_prepare_base_deletion_sync(tsd_t *tsd, malloc_mutex_t *mtx,
723     malloc_mutex_t **delayed_mtx, unsigned *n_delayed) {
724 	if (!malloc_mutex_trylock(tsd_tsdn(tsd), mtx)) {
725 		/* No contention. */
726 		malloc_mutex_unlock(tsd_tsdn(tsd), mtx);
727 		return;
728 	}
729 	unsigned n = *n_delayed;
730 	assert(n < ARENA_DESTROY_MAX_DELAYED_MTX);
731 	/* Add another to the batch. */
732 	delayed_mtx[n++] = mtx;
733 
734 	if (n == ARENA_DESTROY_MAX_DELAYED_MTX) {
735 		arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n);
736 		n = 0;
737 	}
738 	*n_delayed = n;
739 }
740 
741 static void
742 arena_prepare_base_deletion(tsd_t *tsd, base_t *base_to_destroy) {
743 	/*
744 	 * In order to coalesce, emap_try_acquire_edata_neighbor will attempt to
745 	 * check neighbor edata's state to determine eligibility.  This means
746 	 * under certain conditions, the metadata from an arena can be accessed
747 	 * w/o holding any locks from that arena.  In order to guarantee safe
748 	 * memory access, the metadata and the underlying base allocator need
749 	 * to be kept alive until all pending accesses are done.
750 	 *
751 	 * 1) with opt_retain, the arena boundary implies the is_head state
752 	 * (tracked in the rtree leaf), and the coalesce flow will stop at the
753 	 * head state branch.  Therefore no cross-arena metadata access is
754 	 * possible.
755 	 *
756 	 * 2) w/o opt_retain, the arena id needs to be read from the edata_t,
757 	 * meaning read only cross-arena metadata access is possible.  The
758 	 * coalesce attempt will stop at the arena_id mismatch, and is always
759 	 * under one of the ecache locks.  To allow safe passthrough of such
760 	 * metadata accesses, the loop below will iterate through all manual
761 	 * arenas' ecache locks.  As all the metadata from this base allocator
762 	 * have been unlinked from the rtree, after going through all the
763 	 * relevant ecache locks, it's safe to say that a) pending accesses are
764 	 * all finished, and b) no new access will be generated.
765 	 */
766 	if (opt_retain) {
767 		return;
768 	}
769 	unsigned destroy_ind = base_ind_get(base_to_destroy);
770 	assert(destroy_ind >= manual_arena_base);
771 
772 	tsdn_t *tsdn = tsd_tsdn(tsd);
773 	malloc_mutex_t *delayed_mtx[ARENA_DESTROY_MAX_DELAYED_MTX];
774 	unsigned n_delayed = 0, total = narenas_total_get();
775 	for (unsigned i = 0; i < total; i++) {
776 		if (i == destroy_ind) {
777 			continue;
778 		}
779 		arena_t *arena = arena_get(tsdn, i, false);
780 		if (arena == NULL) {
781 			continue;
782 		}
783 		pac_t *pac = &arena->pa_shard.pac;
784 		arena_prepare_base_deletion_sync(tsd, &pac->ecache_dirty.mtx,
785 		    delayed_mtx, &n_delayed);
786 		arena_prepare_base_deletion_sync(tsd, &pac->ecache_muzzy.mtx,
787 		    delayed_mtx, &n_delayed);
788 		arena_prepare_base_deletion_sync(tsd, &pac->ecache_retained.mtx,
789 		    delayed_mtx, &n_delayed);
790 	}
791 	arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n_delayed);
792 }
793 #undef ARENA_DESTROY_MAX_DELAYED_MTX
794 
795 void
796 arena_destroy(tsd_t *tsd, arena_t *arena) {
797 	assert(base_ind_get(arena->base) >= narenas_auto);
798 	assert(arena_nthreads_get(arena, false) == 0);
799 	assert(arena_nthreads_get(arena, true) == 0);
800 
801 	/*
802 	 * No allocations have occurred since arena_reset() was called.
803 	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
804 	 * extents, so only retained extents may remain and it's safe to call
805 	 * pa_shard_destroy_retained.
806 	 */
807 	pa_shard_destroy(tsd_tsdn(tsd), &arena->pa_shard);
808 
809 	/*
810 	 * Remove the arena pointer from the arenas array.  We rely on the fact
811 	 * that there is no way for the application to get a dirty read from the
812 	 * arenas array unless there is an inherent race in the application
813 	 * involving access of an arena being concurrently destroyed.  The
814 	 * application must synchronize knowledge of the arena's validity, so as
815 	 * long as we use an atomic write to update the arenas array, the
816 	 * application will get a clean read any time after it synchronizes
817 	 * knowledge that the arena is no longer valid.
818 	 */
819 	arena_set(base_ind_get(arena->base), NULL);
820 
821 	/*
822 	 * Destroy the base allocator, which manages all metadata ever mapped by
823 	 * this arena.  The prepare function will make sure there is no pending
824 	 * access to the metadata in this base anymore.
825 	 */
826 	arena_prepare_base_deletion(tsd, arena->base);
827 	base_delete(tsd_tsdn(tsd), arena->base);
828 }
829 
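/*
 * Allocate and initialize a fresh slab for the given bin: obtain pages from
 * the page allocator, then set up the free-region count, bin shard, and
 * region bitmap.
 */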
830 static edata_t *
831 arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
832     const bin_info_t *bin_info) {
833 	bool deferred_work_generated = false;
834 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
835 	    WITNESS_RANK_CORE, 0);
836 
837 	bool guarded = san_slab_extent_decide_guard(tsdn,
838 	    arena_get_ehooks(arena));
839 	edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
840 	    /* alignment */ PAGE, /* slab */ true, /* szind */ binind,
841 	     /* zero */ false, guarded, &deferred_work_generated);
842 
843 	if (deferred_work_generated) {
844 		arena_handle_deferred_work(tsdn, arena);
845 	}
846 
847 	if (slab == NULL) {
848 		return NULL;
849 	}
850 	assert(edata_slab_get(slab));
851 
852 	/* Initialize slab internals. */
853 	slab_data_t *slab_data = edata_slab_data_get(slab);
854 	edata_nfree_binshard_set(slab, bin_info->nregs, binshard);
855 	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
856 
857 	return slab;
858 }
859 
860 /*
861  * The _no_fresh_slab variants (i.e. through slabcur and nonfull) must be
862  * tried before attempting the _with_fresh_slab approaches below.
863  */
864 static void
865 arena_bin_refill_slabcur_with_fresh_slab(tsdn_t *tsdn, arena_t *arena,
866     bin_t *bin, szind_t binind, edata_t *fresh_slab) {
867 	malloc_mutex_assert_owner(tsdn, &bin->lock);
868 	/* Only called after slabcur and nonfull both failed. */
869 	assert(bin->slabcur == NULL);
870 	assert(edata_heap_first(&bin->slabs_nonfull) == NULL);
871 	assert(fresh_slab != NULL);
872 
873 	/* A new slab from arena_slab_alloc() */
874 	assert(edata_nfree_get(fresh_slab) == bin_infos[binind].nregs);
875 	if (config_stats) {
876 		bin->stats.nslabs++;
877 		bin->stats.curslabs++;
878 	}
879 	bin->slabcur = fresh_slab;
880 }
881 
882 /* Refill slabcur and then alloc using the fresh slab */
883 static void *
884 arena_bin_malloc_with_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
885     szind_t binind, edata_t *fresh_slab) {
886 	malloc_mutex_assert_owner(tsdn, &bin->lock);
887 	arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena, bin, binind,
888 	    fresh_slab);
889 
890 	return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
891 }
892 
893 static bool
894 arena_bin_refill_slabcur_no_fresh_slab(tsdn_t *tsdn, arena_t *arena,
895     bin_t *bin) {
896 	malloc_mutex_assert_owner(tsdn, &bin->lock);
897 	/* Only called after arena_slab_reg_alloc[_batch] failed. */
898 	assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0);
899 
900 	if (bin->slabcur != NULL) {
901 		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
902 	}
903 
904 	/* Look for a usable slab. */
905 	bin->slabcur = arena_bin_slabs_nonfull_tryget(bin);
906 	assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) > 0);
907 
908 	return (bin->slabcur == NULL);
909 }
910 
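/*
 * Pick the bin shard for the current thread (its TSD-assigned shard, or
 * shard 0 when TSD is unavailable) and return the corresponding bin.
 */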
911 bin_t *
912 arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
913     unsigned *binshard_p) {
914 	unsigned binshard;
915 	if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
916 		binshard = 0;
917 	} else {
918 		binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
919 	}
920 	assert(binshard < bin_infos[binind].n_shards);
921 	if (binshard_p != NULL) {
922 		*binshard_p = binshard;
923 	}
924 	return arena_get_bin(arena, binind, binshard);
925 }
926 
927 void
928 arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
929     cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
930     const unsigned nfill) {
931 	assert(cache_bin_ncached_get_local(cache_bin, cache_bin_info) == 0);
932 
933 	const bin_info_t *bin_info = &bin_infos[binind];
934 
935 	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill);
936 	cache_bin_init_ptr_array_for_fill(cache_bin, cache_bin_info, &ptrs,
937 	    nfill);
938 	/*
939 	 * Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull
940 	 * slabs.  After both are exhausted, new slabs will be allocated through
941 	 * arena_slab_alloc().
942 	 *
943 	 * Bin lock is only taken / released right before / after the while(...)
944 	 * refill loop, with new slab allocation (which has its own locking)
945 	 * kept outside of the loop.  This setup facilitates flat combining, at
946 	 * the cost of the nested loop (through goto label_refill).
947 	 *
948 	 * To optimize for cases with contention and limited resources
949 	 * (e.g. hugepage-backed or non-overcommit arenas), each fill-iteration
950 	 * gets one chance of slab_alloc, and a retry of bin local resources
951 	 * after the slab allocation (regardless of whether slab_alloc failed,
952 	 * because the bin lock is dropped during the slab allocation).
953 	 *
954 	 * In other words, new slab allocation is allowed, as long as there was
955 	 * progress since the previous slab_alloc.  This is tracked with
956 	 * made_progress below, initialized to true to jump start the first
957 	 * iteration.
958 	 *
959 	 * In other words (again), the loop will only terminate early (i.e. stop
960 	 * with filled < nfill) after going through the three steps: a) bin
961 	 * local exhausted, b) unlock and slab_alloc returns null, c) re-lock
962 	 * and bin local fails again.
963 	 */
964 	bool made_progress = true;
965 	edata_t *fresh_slab = NULL;
966 	bool alloc_and_retry = false;
967 	unsigned filled = 0;
968 	unsigned binshard;
969 	bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
970 
971 label_refill:
972 	malloc_mutex_lock(tsdn, &bin->lock);
973 
974 	while (filled < nfill) {
975 		/* Try batch-fill from slabcur first. */
976 		edata_t *slabcur = bin->slabcur;
977 		if (slabcur != NULL && edata_nfree_get(slabcur) > 0) {
978 			unsigned tofill = nfill - filled;
979 			unsigned nfree = edata_nfree_get(slabcur);
980 			unsigned cnt = tofill < nfree ? tofill : nfree;
981 
982 			arena_slab_reg_alloc_batch(slabcur, bin_info, cnt,
983 			    &ptrs.ptr[filled]);
984 			made_progress = true;
985 			filled += cnt;
986 			continue;
987 		}
988 		/* Next try refilling slabcur from nonfull slabs. */
989 		if (!arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
990 			assert(bin->slabcur != NULL);
991 			continue;
992 		}
993 
994 		/* Then see if a new slab was reserved already. */
995 		if (fresh_slab != NULL) {
996 			arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena,
997 			    bin, binind, fresh_slab);
998 			assert(bin->slabcur != NULL);
999 			fresh_slab = NULL;
1000 			continue;
1001 		}
1002 
1003 		/* Try slab_alloc if made progress (or never did slab_alloc). */
1004 		if (made_progress) {
1005 			assert(bin->slabcur == NULL);
1006 			assert(fresh_slab == NULL);
1007 			alloc_and_retry = true;
1008 			/* Alloc a new slab then come back. */
1009 			break;
1010 		}
1011 
1012 		/* OOM. */
1013 
1014 		assert(fresh_slab == NULL);
1015 		assert(!alloc_and_retry);
1016 		break;
1017 	} /* while (filled < nfill) loop. */
1018 
1019 	if (config_stats && !alloc_and_retry) {
1020 		bin->stats.nmalloc += filled;
1021 		bin->stats.nrequests += cache_bin->tstats.nrequests;
1022 		bin->stats.curregs += filled;
1023 		bin->stats.nfills++;
1024 		cache_bin->tstats.nrequests = 0;
1025 	}
1026 
1027 	malloc_mutex_unlock(tsdn, &bin->lock);
1028 
1029 	if (alloc_and_retry) {
1030 		assert(fresh_slab == NULL);
1031 		assert(filled < nfill);
1032 		assert(made_progress);
1033 
1034 		fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
1035 		    bin_info);
1036 		/* fresh_slab NULL case handled in the refill loop. */
1037 
1038 		alloc_and_retry = false;
1039 		made_progress = false;
1040 		goto label_refill;
1041 	}
1042 	assert(filled == nfill || (fresh_slab == NULL && !made_progress));
1043 
1044 	/* Release if allocated but not used. */
1045 	if (fresh_slab != NULL) {
1046 		assert(edata_nfree_get(fresh_slab) == bin_info->nregs);
1047 		arena_slab_dalloc(tsdn, arena, fresh_slab);
1048 		fresh_slab = NULL;
1049 	}
1050 
1051 	cache_bin_finish_fill(cache_bin, cache_bin_info, &ptrs, filled);
1052 	arena_decay_tick(tsdn, arena);
1053 }
1054 
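/*
 * Fill ptrs with up to nfill regions of size class binind, carved exclusively
 * out of freshly allocated slabs (bin-local slabs are not consulted); the new
 * slabs are then accounted to the chosen bin.  Returns the number of regions
 * actually filled, which may fall short of nfill on OOM.
 */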
1055 size_t
1056 arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
1057     void **ptrs, size_t nfill, bool zero) {
1058 	assert(binind < SC_NBINS);
1059 	const bin_info_t *bin_info = &bin_infos[binind];
1060 	const size_t nregs = bin_info->nregs;
1061 	assert(nregs > 0);
1062 	const size_t usize = bin_info->reg_size;
1063 
1064 	const bool manual_arena = !arena_is_auto(arena);
1065 	unsigned binshard;
1066 	bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
1067 
1068 	size_t nslab = 0;
1069 	size_t filled = 0;
1070 	edata_t *slab = NULL;
1071 	edata_list_active_t fulls;
1072 	edata_list_active_init(&fulls);
1073 
1074 	while (filled < nfill && (slab = arena_slab_alloc(tsdn, arena, binind,
1075 	    binshard, bin_info)) != NULL) {
1076 		assert((size_t)edata_nfree_get(slab) == nregs);
1077 		++nslab;
1078 		size_t batch = nfill - filled;
1079 		if (batch > nregs) {
1080 			batch = nregs;
1081 		}
1082 		assert(batch > 0);
1083 		arena_slab_reg_alloc_batch(slab, bin_info, (unsigned)batch,
1084 		    &ptrs[filled]);
1085 		assert(edata_addr_get(slab) == ptrs[filled]);
1086 		if (zero) {
1087 			memset(ptrs[filled], 0, batch * usize);
1088 		}
1089 		filled += batch;
1090 		if (batch == nregs) {
1091 			if (manual_arena) {
1092 				edata_list_active_append(&fulls, slab);
1093 			}
1094 			slab = NULL;
1095 		}
1096 	}
1097 
1098 	malloc_mutex_lock(tsdn, &bin->lock);
1099 	/*
1100 	 * Only the last slab can still have free regions (i.e. be non-full),
1101 	 * and that is the case iff slab != NULL.
1102 	 */
1103 	if (slab != NULL) {
1104 		arena_bin_lower_slab(tsdn, arena, slab, bin);
1105 	}
1106 	if (manual_arena) {
1107 		edata_list_active_concat(&bin->slabs_full, &fulls);
1108 	}
1109 	assert(edata_list_active_empty(&fulls));
1110 	if (config_stats) {
1111 		bin->stats.nslabs += nslab;
1112 		bin->stats.curslabs += nslab;
1113 		bin->stats.nmalloc += filled;
1114 		bin->stats.nrequests += filled;
1115 		bin->stats.curregs += filled;
1116 	}
1117 	malloc_mutex_unlock(tsdn, &bin->lock);
1118 
1119 	arena_decay_tick(tsdn, arena);
1120 	return filled;
1121 }
1122 
1123 /*
1124  * Without allocating a new slab, try arena_slab_reg_alloc() and re-fill
1125  * bin->slabcur if necessary.
1126  */
1127 static void *
1128 arena_bin_malloc_no_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
1129     szind_t binind) {
1130 	malloc_mutex_assert_owner(tsdn, &bin->lock);
1131 	if (bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0) {
1132 		if (arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
1133 			return NULL;
1134 		}
1135 	}
1136 
1137 	assert(bin->slabcur != NULL && edata_nfree_get(bin->slabcur) > 0);
1138 	return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
1139 }
1140 
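/*
 * Allocate one small region of size class binind directly from the arena
 * (bypassing the tcache), allocating a fresh slab if the bin-local slabs are
 * exhausted.
 */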
1141 static void *
1142 arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
1143 	assert(binind < SC_NBINS);
1144 	const bin_info_t *bin_info = &bin_infos[binind];
1145 	size_t usize = sz_index2size(binind);
1146 	unsigned binshard;
1147 	bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
1148 
1149 	malloc_mutex_lock(tsdn, &bin->lock);
1150 	edata_t *fresh_slab = NULL;
1151 	void *ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
1152 	if (ret == NULL) {
1153 		malloc_mutex_unlock(tsdn, &bin->lock);
1154 		/******************************/
1155 		fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
1156 		    bin_info);
1157 		/********************************/
1158 		malloc_mutex_lock(tsdn, &bin->lock);
1159 		/* Retry since the lock was dropped. */
1160 		ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
1161 		if (ret == NULL) {
1162 			if (fresh_slab == NULL) {
1163 				/* OOM */
1164 				malloc_mutex_unlock(tsdn, &bin->lock);
1165 				return NULL;
1166 			}
1167 			ret = arena_bin_malloc_with_fresh_slab(tsdn, arena, bin,
1168 			    binind, fresh_slab);
1169 			fresh_slab = NULL;
1170 		}
1171 	}
1172 	if (config_stats) {
1173 		bin->stats.nmalloc++;
1174 		bin->stats.nrequests++;
1175 		bin->stats.curregs++;
1176 	}
1177 	malloc_mutex_unlock(tsdn, &bin->lock);
1178 
1179 	if (fresh_slab != NULL) {
1180 		arena_slab_dalloc(tsdn, arena, fresh_slab);
1181 	}
1182 	if (zero) {
1183 		memset(ret, 0, usize);
1184 	}
1185 	arena_decay_tick(tsdn, arena);
1186 
1187 	return ret;
1188 }
1189 
1190 void *
1191 arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
1192     bool zero) {
1193 	assert(!tsdn_null(tsdn) || arena != NULL);
1194 
1195 	if (likely(!tsdn_null(tsdn))) {
1196 		arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size);
1197 	}
1198 	if (unlikely(arena == NULL)) {
1199 		return NULL;
1200 	}
1201 
1202 	if (likely(size <= SC_SMALL_MAXCLASS)) {
1203 		return arena_malloc_small(tsdn, arena, ind, zero);
1204 	}
1205 	return large_malloc(tsdn, arena, sz_index2size(ind), zero);
1206 }
1207 
1208 void *
1209 arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
1210     bool zero, tcache_t *tcache) {
1211 	void *ret;
1212 
1213 	if (usize <= SC_SMALL_MAXCLASS) {
1214 		/* Small; alignment doesn't require special slab placement. */
1215 
1216 		/* usize should be a result of sz_sa2u() */
1217 		assert((usize & (alignment - 1)) == 0);
1218 
1219 		/*
1220 		 * Small usize can't come from an alignment larger than a page.
1221 		 */
1222 		assert(alignment <= PAGE);
1223 
1224 		ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
1225 		    zero, tcache, true);
1226 	} else {
1227 		if (likely(alignment <= CACHELINE)) {
1228 			ret = large_malloc(tsdn, arena, usize, zero);
1229 		} else {
1230 			ret = large_palloc(tsdn, arena, usize, alignment, zero);
1231 		}
1232 	}
1233 	return ret;
1234 }
1235 
1236 void
1237 arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
1238 	cassert(config_prof);
1239 	assert(ptr != NULL);
1240 	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
1241 	assert(usize <= SC_SMALL_MAXCLASS);
1242 
1243 	if (config_opt_safety_checks) {
1244 		safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
1245 	}
1246 
1247 	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
1248 
1249 	szind_t szind = sz_size2index(usize);
1250 	edata_szind_set(edata, szind);
1251 	emap_remap(tsdn, &arena_emap_global, edata, szind, /* slab */ false);
1252 
1253 	assert(isalloc(tsdn, ptr) == usize);
1254 }
1255 
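/*
 * Undo arena_prof_promote(): restore the extent's recorded size class to the
 * smallest large class (SC_NBINS) and return the actual backing size,
 * SC_LARGE_MINCLASS.
 */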
1256 static size_t
1257 arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
1258 	cassert(config_prof);
1259 	assert(ptr != NULL);
1260 
1261 	edata_szind_set(edata, SC_NBINS);
1262 	emap_remap(tsdn, &arena_emap_global, edata, SC_NBINS, /* slab */ false);
1263 
1264 	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
1265 
1266 	return SC_LARGE_MINCLASS;
1267 }
1268 
1269 void
1270 arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
1271     bool slow_path) {
1272 	cassert(config_prof);
1273 	assert(opt_prof);
1274 
1275 	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
1276 	size_t usize = edata_usize_get(edata);
1277 	size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr);
1278 	if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
1279 		/*
1280 		 * Currently, we only do redzoning for small sampled
1281 		 * allocations.
1282 		 */
1283 		assert(bumped_usize == SC_LARGE_MINCLASS);
1284 		safety_check_verify_redzone(ptr, usize, bumped_usize);
1285 	}
1286 	if (bumped_usize <= tcache_maxclass && tcache != NULL) {
1287 		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
1288 		    sz_size2index(bumped_usize), slow_path);
1289 	} else {
1290 		large_dalloc(tsdn, edata);
1291 	}
1292 }
1293 
1294 static void
1295 arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) {
1296 	/* Dissociate slab from bin. */
1297 	if (slab == bin->slabcur) {
1298 		bin->slabcur = NULL;
1299 	} else {
1300 		szind_t binind = edata_szind_get(slab);
1301 		const bin_info_t *bin_info = &bin_infos[binind];
1302 
1303 		/*
1304 		 * The following block's conditional is necessary because if the
1305 		 * slab only contains one region, then it never gets inserted
1306 		 * into the non-full slabs heap.
1307 		 */
1308 		if (bin_info->nregs == 1) {
1309 			arena_bin_slabs_full_remove(arena, bin, slab);
1310 		} else {
1311 			arena_bin_slabs_nonfull_remove(bin, slab);
1312 		}
1313 	}
1314 }
1315 
1316 static void
1317 arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
1318     bin_t *bin) {
1319 	assert(edata_nfree_get(slab) > 0);
1320 
1321 	/*
1322 	 * Make sure that if bin->slabcur is non-NULL, it refers to the
1323 	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
1324 	 * than proactively keeping it pointing at the oldest/lowest non-full
1325 	 * slab.
1326 	 */
1327 	if (bin->slabcur != NULL && edata_snad_comp(bin->slabcur, slab) > 0) {
1328 		/* Switch slabcur. */
1329 		if (edata_nfree_get(bin->slabcur) > 0) {
1330 			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
1331 		} else {
1332 			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1333 		}
1334 		bin->slabcur = slab;
1335 		if (config_stats) {
1336 			bin->stats.reslabs++;
1337 		}
1338 	} else {
1339 		arena_bin_slabs_nonfull_insert(bin, slab);
1340 	}
1341 }
1342 
1343 static void
1344 arena_dalloc_bin_slab_prepare(tsdn_t *tsdn, edata_t *slab, bin_t *bin) {
1345 	malloc_mutex_assert_owner(tsdn, &bin->lock);
1346 
1347 	assert(slab != bin->slabcur);
1348 	if (config_stats) {
1349 		bin->stats.curslabs--;
1350 	}
1351 }
1352 
1353 void
1354 arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
1355     edata_t *slab, bin_t *bin) {
1356 	arena_dissociate_bin_slab(arena, slab, bin);
1357 	arena_dalloc_bin_slab_prepare(tsdn, slab, bin);
1358 }
1359 
1360 void
1361 arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
1362     edata_t *slab, bin_t *bin) {
1363 	arena_bin_slabs_full_remove(arena, bin, slab);
1364 	arena_bin_lower_slab(tsdn, arena, slab, bin);
1365 }
1366 
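/*
 * Return a small region to its bin under the bin lock; if this empties the
 * slab, give the slab back to the page allocator afterwards.
 */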
1367 static void
1368 arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
1369 	szind_t binind = edata_szind_get(edata);
1370 	unsigned binshard = edata_binshard_get(edata);
1371 	bin_t *bin = arena_get_bin(arena, binind, binshard);
1372 
1373 	malloc_mutex_lock(tsdn, &bin->lock);
1374 	arena_dalloc_bin_locked_info_t info;
1375 	arena_dalloc_bin_locked_begin(&info, binind);
1376 	bool ret = arena_dalloc_bin_locked_step(tsdn, arena, bin,
1377 	    &info, binind, edata, ptr);
1378 	arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
1379 	malloc_mutex_unlock(tsdn, &bin->lock);
1380 
1381 	if (ret) {
1382 		arena_slab_dalloc(tsdn, arena, edata);
1383 	}
1384 }
1385 
1386 void
1387 arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
1388 	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
1389 	arena_t *arena = arena_get_from_edata(edata);
1390 
1391 	arena_dalloc_bin(tsdn, arena, edata, ptr);
1392 	arena_decay_tick(tsdn, arena);
1393 }
1394 
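/*
 * Try to satisfy a resize request without moving the allocation.  *newsize
 * receives the resulting (possibly unchanged) usable size; returns true iff
 * the allocation has to be moved to satisfy the request.
 */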
1395 bool
1396 arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
1397     size_t extra, bool zero, size_t *newsize) {
1398 	bool ret;
1399 	/* Calls with non-zero extra had to clamp extra. */
1400 	assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
1401 
1402 	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
1403 	if (unlikely(size > SC_LARGE_MAXCLASS)) {
1404 		ret = true;
1405 		goto done;
1406 	}
1407 
1408 	size_t usize_min = sz_s2u(size);
1409 	size_t usize_max = sz_s2u(size + extra);
1410 	if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min
1411 	    <= SC_SMALL_MAXCLASS)) {
1412 		/*
1413 		 * Avoid moving the allocation if the size class can be left the
1414 		 * same.
1415 		 */
1416 		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
1417 		    oldsize);
1418 		if ((usize_max > SC_SMALL_MAXCLASS
1419 		    || sz_size2index(usize_max) != sz_size2index(oldsize))
1420 		    && (size > oldsize || usize_max < oldsize)) {
1421 			ret = true;
1422 			goto done;
1423 		}
1424 
1425 		arena_t *arena = arena_get_from_edata(edata);
1426 		arena_decay_tick(tsdn, arena);
1427 		ret = false;
1428 	} else if (oldsize >= SC_LARGE_MINCLASS
1429 	    && usize_max >= SC_LARGE_MINCLASS) {
1430 		ret = large_ralloc_no_move(tsdn, edata, usize_min, usize_max,
1431 		    zero);
1432 	} else {
1433 		ret = true;
1434 	}
1435 done:
1436 	assert(edata == emap_edata_lookup(tsdn, &arena_emap_global, ptr));
1437 	*newsize = edata_usize_get(edata);
1438 
1439 	return ret;
1440 }
1441 
1442 static void *
1443 arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
1444     size_t alignment, bool zero, tcache_t *tcache) {
1445 	if (alignment == 0) {
1446 		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
1447 		    zero, tcache, true);
1448 	}
1449 	usize = sz_sa2u(usize, alignment);
1450 	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
1451 		return NULL;
1452 	}
1453 	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
1454 }
1455 
1456 void *
1457 arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
1458     size_t size, size_t alignment, bool zero, tcache_t *tcache,
1459     hook_ralloc_args_t *hook_args) {
1460 	size_t usize = alignment == 0 ? sz_s2u(size) : sz_sa2u(size, alignment);
1461 	if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
1462 		return NULL;
1463 	}
1464 
1465 	if (likely(usize <= SC_SMALL_MAXCLASS)) {
1466 		/* Try to avoid moving the allocation. */
1467 		UNUSED size_t newsize;
1468 		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero,
1469 		    &newsize)) {
1470 			hook_invoke_expand(hook_args->is_realloc
1471 			    ? hook_expand_realloc : hook_expand_rallocx,
1472 			    ptr, oldsize, usize, (uintptr_t)ptr,
1473 			    hook_args->args);
1474 			return ptr;
1475 		}
1476 	}
1477 
1478 	if (oldsize >= SC_LARGE_MINCLASS
1479 	    && usize >= SC_LARGE_MINCLASS) {
1480 		return large_ralloc(tsdn, arena, ptr, usize,
1481 		    alignment, zero, tcache, hook_args);
1482 	}
1483 
1484 	/*
1485 	 * size and oldsize are different enough that we need to move the
1486 	 * object.  In that case, fall back to allocating new space and copying.
1487 	 */
1488 	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
1489 	    zero, tcache);
1490 	if (ret == NULL) {
1491 		return NULL;
1492 	}
1493 
1494 	hook_invoke_alloc(hook_args->is_realloc
1495 	    ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
1496 	    hook_args->args);
1497 	hook_invoke_dalloc(hook_args->is_realloc
1498 	    ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
1499 
1500 	/*
1501 	 * Junk/zero-filling were already done by
1502 	 * ipalloc()/arena_malloc().
1503 	 */
1504 	size_t copysize = (usize < oldsize) ? usize : oldsize;
1505 	memcpy(ret, ptr, copysize);
1506 	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
1507 	return ret;
1508 }
1509 
1510 ehooks_t *
1511 arena_get_ehooks(arena_t *arena) {
1512 	return base_ehooks_get(arena->base);
1513 }
1514 
1515 extent_hooks_t *
1516 arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
1517     extent_hooks_t *extent_hooks) {
1518 	background_thread_info_t *info;
1519 	if (have_background_thread) {
1520 		info = arena_background_thread_info_get(arena);
1521 		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
1522 	}
1523 	/* Stop using the HPA now that we have custom hooks. */
1524 	pa_shard_disable_hpa(tsd_tsdn(tsd), &arena->pa_shard);
1525 	extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
1526 	if (have_background_thread) {
1527 		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
1528 	}
1529 
1530 	return ret;
1531 }
1532 
1533 dss_prec_t
1534 arena_dss_prec_get(arena_t *arena) {
1535 	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
1536 }
1537 
1538 bool
1539 arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
1540 	if (!have_dss) {
1541 		return (dss_prec != dss_prec_disabled);
1542 	}
1543 	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
1544 	return false;
1545 }
1546 
1547 ssize_t
1548 arena_dirty_decay_ms_default_get(void) {
1549 	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
1550 }
1551 
1552 bool
1553 arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
1554 	if (!decay_ms_valid(decay_ms)) {
1555 		return true;
1556 	}
1557 	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
1558 	return false;
1559 }
1560 
1561 ssize_t
1562 arena_muzzy_decay_ms_default_get(void) {
1563 	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
1564 }
1565 
1566 bool
1567 arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
1568 	if (!decay_ms_valid(decay_ms)) {
1569 		return true;
1570 	}
1571 	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
1572 	return false;
1573 }
1574 
1575 bool
1576 arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
1577     size_t *new_limit) {
1578 	assert(opt_retain);
1579 	return pac_retain_grow_limit_get_set(tsd_tsdn(tsd),
1580 	    &arena->pa_shard.pac, old_limit, new_limit);
1581 }
1582 
1583 unsigned
1584 arena_nthreads_get(arena_t *arena, bool internal) {
1585 	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
1586 }
1587 
1588 void
1589 arena_nthreads_inc(arena_t *arena, bool internal) {
1590 	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
1591 }
1592 
1593 void
1594 arena_nthreads_dec(arena_t *arena, bool internal) {
1595 	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
1596 }
1597 
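/*
 * Create and initialize a new arena: its base allocator (arena 0 reuses the
 * bootstrap base), stats, large-extent list, pa_shard, and bins; optionally
 * enable the HPA; and publish the arena via arena_set().
 */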
1598 arena_t *
1599 arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
1600 	arena_t *arena;
1601 	base_t *base;
1602 	unsigned i;
1603 
1604 	if (ind == 0) {
1605 		base = b0get();
1606 	} else {
1607 		base = base_new(tsdn, ind, config->extent_hooks,
1608 		    config->metadata_use_hooks);
1609 		if (base == NULL) {
1610 			return NULL;
1611 		}
1612 	}
1613 
1614 	size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
1615 	arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
1616 	if (arena == NULL) {
1617 		goto label_error;
1618 	}
1619 
1620 	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
1621 	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
1622 	arena->last_thd = NULL;
1623 
1624 	if (config_stats) {
1625 		if (arena_stats_init(tsdn, &arena->stats)) {
1626 			goto label_error;
1627 		}
1628 
1629 		ql_new(&arena->tcache_ql);
1630 		ql_new(&arena->cache_bin_array_descriptor_ql);
1631 		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
1632 		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
1633 			goto label_error;
1634 		}
1635 	}
1636 
1637 	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
1638 	    ATOMIC_RELAXED);
1639 
1640 	edata_list_active_init(&arena->large);
1641 	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
1642 	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
1643 		goto label_error;
1644 	}
1645 
1646 	nstime_t cur_time;
1647 	nstime_init_update(&cur_time);
1648 	if (pa_shard_init(tsdn, &arena->pa_shard, &arena_pa_central_global,
1649 	    &arena_emap_global, base, ind, &arena->stats.pa_shard_stats,
1650 	    LOCKEDINT_MTX(arena->stats.mtx), &cur_time, oversize_threshold,
1651 	    arena_dirty_decay_ms_default_get(),
1652 	    arena_muzzy_decay_ms_default_get())) {
1653 		goto label_error;
1654 	}
1655 
1656 	/* Initialize bins. */
1657 	atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
1658 	for (i = 0; i < nbins_total; i++) {
1659 		bool err = bin_init(&arena->bins[i]);
1660 		if (err) {
1661 			goto label_error;
1662 		}
1663 	}
1664 
1665 	arena->base = base;
1666 	/* Set arena before creating background threads. */
1667 	arena_set(ind, arena);
1668 	arena->ind = ind;
1669 
1670 	nstime_init_update(&arena->create_time);
1671 
1672 	/*
1673 	 * We turn on the HPA if it is enabled.  There are two exceptions:
1674 	 * - Custom extent hooks (in that case we must only hand out memory
1675 	 *   obtained through those hooks).
1676 	 * - Arena 0 initialization.  We are mid-bootstrapping then, so the
1677 	 *   global HPA state is not yet initialized.
1678 	 */
1679 	if (opt_hpa && ehooks_are_default(base_ehooks_get(base)) && ind != 0) {
1680 		hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts;
1681 		hpa_shard_opts.deferral_allowed = background_thread_enabled();
1682 		if (pa_shard_enable_hpa(tsdn, &arena->pa_shard,
1683 		    &hpa_shard_opts, &opt_hpa_sec_opts)) {
1684 			goto label_error;
1685 		}
1686 	}
1687 
1688 	/* We don't support reentrancy for arena 0 bootstrapping. */
1689 	if (ind != 0) {
1690 		/*
1691 		 * If we're here, then arena 0 already exists, so bootstrapping
1692 		 * is done enough that we should have tsd.
1693 		 */
1694 		assert(!tsdn_null(tsdn));
1695 		pre_reentrancy(tsdn_tsd(tsdn), arena);
1696 		if (test_hooks_arena_new_hook) {
1697 			test_hooks_arena_new_hook();
1698 		}
1699 		post_reentrancy(tsdn_tsd(tsdn));
1700 	}
1701 
1702 	return arena;
1703 label_error:
1704 	if (ind != 0) {
1705 		base_delete(tsdn, base);
1706 	}
1707 	return NULL;
1708 }
1709 
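/*
 * Editorial note: arena_new() above is typically reached through the
 * "arenas.create" mallctl (optionally with custom extent hooks), or
 * implicitly when jemalloc creates its automatic arenas.  A hedged sketch of
 * explicit creation:
 *
 *	unsigned arena_ind;
 *	size_t sz = sizeof(arena_ind);
 *	mallctl("arenas.create", &arena_ind, &sz, NULL, 0);
 */
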
1710 arena_t *
1711 arena_choose_huge(tsd_t *tsd) {
1712 	/* huge_arena_ind can be 0 during init, in which case a0 is used. */
1713 	if (huge_arena_ind == 0) {
1714 		assert(!malloc_initialized());
1715 	}
1716 
1717 	arena_t *huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, false);
1718 	if (huge_arena == NULL) {
1719 		/* Create the huge arena on demand. */
1720 		assert(huge_arena_ind != 0);
1721 		huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, true);
1722 		if (huge_arena == NULL) {
1723 			return NULL;
1724 		}
1725 		/*
1726 		 * Purge eagerly for huge allocations, because: 1) the number
1727 		 * of huge allocations is usually small, so ticker-based decay
1728 		 * is not reliable; and 2) less immediate reuse is expected for
1729 		 * huge allocations.
1730 		 */
1731 		if (arena_dirty_decay_ms_default_get() > 0) {
1732 			arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
1733 			    extent_state_dirty, 0);
1734 		}
1735 		if (arena_muzzy_decay_ms_default_get() > 0) {
1736 			arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
1737 			    extent_state_muzzy, 0);
1738 		}
1739 	}
1740 
1741 	return huge_arena;
1742 }
1743 
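/*
 * Editorial note: sufficiently large requests (those crossing
 * oversize_threshold) are routed to the dedicated huge arena chosen above.
 * The threshold is controlled by the opt.oversize_threshold option; a hedged
 * example of setting it to 8 MiB at startup for a hypothetical program:
 *
 *	MALLOC_CONF="oversize_threshold:8388608" ./app
 */
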
1744 bool
1745 arena_init_huge(void) {
1746 	bool huge_enabled;
1747 
1748 	/* The threshold should be a large size class. */
1749 	if (opt_oversize_threshold > SC_LARGE_MAXCLASS ||
1750 	    opt_oversize_threshold < SC_LARGE_MINCLASS) {
1751 		opt_oversize_threshold = 0;
1752 		oversize_threshold = SC_LARGE_MAXCLASS + PAGE;
1753 		huge_enabled = false;
1754 	} else {
1755 		/* Reserve the index for the huge arena. */
1756 		huge_arena_ind = narenas_total_get();
1757 		oversize_threshold = opt_oversize_threshold;
1758 		huge_enabled = true;
1759 	}
1760 
1761 	return huge_enabled;
1762 }
1763 
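/*
 * Editorial note: when the configured threshold is rejected above, setting
 * oversize_threshold to SC_LARGE_MAXCLASS + PAGE guarantees that no request
 * can ever reach it, which effectively disables the dedicated huge arena.
 */
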
1764 bool
1765 arena_is_huge(unsigned arena_ind) {
1766 	if (huge_arena_ind == 0) {
1767 		return false;
1768 	}
1769 	return (arena_ind == huge_arena_ind);
1770 }
1771 
1772 bool
1773 arena_boot(sc_data_t *sc_data, base_t *base, bool hpa) {
1774 	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
1775 	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
1776 	for (unsigned i = 0; i < SC_NBINS; i++) {
1777 		sc_t *sc = &sc_data->sc[i];
1778 		div_init(&arena_binind_div_info[i],
1779 		    (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
1780 	}
1781 
1782 	uint32_t cur_offset = (uint32_t)offsetof(arena_t, bins);
1783 	for (szind_t i = 0; i < SC_NBINS; i++) {
1784 		arena_bin_offsets[i] = cur_offset;
1785 		nbins_total += bin_infos[i].n_shards;
1786 		cur_offset += (uint32_t)(bin_infos[i].n_shards * sizeof(bin_t));
1787 	}
1788 	return pa_central_init(&arena_pa_central_global, base, hpa,
1789 	    &hpa_hooks_default);
1790 }
1791 
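/*
 * Editorial note: the first loop in arena_boot() above computes each small
 * size class as (1 << lg_base) + (ndelta << lg_delta) and precomputes a
 * div_info_t so that slab code can turn a byte offset into a region index
 * with a multiplication rather than a hardware divide.  Hedged arithmetic
 * example: lg_base = 6, lg_delta = 4, ndelta = 3 gives 64 + 3*16 = 112.
 */
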
1792 void
1793 arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
1794 	pa_shard_prefork0(tsdn, &arena->pa_shard);
1795 }
1796 
1797 void
1798 arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
1799 	if (config_stats) {
1800 		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
1801 	}
1802 }
1803 
1804 void
1805 arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
1806 	pa_shard_prefork2(tsdn, &arena->pa_shard);
1807 }
1808 
1809 void
1810 arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
1811 	pa_shard_prefork3(tsdn, &arena->pa_shard);
1812 }
1813 
1814 void
1815 arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
1816 	pa_shard_prefork4(tsdn, &arena->pa_shard);
1817 }
1818 
1819 void
1820 arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
1821 	pa_shard_prefork5(tsdn, &arena->pa_shard);
1822 }
1823 
1824 void
1825 arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
1826 	base_prefork(tsdn, arena->base);
1827 }
1828 
1829 void
1830 arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
1831 	malloc_mutex_prefork(tsdn, &arena->large_mtx);
1832 }
1833 
1834 void
1835 arena_prefork8(tsdn_t *tsdn, arena_t *arena) {
1836 	for (unsigned i = 0; i < nbins_total; i++) {
1837 		bin_prefork(tsdn, &arena->bins[i]);
1838 	}
1839 }
1840 
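/*
 * Editorial note: the numbered prefork phases above follow jemalloc's lock
 * ordering (witness ranks).  They are called in sequence from the allocator's
 * fork handlers (typically registered via pthread_atfork(), or wired into
 * libc on some platforms) so that arena mutexes are acquired in a consistent
 * order before fork() and then released by the postfork_parent/postfork_child
 * counterparts below.
 */
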
1841 void
1842 arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
1843 	for (unsigned i = 0; i < nbins_total; i++) {
1844 		bin_postfork_parent(tsdn, &arena->bins[i]);
1845 	}
1846 
1847 	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
1848 	base_postfork_parent(tsdn, arena->base);
1849 	pa_shard_postfork_parent(tsdn, &arena->pa_shard);
1850 	if (config_stats) {
1851 		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
1852 	}
1853 }
1854 
1855 void
1856 arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
1857 	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
1858 	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
1859 	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
1860 		arena_nthreads_inc(arena, false);
1861 	}
1862 	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
1863 		arena_nthreads_inc(arena, true);
1864 	}
1865 	if (config_stats) {
1866 		ql_new(&arena->tcache_ql);
1867 		ql_new(&arena->cache_bin_array_descriptor_ql);
1868 		tcache_slow_t *tcache_slow = tcache_slow_get(tsdn_tsd(tsdn));
1869 		if (tcache_slow != NULL && tcache_slow->arena == arena) {
1870 			tcache_t *tcache = tcache_slow->tcache;
1871 			ql_elm_new(tcache_slow, link);
1872 			ql_tail_insert(&arena->tcache_ql, tcache_slow, link);
1873 			cache_bin_array_descriptor_init(
1874 			    &tcache_slow->cache_bin_array_descriptor,
1875 			    tcache->bins);
1876 			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
1877 			    &tcache_slow->cache_bin_array_descriptor, link);
1878 		}
1879 	}
1880 
1881 	for (unsigned i = 0; i < nbins_total; i++) {
1882 		bin_postfork_child(tsdn, &arena->bins[i]);
1883 	}
1884 
1885 	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
1886 	base_postfork_child(tsdn, arena->base);
1887 	pa_shard_postfork_child(tsdn, &arena->pa_shard);
1888 	if (config_stats) {
1889 		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
1890 	}
1891 }
1892
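/*
 * Editorial note: only the forking thread survives in the child, so the
 * child-side handler above reinitializes the arena mutexes and rebuilds the
 * tcache bookkeeping lists instead of trying to unlock state owned by threads
 * that no longer exist.
 */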