xref: /netbsd-src/external/bsd/jemalloc.old/dist/src/extent.c (revision 8e33eff89e26cf71871ead62f0d5063e1313c33a)
1*8e33eff8Schristos #define JEMALLOC_EXTENT_C_
2*8e33eff8Schristos #include "jemalloc/internal/jemalloc_preamble.h"
3*8e33eff8Schristos #include "jemalloc/internal/jemalloc_internal_includes.h"
4*8e33eff8Schristos 
5*8e33eff8Schristos #include "jemalloc/internal/assert.h"
6*8e33eff8Schristos #include "jemalloc/internal/extent_dss.h"
7*8e33eff8Schristos #include "jemalloc/internal/extent_mmap.h"
8*8e33eff8Schristos #include "jemalloc/internal/ph.h"
9*8e33eff8Schristos #include "jemalloc/internal/rtree.h"
10*8e33eff8Schristos #include "jemalloc/internal/mutex.h"
11*8e33eff8Schristos #include "jemalloc/internal/mutex_pool.h"
12*8e33eff8Schristos 
13*8e33eff8Schristos /******************************************************************************/
14*8e33eff8Schristos /* Data. */
15*8e33eff8Schristos 
/* Global rtree mapping addresses to their owning extent_t. */
rtree_t		extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t	extent_mutex_pool;

/* Tunable: lg of the max size-class multiple considered for best-fit reuse. */
size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;

/* One bit per psize class (+1 overflow slot); tracks non-empty heaps. */
static const bitmap_info_t extents_bitmap_info =
    BITMAP_INFO_INITIALIZER(NPSIZES+1);
24*8e33eff8Schristos 
25*8e33eff8Schristos static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
26*8e33eff8Schristos     size_t size, size_t alignment, bool *zero, bool *commit,
27*8e33eff8Schristos     unsigned arena_ind);
28*8e33eff8Schristos static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
29*8e33eff8Schristos     size_t size, bool committed, unsigned arena_ind);
30*8e33eff8Schristos static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
31*8e33eff8Schristos     size_t size, bool committed, unsigned arena_ind);
32*8e33eff8Schristos static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
33*8e33eff8Schristos     size_t size, size_t offset, size_t length, unsigned arena_ind);
34*8e33eff8Schristos static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
35*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
36*8e33eff8Schristos     size_t length, bool growing_retained);
37*8e33eff8Schristos static bool extent_decommit_default(extent_hooks_t *extent_hooks,
38*8e33eff8Schristos     void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
39*8e33eff8Schristos #ifdef PAGES_CAN_PURGE_LAZY
40*8e33eff8Schristos static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
41*8e33eff8Schristos     size_t size, size_t offset, size_t length, unsigned arena_ind);
42*8e33eff8Schristos #endif
43*8e33eff8Schristos static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
44*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
45*8e33eff8Schristos     size_t length, bool growing_retained);
46*8e33eff8Schristos #ifdef PAGES_CAN_PURGE_FORCED
47*8e33eff8Schristos static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
48*8e33eff8Schristos     void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
49*8e33eff8Schristos #endif
50*8e33eff8Schristos static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
51*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
52*8e33eff8Schristos     size_t length, bool growing_retained);
53*8e33eff8Schristos #ifdef JEMALLOC_MAPS_COALESCE
54*8e33eff8Schristos static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
55*8e33eff8Schristos     size_t size, size_t size_a, size_t size_b, bool committed,
56*8e33eff8Schristos     unsigned arena_ind);
57*8e33eff8Schristos #endif
58*8e33eff8Schristos static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
59*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
60*8e33eff8Schristos     szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
61*8e33eff8Schristos     bool growing_retained);
62*8e33eff8Schristos #ifdef JEMALLOC_MAPS_COALESCE
63*8e33eff8Schristos static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
64*8e33eff8Schristos     size_t size_a, void *addr_b, size_t size_b, bool committed,
65*8e33eff8Schristos     unsigned arena_ind);
66*8e33eff8Schristos #endif
67*8e33eff8Schristos static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
68*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
69*8e33eff8Schristos     bool growing_retained);
70*8e33eff8Schristos 
/*
 * Default extent hooks.  The purge entries are NULL when the platform lacks
 * the corresponding lazy/forced purge capability; the split/merge entries are
 * present only when JEMALLOC_MAPS_COALESCE is defined.
 */
const extent_hooks_t	extent_hooks_default = {
	extent_alloc_default,
	extent_dalloc_default,
	extent_destroy_default,
	extent_commit_default,
	extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
	,
	extent_purge_lazy_default
#else
	,
	NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
	,
	extent_purge_forced_default
#else
	,
	NULL
#endif
#ifdef JEMALLOC_MAPS_COALESCE
	,
	extent_split_default,
	extent_merge_default
#endif
};

/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
static atomic_zu_t highpages;
101*8e33eff8Schristos 
102*8e33eff8Schristos /******************************************************************************/
103*8e33eff8Schristos /*
104*8e33eff8Schristos  * Function prototypes for static functions that are referenced prior to
105*8e33eff8Schristos  * definition.
106*8e33eff8Schristos  */
107*8e33eff8Schristos 
108*8e33eff8Schristos static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
109*8e33eff8Schristos static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
110*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
111*8e33eff8Schristos     size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
112*8e33eff8Schristos     bool *zero, bool *commit, bool growing_retained);
113*8e33eff8Schristos static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
114*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
115*8e33eff8Schristos     extent_t *extent, bool *coalesced, bool growing_retained);
116*8e33eff8Schristos static void extent_record(tsdn_t *tsdn, arena_t *arena,
117*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
118*8e33eff8Schristos     bool growing_retained);
119*8e33eff8Schristos 
120*8e33eff8Schristos /******************************************************************************/
121*8e33eff8Schristos 
/* Generate pairing heap functions for the extent-avail tree (see ph.h). */
ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link,
    extent_esnead_comp)

/* Outcome of trying to lock the extent mapped by an rtree leaf element. */
typedef enum {
	lock_result_success,
	lock_result_failure,
	lock_result_no_extent
} lock_result_t;
130*8e33eff8Schristos 
131*8e33eff8Schristos static lock_result_t
132*8e33eff8Schristos extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
133*8e33eff8Schristos     extent_t **result) {
134*8e33eff8Schristos 	extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
135*8e33eff8Schristos 	    elm, true);
136*8e33eff8Schristos 
137*8e33eff8Schristos 	if (extent1 == NULL) {
138*8e33eff8Schristos 		return lock_result_no_extent;
139*8e33eff8Schristos 	}
140*8e33eff8Schristos 	/*
141*8e33eff8Schristos 	 * It's possible that the extent changed out from under us, and with it
142*8e33eff8Schristos 	 * the leaf->extent mapping.  We have to recheck while holding the lock.
143*8e33eff8Schristos 	 */
144*8e33eff8Schristos 	extent_lock(tsdn, extent1);
145*8e33eff8Schristos 	extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
146*8e33eff8Schristos 	    &extents_rtree, elm, true);
147*8e33eff8Schristos 
148*8e33eff8Schristos 	if (extent1 == extent2) {
149*8e33eff8Schristos 		*result = extent1;
150*8e33eff8Schristos 		return lock_result_success;
151*8e33eff8Schristos 	} else {
152*8e33eff8Schristos 		extent_unlock(tsdn, extent1);
153*8e33eff8Schristos 		return lock_result_failure;
154*8e33eff8Schristos 	}
155*8e33eff8Schristos }
156*8e33eff8Schristos 
157*8e33eff8Schristos /*
158*8e33eff8Schristos  * Returns a pool-locked extent_t * if there's one associated with the given
159*8e33eff8Schristos  * address, and NULL otherwise.
160*8e33eff8Schristos  */
161*8e33eff8Schristos static extent_t *
162*8e33eff8Schristos extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
163*8e33eff8Schristos 	extent_t *ret = NULL;
164*8e33eff8Schristos 	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
165*8e33eff8Schristos 	    rtree_ctx, (uintptr_t)addr, false, false);
166*8e33eff8Schristos 	if (elm == NULL) {
167*8e33eff8Schristos 		return NULL;
168*8e33eff8Schristos 	}
169*8e33eff8Schristos 	lock_result_t lock_result;
170*8e33eff8Schristos 	do {
171*8e33eff8Schristos 		lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
172*8e33eff8Schristos 	} while (lock_result == lock_result_failure);
173*8e33eff8Schristos 	return ret;
174*8e33eff8Schristos }
175*8e33eff8Schristos 
176*8e33eff8Schristos extent_t *
177*8e33eff8Schristos extent_alloc(tsdn_t *tsdn, arena_t *arena) {
178*8e33eff8Schristos 	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
179*8e33eff8Schristos 	extent_t *extent = extent_avail_first(&arena->extent_avail);
180*8e33eff8Schristos 	if (extent == NULL) {
181*8e33eff8Schristos 		malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
182*8e33eff8Schristos 		return base_alloc_extent(tsdn, arena->base);
183*8e33eff8Schristos 	}
184*8e33eff8Schristos 	extent_avail_remove(&arena->extent_avail, extent);
185*8e33eff8Schristos 	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
186*8e33eff8Schristos 	return extent;
187*8e33eff8Schristos }
188*8e33eff8Schristos 
189*8e33eff8Schristos void
190*8e33eff8Schristos extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
191*8e33eff8Schristos 	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
192*8e33eff8Schristos 	extent_avail_insert(&arena->extent_avail, extent);
193*8e33eff8Schristos 	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
194*8e33eff8Schristos }
195*8e33eff8Schristos 
196*8e33eff8Schristos extent_hooks_t *
197*8e33eff8Schristos extent_hooks_get(arena_t *arena) {
198*8e33eff8Schristos 	return base_extent_hooks_get(arena->base);
199*8e33eff8Schristos }
200*8e33eff8Schristos 
201*8e33eff8Schristos extent_hooks_t *
202*8e33eff8Schristos extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
203*8e33eff8Schristos 	background_thread_info_t *info;
204*8e33eff8Schristos 	if (have_background_thread) {
205*8e33eff8Schristos 		info = arena_background_thread_info_get(arena);
206*8e33eff8Schristos 		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
207*8e33eff8Schristos 	}
208*8e33eff8Schristos 	extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
209*8e33eff8Schristos 	if (have_background_thread) {
210*8e33eff8Schristos 		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
211*8e33eff8Schristos 	}
212*8e33eff8Schristos 
213*8e33eff8Schristos 	return ret;
214*8e33eff8Schristos }
215*8e33eff8Schristos 
216*8e33eff8Schristos static void
217*8e33eff8Schristos extent_hooks_assure_initialized(arena_t *arena,
218*8e33eff8Schristos     extent_hooks_t **r_extent_hooks) {
219*8e33eff8Schristos 	if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
220*8e33eff8Schristos 		*r_extent_hooks = extent_hooks_get(arena);
221*8e33eff8Schristos 	}
222*8e33eff8Schristos }
223*8e33eff8Schristos 
224*8e33eff8Schristos #ifndef JEMALLOC_JET
225*8e33eff8Schristos static
226*8e33eff8Schristos #endif
227*8e33eff8Schristos size_t
228*8e33eff8Schristos extent_size_quantize_floor(size_t size) {
229*8e33eff8Schristos 	size_t ret;
230*8e33eff8Schristos 	pszind_t pind;
231*8e33eff8Schristos 
232*8e33eff8Schristos 	assert(size > 0);
233*8e33eff8Schristos 	assert((size & PAGE_MASK) == 0);
234*8e33eff8Schristos 
235*8e33eff8Schristos 	pind = sz_psz2ind(size - sz_large_pad + 1);
236*8e33eff8Schristos 	if (pind == 0) {
237*8e33eff8Schristos 		/*
238*8e33eff8Schristos 		 * Avoid underflow.  This short-circuit would also do the right
239*8e33eff8Schristos 		 * thing for all sizes in the range for which there are
240*8e33eff8Schristos 		 * PAGE-spaced size classes, but it's simplest to just handle
241*8e33eff8Schristos 		 * the one case that would cause erroneous results.
242*8e33eff8Schristos 		 */
243*8e33eff8Schristos 		return size;
244*8e33eff8Schristos 	}
245*8e33eff8Schristos 	ret = sz_pind2sz(pind - 1) + sz_large_pad;
246*8e33eff8Schristos 	assert(ret <= size);
247*8e33eff8Schristos 	return ret;
248*8e33eff8Schristos }
249*8e33eff8Schristos 
250*8e33eff8Schristos #ifndef JEMALLOC_JET
251*8e33eff8Schristos static
252*8e33eff8Schristos #endif
253*8e33eff8Schristos size_t
254*8e33eff8Schristos extent_size_quantize_ceil(size_t size) {
255*8e33eff8Schristos 	size_t ret;
256*8e33eff8Schristos 
257*8e33eff8Schristos 	assert(size > 0);
258*8e33eff8Schristos 	assert(size - sz_large_pad <= LARGE_MAXCLASS);
259*8e33eff8Schristos 	assert((size & PAGE_MASK) == 0);
260*8e33eff8Schristos 
261*8e33eff8Schristos 	ret = extent_size_quantize_floor(size);
262*8e33eff8Schristos 	if (ret < size) {
263*8e33eff8Schristos 		/*
264*8e33eff8Schristos 		 * Skip a quantization that may have an adequately large extent,
265*8e33eff8Schristos 		 * because under-sized extents may be mixed in.  This only
266*8e33eff8Schristos 		 * happens when an unusual size is requested, i.e. for aligned
267*8e33eff8Schristos 		 * allocation, and is just one of several places where linear
268*8e33eff8Schristos 		 * search would potentially find sufficiently aligned available
269*8e33eff8Schristos 		 * memory somewhere lower.
270*8e33eff8Schristos 		 */
271*8e33eff8Schristos 		ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
272*8e33eff8Schristos 		    sz_large_pad;
273*8e33eff8Schristos 	}
274*8e33eff8Schristos 	return ret;
275*8e33eff8Schristos }
276*8e33eff8Schristos 
/* Generate pairing heap functions (extent_heap_new/insert/remove/first/...). */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
279*8e33eff8Schristos 
280*8e33eff8Schristos bool
281*8e33eff8Schristos extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
282*8e33eff8Schristos     bool delay_coalesce) {
283*8e33eff8Schristos 	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
284*8e33eff8Schristos 	    malloc_mutex_rank_exclusive)) {
285*8e33eff8Schristos 		return true;
286*8e33eff8Schristos 	}
287*8e33eff8Schristos 	for (unsigned i = 0; i < NPSIZES+1; i++) {
288*8e33eff8Schristos 		extent_heap_new(&extents->heaps[i]);
289*8e33eff8Schristos 	}
290*8e33eff8Schristos 	bitmap_init(extents->bitmap, &extents_bitmap_info, true);
291*8e33eff8Schristos 	extent_list_init(&extents->lru);
292*8e33eff8Schristos 	atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
293*8e33eff8Schristos 	extents->state = state;
294*8e33eff8Schristos 	extents->delay_coalesce = delay_coalesce;
295*8e33eff8Schristos 	return false;
296*8e33eff8Schristos }
297*8e33eff8Schristos 
298*8e33eff8Schristos extent_state_t
299*8e33eff8Schristos extents_state_get(const extents_t *extents) {
300*8e33eff8Schristos 	return extents->state;
301*8e33eff8Schristos }
302*8e33eff8Schristos 
303*8e33eff8Schristos size_t
304*8e33eff8Schristos extents_npages_get(extents_t *extents) {
305*8e33eff8Schristos 	return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
306*8e33eff8Schristos }
307*8e33eff8Schristos 
308*8e33eff8Schristos static void
309*8e33eff8Schristos extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
310*8e33eff8Schristos 	malloc_mutex_assert_owner(tsdn, &extents->mtx);
311*8e33eff8Schristos 	assert(extent_state_get(extent) == extents->state);
312*8e33eff8Schristos 
313*8e33eff8Schristos 	size_t size = extent_size_get(extent);
314*8e33eff8Schristos 	size_t psz = extent_size_quantize_floor(size);
315*8e33eff8Schristos 	pszind_t pind = sz_psz2ind(psz);
316*8e33eff8Schristos 	if (extent_heap_empty(&extents->heaps[pind])) {
317*8e33eff8Schristos 		bitmap_unset(extents->bitmap, &extents_bitmap_info,
318*8e33eff8Schristos 		    (size_t)pind);
319*8e33eff8Schristos 	}
320*8e33eff8Schristos 	extent_heap_insert(&extents->heaps[pind], extent);
321*8e33eff8Schristos 	extent_list_append(&extents->lru, extent);
322*8e33eff8Schristos 	size_t npages = size >> LG_PAGE;
323*8e33eff8Schristos 	/*
324*8e33eff8Schristos 	 * All modifications to npages hold the mutex (as asserted above), so we
325*8e33eff8Schristos 	 * don't need an atomic fetch-add; we can get by with a load followed by
326*8e33eff8Schristos 	 * a store.
327*8e33eff8Schristos 	 */
328*8e33eff8Schristos 	size_t cur_extents_npages =
329*8e33eff8Schristos 	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
330*8e33eff8Schristos 	atomic_store_zu(&extents->npages, cur_extents_npages + npages,
331*8e33eff8Schristos 	    ATOMIC_RELAXED);
332*8e33eff8Schristos }
333*8e33eff8Schristos 
334*8e33eff8Schristos static void
335*8e33eff8Schristos extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
336*8e33eff8Schristos 	malloc_mutex_assert_owner(tsdn, &extents->mtx);
337*8e33eff8Schristos 	assert(extent_state_get(extent) == extents->state);
338*8e33eff8Schristos 
339*8e33eff8Schristos 	size_t size = extent_size_get(extent);
340*8e33eff8Schristos 	size_t psz = extent_size_quantize_floor(size);
341*8e33eff8Schristos 	pszind_t pind = sz_psz2ind(psz);
342*8e33eff8Schristos 	extent_heap_remove(&extents->heaps[pind], extent);
343*8e33eff8Schristos 	if (extent_heap_empty(&extents->heaps[pind])) {
344*8e33eff8Schristos 		bitmap_set(extents->bitmap, &extents_bitmap_info,
345*8e33eff8Schristos 		    (size_t)pind);
346*8e33eff8Schristos 	}
347*8e33eff8Schristos 	extent_list_remove(&extents->lru, extent);
348*8e33eff8Schristos 	size_t npages = size >> LG_PAGE;
349*8e33eff8Schristos 	/*
350*8e33eff8Schristos 	 * As in extents_insert_locked, we hold extents->mtx and so don't need
351*8e33eff8Schristos 	 * atomic operations for updating extents->npages.
352*8e33eff8Schristos 	 */
353*8e33eff8Schristos 	size_t cur_extents_npages =
354*8e33eff8Schristos 	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
355*8e33eff8Schristos 	assert(cur_extents_npages >= npages);
356*8e33eff8Schristos 	atomic_store_zu(&extents->npages,
357*8e33eff8Schristos 	    cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
358*8e33eff8Schristos }
359*8e33eff8Schristos 
360*8e33eff8Schristos /*
361*8e33eff8Schristos  * Find an extent with size [min_size, max_size) to satisfy the alignment
362*8e33eff8Schristos  * requirement.  For each size, try only the first extent in the heap.
363*8e33eff8Schristos  */
364*8e33eff8Schristos static extent_t *
365*8e33eff8Schristos extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
366*8e33eff8Schristos     size_t alignment) {
367*8e33eff8Schristos         pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
368*8e33eff8Schristos         pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
369*8e33eff8Schristos 
370*8e33eff8Schristos 	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
371*8e33eff8Schristos 	    &extents_bitmap_info, (size_t)pind); i < pind_max; i =
372*8e33eff8Schristos 	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
373*8e33eff8Schristos 	    (size_t)i+1)) {
374*8e33eff8Schristos 		assert(i < NPSIZES);
375*8e33eff8Schristos 		assert(!extent_heap_empty(&extents->heaps[i]));
376*8e33eff8Schristos 		extent_t *extent = extent_heap_first(&extents->heaps[i]);
377*8e33eff8Schristos 		uintptr_t base = (uintptr_t)extent_base_get(extent);
378*8e33eff8Schristos 		size_t candidate_size = extent_size_get(extent);
379*8e33eff8Schristos 		assert(candidate_size >= min_size);
380*8e33eff8Schristos 
381*8e33eff8Schristos 		uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
382*8e33eff8Schristos 		    PAGE_CEILING(alignment));
383*8e33eff8Schristos 		if (base > next_align || base + candidate_size <= next_align) {
384*8e33eff8Schristos 			/* Overflow or not crossing the next alignment. */
385*8e33eff8Schristos 			continue;
386*8e33eff8Schristos 		}
387*8e33eff8Schristos 
388*8e33eff8Schristos 		size_t leadsize = next_align - base;
389*8e33eff8Schristos 		if (candidate_size - leadsize >= min_size) {
390*8e33eff8Schristos 			return extent;
391*8e33eff8Schristos 		}
392*8e33eff8Schristos 	}
393*8e33eff8Schristos 
394*8e33eff8Schristos 	return NULL;
395*8e33eff8Schristos }
396*8e33eff8Schristos 
397*8e33eff8Schristos /* Do any-best-fit extent selection, i.e. select any extent that best fits. */
398*8e33eff8Schristos static extent_t *
399*8e33eff8Schristos extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
400*8e33eff8Schristos     size_t size) {
401*8e33eff8Schristos 	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
402*8e33eff8Schristos 	pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
403*8e33eff8Schristos 	    (size_t)pind);
404*8e33eff8Schristos 	if (i < NPSIZES+1) {
405*8e33eff8Schristos 		/*
406*8e33eff8Schristos 		 * In order to reduce fragmentation, avoid reusing and splitting
407*8e33eff8Schristos 		 * large extents for much smaller sizes.
408*8e33eff8Schristos 		 */
409*8e33eff8Schristos 		if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
410*8e33eff8Schristos 			return NULL;
411*8e33eff8Schristos 		}
412*8e33eff8Schristos 		assert(!extent_heap_empty(&extents->heaps[i]));
413*8e33eff8Schristos 		extent_t *extent = extent_heap_first(&extents->heaps[i]);
414*8e33eff8Schristos 		assert(extent_size_get(extent) >= size);
415*8e33eff8Schristos 		return extent;
416*8e33eff8Schristos 	}
417*8e33eff8Schristos 
418*8e33eff8Schristos 	return NULL;
419*8e33eff8Schristos }
420*8e33eff8Schristos 
421*8e33eff8Schristos /*
422*8e33eff8Schristos  * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
423*8e33eff8Schristos  * large enough.
424*8e33eff8Schristos  */
425*8e33eff8Schristos static extent_t *
426*8e33eff8Schristos extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
427*8e33eff8Schristos     size_t size) {
428*8e33eff8Schristos 	extent_t *ret = NULL;
429*8e33eff8Schristos 
430*8e33eff8Schristos 	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
431*8e33eff8Schristos 	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
432*8e33eff8Schristos 	    &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
433*8e33eff8Schristos 	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
434*8e33eff8Schristos 	    (size_t)i+1)) {
435*8e33eff8Schristos 		assert(!extent_heap_empty(&extents->heaps[i]));
436*8e33eff8Schristos 		extent_t *extent = extent_heap_first(&extents->heaps[i]);
437*8e33eff8Schristos 		assert(extent_size_get(extent) >= size);
438*8e33eff8Schristos 		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
439*8e33eff8Schristos 			ret = extent;
440*8e33eff8Schristos 		}
441*8e33eff8Schristos 		if (i == NPSIZES) {
442*8e33eff8Schristos 			break;
443*8e33eff8Schristos 		}
444*8e33eff8Schristos 		assert(i < NPSIZES);
445*8e33eff8Schristos 	}
446*8e33eff8Schristos 
447*8e33eff8Schristos 	return ret;
448*8e33eff8Schristos }
449*8e33eff8Schristos 
450*8e33eff8Schristos /*
451*8e33eff8Schristos  * Do {best,first}-fit extent selection, where the selection policy choice is
452*8e33eff8Schristos  * based on extents->delay_coalesce.  Best-fit selection requires less
453*8e33eff8Schristos  * searching, but its layout policy is less stable and may cause higher virtual
454*8e33eff8Schristos  * memory fragmentation as a side effect.
455*8e33eff8Schristos  */
456*8e33eff8Schristos static extent_t *
457*8e33eff8Schristos extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
458*8e33eff8Schristos     size_t esize, size_t alignment) {
459*8e33eff8Schristos 	malloc_mutex_assert_owner(tsdn, &extents->mtx);
460*8e33eff8Schristos 
461*8e33eff8Schristos 	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
462*8e33eff8Schristos 	/* Beware size_t wrap-around. */
463*8e33eff8Schristos 	if (max_size < esize) {
464*8e33eff8Schristos 		return NULL;
465*8e33eff8Schristos 	}
466*8e33eff8Schristos 
467*8e33eff8Schristos 	extent_t *extent = extents->delay_coalesce ?
468*8e33eff8Schristos 	    extents_best_fit_locked(tsdn, arena, extents, max_size) :
469*8e33eff8Schristos 	    extents_first_fit_locked(tsdn, arena, extents, max_size);
470*8e33eff8Schristos 
471*8e33eff8Schristos 	if (alignment > PAGE && extent == NULL) {
472*8e33eff8Schristos 		/*
473*8e33eff8Schristos 		 * max_size guarantees the alignment requirement but is rather
474*8e33eff8Schristos 		 * pessimistic.  Next we try to satisfy the aligned allocation
475*8e33eff8Schristos 		 * with sizes in [esize, max_size).
476*8e33eff8Schristos 		 */
477*8e33eff8Schristos 		extent = extents_fit_alignment(extents, esize, max_size,
478*8e33eff8Schristos 		    alignment);
479*8e33eff8Schristos 	}
480*8e33eff8Schristos 
481*8e33eff8Schristos 	return extent;
482*8e33eff8Schristos }
483*8e33eff8Schristos 
484*8e33eff8Schristos static bool
485*8e33eff8Schristos extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
486*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
487*8e33eff8Schristos     extent_t *extent) {
488*8e33eff8Schristos 	extent_state_set(extent, extent_state_active);
489*8e33eff8Schristos 	bool coalesced;
490*8e33eff8Schristos 	extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
491*8e33eff8Schristos 	    extents, extent, &coalesced, false);
492*8e33eff8Schristos 	extent_state_set(extent, extents_state_get(extents));
493*8e33eff8Schristos 
494*8e33eff8Schristos 	if (!coalesced) {
495*8e33eff8Schristos 		return true;
496*8e33eff8Schristos 	}
497*8e33eff8Schristos 	extents_insert_locked(tsdn, extents, extent);
498*8e33eff8Schristos 	return false;
499*8e33eff8Schristos }
500*8e33eff8Schristos 
501*8e33eff8Schristos extent_t *
502*8e33eff8Schristos extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
503*8e33eff8Schristos     extents_t *extents, void *new_addr, size_t size, size_t pad,
504*8e33eff8Schristos     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
505*8e33eff8Schristos 	assert(size + pad != 0);
506*8e33eff8Schristos 	assert(alignment != 0);
507*8e33eff8Schristos 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
508*8e33eff8Schristos 	    WITNESS_RANK_CORE, 0);
509*8e33eff8Schristos 
510*8e33eff8Schristos 	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
511*8e33eff8Schristos 	    new_addr, size, pad, alignment, slab, szind, zero, commit, false);
512*8e33eff8Schristos 	assert(extent == NULL || extent_dumpable_get(extent));
513*8e33eff8Schristos 	return extent;
514*8e33eff8Schristos }
515*8e33eff8Schristos 
516*8e33eff8Schristos void
517*8e33eff8Schristos extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
518*8e33eff8Schristos     extents_t *extents, extent_t *extent) {
519*8e33eff8Schristos 	assert(extent_base_get(extent) != NULL);
520*8e33eff8Schristos 	assert(extent_size_get(extent) != 0);
521*8e33eff8Schristos 	assert(extent_dumpable_get(extent));
522*8e33eff8Schristos 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
523*8e33eff8Schristos 	    WITNESS_RANK_CORE, 0);
524*8e33eff8Schristos 
525*8e33eff8Schristos 	extent_addr_set(extent, extent_base_get(extent));
526*8e33eff8Schristos 	extent_zeroed_set(extent, false);
527*8e33eff8Schristos 
528*8e33eff8Schristos 	extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
529*8e33eff8Schristos }
530*8e33eff8Schristos 
/*
 * Evict the LRU extent from the container, provided doing so would not drop
 * the container below npages_min pages.  Returns NULL when the container is
 * empty or at/below the limit.  The returned extent has been removed from the
 * container and either marked active or deregistered (see the switch below).
 */
extent_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, size_t npages_min) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	malloc_mutex_lock(tsdn, &extents->mtx);

	/*
	 * Get the LRU coalesced extent, if any.  If coalescing was delayed,
	 * the loop will iterate until the LRU extent is fully coalesced.
	 */
	extent_t *extent;
	while (true) {
		/* Get the LRU extent, if any. */
		extent = extent_list_first(&extents->lru);
		if (extent == NULL) {
			goto label_return;
		}
		/* Check the eviction limit. */
		size_t extents_npages = atomic_load_zu(&extents->npages,
		    ATOMIC_RELAXED);
		if (extents_npages <= npages_min) {
			extent = NULL;
			goto label_return;
		}
		extents_remove_locked(tsdn, extents, extent);
		if (!extents->delay_coalesce) {
			/* Eager coalescing: the LRU extent is already final. */
			break;
		}
		/* Try to coalesce.  Returns true when nothing coalesced. */
		if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent)) {
			break;
		}
		/*
		 * The LRU extent was just coalesced and the result placed in
		 * the LRU at its neighbor's position.  Start over.
		 */
	}

	/*
	 * Either mark the extent active or deregister it to protect against
	 * concurrent operations.
	 */
	switch (extents_state_get(extents)) {
	case extent_state_active:
		not_reached();
	case extent_state_dirty:
	case extent_state_muzzy:
		extent_state_set(extent, extent_state_active);
		break;
	case extent_state_retained:
		extent_deregister(tsdn, extent);
		break;
	default:
		not_reached();
	}

label_return:
	malloc_mutex_unlock(tsdn, &extents->mtx);
	return extent;
}
594*8e33eff8Schristos 
595*8e33eff8Schristos static void
596*8e33eff8Schristos extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
597*8e33eff8Schristos     extents_t *extents, extent_t *extent, bool growing_retained) {
598*8e33eff8Schristos 	/*
599*8e33eff8Schristos 	 * Leak extent after making sure its pages have already been purged, so
600*8e33eff8Schristos 	 * that this is only a virtual memory leak.
601*8e33eff8Schristos 	 */
602*8e33eff8Schristos 	if (extents_state_get(extents) == extent_state_dirty) {
603*8e33eff8Schristos 		if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
604*8e33eff8Schristos 		    extent, 0, extent_size_get(extent), growing_retained)) {
605*8e33eff8Schristos 			extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
606*8e33eff8Schristos 			    extent, 0, extent_size_get(extent),
607*8e33eff8Schristos 			    growing_retained);
608*8e33eff8Schristos 		}
609*8e33eff8Schristos 	}
610*8e33eff8Schristos 	extent_dalloc(tsdn, arena, extent);
611*8e33eff8Schristos }
612*8e33eff8Schristos 
613*8e33eff8Schristos void
614*8e33eff8Schristos extents_prefork(tsdn_t *tsdn, extents_t *extents) {
615*8e33eff8Schristos 	malloc_mutex_prefork(tsdn, &extents->mtx);
616*8e33eff8Schristos }
617*8e33eff8Schristos 
618*8e33eff8Schristos void
619*8e33eff8Schristos extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
620*8e33eff8Schristos 	malloc_mutex_postfork_parent(tsdn, &extents->mtx);
621*8e33eff8Schristos }
622*8e33eff8Schristos 
623*8e33eff8Schristos void
624*8e33eff8Schristos extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
625*8e33eff8Schristos 	malloc_mutex_postfork_child(tsdn, &extents->mtx);
626*8e33eff8Schristos }
627*8e33eff8Schristos 
628*8e33eff8Schristos static void
629*8e33eff8Schristos extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
630*8e33eff8Schristos     extent_t *extent) {
631*8e33eff8Schristos 	assert(extent_arena_get(extent) == arena);
632*8e33eff8Schristos 	assert(extent_state_get(extent) == extent_state_active);
633*8e33eff8Schristos 
634*8e33eff8Schristos 	extent_state_set(extent, extents_state_get(extents));
635*8e33eff8Schristos 	extents_insert_locked(tsdn, extents, extent);
636*8e33eff8Schristos }
637*8e33eff8Schristos 
638*8e33eff8Schristos static void
639*8e33eff8Schristos extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
640*8e33eff8Schristos     extent_t *extent) {
641*8e33eff8Schristos 	malloc_mutex_lock(tsdn, &extents->mtx);
642*8e33eff8Schristos 	extent_deactivate_locked(tsdn, arena, extents, extent);
643*8e33eff8Schristos 	malloc_mutex_unlock(tsdn, &extents->mtx);
644*8e33eff8Schristos }
645*8e33eff8Schristos 
646*8e33eff8Schristos static void
647*8e33eff8Schristos extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
648*8e33eff8Schristos     extent_t *extent) {
649*8e33eff8Schristos 	assert(extent_arena_get(extent) == arena);
650*8e33eff8Schristos 	assert(extent_state_get(extent) == extents_state_get(extents));
651*8e33eff8Schristos 
652*8e33eff8Schristos 	extents_remove_locked(tsdn, extents, extent);
653*8e33eff8Schristos 	extent_state_set(extent, extent_state_active);
654*8e33eff8Schristos }
655*8e33eff8Schristos 
656*8e33eff8Schristos static bool
657*8e33eff8Schristos extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
658*8e33eff8Schristos     const extent_t *extent, bool dependent, bool init_missing,
659*8e33eff8Schristos     rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
660*8e33eff8Schristos 	*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
661*8e33eff8Schristos 	    (uintptr_t)extent_base_get(extent), dependent, init_missing);
662*8e33eff8Schristos 	if (!dependent && *r_elm_a == NULL) {
663*8e33eff8Schristos 		return true;
664*8e33eff8Schristos 	}
665*8e33eff8Schristos 	assert(*r_elm_a != NULL);
666*8e33eff8Schristos 
667*8e33eff8Schristos 	*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
668*8e33eff8Schristos 	    (uintptr_t)extent_last_get(extent), dependent, init_missing);
669*8e33eff8Schristos 	if (!dependent && *r_elm_b == NULL) {
670*8e33eff8Schristos 		return true;
671*8e33eff8Schristos 	}
672*8e33eff8Schristos 	assert(*r_elm_b != NULL);
673*8e33eff8Schristos 
674*8e33eff8Schristos 	return false;
675*8e33eff8Schristos }
676*8e33eff8Schristos 
677*8e33eff8Schristos static void
678*8e33eff8Schristos extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
679*8e33eff8Schristos     rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
680*8e33eff8Schristos 	rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
681*8e33eff8Schristos 	if (elm_b != NULL) {
682*8e33eff8Schristos 		rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
683*8e33eff8Schristos 		    slab);
684*8e33eff8Schristos 	}
685*8e33eff8Schristos }
686*8e33eff8Schristos 
687*8e33eff8Schristos static void
688*8e33eff8Schristos extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
689*8e33eff8Schristos     szind_t szind) {
690*8e33eff8Schristos 	assert(extent_slab_get(extent));
691*8e33eff8Schristos 
692*8e33eff8Schristos 	/* Register interior. */
693*8e33eff8Schristos 	for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
694*8e33eff8Schristos 		rtree_write(tsdn, &extents_rtree, rtree_ctx,
695*8e33eff8Schristos 		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
696*8e33eff8Schristos 		    LG_PAGE), extent, szind, true);
697*8e33eff8Schristos 	}
698*8e33eff8Schristos }
699*8e33eff8Schristos 
700*8e33eff8Schristos static JEMALLOC_NORETURN void
701*8e33eff8Schristos extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
702*8e33eff8Schristos 	cassert(config_prof);
703*8e33eff8Schristos 	/* prof_gdump() requirement. */
704*8e33eff8Schristos 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
705*8e33eff8Schristos 	    WITNESS_RANK_CORE, 0);
706*8e33eff8Schristos 
707*8e33eff8Schristos 	if (opt_prof && extent_state_get(extent) == extent_state_active) {
708*8e33eff8Schristos 		size_t nadd = extent_size_get(extent) >> LG_PAGE;
709*8e33eff8Schristos 		size_t cur = atomic_fetch_add_zu(&curpages, nadd,
710*8e33eff8Schristos 		    ATOMIC_RELAXED) + nadd;
711*8e33eff8Schristos 		size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
712*8e33eff8Schristos 		while (cur > high && !atomic_compare_exchange_weak_zu(
713*8e33eff8Schristos 		    &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
714*8e33eff8Schristos 			/*
715*8e33eff8Schristos 			 * Don't refresh cur, because it may have decreased
716*8e33eff8Schristos 			 * since this thread lost the highpages update race.
717*8e33eff8Schristos 			 * Note that high is updated in case of CAS failure.
718*8e33eff8Schristos 			 */
719*8e33eff8Schristos 		}
720*8e33eff8Schristos 		if (cur > high && prof_gdump_get_unlocked()) {
721*8e33eff8Schristos 			prof_gdump(tsdn);
722*8e33eff8Schristos 		}
723*8e33eff8Schristos 	}
724*8e33eff8Schristos }
725*8e33eff8Schristos 
726*8e33eff8Schristos static JEMALLOC_NORETURN void
727*8e33eff8Schristos extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
728*8e33eff8Schristos 	cassert(config_prof);
729*8e33eff8Schristos 
730*8e33eff8Schristos 	if (opt_prof && extent_state_get(extent) == extent_state_active) {
731*8e33eff8Schristos 		size_t nsub = extent_size_get(extent) >> LG_PAGE;
732*8e33eff8Schristos 		assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
733*8e33eff8Schristos 		atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
734*8e33eff8Schristos 	}
735*8e33eff8Schristos }
736*8e33eff8Schristos 
737*8e33eff8Schristos static bool
738*8e33eff8Schristos extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
739*8e33eff8Schristos 	rtree_ctx_t rtree_ctx_fallback;
740*8e33eff8Schristos 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
741*8e33eff8Schristos 	rtree_leaf_elm_t *elm_a, *elm_b;
742*8e33eff8Schristos 
743*8e33eff8Schristos 	/*
744*8e33eff8Schristos 	 * We need to hold the lock to protect against a concurrent coalesce
745*8e33eff8Schristos 	 * operation that sees us in a partial state.
746*8e33eff8Schristos 	 */
747*8e33eff8Schristos 	extent_lock(tsdn, extent);
748*8e33eff8Schristos 
749*8e33eff8Schristos 	if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
750*8e33eff8Schristos 	    &elm_a, &elm_b)) {
751*8e33eff8Schristos 		extent_unlock(tsdn, extent);
752*8e33eff8Schristos 		return true;
753*8e33eff8Schristos 	}
754*8e33eff8Schristos 
755*8e33eff8Schristos 	szind_t szind = extent_szind_get_maybe_invalid(extent);
756*8e33eff8Schristos 	bool slab = extent_slab_get(extent);
757*8e33eff8Schristos 	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
758*8e33eff8Schristos 	if (slab) {
759*8e33eff8Schristos 		extent_interior_register(tsdn, rtree_ctx, extent, szind);
760*8e33eff8Schristos 	}
761*8e33eff8Schristos 
762*8e33eff8Schristos 	extent_unlock(tsdn, extent);
763*8e33eff8Schristos 
764*8e33eff8Schristos 	if (config_prof && gdump_add) {
765*8e33eff8Schristos 		extent_gdump_add(tsdn, extent);
766*8e33eff8Schristos 	}
767*8e33eff8Schristos 
768*8e33eff8Schristos 	return false;
769*8e33eff8Schristos }
770*8e33eff8Schristos 
771*8e33eff8Schristos static bool
772*8e33eff8Schristos extent_register(tsdn_t *tsdn, extent_t *extent) {
773*8e33eff8Schristos 	return extent_register_impl(tsdn, extent, true);
774*8e33eff8Schristos }
775*8e33eff8Schristos 
776*8e33eff8Schristos static bool
777*8e33eff8Schristos extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
778*8e33eff8Schristos 	return extent_register_impl(tsdn, extent, false);
779*8e33eff8Schristos }
780*8e33eff8Schristos 
781*8e33eff8Schristos static void
782*8e33eff8Schristos extent_reregister(tsdn_t *tsdn, extent_t *extent) {
783*8e33eff8Schristos 	bool err = extent_register(tsdn, extent);
784*8e33eff8Schristos 	assert(!err);
785*8e33eff8Schristos }
786*8e33eff8Schristos 
787*8e33eff8Schristos /*
788*8e33eff8Schristos  * Removes all pointers to the given extent from the global rtree indices for
789*8e33eff8Schristos  * its interior.  This is relevant for slab extents, for which we need to do
790*8e33eff8Schristos  * metadata lookups at places other than the head of the extent.  We deregister
791*8e33eff8Schristos  * on the interior, then, when an extent moves from being an active slab to an
792*8e33eff8Schristos  * inactive state.
793*8e33eff8Schristos  */
794*8e33eff8Schristos static void
795*8e33eff8Schristos extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
796*8e33eff8Schristos     extent_t *extent) {
797*8e33eff8Schristos 	size_t i;
798*8e33eff8Schristos 
799*8e33eff8Schristos 	assert(extent_slab_get(extent));
800*8e33eff8Schristos 
801*8e33eff8Schristos 	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
802*8e33eff8Schristos 		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
803*8e33eff8Schristos 		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
804*8e33eff8Schristos 		    LG_PAGE));
805*8e33eff8Schristos 	}
806*8e33eff8Schristos }
807*8e33eff8Schristos 
808*8e33eff8Schristos /*
809*8e33eff8Schristos  * Removes all pointers to the given extent from the global rtree.
810*8e33eff8Schristos  */
811*8e33eff8Schristos static void
812*8e33eff8Schristos extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
813*8e33eff8Schristos 	rtree_ctx_t rtree_ctx_fallback;
814*8e33eff8Schristos 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
815*8e33eff8Schristos 	rtree_leaf_elm_t *elm_a, *elm_b;
816*8e33eff8Schristos 	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
817*8e33eff8Schristos 	    &elm_a, &elm_b);
818*8e33eff8Schristos 
819*8e33eff8Schristos 	extent_lock(tsdn, extent);
820*8e33eff8Schristos 
821*8e33eff8Schristos 	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
822*8e33eff8Schristos 	if (extent_slab_get(extent)) {
823*8e33eff8Schristos 		extent_interior_deregister(tsdn, rtree_ctx, extent);
824*8e33eff8Schristos 		extent_slab_set(extent, false);
825*8e33eff8Schristos 	}
826*8e33eff8Schristos 
827*8e33eff8Schristos 	extent_unlock(tsdn, extent);
828*8e33eff8Schristos 
829*8e33eff8Schristos 	if (config_prof && gdump) {
830*8e33eff8Schristos 		extent_gdump_sub(tsdn, extent);
831*8e33eff8Schristos 	}
832*8e33eff8Schristos }
833*8e33eff8Schristos 
834*8e33eff8Schristos static void
835*8e33eff8Schristos extent_deregister(tsdn_t *tsdn, extent_t *extent) {
836*8e33eff8Schristos 	extent_deregister_impl(tsdn, extent, true);
837*8e33eff8Schristos }
838*8e33eff8Schristos 
839*8e33eff8Schristos static void
840*8e33eff8Schristos extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
841*8e33eff8Schristos 	extent_deregister_impl(tsdn, extent, false);
842*8e33eff8Schristos }
843*8e33eff8Schristos 
844*8e33eff8Schristos /*
845*8e33eff8Schristos  * Tries to find and remove an extent from extents that can be used for the
846*8e33eff8Schristos  * given allocation request.
847*8e33eff8Schristos  */
848*8e33eff8Schristos static extent_t *
849*8e33eff8Schristos extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
850*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
851*8e33eff8Schristos     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
852*8e33eff8Schristos     bool growing_retained) {
853*8e33eff8Schristos 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
854*8e33eff8Schristos 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
855*8e33eff8Schristos 	assert(alignment > 0);
856*8e33eff8Schristos 	if (config_debug && new_addr != NULL) {
857*8e33eff8Schristos 		/*
858*8e33eff8Schristos 		 * Non-NULL new_addr has two use cases:
859*8e33eff8Schristos 		 *
860*8e33eff8Schristos 		 *   1) Recycle a known-extant extent, e.g. during purging.
861*8e33eff8Schristos 		 *   2) Perform in-place expanding reallocation.
862*8e33eff8Schristos 		 *
863*8e33eff8Schristos 		 * Regardless of use case, new_addr must either refer to a
864*8e33eff8Schristos 		 * non-existing extent, or to the base of an extant extent,
865*8e33eff8Schristos 		 * since only active slabs support interior lookups (which of
866*8e33eff8Schristos 		 * course cannot be recycled).
867*8e33eff8Schristos 		 */
868*8e33eff8Schristos 		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
869*8e33eff8Schristos 		assert(pad == 0);
870*8e33eff8Schristos 		assert(alignment <= PAGE);
871*8e33eff8Schristos 	}
872*8e33eff8Schristos 
873*8e33eff8Schristos 	size_t esize = size + pad;
874*8e33eff8Schristos 	malloc_mutex_lock(tsdn, &extents->mtx);
875*8e33eff8Schristos 	extent_hooks_assure_initialized(arena, r_extent_hooks);
876*8e33eff8Schristos 	extent_t *extent;
877*8e33eff8Schristos 	if (new_addr != NULL) {
878*8e33eff8Schristos 		extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
879*8e33eff8Schristos 		if (extent != NULL) {
880*8e33eff8Schristos 			/*
881*8e33eff8Schristos 			 * We might null-out extent to report an error, but we
882*8e33eff8Schristos 			 * still need to unlock the associated mutex after.
883*8e33eff8Schristos 			 */
884*8e33eff8Schristos 			extent_t *unlock_extent = extent;
885*8e33eff8Schristos 			assert(extent_base_get(extent) == new_addr);
886*8e33eff8Schristos 			if (extent_arena_get(extent) != arena ||
887*8e33eff8Schristos 			    extent_size_get(extent) < esize ||
888*8e33eff8Schristos 			    extent_state_get(extent) !=
889*8e33eff8Schristos 			    extents_state_get(extents)) {
890*8e33eff8Schristos 				extent = NULL;
891*8e33eff8Schristos 			}
892*8e33eff8Schristos 			extent_unlock(tsdn, unlock_extent);
893*8e33eff8Schristos 		}
894*8e33eff8Schristos 	} else {
895*8e33eff8Schristos 		extent = extents_fit_locked(tsdn, arena, extents, esize,
896*8e33eff8Schristos 		    alignment);
897*8e33eff8Schristos 	}
898*8e33eff8Schristos 	if (extent == NULL) {
899*8e33eff8Schristos 		malloc_mutex_unlock(tsdn, &extents->mtx);
900*8e33eff8Schristos 		return NULL;
901*8e33eff8Schristos 	}
902*8e33eff8Schristos 
903*8e33eff8Schristos 	extent_activate_locked(tsdn, arena, extents, extent);
904*8e33eff8Schristos 	malloc_mutex_unlock(tsdn, &extents->mtx);
905*8e33eff8Schristos 
906*8e33eff8Schristos 	return extent;
907*8e33eff8Schristos }
908*8e33eff8Schristos 
909*8e33eff8Schristos /*
910*8e33eff8Schristos  * Given an allocation request and an extent guaranteed to be able to satisfy
911*8e33eff8Schristos  * it, this splits off lead and trail extents, leaving extent pointing to an
912*8e33eff8Schristos  * extent satisfying the allocation.
913*8e33eff8Schristos  * This function doesn't put lead or trail into any extents_t; it's the caller's
914*8e33eff8Schristos  * job to ensure that they can be reused.
915*8e33eff8Schristos  */
916*8e33eff8Schristos typedef enum {
917*8e33eff8Schristos 	/*
918*8e33eff8Schristos 	 * Split successfully.  lead, extent, and trail, are modified to extents
919*8e33eff8Schristos 	 * describing the ranges before, in, and after the given allocation.
920*8e33eff8Schristos 	 */
921*8e33eff8Schristos 	extent_split_interior_ok,
922*8e33eff8Schristos 	/*
923*8e33eff8Schristos 	 * The extent can't satisfy the given allocation request.  None of the
924*8e33eff8Schristos 	 * input extent_t *s are touched.
925*8e33eff8Schristos 	 */
926*8e33eff8Schristos 	extent_split_interior_cant_alloc,
927*8e33eff8Schristos 	/*
928*8e33eff8Schristos 	 * In a potentially invalid state.  Must leak (if *to_leak is non-NULL),
929*8e33eff8Schristos 	 * and salvage what's still salvageable (if *to_salvage is non-NULL).
930*8e33eff8Schristos 	 * None of lead, extent, or trail are valid.
931*8e33eff8Schristos 	 */
932*8e33eff8Schristos 	extent_split_interior_error
933*8e33eff8Schristos } extent_split_interior_result_t;
934*8e33eff8Schristos 
935*8e33eff8Schristos static extent_split_interior_result_t
936*8e33eff8Schristos extent_split_interior(tsdn_t *tsdn, arena_t *arena,
937*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
938*8e33eff8Schristos     /* The result of splitting, in case of success. */
939*8e33eff8Schristos     extent_t **extent, extent_t **lead, extent_t **trail,
940*8e33eff8Schristos     /* The mess to clean up, in case of error. */
941*8e33eff8Schristos     extent_t **to_leak, extent_t **to_salvage,
942*8e33eff8Schristos     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
943*8e33eff8Schristos     szind_t szind, bool growing_retained) {
944*8e33eff8Schristos 	size_t esize = size + pad;
945*8e33eff8Schristos 	size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
946*8e33eff8Schristos 	    PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
947*8e33eff8Schristos 	assert(new_addr == NULL || leadsize == 0);
948*8e33eff8Schristos 	if (extent_size_get(*extent) < leadsize + esize) {
949*8e33eff8Schristos 		return extent_split_interior_cant_alloc;
950*8e33eff8Schristos 	}
951*8e33eff8Schristos 	size_t trailsize = extent_size_get(*extent) - leadsize - esize;
952*8e33eff8Schristos 
953*8e33eff8Schristos 	*lead = NULL;
954*8e33eff8Schristos 	*trail = NULL;
955*8e33eff8Schristos 	*to_leak = NULL;
956*8e33eff8Schristos 	*to_salvage = NULL;
957*8e33eff8Schristos 
958*8e33eff8Schristos 	/* Split the lead. */
959*8e33eff8Schristos 	if (leadsize != 0) {
960*8e33eff8Schristos 		*lead = *extent;
961*8e33eff8Schristos 		*extent = extent_split_impl(tsdn, arena, r_extent_hooks,
962*8e33eff8Schristos 		    *lead, leadsize, NSIZES, false, esize + trailsize, szind,
963*8e33eff8Schristos 		    slab, growing_retained);
964*8e33eff8Schristos 		if (*extent == NULL) {
965*8e33eff8Schristos 			*to_leak = *lead;
966*8e33eff8Schristos 			*lead = NULL;
967*8e33eff8Schristos 			return extent_split_interior_error;
968*8e33eff8Schristos 		}
969*8e33eff8Schristos 	}
970*8e33eff8Schristos 
971*8e33eff8Schristos 	/* Split the trail. */
972*8e33eff8Schristos 	if (trailsize != 0) {
973*8e33eff8Schristos 		*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
974*8e33eff8Schristos 		    esize, szind, slab, trailsize, NSIZES, false,
975*8e33eff8Schristos 		    growing_retained);
976*8e33eff8Schristos 		if (*trail == NULL) {
977*8e33eff8Schristos 			*to_leak = *extent;
978*8e33eff8Schristos 			*to_salvage = *lead;
979*8e33eff8Schristos 			*lead = NULL;
980*8e33eff8Schristos 			*extent = NULL;
981*8e33eff8Schristos 			return extent_split_interior_error;
982*8e33eff8Schristos 		}
983*8e33eff8Schristos 	}
984*8e33eff8Schristos 
985*8e33eff8Schristos 	if (leadsize == 0 && trailsize == 0) {
986*8e33eff8Schristos 		/*
987*8e33eff8Schristos 		 * Splitting causes szind to be set as a side effect, but no
988*8e33eff8Schristos 		 * splitting occurred.
989*8e33eff8Schristos 		 */
990*8e33eff8Schristos 		extent_szind_set(*extent, szind);
991*8e33eff8Schristos 		if (szind != NSIZES) {
992*8e33eff8Schristos 			rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
993*8e33eff8Schristos 			    (uintptr_t)extent_addr_get(*extent), szind, slab);
994*8e33eff8Schristos 			if (slab && extent_size_get(*extent) > PAGE) {
995*8e33eff8Schristos 				rtree_szind_slab_update(tsdn, &extents_rtree,
996*8e33eff8Schristos 				    rtree_ctx,
997*8e33eff8Schristos 				    (uintptr_t)extent_past_get(*extent) -
998*8e33eff8Schristos 				    (uintptr_t)PAGE, szind, slab);
999*8e33eff8Schristos 			}
1000*8e33eff8Schristos 		}
1001*8e33eff8Schristos 	}
1002*8e33eff8Schristos 
1003*8e33eff8Schristos 	return extent_split_interior_ok;
1004*8e33eff8Schristos }
1005*8e33eff8Schristos 
1006*8e33eff8Schristos /*
1007*8e33eff8Schristos  * This fulfills the indicated allocation request out of the given extent (which
1008*8e33eff8Schristos  * the caller should have ensured was big enough).  If there's any unused space
1009*8e33eff8Schristos  * before or after the resulting allocation, that space is given its own extent
1010*8e33eff8Schristos  * and put back into extents.
1011*8e33eff8Schristos  */
1012*8e33eff8Schristos static extent_t *
1013*8e33eff8Schristos extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
1014*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1015*8e33eff8Schristos     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
1016*8e33eff8Schristos     szind_t szind, extent_t *extent, bool growing_retained) {
1017*8e33eff8Schristos 	extent_t *lead;
1018*8e33eff8Schristos 	extent_t *trail;
1019*8e33eff8Schristos 	extent_t *to_leak;
1020*8e33eff8Schristos 	extent_t *to_salvage;
1021*8e33eff8Schristos 
1022*8e33eff8Schristos 	extent_split_interior_result_t result = extent_split_interior(
1023*8e33eff8Schristos 	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1024*8e33eff8Schristos 	    &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
1025*8e33eff8Schristos 	    growing_retained);
1026*8e33eff8Schristos 
1027*8e33eff8Schristos 	if (result == extent_split_interior_ok) {
1028*8e33eff8Schristos 		if (lead != NULL) {
1029*8e33eff8Schristos 			extent_deactivate(tsdn, arena, extents, lead);
1030*8e33eff8Schristos 		}
1031*8e33eff8Schristos 		if (trail != NULL) {
1032*8e33eff8Schristos 			extent_deactivate(tsdn, arena, extents, trail);
1033*8e33eff8Schristos 		}
1034*8e33eff8Schristos 		return extent;
1035*8e33eff8Schristos 	} else {
1036*8e33eff8Schristos 		/*
1037*8e33eff8Schristos 		 * We should have picked an extent that was large enough to
1038*8e33eff8Schristos 		 * fulfill our allocation request.
1039*8e33eff8Schristos 		 */
1040*8e33eff8Schristos 		assert(result == extent_split_interior_error);
1041*8e33eff8Schristos 		if (to_salvage != NULL) {
1042*8e33eff8Schristos 			extent_deregister(tsdn, to_salvage);
1043*8e33eff8Schristos 		}
1044*8e33eff8Schristos 		if (to_leak != NULL) {
1045*8e33eff8Schristos 			void *leak = extent_base_get(to_leak);
1046*8e33eff8Schristos 			extent_deregister_no_gdump_sub(tsdn, to_leak);
1047*8e33eff8Schristos 			extents_leak(tsdn, arena, r_extent_hooks, extents,
1048*8e33eff8Schristos 			    to_leak, growing_retained);
1049*8e33eff8Schristos 			assert(extent_lock_from_addr(tsdn, rtree_ctx, leak)
1050*8e33eff8Schristos 			    == NULL);
1051*8e33eff8Schristos 		}
1052*8e33eff8Schristos 		return NULL;
1053*8e33eff8Schristos 	}
1054*8e33eff8Schristos 	unreachable();
1055*8e33eff8Schristos }
1056*8e33eff8Schristos 
1057*8e33eff8Schristos /*
1058*8e33eff8Schristos  * Tries to satisfy the given allocation request by reusing one of the extents
1059*8e33eff8Schristos  * in the given extents_t.
1060*8e33eff8Schristos  */
1061*8e33eff8Schristos static extent_t *
1062*8e33eff8Schristos extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1063*8e33eff8Schristos     extents_t *extents, void *new_addr, size_t size, size_t pad,
1064*8e33eff8Schristos     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
1065*8e33eff8Schristos     bool growing_retained) {
1066*8e33eff8Schristos 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1067*8e33eff8Schristos 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1068*8e33eff8Schristos 	assert(new_addr == NULL || !slab);
1069*8e33eff8Schristos 	assert(pad == 0 || !slab);
1070*8e33eff8Schristos 	assert(!*zero || !slab);
1071*8e33eff8Schristos 
1072*8e33eff8Schristos 	rtree_ctx_t rtree_ctx_fallback;
1073*8e33eff8Schristos 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1074*8e33eff8Schristos 
1075*8e33eff8Schristos 	extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
1076*8e33eff8Schristos 	    rtree_ctx, extents, new_addr, size, pad, alignment, slab,
1077*8e33eff8Schristos 	    growing_retained);
1078*8e33eff8Schristos 	if (extent == NULL) {
1079*8e33eff8Schristos 		return NULL;
1080*8e33eff8Schristos 	}
1081*8e33eff8Schristos 
1082*8e33eff8Schristos 	extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
1083*8e33eff8Schristos 	    extents, new_addr, size, pad, alignment, slab, szind, extent,
1084*8e33eff8Schristos 	    growing_retained);
1085*8e33eff8Schristos 	if (extent == NULL) {
1086*8e33eff8Schristos 		return NULL;
1087*8e33eff8Schristos 	}
1088*8e33eff8Schristos 
1089*8e33eff8Schristos 	if (*commit && !extent_committed_get(extent)) {
1090*8e33eff8Schristos 		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
1091*8e33eff8Schristos 		    0, extent_size_get(extent), growing_retained)) {
1092*8e33eff8Schristos 			extent_record(tsdn, arena, r_extent_hooks, extents,
1093*8e33eff8Schristos 			    extent, growing_retained);
1094*8e33eff8Schristos 			return NULL;
1095*8e33eff8Schristos 		}
1096*8e33eff8Schristos 		extent_zeroed_set(extent, true);
1097*8e33eff8Schristos 	}
1098*8e33eff8Schristos 
1099*8e33eff8Schristos 	if (extent_committed_get(extent)) {
1100*8e33eff8Schristos 		*commit = true;
1101*8e33eff8Schristos 	}
1102*8e33eff8Schristos 	if (extent_zeroed_get(extent)) {
1103*8e33eff8Schristos 		*zero = true;
1104*8e33eff8Schristos 	}
1105*8e33eff8Schristos 
1106*8e33eff8Schristos 	if (pad != 0) {
1107*8e33eff8Schristos 		extent_addr_randomize(tsdn, extent, alignment);
1108*8e33eff8Schristos 	}
1109*8e33eff8Schristos 	assert(extent_state_get(extent) == extent_state_active);
1110*8e33eff8Schristos 	if (slab) {
1111*8e33eff8Schristos 		extent_slab_set(extent, slab);
1112*8e33eff8Schristos 		extent_interior_register(tsdn, rtree_ctx, extent, szind);
1113*8e33eff8Schristos 	}
1114*8e33eff8Schristos 
1115*8e33eff8Schristos 	if (*zero) {
1116*8e33eff8Schristos 		void *addr = extent_base_get(extent);
1117*8e33eff8Schristos 		size_t sz = extent_size_get(extent);
1118*8e33eff8Schristos 		if (!extent_zeroed_get(extent)) {
1119*8e33eff8Schristos 			if (pages_purge_forced(addr, sz)) {
1120*8e33eff8Schristos 				memset(addr, 0, sz);
1121*8e33eff8Schristos 			}
1122*8e33eff8Schristos 		} else if (config_debug) {
1123*8e33eff8Schristos 			size_t *p = (size_t *)(uintptr_t)addr;
1124*8e33eff8Schristos 			for (size_t i = 0; i < sz / sizeof(size_t); i++) {
1125*8e33eff8Schristos 				assert(p[i] == 0);
1126*8e33eff8Schristos 			}
1127*8e33eff8Schristos 		}
1128*8e33eff8Schristos 	}
1129*8e33eff8Schristos 	return extent;
1130*8e33eff8Schristos }
1131*8e33eff8Schristos 
1132*8e33eff8Schristos /*
1133*8e33eff8Schristos  * If the caller specifies (!*zero), it is still possible to receive zeroed
1134*8e33eff8Schristos  * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
1135*8e33eff8Schristos  * advantage of this to avoid demanding zeroed extents, but taking advantage of
1136*8e33eff8Schristos  * them if they are returned.
1137*8e33eff8Schristos  */
1138*8e33eff8Schristos static void *
1139*8e33eff8Schristos extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
1140*8e33eff8Schristos     size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
1141*8e33eff8Schristos 	void *ret;
1142*8e33eff8Schristos 
1143*8e33eff8Schristos 	assert(size != 0);
1144*8e33eff8Schristos 	assert(alignment != 0);
1145*8e33eff8Schristos 
1146*8e33eff8Schristos 	/* "primary" dss. */
1147*8e33eff8Schristos 	if (have_dss && dss_prec == dss_prec_primary && (ret =
1148*8e33eff8Schristos 	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1149*8e33eff8Schristos 	    commit)) != NULL) {
1150*8e33eff8Schristos 		return ret;
1151*8e33eff8Schristos 	}
1152*8e33eff8Schristos 	/* mmap. */
1153*8e33eff8Schristos 	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
1154*8e33eff8Schristos 	    != NULL) {
1155*8e33eff8Schristos 		return ret;
1156*8e33eff8Schristos 	}
1157*8e33eff8Schristos 	/* "secondary" dss. */
1158*8e33eff8Schristos 	if (have_dss && dss_prec == dss_prec_secondary && (ret =
1159*8e33eff8Schristos 	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1160*8e33eff8Schristos 	    commit)) != NULL) {
1161*8e33eff8Schristos 		return ret;
1162*8e33eff8Schristos 	}
1163*8e33eff8Schristos 
1164*8e33eff8Schristos 	/* All strategies for allocation failed. */
1165*8e33eff8Schristos 	return NULL;
1166*8e33eff8Schristos }
1167*8e33eff8Schristos 
1168*8e33eff8Schristos static void *
1169*8e33eff8Schristos extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
1170*8e33eff8Schristos     size_t size, size_t alignment, bool *zero, bool *commit) {
1171*8e33eff8Schristos 	void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
1172*8e33eff8Schristos 	    commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
1173*8e33eff8Schristos 	    ATOMIC_RELAXED));
1174*8e33eff8Schristos 	if (have_madvise_huge && ret) {
1175*8e33eff8Schristos 		pages_set_thp_state(ret, size);
1176*8e33eff8Schristos 	}
1177*8e33eff8Schristos 	return ret;
1178*8e33eff8Schristos }
1179*8e33eff8Schristos 
1180*8e33eff8Schristos static void *
1181*8e33eff8Schristos extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
1182*8e33eff8Schristos     size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
1183*8e33eff8Schristos 	tsdn_t *tsdn;
1184*8e33eff8Schristos 	arena_t *arena;
1185*8e33eff8Schristos 
1186*8e33eff8Schristos 	tsdn = tsdn_fetch();
1187*8e33eff8Schristos 	arena = arena_get(tsdn, arena_ind, false);
1188*8e33eff8Schristos 	/*
1189*8e33eff8Schristos 	 * The arena we're allocating on behalf of must have been initialized
1190*8e33eff8Schristos 	 * already.
1191*8e33eff8Schristos 	 */
1192*8e33eff8Schristos 	assert(arena != NULL);
1193*8e33eff8Schristos 
1194*8e33eff8Schristos 	return extent_alloc_default_impl(tsdn, arena, new_addr, size,
1195*8e33eff8Schristos 	    alignment, zero, commit);
1196*8e33eff8Schristos }
1197*8e33eff8Schristos 
1198*8e33eff8Schristos static void
1199*8e33eff8Schristos extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
1200*8e33eff8Schristos 	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1201*8e33eff8Schristos 	if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
1202*8e33eff8Schristos 		/*
1203*8e33eff8Schristos 		 * The only legitimate case of customized extent hooks for a0 is
1204*8e33eff8Schristos 		 * hooks with no allocation activities.  One such example is to
1205*8e33eff8Schristos 		 * place metadata on pre-allocated resources such as huge pages.
1206*8e33eff8Schristos 		 * In that case, rely on reentrancy_level checks to catch
1207*8e33eff8Schristos 		 * infinite recursions.
1208*8e33eff8Schristos 		 */
1209*8e33eff8Schristos 		pre_reentrancy(tsd, NULL);
1210*8e33eff8Schristos 	} else {
1211*8e33eff8Schristos 		pre_reentrancy(tsd, arena);
1212*8e33eff8Schristos 	}
1213*8e33eff8Schristos }
1214*8e33eff8Schristos 
1215*8e33eff8Schristos static void
1216*8e33eff8Schristos extent_hook_post_reentrancy(tsdn_t *tsdn) {
1217*8e33eff8Schristos 	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1218*8e33eff8Schristos 	post_reentrancy(tsd);
1219*8e33eff8Schristos }
1220*8e33eff8Schristos 
/*
 * If virtual memory is retained, create increasingly larger extents from which
 * to split requested extents in order to limit the total number of disjoint
 * virtual memory ranges retained by each arena.
 *
 * Called with arena->extent_grow_mtx held; the mutex is released before
 * returning, on both the success path and the label_err path.
 */
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
    bool slab, szind_t szind, bool *zero, bool *commit) {
	malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
	assert(pad == 0 || !slab);
	assert(!*zero || !slab);

	size_t esize = size + pad;
	/*
	 * Worst-case allocation size such that an esize-byte range with the
	 * requested alignment can be carved out of it.
	 */
	size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size_min < esize) {
		goto label_err;
	}
	/*
	 * Find the next extent size in the series that would be large enough to
	 * satisfy this request.
	 */
	pszind_t egn_skip = 0;
	size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
	while (alloc_size < alloc_size_min) {
		egn_skip++;
		if (arena->extent_grow_next + egn_skip == NPSIZES) {
			/* Outside legal range. */
			goto label_err;
		}
		assert(arena->extent_grow_next + egn_skip < NPSIZES);
		alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
	}

	extent_t *extent = extent_alloc(tsdn, arena);
	if (extent == NULL) {
		goto label_err;
	}
	/* Filled in by the allocation hook below. */
	bool zeroed = false;
	bool committed = false;

	void *ptr;
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		ptr = extent_alloc_default_impl(tsdn, arena, NULL,
		    alloc_size, PAGE, &zeroed, &committed);
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
		    alloc_size, PAGE, &zeroed, &committed,
		    arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	/* Initialize unconditionally; ptr is NULL-checked just below. */
	extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
	    arena_extent_sn_next(arena), extent_state_active, zeroed,
	    committed, true);
	if (ptr == NULL) {
		extent_dalloc(tsdn, arena, extent);
		goto label_err;
	}

	/* gdump accounting is deferred until the extent is at final size. */
	if (extent_register_no_gdump_add(tsdn, extent)) {
		extents_leak(tsdn, arena, r_extent_hooks,
		    &arena->extents_retained, extent, true);
		goto label_err;
	}

	if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
		*zero = true;
	}
	if (extent_committed_get(extent)) {
		*commit = true;
	}

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	/*
	 * Split the oversized allocation so that [extent] matches the
	 * requested size/pad/alignment; lead/trail are the trimmings.
	 */
	extent_t *lead;
	extent_t *trail;
	extent_t *to_leak;
	extent_t *to_salvage;
	extent_split_interior_result_t result = extent_split_interior(
	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
	    &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
	    true);

	if (result == extent_split_interior_ok) {
		/* Return the trimmings to the retained extents. */
		if (lead != NULL) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, lead, true);
		}
		if (trail != NULL) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, trail, true);
		}
	} else {
		/*
		 * We should have allocated a sufficiently large extent; the
		 * cant_alloc case should not occur.
		 */
		assert(result == extent_split_interior_error);
		if (to_salvage != NULL) {
			if (config_prof) {
				extent_gdump_add(tsdn, to_salvage);
			}
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, to_salvage, true);
		}
		if (to_leak != NULL) {
			extent_deregister_no_gdump_sub(tsdn, to_leak);
			extents_leak(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, to_leak, true);
		}
		goto label_err;
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
		    extent_size_get(extent), true)) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, extent, true);
			goto label_err;
		}
		extent_zeroed_set(extent, true);
	}

	/*
	 * Increment extent_grow_next if doing so wouldn't exceed the allowed
	 * range.
	 */
	if (arena->extent_grow_next + egn_skip + 1 <=
	    arena->retain_grow_limit) {
		arena->extent_grow_next += egn_skip + 1;
	} else {
		arena->extent_grow_next = arena->retain_grow_limit;
	}
	/* All opportunities for failure are past. */
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);

	if (config_prof) {
		/* Adjust gdump stats now that extent is final size. */
		extent_gdump_add(tsdn, extent);
	}
	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	if (slab) {
		rtree_ctx_t rtree_ctx_fallback1;
		rtree_ctx_t *rtree_ctx1 = tsdn_rtree_ctx(tsdn,
		    &rtree_ctx_fallback1);

		extent_slab_set(extent, true);
		extent_interior_register(tsdn, rtree_ctx1, extent, szind);
	}
	if (*zero && !extent_zeroed_get(extent)) {
		void *addr = extent_base_get(extent);
		size_t sz = extent_size_get(extent);
		/* Fall back to explicit zeroing if forced purge fails. */
		if (pages_purge_forced(addr, sz)) {
			memset(addr, 0, sz);
		}
	}

	return extent;
label_err:
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
	return NULL;
}
1389*8e33eff8Schristos 
1390*8e33eff8Schristos static extent_t *
1391*8e33eff8Schristos extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
1392*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1393*8e33eff8Schristos     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1394*8e33eff8Schristos 	assert(size != 0);
1395*8e33eff8Schristos 	assert(alignment != 0);
1396*8e33eff8Schristos 
1397*8e33eff8Schristos 	malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
1398*8e33eff8Schristos 
1399*8e33eff8Schristos 	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
1400*8e33eff8Schristos 	    &arena->extents_retained, new_addr, size, pad, alignment, slab,
1401*8e33eff8Schristos 	    szind, zero, commit, true);
1402*8e33eff8Schristos 	if (extent != NULL) {
1403*8e33eff8Schristos 		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1404*8e33eff8Schristos 		if (config_prof) {
1405*8e33eff8Schristos 			extent_gdump_add(tsdn, extent);
1406*8e33eff8Schristos 		}
1407*8e33eff8Schristos 	} else if (opt_retain && new_addr == NULL) {
1408*8e33eff8Schristos 		extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
1409*8e33eff8Schristos 		    pad, alignment, slab, szind, zero, commit);
1410*8e33eff8Schristos 		/* extent_grow_retained() always releases extent_grow_mtx. */
1411*8e33eff8Schristos 	} else {
1412*8e33eff8Schristos 		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1413*8e33eff8Schristos 	}
1414*8e33eff8Schristos 	malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
1415*8e33eff8Schristos 
1416*8e33eff8Schristos 	return extent;
1417*8e33eff8Schristos }
1418*8e33eff8Schristos 
1419*8e33eff8Schristos static extent_t *
1420*8e33eff8Schristos extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
1421*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1422*8e33eff8Schristos     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1423*8e33eff8Schristos 	size_t esize = size + pad;
1424*8e33eff8Schristos 	extent_t *extent = extent_alloc(tsdn, arena);
1425*8e33eff8Schristos 	if (extent == NULL) {
1426*8e33eff8Schristos 		return NULL;
1427*8e33eff8Schristos 	}
1428*8e33eff8Schristos 	void *addr;
1429*8e33eff8Schristos 	if (*r_extent_hooks == &extent_hooks_default) {
1430*8e33eff8Schristos 		/* Call directly to propagate tsdn. */
1431*8e33eff8Schristos 		addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
1432*8e33eff8Schristos 		    alignment, zero, commit);
1433*8e33eff8Schristos 	} else {
1434*8e33eff8Schristos 		extent_hook_pre_reentrancy(tsdn, arena);
1435*8e33eff8Schristos 		addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
1436*8e33eff8Schristos 		    esize, alignment, zero, commit, arena_ind_get(arena));
1437*8e33eff8Schristos 		extent_hook_post_reentrancy(tsdn);
1438*8e33eff8Schristos 	}
1439*8e33eff8Schristos 	if (addr == NULL) {
1440*8e33eff8Schristos 		extent_dalloc(tsdn, arena, extent);
1441*8e33eff8Schristos 		return NULL;
1442*8e33eff8Schristos 	}
1443*8e33eff8Schristos 	extent_init(extent, arena, addr, esize, slab, szind,
1444*8e33eff8Schristos 	    arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
1445*8e33eff8Schristos 	    true);
1446*8e33eff8Schristos 	if (pad != 0) {
1447*8e33eff8Schristos 		extent_addr_randomize(tsdn, extent, alignment);
1448*8e33eff8Schristos 	}
1449*8e33eff8Schristos 	if (extent_register(tsdn, extent)) {
1450*8e33eff8Schristos 		extents_leak(tsdn, arena, r_extent_hooks,
1451*8e33eff8Schristos 		    &arena->extents_retained, extent, false);
1452*8e33eff8Schristos 		return NULL;
1453*8e33eff8Schristos 	}
1454*8e33eff8Schristos 
1455*8e33eff8Schristos 	return extent;
1456*8e33eff8Schristos }
1457*8e33eff8Schristos 
1458*8e33eff8Schristos extent_t *
1459*8e33eff8Schristos extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1460*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1461*8e33eff8Schristos     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1462*8e33eff8Schristos 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1463*8e33eff8Schristos 	    WITNESS_RANK_CORE, 0);
1464*8e33eff8Schristos 
1465*8e33eff8Schristos 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1466*8e33eff8Schristos 
1467*8e33eff8Schristos 	extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
1468*8e33eff8Schristos 	    new_addr, size, pad, alignment, slab, szind, zero, commit);
1469*8e33eff8Schristos 	if (extent == NULL) {
1470*8e33eff8Schristos 		if (opt_retain && new_addr != NULL) {
1471*8e33eff8Schristos 			/*
1472*8e33eff8Schristos 			 * When retain is enabled and new_addr is set, we do not
1473*8e33eff8Schristos 			 * attempt extent_alloc_wrapper_hard which does mmap
1474*8e33eff8Schristos 			 * that is very unlikely to succeed (unless it happens
1475*8e33eff8Schristos 			 * to be at the end).
1476*8e33eff8Schristos 			 */
1477*8e33eff8Schristos 			return NULL;
1478*8e33eff8Schristos 		}
1479*8e33eff8Schristos 		extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
1480*8e33eff8Schristos 		    new_addr, size, pad, alignment, slab, szind, zero, commit);
1481*8e33eff8Schristos 	}
1482*8e33eff8Schristos 
1483*8e33eff8Schristos 	assert(extent == NULL || extent_dumpable_get(extent));
1484*8e33eff8Schristos 	return extent;
1485*8e33eff8Schristos }
1486*8e33eff8Schristos 
1487*8e33eff8Schristos static bool
1488*8e33eff8Schristos extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
1489*8e33eff8Schristos     const extent_t *outer) {
1490*8e33eff8Schristos 	assert(extent_arena_get(inner) == arena);
1491*8e33eff8Schristos 	if (extent_arena_get(outer) != arena) {
1492*8e33eff8Schristos 		return false;
1493*8e33eff8Schristos 	}
1494*8e33eff8Schristos 
1495*8e33eff8Schristos 	assert(extent_state_get(inner) == extent_state_active);
1496*8e33eff8Schristos 	if (extent_state_get(outer) != extents->state) {
1497*8e33eff8Schristos 		return false;
1498*8e33eff8Schristos 	}
1499*8e33eff8Schristos 
1500*8e33eff8Schristos 	if (extent_committed_get(inner) != extent_committed_get(outer)) {
1501*8e33eff8Schristos 		return false;
1502*8e33eff8Schristos 	}
1503*8e33eff8Schristos 
1504*8e33eff8Schristos 	return true;
1505*8e33eff8Schristos }
1506*8e33eff8Schristos 
1507*8e33eff8Schristos static bool
1508*8e33eff8Schristos extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1509*8e33eff8Schristos     extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
1510*8e33eff8Schristos     bool growing_retained) {
1511*8e33eff8Schristos 	assert(extent_can_coalesce(arena, extents, inner, outer));
1512*8e33eff8Schristos 
1513*8e33eff8Schristos 	extent_activate_locked(tsdn, arena, extents, outer);
1514*8e33eff8Schristos 
1515*8e33eff8Schristos 	malloc_mutex_unlock(tsdn, &extents->mtx);
1516*8e33eff8Schristos 	bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
1517*8e33eff8Schristos 	    forward ? inner : outer, forward ? outer : inner, growing_retained);
1518*8e33eff8Schristos 	malloc_mutex_lock(tsdn, &extents->mtx);
1519*8e33eff8Schristos 
1520*8e33eff8Schristos 	if (err) {
1521*8e33eff8Schristos 		extent_deactivate_locked(tsdn, arena, extents, outer);
1522*8e33eff8Schristos 	}
1523*8e33eff8Schristos 
1524*8e33eff8Schristos 	return err;
1525*8e33eff8Schristos }
1526*8e33eff8Schristos 
/*
 * Repeatedly try to merge extent with its virtual-address neighbors (forward
 * and backward) while extent_can_coalesce() approves.  Returns the possibly
 * enlarged extent.  When extents->delay_coalesce is set, at most one merge is
 * performed per call and *coalesced reports whether it happened.  Caller
 * holds extents->mtx (see extent_record()).
 */
static extent_t *
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained) {
	/*
	 * Continue attempting to coalesce until failure, to protect against
	 * races with other threads that are thwarted by this one.
	 */
	bool again;
	do {
		again = false;

		/* Try to coalesce forward. */
		extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
		    extent_past_get(extent));
		if (next != NULL) {
			/*
			 * extents->mtx only protects against races for
			 * like-state extents, so call extent_can_coalesce()
			 * before releasing next's pool lock.
			 */
			bool can_coalesce = extent_can_coalesce(arena, extents,
			    extent, next);

			extent_unlock(tsdn, next);

			if (can_coalesce && !extent_coalesce(tsdn, arena,
			    r_extent_hooks, extents, extent, next, true,
			    growing_retained)) {
				if (extents->delay_coalesce) {
					/* Do minimal coalescing. */
					*coalesced = true;
					return extent;
				}
				again = true;
			}
		}

		/* Try to coalesce backward. */
		extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
		    extent_before_get(extent));
		if (prev != NULL) {
			bool can_coalesce = extent_can_coalesce(arena, extents,
			    extent, prev);
			extent_unlock(tsdn, prev);

			if (can_coalesce && !extent_coalesce(tsdn, arena,
			    r_extent_hooks, extents, extent, prev, false,
			    growing_retained)) {
				/* The merged extent is now headed by prev. */
				extent = prev;
				if (extents->delay_coalesce) {
					/* Do minimal coalescing. */
					*coalesced = true;
					return extent;
				}
				again = true;
			}
		}
	} while (again);

	if (extents->delay_coalesce) {
		*coalesced = false;
	}
	return extent;
}
1592*8e33eff8Schristos 
/*
 * Does the metadata management portions of putting an unused extent into the
 * given extents_t (coalesces, deregisters slab interiors, the heap operations).
 */
static void
extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent, bool growing_retained) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	/* Extents recorded as dirty/muzzy must not claim to be zeroed. */
	assert((extents_state_get(extents) != extent_state_dirty &&
	    extents_state_get(extents) != extent_state_muzzy) ||
	    !extent_zeroed_get(extent));

	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_hooks_assure_initialized(arena, r_extent_hooks);

	/* The extent no longer belongs to a size class. */
	extent_szind_set(extent, NSIZES);
	if (extent_slab_get(extent)) {
		/* Remove interior rtree mappings left over from slab use. */
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}

	assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_base_get(extent), true) == extent);

	if (!extents->delay_coalesce) {
		extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent, NULL, growing_retained);
	} else if (extent_size_get(extent) >= LARGE_MINCLASS) {
		/* Always coalesce large extents eagerly. */
		bool coalesced;
		size_t prev_size;
		/*
		 * Keep going as long as each round grows the extent by at
		 * least LARGE_MINCLASS.
		 */
		do {
			prev_size = extent_size_get(extent);
			assert(extent_state_get(extent) == extent_state_active);
			extent = extent_try_coalesce(tsdn, arena,
			    r_extent_hooks, rtree_ctx, extents, extent,
			    &coalesced, growing_retained);
		} while (coalesced &&
		    extent_size_get(extent) >= prev_size + LARGE_MINCLASS);
	}
	/* Insert the (possibly merged) extent into extents' data structures. */
	extent_deactivate_locked(tsdn, arena, extents, extent);

	malloc_mutex_unlock(tsdn, &extents->mtx);
}
1639*8e33eff8Schristos 
1640*8e33eff8Schristos void
1641*8e33eff8Schristos extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
1642*8e33eff8Schristos 	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
1643*8e33eff8Schristos 
1644*8e33eff8Schristos 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1645*8e33eff8Schristos 	    WITNESS_RANK_CORE, 0);
1646*8e33eff8Schristos 
1647*8e33eff8Schristos 	if (extent_register(tsdn, extent)) {
1648*8e33eff8Schristos 		extents_leak(tsdn, arena, &extent_hooks,
1649*8e33eff8Schristos 		    &arena->extents_retained, extent, false);
1650*8e33eff8Schristos 		return;
1651*8e33eff8Schristos 	}
1652*8e33eff8Schristos 	extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
1653*8e33eff8Schristos }
1654*8e33eff8Schristos 
1655*8e33eff8Schristos static bool
1656*8e33eff8Schristos extent_dalloc_default_impl(void *addr, size_t size) {
1657*8e33eff8Schristos 	if (!have_dss || !extent_in_dss(addr)) {
1658*8e33eff8Schristos 		return extent_dalloc_mmap(addr, size);
1659*8e33eff8Schristos 	}
1660*8e33eff8Schristos 	return true;
1661*8e33eff8Schristos }
1662*8e33eff8Schristos 
1663*8e33eff8Schristos static bool
1664*8e33eff8Schristos extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1665*8e33eff8Schristos     bool committed, unsigned arena_ind) {
1666*8e33eff8Schristos 	return extent_dalloc_default_impl(addr, size);
1667*8e33eff8Schristos }
1668*8e33eff8Schristos 
1669*8e33eff8Schristos static bool
1670*8e33eff8Schristos extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
1671*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, extent_t *extent) {
1672*8e33eff8Schristos 	bool err;
1673*8e33eff8Schristos 
1674*8e33eff8Schristos 	assert(extent_base_get(extent) != NULL);
1675*8e33eff8Schristos 	assert(extent_size_get(extent) != 0);
1676*8e33eff8Schristos 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1677*8e33eff8Schristos 	    WITNESS_RANK_CORE, 0);
1678*8e33eff8Schristos 
1679*8e33eff8Schristos 	extent_addr_set(extent, extent_base_get(extent));
1680*8e33eff8Schristos 
1681*8e33eff8Schristos 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1682*8e33eff8Schristos 	/* Try to deallocate. */
1683*8e33eff8Schristos 	if (*r_extent_hooks == &extent_hooks_default) {
1684*8e33eff8Schristos 		/* Call directly to propagate tsdn. */
1685*8e33eff8Schristos 		err = extent_dalloc_default_impl(extent_base_get(extent),
1686*8e33eff8Schristos 		    extent_size_get(extent));
1687*8e33eff8Schristos 	} else {
1688*8e33eff8Schristos 		extent_hook_pre_reentrancy(tsdn, arena);
1689*8e33eff8Schristos 		err = ((*r_extent_hooks)->dalloc == NULL ||
1690*8e33eff8Schristos 		    (*r_extent_hooks)->dalloc(*r_extent_hooks,
1691*8e33eff8Schristos 		    extent_base_get(extent), extent_size_get(extent),
1692*8e33eff8Schristos 		    extent_committed_get(extent), arena_ind_get(arena)));
1693*8e33eff8Schristos 		extent_hook_post_reentrancy(tsdn);
1694*8e33eff8Schristos 	}
1695*8e33eff8Schristos 
1696*8e33eff8Schristos 	if (!err) {
1697*8e33eff8Schristos 		extent_dalloc(tsdn, arena, extent);
1698*8e33eff8Schristos 	}
1699*8e33eff8Schristos 
1700*8e33eff8Schristos 	return err;
1701*8e33eff8Schristos }
1702*8e33eff8Schristos 
/*
 * Deallocate an extent: try to return it to the OS via the dalloc hook;
 * failing that, decommit or purge its pages as best as possible and record it
 * in arena->extents_retained.
 */
void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/*
	 * Deregister first to avoid a race with other allocating threads, and
	 * reregister if deallocation fails.
	 */
	extent_deregister(tsdn, extent);
	if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
		/* Deallocation succeeded; nothing further to do. */
		return;
	}

	/* Deallocation failed: retain the extent instead. */
	extent_reregister(tsdn, extent);
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	/* Try to decommit; purge if that fails. */
	bool zeroed;
	if (!extent_committed_get(extent)) {
		/* Not committed: treat as zeroed. */
		zeroed = true;
	} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
	    0, extent_size_get(extent))) {
		/* Decommit succeeded. */
		zeroed = true;
	} else if ((*r_extent_hooks)->purge_forced != NULL &&
	    !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena))) {
		/* Forced purge succeeded. */
		zeroed = true;
	} else if (extent_state_get(extent) == extent_state_muzzy ||
	    ((*r_extent_hooks)->purge_lazy != NULL &&
	    !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena)))) {
		/* Lazily purged (or already muzzy): not zeroed. */
		zeroed = false;
	} else {
		/* Nothing succeeded; pages keep their contents. */
		zeroed = false;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_zeroed_set(extent, zeroed);

	if (config_prof) {
		extent_gdump_sub(tsdn, extent);
	}

	extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
	    extent, false);
}
1756*8e33eff8Schristos 
1757*8e33eff8Schristos static void
1758*8e33eff8Schristos extent_destroy_default_impl(void *addr, size_t size) {
1759*8e33eff8Schristos 	if (!have_dss || !extent_in_dss(addr)) {
1760*8e33eff8Schristos 		pages_unmap(addr, size);
1761*8e33eff8Schristos 	}
1762*8e33eff8Schristos }
1763*8e33eff8Schristos 
1764*8e33eff8Schristos static void
1765*8e33eff8Schristos extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1766*8e33eff8Schristos     bool committed, unsigned arena_ind) {
1767*8e33eff8Schristos 	extent_destroy_default_impl(addr, size);
1768*8e33eff8Schristos }
1769*8e33eff8Schristos 
1770*8e33eff8Schristos void
1771*8e33eff8Schristos extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
1772*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, extent_t *extent) {
1773*8e33eff8Schristos 	assert(extent_base_get(extent) != NULL);
1774*8e33eff8Schristos 	assert(extent_size_get(extent) != 0);
1775*8e33eff8Schristos 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1776*8e33eff8Schristos 	    WITNESS_RANK_CORE, 0);
1777*8e33eff8Schristos 
1778*8e33eff8Schristos 	/* Deregister first to avoid a race with other allocating threads. */
1779*8e33eff8Schristos 	extent_deregister(tsdn, extent);
1780*8e33eff8Schristos 
1781*8e33eff8Schristos 	extent_addr_set(extent, extent_base_get(extent));
1782*8e33eff8Schristos 
1783*8e33eff8Schristos 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1784*8e33eff8Schristos 	/* Try to destroy; silently fail otherwise. */
1785*8e33eff8Schristos 	if (*r_extent_hooks == &extent_hooks_default) {
1786*8e33eff8Schristos 		/* Call directly to propagate tsdn. */
1787*8e33eff8Schristos 		extent_destroy_default_impl(extent_base_get(extent),
1788*8e33eff8Schristos 		    extent_size_get(extent));
1789*8e33eff8Schristos 	} else if ((*r_extent_hooks)->destroy != NULL) {
1790*8e33eff8Schristos 		extent_hook_pre_reentrancy(tsdn, arena);
1791*8e33eff8Schristos 		(*r_extent_hooks)->destroy(*r_extent_hooks,
1792*8e33eff8Schristos 		    extent_base_get(extent), extent_size_get(extent),
1793*8e33eff8Schristos 		    extent_committed_get(extent), arena_ind_get(arena));
1794*8e33eff8Schristos 		extent_hook_post_reentrancy(tsdn);
1795*8e33eff8Schristos 	}
1796*8e33eff8Schristos 
1797*8e33eff8Schristos 	extent_dalloc(tsdn, arena, extent);
1798*8e33eff8Schristos }
1799*8e33eff8Schristos 
1800*8e33eff8Schristos static bool
1801*8e33eff8Schristos extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1802*8e33eff8Schristos     size_t offset, size_t length, unsigned arena_ind) {
1803*8e33eff8Schristos 	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
1804*8e33eff8Schristos 	    length);
1805*8e33eff8Schristos }
1806*8e33eff8Schristos 
/*
 * Commit [offset, offset+length) of the extent via the commit hook.
 * Returns true on error; on success the extent is marked committed.
 */
static bool
extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/*
	 * Custom hooks may re-enter the allocator, so bracket the hook call
	 * with reentrancy guards; the default hooks do not need them.
	 */
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	/* A missing commit hook counts as failure (err == true). */
	bool err = ((*r_extent_hooks)->commit == NULL ||
	    (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
	    extent_size_get(extent), offset, length, arena_ind_get(arena)));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	/* On success the extent (as a whole) is now considered committed. */
	extent_committed_set(extent, extent_committed_get(extent) || !err);
	return err;
}
1827*8e33eff8Schristos 
1828*8e33eff8Schristos bool
1829*8e33eff8Schristos extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
1830*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1831*8e33eff8Schristos     size_t length) {
1832*8e33eff8Schristos 	return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
1833*8e33eff8Schristos 	    length, false);
1834*8e33eff8Schristos }
1835*8e33eff8Schristos 
1836*8e33eff8Schristos static bool
1837*8e33eff8Schristos extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1838*8e33eff8Schristos     size_t offset, size_t length, unsigned arena_ind) {
1839*8e33eff8Schristos 	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
1840*8e33eff8Schristos 	    length);
1841*8e33eff8Schristos }
1842*8e33eff8Schristos 
/*
 * Decommit [offset, offset+length) of the extent via the decommit hook.
 * Returns true on error; on success the extent's committed flag is cleared.
 */
bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	/* Custom hooks may re-enter the allocator; guard the call. */
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	/* A missing decommit hook counts as failure (err == true). */
	bool err = ((*r_extent_hooks)->decommit == NULL ||
	    (*r_extent_hooks)->decommit(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	/*
	 * Success (!err) clears the committed flag; on failure the previous
	 * committed state is preserved.
	 */
	extent_committed_set(extent, extent_committed_get(extent) && err);
	return err;
}
1865*8e33eff8Schristos 
#ifdef PAGES_CAN_PURGE_LAZY
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	/* Default lazy-purge hook: purged range must be page-aligned. */
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	void *purge_addr = (void *)((uintptr_t)addr + (uintptr_t)offset);
	return pages_purge_lazy(purge_addr, length);
}
#endif
1879*8e33eff8Schristos 
/*
 * Lazily purge [offset, offset+length) of the extent via the purge_lazy
 * hook.  Returns true on error, including when no purge_lazy hook is
 * installed.
 */
static bool
extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	/* Lazy purging is optional; without a hook, report failure. */
	if ((*r_extent_hooks)->purge_lazy == NULL) {
		return true;
	}
	/* Custom hooks may re-enter the allocator; guard the call. */
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}

	return err;
}
1904*8e33eff8Schristos 
1905*8e33eff8Schristos bool
1906*8e33eff8Schristos extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
1907*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1908*8e33eff8Schristos     size_t length) {
1909*8e33eff8Schristos 	return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
1910*8e33eff8Schristos 	    offset, length, false);
1911*8e33eff8Schristos }
1912*8e33eff8Schristos 
#ifdef PAGES_CAN_PURGE_FORCED
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
	/* Default forced-purge hook: purged range must be page-aligned. */
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	void *purge_addr = (void *)((uintptr_t)addr + (uintptr_t)offset);
	return pages_purge_forced(purge_addr, length);
}
#endif
1926*8e33eff8Schristos 
/*
 * Forcibly purge [offset, offset+length) of the extent via the purge_forced
 * hook.  Returns true on error, including when no purge_forced hook is
 * installed.
 */
static bool
extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	/* Forced purging is optional; without a hook, report failure. */
	if ((*r_extent_hooks)->purge_forced == NULL) {
		return true;
	}
	/* Custom hooks may re-enter the allocator; guard the call. */
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	return err;
}
1950*8e33eff8Schristos 
1951*8e33eff8Schristos bool
1952*8e33eff8Schristos extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
1953*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1954*8e33eff8Schristos     size_t length) {
1955*8e33eff8Schristos 	return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
1956*8e33eff8Schristos 	    offset, length, false);
1957*8e33eff8Schristos }
1958*8e33eff8Schristos 
#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
	/*
	 * Splitting requires no system call; it succeeds exactly when
	 * mappings coalesce on this platform.
	 */
	if (maps_coalesce) {
		return false;
	}
	return true;
}
#endif
1966*8e33eff8Schristos 
/*
 * Accepts the extent to split, and the characteristics of each side of the
 * split.  The 'a' parameters go with the 'lead' of the resulting pair of
 * extents (the lower addressed portion of the split), and the 'b' parameters go
 * with the trail (the higher addressed portion).  This makes 'extent' the lead,
 * and returns the trail (except in case of error, where NULL is returned).
 */
static extent_t *
extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained) {
	assert(extent_size_get(extent) == size_a + size_b);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	/* Without a split hook, splitting is impossible. */
	if ((*r_extent_hooks)->split == NULL) {
		return NULL;
	}

	extent_t *trail = extent_alloc(tsdn, arena);
	if (trail == NULL) {
		goto label_error_a;
	}

	/*
	 * The trail covers the upper size_b bytes, inheriting the original
	 * extent's serial number, state, zeroed/committed/dumpable flags.
	 */
	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
	    size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
	    extent_state_get(extent), extent_zeroed_get(extent),
	    extent_committed_get(extent), extent_dumpable_get(extent));

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
	{
		/*
		 * Look up the rtree elements for the would-be lead using a
		 * stack-local stand-in, without publishing anything yet.
		 */
		extent_t lead;

		extent_init(&lead, arena, extent_addr_get(extent), size_a,
		    slab_a, szind_a, extent_sn_get(extent),
		    extent_state_get(extent), extent_zeroed_get(extent),
		    extent_committed_get(extent), extent_dumpable_get(extent));

		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
		    true, &lead_elm_a, &lead_elm_b);
	}
	rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
	    &trail_elm_a, &trail_elm_b);

	if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
	    || trail_elm_b == NULL) {
		goto label_error_b;
	}

	/* Hold both extent locks across the hook call and rtree writes. */
	extent_lock2(tsdn, extent, trail);

	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
	    size_a + size_b, size_a, size_b, extent_committed_get(extent),
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	if (err) {
		goto label_error_c;
	}

	/* Shrink the original extent down to the lead. */
	extent_size_set(extent, size_a);
	extent_szind_set(extent, szind_a);

	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
	    szind_a, slab_a);
	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
	    szind_b, slab_b);

	extent_unlock2(tsdn, extent, trail);

	return trail;
	/* Error unwinding: release only what was acquired before failing. */
label_error_c:
	extent_unlock2(tsdn, extent, trail);
label_error_b:
	extent_dalloc(tsdn, arena, trail);
label_error_a:
	return NULL;
}
2055*8e33eff8Schristos 
2056*8e33eff8Schristos extent_t *
2057*8e33eff8Schristos extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
2058*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
2059*8e33eff8Schristos     szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
2060*8e33eff8Schristos 	return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
2061*8e33eff8Schristos 	    szind_a, slab_a, size_b, szind_b, slab_b, false);
2062*8e33eff8Schristos }
2063*8e33eff8Schristos 
2064*8e33eff8Schristos static bool
2065*8e33eff8Schristos extent_merge_default_impl(void *addr_a, void *addr_b) {
2066*8e33eff8Schristos 	if (!maps_coalesce) {
2067*8e33eff8Schristos 		return true;
2068*8e33eff8Schristos 	}
2069*8e33eff8Schristos 	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
2070*8e33eff8Schristos 		return true;
2071*8e33eff8Schristos 	}
2072*8e33eff8Schristos 
2073*8e33eff8Schristos 	return false;
2074*8e33eff8Schristos }
2075*8e33eff8Schristos 
#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
	/* Only the two addresses matter for default mergeability. */
	bool err = extent_merge_default_impl(addr_a, addr_b);
	return err;
}
#endif
2083*8e33eff8Schristos 
/*
 * Merge extents a and b (b directly follows a in the address space,
 * presumably -- the callers are expected to guarantee contiguity; TODO
 * confirm against call sites).  On success, a absorbs b, b's extent_t is
 * returned to its arena, and false is returned; true is returned on error.
 */
static bool
extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	/* Without a merge hook, merging is impossible. */
	if ((*r_extent_hooks)->merge == NULL) {
		return true;
	}

	bool err;
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_merge_default_impl(extent_base_get(a),
		    extent_base_get(b));
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		err = (*r_extent_hooks)->merge(*r_extent_hooks,
		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
		    extent_size_get(b), extent_committed_get(a),
		    arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	if (err) {
		return true;
	}

	/*
	 * The rtree writes must happen while all the relevant elements are
	 * owned, so the following code uses decomposed helper functions rather
	 * than extent_{,de}register() to do things in the right order.
	 */
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
	    &a_elm_b);
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
	    &b_elm_b);

	extent_lock2(tsdn, a, b);

	/*
	 * Clear the interior boundary mappings: a's last-page element and b's
	 * first-page element become interior to the merged extent.  If b
	 * occupies a single rtree element, b_elm_a doubles as its last
	 * element instead of being cleared.
	 */
	if (a_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
		    NSIZES, false);
	}
	if (b_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
		    NSIZES, false);
	} else {
		b_elm_b = b_elm_a;
	}

	/* a absorbs b: sum the sizes, keep the older (smaller) sn. */
	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
	extent_szind_set(a, NSIZES);
	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
	    extent_sn_get(a) : extent_sn_get(b));
	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));

	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);

	extent_unlock2(tsdn, a, b);

	extent_dalloc(tsdn, extent_arena_get(b), b);

	return false;
}
2155*8e33eff8Schristos 
2156*8e33eff8Schristos bool
2157*8e33eff8Schristos extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
2158*8e33eff8Schristos     extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
2159*8e33eff8Schristos 	return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
2160*8e33eff8Schristos }
2161*8e33eff8Schristos 
2162*8e33eff8Schristos bool
2163*8e33eff8Schristos extent_boot(void) {
2164*8e33eff8Schristos 	if (rtree_new(&extents_rtree, true)) {
2165*8e33eff8Schristos 		return true;
2166*8e33eff8Schristos 	}
2167*8e33eff8Schristos 
2168*8e33eff8Schristos 	if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
2169*8e33eff8Schristos 	    WITNESS_RANK_EXTENT_POOL)) {
2170*8e33eff8Schristos 		return true;
2171*8e33eff8Schristos 	}
2172*8e33eff8Schristos 
2173*8e33eff8Schristos 	if (have_dss) {
2174*8e33eff8Schristos 		extent_dss_boot();
2175*8e33eff8Schristos 	}
2176*8e33eff8Schristos 
2177*8e33eff8Schristos 	return false;
2178*8e33eff8Schristos }
2179