#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"

/******************************************************************************/
/* Data. */

static base_t *b0;

metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;

const char *metadata_thp_mode_names[] = {
	"disabled",
	"auto",
	"always"
};

/******************************************************************************/

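/*
 * Return true if base metadata should be madvise()d into transparent huge
 * pages, i.e. metadata_thp is enabled and the system THP mode was left at
 * its default setting.
 */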
static inline bool
metadata_thp_madvise(void) {
	return (metadata_thp_enabled() &&
	    (init_system_thp_mode == thp_mode_default));
}

static void *
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
	void *addr;
	bool zero = true;
	bool commit = true;

	/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
	assert(size == HUGEPAGE_CEILING(size));
	size_t alignment = HUGEPAGE;
	if (extent_hooks == &extent_hooks_default) {
		addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
	} else {
		/* No arena context as we are creating new arenas. */
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
		    &zero, &commit, ind);
		post_reentrancy(tsd);
	}

	return addr;
}

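/*
 * Illustrative sketch (not part of the original source): a custom
 * extent_hooks->alloc callback compatible with the call above must match
 * jemalloc's extent_alloc_t signature and honor size, alignment, *zero and
 * *commit.  A minimal, hypothetical hook deferring to mmap-backed pages
 * could look like:
 *
 *	static void *
 *	my_extent_alloc(extent_hooks_t *extent_hooks, void *new_addr,
 *	    size_t size, size_t alignment, bool *zero, bool *commit,
 *	    unsigned arena_ind) {
 *		return extent_alloc_mmap(new_addr, size, alignment, zero,
 *		    commit);
 *	}
 */
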
static void
base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
    size_t size) {
	/*
	 * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
	 * stopping at first success.  This cascade is performed for consistency
	 * with the cascade in extent_dalloc_wrapper() because an application's
	 * custom hooks may not support e.g. dalloc.  This function is only ever
	 * called as a side effect of arena destruction, so although it might
	 * seem pointless to do anything besides dalloc here, the application
	 * may in fact want the end state of all associated virtual memory to be
	 * in some consistent-but-allocated state.
	 */
	if (extent_hooks == &extent_hooks_default) {
		if (!extent_dalloc_mmap(addr, size)) {
			goto label_done;
		}
		if (!pages_decommit(addr, size)) {
			goto label_done;
		}
		if (!pages_purge_forced(addr, size)) {
			goto label_done;
		}
		if (!pages_purge_lazy(addr, size)) {
			goto label_done;
		}
		/* Nothing worked.  This should never happen. */
		not_reached();
	} else {
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		if (extent_hooks->dalloc != NULL &&
		    !extent_hooks->dalloc(extent_hooks, addr, size, true,
		    ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->decommit != NULL &&
		    !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->purge_forced != NULL &&
		    !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
		    size, ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->purge_lazy != NULL &&
		    !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_post_reentrancy;
		}
		/* Nothing worked.  That's the application's problem. */
	label_post_reentrancy:
		post_reentrancy(tsd);
	}
label_done:
	if (metadata_thp_madvise()) {
		/* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
		    (size & HUGEPAGE_MASK) == 0);
		pages_nohuge(addr, size);
	}
}

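/*
 * Consume the next extent serial number and initialize extent to track the
 * range [addr, addr + size).
 */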
static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
    size_t size) {
	size_t sn;

	sn = *extent_sn_next;
	(*extent_sn_next)++;

	extent_binit(extent, addr, size, sn);
}

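/*
 * Count the blocks linked into base, optionally counting one extra for a
 * block that is about to be added.
 */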
static size_t
base_get_num_blocks(base_t *base, bool with_new_block) {
	base_block_t *b = base->blocks;
	assert(b != NULL);

	size_t n_blocks = with_new_block ? 2 : 1;
	while (b->next != NULL) {
		n_blocks++;
		b = b->next;
	}

	return n_blocks;
}

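/*
 * With metadata_thp in "auto" mode, switch base to huge pages once its
 * block count (including the block about to be added) reaches a threshold;
 * arena 0, which also serves global metadata, uses a separate threshold.
 * The switch is one-way: existing blocks are madvise()d huge here, and
 * later blocks are made huge at allocation time.
 */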
static void
base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
	assert(opt_metadata_thp == metadata_thp_auto);
	malloc_mutex_assert_owner(tsdn, &base->mtx);
	if (base->auto_thp_switched) {
		return;
	}
	/* Called when adding a new block. */
	bool should_switch;
	if (base_ind_get(base) != 0) {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD);
	} else {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD_A0);
	}
	if (!should_switch) {
		return;
	}

	base->auto_thp_switched = true;
	assert(!config_stats || base->n_thp == 0);
	/* Make the initial blocks THP lazily. */
	base_block_t *block = base->blocks;
	while (block != NULL) {
		assert((block->size & HUGEPAGE_MASK) == 0);
		pages_huge(block, block->size);
		if (config_stats) {
			base->n_thp += HUGEPAGE_CEILING(block->size -
			    extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
		}
		block = block->next;
		assert(block == NULL || (base_ind_get(base) == 0));
	}
}

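/*
 * Carve an aligned chunk of the given size off the front of extent.  The
 * bytes skipped to reach the alignment are reported via *gap_size, and
 * extent is re-initialized in place to track the remaining space.
 */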
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
    size_t alignment) {
	void *ret;

	assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
	assert(size == ALIGNMENT_CEILING(size, alignment));

	*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
	    alignment) - (uintptr_t)extent_addr_get(extent);
	ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
	assert(extent_bsize_get(extent) >= *gap_size + size);
	extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
	    *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
	    extent_sn_get(extent));
	return ret;
}

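/*
 * Finish a bump allocation: return any remainder of extent to the
 * size-indexed avail heaps for later reuse, and update the stats counters.
 */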
static void
base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
    void *addr, size_t size) {
	if (extent_bsize_get(extent) > 0) {
		/*
		 * Compute the index for the largest size class that does not
		 * exceed extent's size.
		 */
		szind_t index_floor =
		    sz_size2index(extent_bsize_get(extent) + 1) - 1;
		extent_heap_insert(&base->avail[index_floor], extent);
	}

	if (config_stats) {
		base->allocated += size;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation. Adjust n_thp similarly when
		 * metadata_thp is enabled.
		 */
		base->resident += PAGE_CEILING((uintptr_t)addr + size) -
		    PAGE_CEILING((uintptr_t)addr - gap_size);
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		if (metadata_thp_madvise() && (opt_metadata_thp ==
		    metadata_thp_always || base->auto_thp_switched)) {
			base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
			    - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
			    LG_HUGEPAGE;
			assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
		}
	}
}

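/* Bump-allocate size bytes with the given alignment from extent. */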
static void *
base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
    size_t alignment) {
	void *ret;
	size_t gap_size;

	ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
	base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
	return ret;
}

/*
 * Allocate a block of virtual memory that is large enough to start with a
 * base_block_t header, followed by an object of specified size and alignment.
 * On success a pointer to the initialized base_block_t header is returned.
 */
static base_block_t *
base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
    unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
    size_t alignment) {
	alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t header_size = sizeof(base_block_t);
	size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
	    header_size;
	/*
	 * Create increasingly larger blocks in order to limit the total number
	 * of disjoint virtual memory ranges.  Choose the next size in the page
	 * size class series (skipping size classes that are not a multiple of
	 * HUGEPAGE), or a size large enough to satisfy the requested size and
	 * alignment, whichever is larger.
	 */
	size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
	    + usize));
	pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
	    *pind_last;
	size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
	size_t block_size = (min_block_size > next_block_size) ? min_block_size
	    : next_block_size;
	base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
	    block_size);
	if (block == NULL) {
		return NULL;
	}

	if (metadata_thp_madvise()) {
		void *addr = (void *)block;
		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
		    (block_size & HUGEPAGE_MASK) == 0);
		if (opt_metadata_thp == metadata_thp_always) {
			pages_huge(addr, block_size);
		} else if (opt_metadata_thp == metadata_thp_auto &&
		    base != NULL) {
			/* base != NULL indicates this is not a new base. */
			malloc_mutex_lock(tsdn, &base->mtx);
			base_auto_thp_switch(tsdn, base);
			if (base->auto_thp_switched) {
				pages_huge(addr, block_size);
			}
			malloc_mutex_unlock(tsdn, &base->mtx);
		}
	}

	*pind_last = sz_psz2ind(block_size);
	block->size = block_size;
	block->next = NULL;
	assert(block_size >= header_size);
	base_extent_init(extent_sn_next, &block->extent,
	    (void *)((uintptr_t)block + header_size), block_size - header_size);
	return block;
}

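/*
 * Illustrative example of the sizing logic above (hypothetical numbers,
 * assuming 4 KiB pages and 2 MiB huge pages): for a ~100 KiB object,
 * min_block_size rounds header + gap + object up through sz_psz2u() and
 * HUGEPAGE_CEILING() to 2 MiB; next_block_size is the page size class after
 * the last one used, also rounded up to a HUGEPAGE multiple.  The larger of
 * the two wins, so successive blocks grow in HUGEPAGE-aligned steps and the
 * number of disjoint mappings stays small.
 */
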
/*
 * Allocate an extent that is at least as large as specified size, with
 * specified alignment.
 */
static extent_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &base->mtx);

	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	/*
	 * Drop mutex during base_block_alloc(), because an extent hook will be
	 * called.
	 */
	malloc_mutex_unlock(tsdn, &base->mtx);
	base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
	    base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
	    alignment);
	malloc_mutex_lock(tsdn, &base->mtx);
	if (block == NULL) {
		return NULL;
	}
	block->next = base->blocks;
	base->blocks = block;
	if (config_stats) {
		base->allocated += sizeof(base_block_t);
		base->resident += PAGE_CEILING(sizeof(base_block_t));
		base->mapped += block->size;
		if (metadata_thp_madvise() &&
		    !(opt_metadata_thp == metadata_thp_auto
		      && !base->auto_thp_switched)) {
			assert(base->n_thp > 0);
			base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
			    LG_HUGEPAGE;
		}
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	return &block->extent;
}

base_t *
b0get(void) {
	return b0;
}

base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	pszind_t pind_last = 0;
	size_t extent_sn_next = 0;
	base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,
	    &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
	if (block == NULL) {
		return NULL;
	}

	size_t gap_size;
	size_t base_alignment = CACHELINE;
	size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
	    &gap_size, base_size, base_alignment);
	base->ind = ind;
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
	    malloc_mutex_rank_exclusive)) {
		base_unmap(tsdn, extent_hooks, ind, block, block->size);
		return NULL;
	}
	base->pind_last = pind_last;
	base->extent_sn_next = extent_sn_next;
	base->blocks = block;
	base->auto_thp_switched = false;
	for (szind_t i = 0; i < NSIZES; i++) {
		extent_heap_new(&base->avail[i]);
	}
	if (config_stats) {
		base->allocated = sizeof(base_block_t);
		base->resident = PAGE_CEILING(sizeof(base_block_t));
		base->mapped = block->size;
		base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
		    metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t))
		    >> LG_HUGEPAGE : 0;
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
	    base_size);

	return base;
}

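/*
 * Destroy base by unmapping all of its blocks.  base itself is embedded in
 * one of the blocks, so it must not be used once the loop completes.
 */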
void
base_delete(tsdn_t *tsdn, base_t *base) {
	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	base_block_t *next = base->blocks;
	do {
		base_block_t *block = next;
		next = block->next;
		base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
		    block->size);
	} while (next != NULL);
}

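/*
 * base->extent_hooks is accessed with acquire/release ordering: the acquire
 * load below pairs with the release store in base_extent_hooks_set(), so a
 * reader that observes new hooks also observes any writes that preceded
 * their installation.
 */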
extent_hooks_t *
base_extent_hooks_get(base_t *base) {
	return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
	    ATOMIC_ACQUIRE);
}

extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
	extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
	return old_extent_hooks;
}

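/*
 * Common implementation of base_alloc() and base_alloc_extent().  First try
 * to reuse space tracked in the avail heaps; asize pads usize with the
 * worst-case alignment slop (alignment - QUANTUM) so that any extent from a
 * fitting size class can satisfy the request.  Fall back to allocating a
 * new block.  If esn is non-NULL, it receives the source extent's serial
 * number.
 */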
static void *
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
    size_t *esn) {
	alignment = QUANTUM_CEILING(alignment);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t asize = usize + alignment - QUANTUM;

	extent_t *extent = NULL;
	malloc_mutex_lock(tsdn, &base->mtx);
	for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
		extent = extent_heap_remove_first(&base->avail[i]);
		if (extent != NULL) {
			/* Use existing space. */
			break;
		}
	}
	if (extent == NULL) {
		/* Try to allocate more space. */
		extent = base_extent_alloc(tsdn, base, usize, alignment);
	}
	void *ret;
	if (extent == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = base_extent_bump_alloc(base, extent, usize, alignment);
	if (esn != NULL) {
		*esn = extent_sn_get(extent);
	}
label_return:
	malloc_mutex_unlock(tsdn, &base->mtx);
	return ret;
}

/*
 * base_alloc() returns zeroed memory, which is always demand-zeroed for the
 * auto arenas, in order to make multi-page sparse data structures such as radix
 * tree nodes efficient with respect to physical memory usage.  Upon success a
 * pointer to at least size bytes with specified alignment is returned.  Note
 * that size is rounded up to the nearest multiple of alignment to avoid false
 * sharing.
 */
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	return base_alloc_impl(tsdn, base, size, alignment, NULL);
}

extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
	size_t esn;
	extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
	    CACHELINE, &esn);
	if (extent == NULL) {
		return NULL;
	}
	extent_esn_set(extent, esn);
	return extent;
}

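/* Copy out a consistent snapshot of the stats counters under base->mtx. */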
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
    size_t *mapped, size_t *n_thp) {
	cassert(config_stats);

	malloc_mutex_lock(tsdn, &base->mtx);
	assert(base->allocated <= base->resident);
	assert(base->resident <= base->mapped);
	*allocated = base->allocated;
	*resident = base->resident;
	*mapped = base->mapped;
	*n_thp = base->n_thp;
	malloc_mutex_unlock(tsdn, &base->mtx);
}

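/*
 * Fork handlers: base->mtx is acquired around fork(2) so that the child
 * cannot inherit the mutex in a locked state.
 */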
void
base_prefork(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_prefork(tsdn, &base->mtx);
}

void
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_parent(tsdn, &base->mtx);
}

void
base_postfork_child(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_child(tsdn, &base->mtx);
}

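/*
 * Bootstrap the global base b0, backed by the default extent hooks; all
 * boot-time metadata is allocated from it.  Returns true on failure.
 */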
bool
base_boot(tsdn_t *tsdn) {
	b0 = base_new(tsdn, 0, (extent_hooks_t *)
	    __UNCONST(&extent_hooks_default));
	return (b0 == NULL);
}