#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"

/******************************************************************************/
/* Data. */

static base_t *b0;

metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;

const char *metadata_thp_mode_names[] = {
	"disabled",
	"auto",
	"always"
};

/******************************************************************************/

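/*
 * Whether metadata should be explicitly madvise()d into huge pages: true only
 * when the metadata_thp option is enabled and the system THP mode was in its
 * default state at initialization.
 */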
static inline bool
metadata_thp_madvise(void) {
	return (metadata_thp_enabled() &&
	    (init_system_thp_mode == thp_mode_default));
}

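/*
 * Map a new block of virtual memory for base metadata, either via the default
 * mmap-based path or via user-supplied extent hooks (with reentrancy guards,
 * since custom hooks may themselves call into the allocator).
 */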
static void *
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
	void *addr;
	bool zero = true;
	bool commit = true;

	/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
	assert(size == HUGEPAGE_CEILING(size));
	size_t alignment = HUGEPAGE;
	if (extent_hooks == &extent_hooks_default) {
		addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
	} else {
		/* No arena context as we are creating new arenas. */
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
		    &zero, &commit, ind);
		post_reentrancy(tsd);
	}

	return addr;
}

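/*
 * Return a block's virtual memory to the system, trying progressively weaker
 * operations (see the cascade comment below).
 */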
static void
base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
    size_t size) {
	/*
	 * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
	 * stopping at first success.  This cascade is performed for consistency
	 * with the cascade in extent_dalloc_wrapper() because an application's
	 * custom hooks may not support e.g. dalloc.  This function is only ever
	 * called as a side effect of arena destruction, so although it might
	 * seem pointless to do anything besides dalloc here, the application
	 * may in fact want the end state of all associated virtual memory to be
	 * in some consistent-but-allocated state.
	 */
	if (extent_hooks == &extent_hooks_default) {
		if (!extent_dalloc_mmap(addr, size)) {
			goto label_done;
		}
		if (!pages_decommit(addr, size)) {
			goto label_done;
		}
		if (!pages_purge_forced(addr, size)) {
			goto label_done;
		}
		if (!pages_purge_lazy(addr, size)) {
			goto label_done;
		}
		/* Nothing worked.  This should never happen. */
		not_reached();
	} else {
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		if (extent_hooks->dalloc != NULL &&
		    !extent_hooks->dalloc(extent_hooks, addr, size, true,
		    ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->decommit != NULL &&
		    !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->purge_forced != NULL &&
		    !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
		    size, ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->purge_lazy != NULL &&
		    !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_post_reentrancy;
		}
		/* Nothing worked.  That's the application's problem. */
	label_post_reentrancy:
		post_reentrancy(tsd);
	}
label_done:
	if (metadata_thp_madvise()) {
		/* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
		    (size & HUGEPAGE_MASK) == 0);
		pages_nohuge(addr, size);
	}
}

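/*
 * Initialize an extent over [addr, addr+size), stamping it with the next
 * serial number and advancing the counter.
 */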
static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
    size_t size) {
	size_t sn;

	sn = *extent_sn_next;
	(*extent_sn_next)++;

	extent_binit(extent, addr, size, sn);
}

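/*
 * Count the blocks currently linked into the base, optionally including a
 * block that is about to be added.
 */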
static size_t
base_get_num_blocks(base_t *base, bool with_new_block) {
	base_block_t *b = base->blocks;
	assert(b != NULL);

	size_t n_blocks = with_new_block ? 2 : 1;
	while (b->next != NULL) {
		n_blocks++;
		b = b->next;
	}

	return n_blocks;
}

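/*
 * In metadata_thp "auto" mode, switch the base to huge pages once its block
 * count (including the block being added) reaches the threshold; the blocks
 * already allocated are then madvise()d huge retroactively.
 */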
static void
base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
	assert(opt_metadata_thp == metadata_thp_auto);
	malloc_mutex_assert_owner(tsdn, &base->mtx);
	if (base->auto_thp_switched) {
		return;
	}
	/* Called when adding a new block. */
	bool should_switch;
	if (base_ind_get(base) != 0) {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD);
	} else {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD_A0);
	}
	if (!should_switch) {
		return;
	}

	base->auto_thp_switched = true;
	assert(!config_stats || base->n_thp == 0);
	/* Make the initial blocks THP lazily. */
	base_block_t *block = base->blocks;
	while (block != NULL) {
		assert((block->size & HUGEPAGE_MASK) == 0);
		pages_huge(block, block->size);
		if (config_stats) {
			base->n_thp += HUGEPAGE_CEILING(block->size -
			    extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
		}
		block = block->next;
		assert(block == NULL || (base_ind_get(base) == 0));
	}
}

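/*
 * Carve an aligned allocation of the given size out of the front of extent,
 * shrinking the extent to the remainder; the alignment gap consumed before
 * the returned address is reported via *gap_size.
 */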
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
    size_t alignment) {
	void *ret;

	assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
	assert(size == ALIGNMENT_CEILING(size, alignment));

	*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
	    alignment) - (uintptr_t)extent_addr_get(extent);
	ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
	assert(extent_bsize_get(extent) >= *gap_size + size);
	extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
	    *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
	    extent_sn_get(extent));
	return ret;
}

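/*
 * After a bump allocation: reinsert any leftover space into the avail heaps
 * and update the allocated/resident/n_thp statistics.
 */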
static void
base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
    void *addr, size_t size) {
	if (extent_bsize_get(extent) > 0) {
		/*
		 * Compute the index for the largest size class that does not
		 * exceed extent's size.
		 */
		szind_t index_floor =
		    sz_size2index(extent_bsize_get(extent) + 1) - 1;
		extent_heap_insert(&base->avail[index_floor], extent);
	}

	if (config_stats) {
		base->allocated += size;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation.  Adjust n_thp similarly when
		 * metadata_thp is enabled.
		 */
		base->resident += PAGE_CEILING((uintptr_t)addr + size) -
		    PAGE_CEILING((uintptr_t)addr - gap_size);
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		if (metadata_thp_madvise() && (opt_metadata_thp ==
		    metadata_thp_always || base->auto_thp_switched)) {
			base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
			    - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
			    LG_HUGEPAGE;
			assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
		}
	}
}

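/*
 * Bump-allocate from extent and record the result: the helper carves out the
 * aligned region, and the post hook accounts for it.
 */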
static void *
base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
    size_t alignment) {
	void *ret;
	size_t gap_size;

	ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
	base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
	return ret;
}

/*
 * Allocate a block of virtual memory that is large enough to start with a
 * base_block_t header, followed by an object of specified size and alignment.
 * On success a pointer to the initialized base_block_t header is returned.
 */
static base_block_t *
base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
    unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
    size_t alignment) {
	alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t header_size = sizeof(base_block_t);
	size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
	    header_size;
	/*
	 * Create increasingly larger blocks in order to limit the total number
	 * of disjoint virtual memory ranges.  Choose the next size in the page
	 * size class series (skipping size classes that are not a multiple of
	 * HUGEPAGE), or a size large enough to satisfy the requested size and
	 * alignment, whichever is larger.
	 */
	size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
	    + usize));
	pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ?
	    *pind_last + 1 : *pind_last;
	size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
	size_t block_size = (min_block_size > next_block_size) ? min_block_size
	    : next_block_size;
	base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
	    block_size);
	if (block == NULL) {
		return NULL;
	}

	if (metadata_thp_madvise()) {
		void *addr = (void *)block;
		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
		    (block_size & HUGEPAGE_MASK) == 0);
		if (opt_metadata_thp == metadata_thp_always) {
			pages_huge(addr, block_size);
		} else if (opt_metadata_thp == metadata_thp_auto &&
		    base != NULL) {
			/* base != NULL indicates this is not a new base. */
			malloc_mutex_lock(tsdn, &base->mtx);
			base_auto_thp_switch(tsdn, base);
			if (base->auto_thp_switched) {
				pages_huge(addr, block_size);
			}
			malloc_mutex_unlock(tsdn, &base->mtx);
		}
	}

	*pind_last = sz_psz2ind(block_size);
	block->size = block_size;
	block->next = NULL;
	assert(block_size >= header_size);
	base_extent_init(extent_sn_next, &block->extent,
	    (void *)((uintptr_t)block + header_size), block_size - header_size);
	return block;
}

/*
 * Allocate an extent that is at least as large as specified size, with
 * specified alignment.
 */
static extent_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &base->mtx);

	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	/*
	 * Drop mutex during base_block_alloc(), because an extent hook will be
	 * called.
	 */
	malloc_mutex_unlock(tsdn, &base->mtx);
	base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
	    base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
	    alignment);
	malloc_mutex_lock(tsdn, &base->mtx);
	if (block == NULL) {
		return NULL;
	}
	block->next = base->blocks;
	base->blocks = block;
	if (config_stats) {
		base->allocated += sizeof(base_block_t);
		base->resident += PAGE_CEILING(sizeof(base_block_t));
		base->mapped += block->size;
		if (metadata_thp_madvise() &&
		    !(opt_metadata_thp == metadata_thp_auto
		      && !base->auto_thp_switched)) {
			assert(base->n_thp > 0);
			base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
			    LG_HUGEPAGE;
		}
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	return &block->extent;
}

base_t *
b0get(void) {
	return b0;
}

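/*
 * Create a new base, bootstrapping the base_t itself out of the first block it
 * allocates.
 */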
base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	pszind_t pind_last = 0;
	size_t extent_sn_next = 0;
	base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,
	    &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
	if (block == NULL) {
		return NULL;
	}

	size_t gap_size;
	size_t base_alignment = CACHELINE;
	size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
	    &gap_size, base_size, base_alignment);
	base->ind = ind;
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
	    malloc_mutex_rank_exclusive)) {
		base_unmap(tsdn, extent_hooks, ind, block, block->size);
		return NULL;
	}
	base->pind_last = pind_last;
	base->extent_sn_next = extent_sn_next;
	base->blocks = block;
	base->auto_thp_switched = false;
	for (szind_t i = 0; i < SC_NSIZES; i++) {
		extent_heap_new(&base->avail[i]);
	}
	if (config_stats) {
		base->allocated = sizeof(base_block_t);
		base->resident = PAGE_CEILING(sizeof(base_block_t));
		base->mapped = block->size;
		base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
		    metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t))
		    >> LG_HUGEPAGE : 0;
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
	    base_size);

	return base;
}

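/*
 * Destroy a base by unmapping every block in its list; the base_t itself lives
 * inside one of the blocks, so it is freed along the way.
 */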
void
base_delete(tsdn_t *tsdn, base_t *base) {
	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	base_block_t *next = base->blocks;
	do {
		base_block_t *block = next;
		next = block->next;
		base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
		    block->size);
	} while (next != NULL);
}

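/*
 * The extent hooks pointer is accessed atomically: the acquire load here pairs
 * with the release store in base_extent_hooks_set().
 */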
extent_hooks_t *
base_extent_hooks_get(base_t *base) {
	return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
	    ATOMIC_ACQUIRE);
}

extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
	extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
	return old_extent_hooks;
}

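/*
 * Core allocation path: search the avail heaps for the smallest extent that
 * can satisfy size plus worst-case alignment padding, falling back to mapping
 * a new block, then bump-allocate from the chosen extent.
 */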
static void *
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
    size_t *esn) {
	alignment = QUANTUM_CEILING(alignment);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t asize = usize + alignment - QUANTUM;

	extent_t *extent = NULL;
	malloc_mutex_lock(tsdn, &base->mtx);
	for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
		extent = extent_heap_remove_first(&base->avail[i]);
		if (extent != NULL) {
			/* Use existing space. */
			break;
		}
	}
	if (extent == NULL) {
		/* Try to allocate more space. */
		extent = base_extent_alloc(tsdn, base, usize, alignment);
	}
	void *ret;
	if (extent == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = base_extent_bump_alloc(base, extent, usize, alignment);
	if (esn != NULL) {
		*esn = extent_sn_get(extent);
	}
label_return:
	malloc_mutex_unlock(tsdn, &base->mtx);
	return ret;
}

/*
 * base_alloc() returns zeroed memory, which is always demand-zeroed for the
 * auto arenas, in order to make multi-page sparse data structures such as radix
 * tree nodes efficient with respect to physical memory usage.  Upon success a
 * pointer to at least size bytes with specified alignment is returned.  Note
 * that size is rounded up to the nearest multiple of alignment to avoid false
 * sharing.
 */
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	return base_alloc_impl(tsdn, base, size, alignment, NULL);
}

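/*
 * Allocate an extent_t from base, recording on it the serial number of the
 * space it was carved from.
 */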
extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
	size_t esn;
	extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
	    CACHELINE, &esn);
	if (extent == NULL) {
		return NULL;
	}
	extent_esn_set(extent, esn);
	return extent;
}

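/* Read a consistent snapshot of the base's statistics under its mutex. */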
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
    size_t *mapped, size_t *n_thp) {
	cassert(config_stats);

	malloc_mutex_lock(tsdn, &base->mtx);
	assert(base->allocated <= base->resident);
	assert(base->resident <= base->mapped);
	*allocated = base->allocated;
	*resident = base->resident;
	*mapped = base->mapped;
	*n_thp = base->n_thp;
	malloc_mutex_unlock(tsdn, &base->mtx);
}

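/* Fork handlers that forward to the base mutex. */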
void
base_prefork(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_prefork(tsdn, &base->mtx);
}

void
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_parent(tsdn, &base->mtx);
}

void
base_postfork_child(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_child(tsdn, &base->mtx);
}

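/*
 * Bootstrap b0, the global base used for allocator-internal metadata; returns
 * true on error.
 */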
bool
base_boot(tsdn_t *tsdn) {
	b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
	return (b0 == NULL);
}