#include "test/jemalloc_test.h"

#include "jemalloc/internal/san.h"
#include "jemalloc/internal/spin.h"
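
/*
 * State shared between the main thread and the worker threads.  The main
 * thread advances "epoch" to release the workers for one round of
 * allocations; each worker bumps "nfinished" when its round is done.
 */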
static unsigned		arena_ind;
static size_t		sz;
static size_t		esz;
#define NEPOCHS		8
#define PER_THD_NALLOCS	1
static atomic_u_t	epoch;
static atomic_u_t	nfinished;

static unsigned
do_arena_create(extent_hooks_t *h) {
	unsigned new_arena_ind;
	size_t ind_sz = sizeof(unsigned);
	expect_d_eq(mallctl("arenas.create", (void *)&new_arena_ind, &ind_sz,
	    (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
	    "Unexpected mallctl() failure");
	return new_arena_ind;
}

static void
do_arena_destroy(unsigned ind) {
	size_t mib[3];
	size_t miblen;

	miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}
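
/*
 * Advance jemalloc's "epoch" mallctl so that subsequent stats reads reflect
 * the current state rather than previously cached values.
 */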
static void
do_refresh(void) {
	uint64_t refresh_epoch = 1;
	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&refresh_epoch,
	    sizeof(refresh_epoch)), 0, "Unexpected mallctl() failure");
}
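
/*
 * Read a size_t-valued per-arena statistic (e.g. "stats.arenas.<ind>.pactive")
 * by translating the template name to a MIB and patching in the arena index.
 */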
static size_t
do_get_size_impl(const char *cmd, unsigned ind) {
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t z = sizeof(size_t);

	expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
	mib[2] = ind;
	size_t size;
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
	    0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd);

	return size;
}

static size_t
do_get_active(unsigned ind) {
	return do_get_size_impl("stats.arenas.0.pactive", ind) * PAGE;
}

static size_t
do_get_mapped(unsigned ind) {
	return do_get_size_impl("stats.arenas.0.mapped", ind);
}
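
/*
 * Worker thread body: for each epoch, spin until the main thread publishes
 * the epoch number, perform PER_THD_NALLOCS allocations of sz bytes from the
 * test arena (bypassing the tcache so the arena sees every request), then
 * signal completion via nfinished.  The main thread destroys the arena
 * between epochs, so the allocations are intentionally never freed.
 */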
static void *
thd_start(void *arg) {
	for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) {
		/* Busy-wait for next epoch. */
		unsigned cur_epoch;
		spin_t spinner = SPIN_INITIALIZER;
		while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) !=
		    next_epoch) {
			spin_adaptive(&spinner);
		}
		expect_u_eq(cur_epoch, next_epoch, "Unexpected epoch");

		/*
		 * Allocate.  The main thread will reset the arena, so there's
		 * no need to deallocate.
		 */
		for (unsigned i = 0; i < PER_THD_NALLOCS; i++) {
			void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) |
			    MALLOCX_TCACHE_NONE);
			expect_ptr_not_null(p,
			    "Unexpected mallocx() failure\n");
		}

		/* Let the main thread know we've finished this iteration. */
		atomic_fetch_add_u(&nfinished, 1, ATOMIC_RELEASE);
	}

	return NULL;
}
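
/*
 * Verify that memory retained by an arena stays bounded under concurrent
 * allocation.  After each round of worker allocations the test checks that
 * allocated <= active <= mapped, and that the extent-growth size classes the
 * arena stepped through can account for the memory handed out, i.e. the
 * arena did not grow (and retain) more than necessary.
 */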
TEST_BEGIN(test_retained) {
	test_skip_if(!config_stats);
	test_skip_if(opt_hpa);

	arena_ind = do_arena_create(NULL);
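	/*
	 * sz is the usable size of a HUGEPAGE-sized request; esz is the full
	 * extent footprint backing it, including large-allocation padding
	 * and, when guard pages (SAN) are enabled, the guard-page overhead.
	 */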
	sz = nallocx(HUGEPAGE, 0);
	size_t guard_sz = san_guard_enabled() ? SAN_PAGE_GUARDS_SIZE : 0;
	esz = sz + sz_large_pad + guard_sz;

	atomic_store_u(&epoch, 0, ATOMIC_RELAXED);

	unsigned nthreads = ncpus * 2;
	if (LG_SIZEOF_PTR < 3 && nthreads > 16) {
		nthreads = 16; /* 32-bit platform could run out of vaddr. */
	}
	VARIABLE_ARRAY(thd_t, threads, nthreads);
	for (unsigned i = 0; i < nthreads; i++) {
		thd_create(&threads[i], thd_start, NULL);
	}
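
	/*
	 * Drive the workers one epoch at a time: publish the epoch, wait for
	 * every worker to finish its allocations, refresh the stats, check
	 * the allocated/active/mapped invariants, then destroy and recreate
	 * the arena so the next epoch starts from a clean slate.
	 */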
	for (unsigned e = 1; e < NEPOCHS; e++) {
		atomic_store_u(&nfinished, 0, ATOMIC_RELEASE);
		atomic_store_u(&epoch, e, ATOMIC_RELEASE);

		/* Wait for threads to finish allocating. */
		spin_t spinner = SPIN_INITIALIZER;
		while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) {
			spin_adaptive(&spinner);
		}

		/*
		 * Assert that retained is no more than the sum of size classes
		 * that should have been used to satisfy the worker threads'
		 * requests, discounting per-growth fragmentation.
		 */
		do_refresh();

		size_t allocated = (esz - guard_sz) * nthreads *
		    PER_THD_NALLOCS;
		size_t active = do_get_active(arena_ind);
		expect_zu_le(allocated, active, "Unexpected active memory");
		size_t mapped = do_get_mapped(arena_ind);
		expect_zu_le(active, mapped, "Unexpected mapped memory");
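
		/*
		 * Walk the size classes the exponential extent-growth policy
		 * has stepped through so far (from HUGEPAGE up to, but not
		 * including, the next growth class).  For each, count the
		 * bytes usable for esz-sized extents versus those lost to
		 * fragmentation, and check that the usable total stays below
		 * what the workers actually allocated.
		 */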
		arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false);
		size_t usable = 0;
		size_t fragmented = 0;
		for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
		    arena->pa_shard.pac.exp_grow.next; pind++) {
			size_t psz = sz_pind2sz(pind);
			size_t psz_fragmented = psz % esz;
			size_t psz_usable = psz - psz_fragmented;
			/*
			 * Only consider size classes that wouldn't be skipped.
			 */
			if (psz_usable > 0) {
				expect_zu_lt(usable, allocated,
				    "Excessive retained memory "
				    "(%#zx[+%#zx] > %#zx)", usable, psz_usable,
				    allocated);
				fragmented += psz_fragmented;
				usable += psz_usable;
			}
		}

		/*
		 * Clean up arena.  Destroying and recreating the arena
		 * is simpler than specifying extent hooks that deallocate
		 * (rather than retaining) during reset.
		 */
		do_arena_destroy(arena_ind);
		expect_u_eq(do_arena_create(NULL), arena_ind,
		    "Unexpected arena index");
	}

	for (unsigned i = 0; i < nthreads; i++) {
		thd_join(threads[i], NULL);
	}

	do_arena_destroy(arena_ind);
}
TEST_END

int
main(void) {
	return test(test_retained);
}