#include "test/jemalloc_test.h"

#include "jemalloc/internal/san.h"
#include "jemalloc/internal/spin.h"

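/*
 * Exercise extent retention accounting: worker threads repeatedly allocate
 * huge-page-sized objects from a dedicated arena while the main thread
 * checks that active and mapped memory cover the allocations and that
 * retained memory stays within the bound implied by the arena's
 * extent-growth size classes.
 */
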
static unsigned		arena_ind;
static size_t		sz;
static size_t		esz;
#define NEPOCHS		8
#define PER_THD_NALLOCS	1
static atomic_u_t	epoch;
static atomic_u_t	nfinished;

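/*
 * Create a new arena via the "arenas.create" mallctl, optionally installing
 * custom extent hooks, and return its index.
 */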
static unsigned
do_arena_create(extent_hooks_t *h) {
	unsigned new_arena_ind;
	size_t ind_sz = sizeof(unsigned);
	expect_d_eq(mallctl("arenas.create", (void *)&new_arena_ind, &ind_sz,
	    (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
	    "Unexpected mallctl() failure");
	return new_arena_ind;
}

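/* Destroy the arena with index ind via the "arena.<i>.destroy" MIB. */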
static void
do_arena_destroy(unsigned ind) {
	size_t mib[3];
	size_t miblen;

	miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}

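/* Advance the stats epoch so that subsequent stats reads are current. */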
static void
do_refresh(void) {
	uint64_t refresh_epoch = 1;
	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&refresh_epoch,
	    sizeof(refresh_epoch)), 0, "Unexpected mallctl() failure");
}

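/*
 * Read a size_t statistic for the arena with index ind from the
 * "stats.arenas.<i>.*" node named by cmd.
 */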
static size_t
do_get_size_impl(const char *cmd, unsigned ind) {
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t z = sizeof(size_t);

	expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
	mib[2] = ind;
	size_t size;
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
	    0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd);

	return size;
}

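/* Active memory of the arena, in bytes (pactive is reported in pages). */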
static size_t
do_get_active(unsigned ind) {
	return do_get_size_impl("stats.arenas.0.pactive", ind) * PAGE;
}

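/* Mapped memory of the arena, in bytes. */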
static size_t
do_get_mapped(unsigned ind) {
	return do_get_size_impl("stats.arenas.0.mapped", ind);
}

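/*
 * Worker thread: for each epoch, spin until the main thread publishes the
 * epoch number, make PER_THD_NALLOCS allocations of size sz from the test
 * arena (bypassing the tcache), then bump nfinished to signal completion.
 */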
static void *
thd_start(void *arg) {
	for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) {
		/* Busy-wait for next epoch. */
		unsigned cur_epoch;
		spin_t spinner = SPIN_INITIALIZER;
		while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) !=
		    next_epoch) {
			spin_adaptive(&spinner);
		}
		expect_u_eq(cur_epoch, next_epoch, "Unexpected epoch");

		/*
		 * Allocate.  The main thread will reset the arena, so there's
		 * no need to deallocate.
		 */
		for (unsigned i = 0; i < PER_THD_NALLOCS; i++) {
			void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) |
			    MALLOCX_TCACHE_NONE);
			expect_ptr_not_null(p,
			    "Unexpected mallocx() failure\n");
		}

		/* Let the main thread know we've finished this iteration. */
		atomic_fetch_add_u(&nfinished, 1, ATOMIC_RELEASE);
	}

	return NULL;
}

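/*
 * Publish one epoch at a time to the worker threads; after each round of
 * allocations, verify that allocated <= active <= mapped and that the size
 * classes the arena could have retained do not exceed what the workers
 * allocated, then destroy and recreate the arena for the next round.
 */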
TEST_BEGIN(test_retained) {
	test_skip_if(!config_stats);
	test_skip_if(opt_hpa);

	arena_ind = do_arena_create(NULL);
	sz = nallocx(HUGEPAGE, 0);
	size_t guard_sz = san_guard_enabled() ? SAN_PAGE_GUARDS_SIZE : 0;
	esz = sz + sz_large_pad + guard_sz;

	atomic_store_u(&epoch, 0, ATOMIC_RELAXED);

	unsigned nthreads = ncpus * 2;
	if (LG_SIZEOF_PTR < 3 && nthreads > 16) {
		nthreads = 16; /* 32-bit platform could run out of vaddr. */
	}
	VARIABLE_ARRAY(thd_t, threads, nthreads);
	for (unsigned i = 0; i < nthreads; i++) {
		thd_create(&threads[i], thd_start, NULL);
	}

	for (unsigned e = 1; e < NEPOCHS; e++) {
		atomic_store_u(&nfinished, 0, ATOMIC_RELEASE);
		atomic_store_u(&epoch, e, ATOMIC_RELEASE);

		/* Wait for threads to finish allocating. */
		spin_t spinner = SPIN_INITIALIZER;
		while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) {
			spin_adaptive(&spinner);
		}

		/*
		 * Assert that retained is no more than the sum of size classes
		 * that should have been used to satisfy the worker threads'
		 * requests, discounting per-growth fragmentation.
		 */
		do_refresh();

		size_t allocated = (esz - guard_sz) * nthreads *
		    PER_THD_NALLOCS;
		size_t active = do_get_active(arena_ind);
		expect_zu_le(allocated, active, "Unexpected active memory");
		size_t mapped = do_get_mapped(arena_ind);
		expect_zu_le(active, mapped, "Unexpected mapped memory");

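		/*
		 * Walk the extent-growth size classes the arena may have
		 * retained so far (HUGEPAGE up to exp_grow.next), splitting
		 * each into the portion usable for esz-sized requests and the
		 * fragmented remainder.
		 */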
		arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false);
		size_t usable = 0;
		size_t fragmented = 0;
		for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
		    arena->pa_shard.pac.exp_grow.next; pind++) {
			size_t psz = sz_pind2sz(pind);
			size_t psz_fragmented = psz % esz;
			size_t psz_usable = psz - psz_fragmented;
			/*
			 * Only consider size classes that wouldn't be skipped.
			 */
			if (psz_usable > 0) {
				expect_zu_lt(usable, allocated,
				    "Excessive retained memory "
				    "(%#zx[+%#zx] > %#zx)", usable, psz_usable,
				    allocated);
				fragmented += psz_fragmented;
				usable += psz_usable;
			}
		}

		/*
		 * Clean up the arena.  Destroying and recreating it is
		 * simpler than specifying extent hooks that deallocate
		 * (rather than retain) during reset.
		 */
		do_arena_destroy(arena_ind);
		expect_u_eq(do_arena_create(NULL), arena_ind,
		    "Unexpected arena index");
	}

	for (unsigned i = 0; i < nthreads; i++) {
		thd_join(threads[i], NULL);
	}

	do_arena_destroy(arena_ind);
}
TEST_END

int
main(void) {
	return test(
	    test_retained);
}