#include "test/jemalloc_test.h"
#include "test/arena_util.h"

#include "jemalloc/internal/ticker.h"

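/*
 * Mock clock machinery: the tests below swap jemalloc's nstime_monotonic()
 * and nstime_update() hooks for the stubs here, so that time can be advanced
 * (or made to appear non-monotonic) deterministically while driving decay.
 */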
static nstime_monotonic_t *nstime_monotonic_orig;
static nstime_update_t *nstime_update_orig;

static unsigned nupdates_mock;
static nstime_t time_mock;
static bool monotonic_mock;

static bool
nstime_monotonic_mock(void) {
	return monotonic_mock;
}

static void
nstime_update_mock(nstime_t *time) {
	nupdates_mock++;
	if (monotonic_mock) {
		nstime_copy(time, &time_mock);
	}
}

TEST_BEGIN(test_decay_ticks) {
	test_skip_if(is_background_thread_enabled());
	test_skip_if(opt_hpa);

	ticker_geom_t *decay_ticker;
	unsigned tick0, tick1, arena_ind;
	size_t sz, large0;
	void *p;

	sz = sizeof(size_t);
	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	/* Set up a manually managed arena for test. */
	arena_ind = do_arena_create(0, 0);

	/* Migrate to the new arena, and get the ticker. */
	unsigned old_arena_ind;
	size_t sz_arena_ind = sizeof(old_arena_ind);
	expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
	    &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0,
	    "Unexpected mallctl() failure");
	decay_ticker = tsd_arena_decay_tickerp_get(tsd_fetch());
	expect_ptr_not_null(decay_ticker,
	    "Unexpected failure getting decay ticker");

	/*
	 * Test the standard APIs using a large size class, since we can't
	 * control tcache interactions for small size classes (except by
	 * completely disabling tcache for the entire test program).
	 */

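	/*
	 * For each API, read the ticker before and after the call and expect
	 * the two readings to differ, i.e. the call ticked the per-thread
	 * decay ticker.
	 */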
	/* malloc(). */
	tick0 = ticker_geom_read(decay_ticker);
	p = malloc(large0);
	expect_ptr_not_null(p, "Unexpected malloc() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
	/* free(). */
	tick0 = ticker_geom_read(decay_ticker);
	free(p);
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during free()");

	/* calloc(). */
	tick0 = ticker_geom_read(decay_ticker);
	p = calloc(1, large0);
	expect_ptr_not_null(p, "Unexpected calloc() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
	free(p);

	/* posix_memalign(). */
	tick0 = ticker_geom_read(decay_ticker);
	expect_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
	    "Unexpected posix_memalign() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0,
	    "Expected ticker to tick during posix_memalign()");
	free(p);

	/* aligned_alloc(). */
	tick0 = ticker_geom_read(decay_ticker);
	p = aligned_alloc(sizeof(size_t), large0);
	expect_ptr_not_null(p, "Unexpected aligned_alloc() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0,
	    "Expected ticker to tick during aligned_alloc()");
	free(p);

	/* realloc(). */
	/* Allocate. */
	tick0 = ticker_geom_read(decay_ticker);
	p = realloc(NULL, large0);
	expect_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Reallocate. */
	tick0 = ticker_geom_read(decay_ticker);
	p = realloc(p, large0);
	expect_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Deallocate. */
	tick0 = ticker_geom_read(decay_ticker);
	realloc(p, 0);
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");

	/*
	 * Test the *allocx() APIs using large and small size classes, with
	 * tcache explicitly disabled.
	 */
	{
		unsigned i;
		size_t allocx_sizes[2];
		allocx_sizes[0] = large0;
		allocx_sizes[1] = 1;

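		/*
		 * MALLOCX_TCACHE_NONE bypasses the thread cache entirely, so
		 * each call below must reach the arena, and is therefore
		 * expected to tick.
		 */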
		for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
			sz = allocx_sizes[i];

			/* mallocx(). */
			tick0 = ticker_geom_read(decay_ticker);
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			expect_ptr_not_null(p, "Unexpected mallocx() failure");
			tick1 = ticker_geom_read(decay_ticker);
			expect_u32_ne(tick1, tick0,
			    "Expected ticker to tick during mallocx() (sz=%zu)",
			    sz);
			/* rallocx(). */
			tick0 = ticker_geom_read(decay_ticker);
			p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
			expect_ptr_not_null(p, "Unexpected rallocx() failure");
			tick1 = ticker_geom_read(decay_ticker);
			expect_u32_ne(tick1, tick0,
			    "Expected ticker to tick during rallocx() (sz=%zu)",
			    sz);
			/* xallocx(). */
			tick0 = ticker_geom_read(decay_ticker);
			xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
			tick1 = ticker_geom_read(decay_ticker);
			expect_u32_ne(tick1, tick0,
			    "Expected ticker to tick during xallocx() (sz=%zu)",
			    sz);
			/* dallocx(). */
			tick0 = ticker_geom_read(decay_ticker);
			dallocx(p, MALLOCX_TCACHE_NONE);
			tick1 = ticker_geom_read(decay_ticker);
			expect_u32_ne(tick1, tick0,
			    "Expected ticker to tick during dallocx() (sz=%zu)",
			    sz);
			/* sdallocx(). */
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			expect_ptr_not_null(p, "Unexpected mallocx() failure");
			tick0 = ticker_geom_read(decay_ticker);
			sdallocx(p, sz, MALLOCX_TCACHE_NONE);
			tick1 = ticker_geom_read(decay_ticker);
			expect_u32_ne(tick1, tick0,
			    "Expected ticker to tick during sdallocx() "
			    "(sz=%zu)", sz);
		}
	}

	/*
	 * Test tcache fill/flush interactions for large and small size classes,
	 * using an explicit tcache.
	 */
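	/*
	 * "tcache.create" returns the index of a new explicit tcache, which is
	 * then selected per call via MALLOCX_TCACHE(tcache_ind); the
	 * "tcache.flush" mallctl empties it.  Both the fill (cache-miss) and
	 * flush paths are expected to tick.
	 */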
	unsigned tcache_ind, i;
	size_t tcache_sizes[2];
	tcache_sizes[0] = large0;
	tcache_sizes[1] = 1;

	size_t tcache_max, sz_tcache_max;
	sz_tcache_max = sizeof(tcache_max);
	expect_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
	    &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure");

	sz = sizeof(unsigned);
	expect_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
	    NULL, 0), 0, "Unexpected mallctl failure");

	for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
		sz = tcache_sizes[i];

		/* tcache fill. */
		tick0 = ticker_geom_read(decay_ticker);
		p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
		expect_ptr_not_null(p, "Unexpected mallocx() failure");
		tick1 = ticker_geom_read(decay_ticker);
		expect_u32_ne(tick1, tick0,
		    "Expected ticker to tick during tcache fill "
		    "(sz=%zu)", sz);
		/* tcache flush. */
		dallocx(p, MALLOCX_TCACHE(tcache_ind));
		tick0 = ticker_geom_read(decay_ticker);
		expect_d_eq(mallctl("tcache.flush", NULL, NULL,
		    (void *)&tcache_ind, sizeof(unsigned)), 0,
		    "Unexpected mallctl failure");
		tick1 = ticker_geom_read(decay_ticker);

		/* The flush will only tick if the object was actually in the
		 * tcache. */
		expect_u32_ne(tick1, tick0,
		    "Expected ticker to tick during tcache flush (sz=%zu)", sz);
	}
}
TEST_END

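/*
 * Churn allocations while stepping the mock clock across the decay interval
 * in NINTERVALS increments, so that the decay ticker fires and purging can
 * proceed.  (The dirty parameter is currently unused.)
 */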
static void
decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt,
    uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) {
#define NINTERVALS 101
	nstime_t time, update_interval, decay_ms, deadline;

	nstime_init_update(&time);

	nstime_init2(&decay_ms, dt, 0);
	nstime_copy(&deadline, &time);
	nstime_add(&deadline, &decay_ms);

	nstime_init2(&update_interval, dt, 0);
	nstime_idivide(&update_interval, NINTERVALS);

	/*
	 * Keep q's slab from being deallocated during the looping below.  If a
	 * cached slab were to repeatedly come and go during looping, it could
	 * prevent the decay backlog from ever becoming empty.
	 */
	void *p = do_mallocx(1, flags);
	uint64_t dirty_npurge1, muzzy_npurge1;
	do {
		for (unsigned i = 0; i < ARENA_DECAY_NTICKS_PER_UPDATE / 2;
		    i++) {
			void *q = do_mallocx(1, flags);
			dallocx(q, flags);
		}
		dirty_npurge1 = get_arena_dirty_npurge(arena_ind);
		muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind);

		nstime_add(&time_mock, &update_interval);
		nstime_update(&time);
	} while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 ==
	    dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) ||
	    !terminate_asap));
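	/*
	 * The loop above ends once the deadline has passed or, if
	 * terminate_asap is set, as soon as a purge has been observed.
	 */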
	dallocx(p, flags);

	if (config_stats) {
		expect_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 +
		    muzzy_npurge0, "Expected purging to occur");
	}
#undef NINTERVALS
}

TEST_BEGIN(test_decay_ticker) {
	test_skip_if(is_background_thread_enabled());
	test_skip_if(opt_hpa);
#define NPS 2048
	ssize_t ddt = opt_dirty_decay_ms;
	ssize_t mdt = opt_muzzy_decay_ms;
	unsigned arena_ind = do_arena_create(ddt, mdt);
	int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];

	/*
	 * Allocate a bunch of large objects, pause the clock, deallocate every
	 * other object (to fragment virtual memory), restore the clock, then
	 * [md]allocx() in a tight loop while advancing time rapidly to verify
	 * the ticker triggers purging.
	 */
	size_t large;
	size_t sz = sizeof(size_t);
	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

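	/* Force a purge, then record baseline purge counters. */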
	do_purge(arena_ind);
	uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind);
	uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind);

	for (unsigned i = 0; i < NPS; i++) {
		ps[i] = do_mallocx(large, flags);
	}

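	/*
	 * Swap in the mock clock; from here on, time advances only when the
	 * test advances time_mock explicitly.
	 */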
	nupdates_mock = 0;
	nstime_init_update(&time_mock);
	monotonic_mock = true;

	nstime_monotonic_orig = nstime_monotonic;
	nstime_update_orig = nstime_update;
	nstime_monotonic = nstime_monotonic_mock;
	nstime_update = nstime_update_mock;

	for (unsigned i = 0; i < NPS; i += 2) {
		dallocx(ps[i], flags);
		unsigned nupdates0 = nupdates_mock;
		do_decay(arena_ind);
		expect_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

	decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0,
	    muzzy_npurge0, true);
	decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0,
	    muzzy_npurge0, false);

	do_arena_destroy(arena_ind);

	nstime_monotonic = nstime_monotonic_orig;
	nstime_update = nstime_update_orig;
#undef NPS
}
TEST_END

TEST_BEGIN(test_decay_nonmonotonic) {
	test_skip_if(is_background_thread_enabled());
	test_skip_if(opt_hpa);
#define NPS (SMOOTHSTEP_NSTEPS + 1)
	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];
	uint64_t npurge0 = 0;
	uint64_t npurge1 = 0;
	size_t sz, large0;
	unsigned i, nupdates0;

	sz = sizeof(size_t);
	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl failure");
	do_epoch();
	sz = sizeof(uint64_t);
	npurge0 = get_arena_npurge(0);

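	/*
	 * Install a clock that claims to be non-monotonic; decay must not
	 * purge based on readings from such a time source.
	 */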
	nupdates_mock = 0;
	nstime_init_update(&time_mock);
	monotonic_mock = false;

	nstime_monotonic_orig = nstime_monotonic;
	nstime_update_orig = nstime_update;
	nstime_monotonic = nstime_monotonic_mock;
	nstime_update = nstime_update_mock;

	for (i = 0; i < NPS; i++) {
		ps[i] = mallocx(large0, flags);
		expect_ptr_not_null(ps[i], "Unexpected mallocx() failure");
	}

	for (i = 0; i < NPS; i++) {
		dallocx(ps[i], flags);
		nupdates0 = nupdates_mock;
		expect_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
		    "Unexpected arena.0.decay failure");
		expect_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

	do_epoch();
	sz = sizeof(uint64_t);
	npurge1 = get_arena_npurge(0);

	if (config_stats) {
		expect_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
	}

	nstime_monotonic = nstime_monotonic_orig;
	nstime_update = nstime_update_orig;
#undef NPS
}
TEST_END

TEST_BEGIN(test_decay_now) {
	test_skip_if(is_background_thread_enabled());
	test_skip_if(opt_hpa);

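	/*
	 * Create an arena with both decay times set to 0, meaning that dirty
	 * and muzzy pages should be purged immediately upon deallocation.
	 */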
	unsigned arena_ind = do_arena_create(0, 0);
	expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
	expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
	size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
	/* Verify that dirty/muzzy pages never linger after deallocation. */
	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
		size_t size = sizes[i];
		generate_dirty(arena_ind, size);
		expect_zu_eq(get_arena_pdirty(arena_ind), 0,
		    "Unexpected dirty pages");
		expect_zu_eq(get_arena_pmuzzy(arena_ind), 0,
		    "Unexpected muzzy pages");
	}
	do_arena_destroy(arena_ind);
}
TEST_END

TEST_BEGIN(test_decay_never) {
	test_skip_if(is_background_thread_enabled() || !config_stats);
	test_skip_if(opt_hpa);

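	/*
	 * Create an arena with both decay times set to -1, which disables
	 * decay-based purging entirely.
	 */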
	unsigned arena_ind = do_arena_create(-1, -1);
	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
	expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
	expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
	size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
	void *ptrs[sizeof(sizes)/sizeof(size_t)];
	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
		ptrs[i] = do_mallocx(sizes[i], flags);
	}
	/* Verify that each deallocation generates additional dirty pages. */
	size_t pdirty_prev = get_arena_pdirty(arena_ind);
	size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind);
	expect_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
	expect_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");
	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
		dallocx(ptrs[i], flags);
		size_t pdirty = get_arena_pdirty(arena_ind);
		size_t pmuzzy = get_arena_pmuzzy(arena_ind);
		expect_zu_gt(pdirty + (size_t)get_arena_dirty_purged(arena_ind),
		    pdirty_prev, "Expected dirty pages to increase.");
		expect_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
		pdirty_prev = pdirty;
	}
	do_arena_destroy(arena_ind);
}
TEST_END

int
main(void) {
	return test(
	    test_decay_ticks,
	    test_decay_ticker,
	    test_decay_nonmonotonic,
	    test_decay_now,
	    test_decay_never);
}