#include "test/jemalloc_test.h"
#include "test/sleep.h"

static void
sleep_for_background_thread_interval(void) {
	/*
	 * The sleep interval set in our .sh file is 50ms, so the background
	 * thread is very likely to have run if we sleep for four times that.
	 */
	sleep_ns(200 * 1000 * 1000);
}

static unsigned
create_arena(void) {
	unsigned arena_ind;
	size_t sz;

	sz = sizeof(unsigned);
	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
	    0, "Unexpected mallctl() failure");
	return arena_ind;
}

static size_t
get_empty_ndirty(unsigned arena_ind) {
	int err;
	size_t ndirty_huge;
	size_t ndirty_nonhuge;
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
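	/*
	 * Stats are only refreshed when the epoch is advanced, so bump it to
	 * make sure the dirty-page counts we read below are current.
	 */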
	err = je_mallctl("epoch", (void *)&epoch, &sz, (void *)&epoch,
	    sizeof(epoch));
	expect_d_eq(0, err, "Unexpected mallctl() failure");

	size_t mib[6];
	size_t miblen = sizeof(mib)/sizeof(mib[0]);
	err = mallctlnametomib(
	    "stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge", mib,
	    &miblen);
	expect_d_eq(0, err, "Unexpected mallctlnametomib() failure");

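	/*
	 * The name above was translated with a literal "0" as its arena
	 * component; mib[2] corresponds to that component, so patch in the
	 * arena index we actually want before reading by MIB.
	 */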
	sz = sizeof(ndirty_nonhuge);
	mib[2] = arena_ind;
	err = mallctlbymib(mib, miblen, &ndirty_nonhuge, &sz, NULL, 0);
	expect_d_eq(0, err, "Unexpected mallctlbymib() failure");

	err = mallctlnametomib(
	    "stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge", mib,
	    &miblen);
	expect_d_eq(0, err, "Unexpected mallctlnametomib() failure");

	sz = sizeof(ndirty_huge);
	mib[2] = arena_ind;
	err = mallctlbymib(mib, miblen, &ndirty_huge, &sz, NULL, 0);
	expect_d_eq(0, err, "Unexpected mallctlbymib() failure");

	return ndirty_huge + ndirty_nonhuge;
}

static void
set_background_thread_enabled(bool enabled) {
	int err;
	err = je_mallctl("background_thread", NULL, NULL, &enabled,
	    sizeof(enabled));
	expect_d_eq(0, err, "Unexpected mallctl failure");
}

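/*
 * Enabling background threads via mallctl only flips the setting; the thread
 * itself may still be starting up.  Poll (taking and releasing the info mutex
 * to synchronize with the thread's state updates) until it has settled into
 * its indefinite sleep, at which point it is ready to accept deferred work.
 */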
static void
wait_until_thread_is_enabled(unsigned arena_id) {
	tsd_t *tsd = tsd_fetch();

	bool sleeping = false;
	int iterations = 0;
	do {
		background_thread_info_t *info =
		    background_thread_info_get(arena_id);
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		sleeping = background_thread_indefinite_sleep(info);
		iterations++;
		assert_d_lt(iterations, 1000000,
		    "Waiting for a thread to start for too long");
	} while (!sleeping);
}

static void
expect_purging(unsigned arena_ind, bool expect_deferred) {
	size_t empty_ndirty;

	empty_ndirty = get_empty_ndirty(arena_ind);
	expect_zu_eq(0, empty_ndirty, "Expected arena to start unused.");

	/*
	 * It's possible that we get unlucky with our stats collection timing,
	 * and the background thread runs in between the deallocation and the
	 * stats collection.  So we retry 10 times, and see if we *ever* see
	 * deferred reclamation.
	 */
	bool observed_dirty_page = false;
	for (int i = 0; i < 10; i++) {
		void *ptr = mallocx(PAGE,
		    MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind));
		empty_ndirty = get_empty_ndirty(arena_ind);
		expect_zu_eq(0, empty_ndirty, "All pages should be active");
		dallocx(ptr, MALLOCX_TCACHE_NONE);
		empty_ndirty = get_empty_ndirty(arena_ind);
		if (expect_deferred) {
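			/*
			 * With profiling enabled, internal allocations may
			 * create additional dirty pages, so we don't insist
			 * on an exact count in that case.
			 */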
			expect_true(empty_ndirty == 0 || empty_ndirty == 1 ||
			    opt_prof, "Unexpected extra dirty page count: %zu",
			    empty_ndirty);
		} else {
			assert_zu_eq(0, empty_ndirty,
			    "Saw dirty pages without deferred purging");
		}
		if (empty_ndirty > 0) {
			observed_dirty_page = true;
			break;
		}
	}
	expect_b_eq(expect_deferred, observed_dirty_page,
	    "Should observe a dirty page iff deferred purging was expected");

	/*
	 * Under high concurrency / heavy test load (e.g. when using
	 * run_test.sh), the background thread may not get scheduled for quite
	 * a while.  Retry up to 100 times before bailing out.
	 */
	unsigned retry = 0;
	while ((empty_ndirty = get_empty_ndirty(arena_ind)) > 0 &&
	    expect_deferred && (retry++ < 100)) {
		sleep_for_background_thread_interval();
	}

	expect_zu_eq(0, empty_ndirty, "Should have seen a background purge");
}

TEST_BEGIN(test_hpa_background_thread_purges) {
	test_skip_if(!config_stats);
	test_skip_if(!hpa_supported());
	test_skip_if(!have_background_thread);
	/* Skip since guarded pages cannot be allocated from hpa. */
	test_skip_if(san_guard_enabled());

	unsigned arena_ind = create_arena();
	/*
	 * Our .sh file sets the dirty mult to 0, so all dirty pages should
	 * get purged any time any thread frees.
	 */
	expect_purging(arena_ind, /* expect_deferred */ true);
}
TEST_END

TEST_BEGIN(test_hpa_background_thread_enable_disable) {
	test_skip_if(!config_stats);
	test_skip_if(!hpa_supported());
	test_skip_if(!have_background_thread);
	/* Skip since guarded pages cannot be allocated from hpa. */
	test_skip_if(san_guard_enabled());

	unsigned arena_ind = create_arena();

	set_background_thread_enabled(false);
	expect_purging(arena_ind, /* expect_deferred */ false);

	set_background_thread_enabled(true);
	wait_until_thread_is_enabled(arena_ind);
	expect_purging(arena_ind, /* expect_deferred */ true);
}
TEST_END

int
main(void) {
	/*
	 * OK, this is a sort of nasty hack.  We don't want to add *another*
	 * config option for HPA (the intent is that it becomes available on
	 * more platforms over time, and we're trying to prune back config
	 * options generally).  But we'll get initialization errors on other
	 * platforms if we set hpa:true in the MALLOC_CONF (even if we set
	 * abort_conf:false as well).  So we reach into the internals and set
	 * them directly, but only if we know that we're actually going to do
	 * something nontrivial in the tests.
	 */
	if (config_stats && hpa_supported() && have_background_thread) {
		opt_hpa = true;
		opt_background_thread = true;
	}
	return test_no_reentrancy(
	    test_hpa_background_thread_purges,
	    test_hpa_background_thread_enable_disable);
}