#include "test/jemalloc_test.h"
#include "test/sleep.h"

static void
sleep_for_background_thread_interval() {
	/*
	 * The sleep interval set in our .sh file is 50ms, so the background
	 * thread is likely to have run if we sleep for four times that.
	 */
	sleep_ns(200 * 1000 * 1000);
}

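/*
 * Create a fresh arena via the arenas.create mallctl and return its index.
 */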
static unsigned
create_arena() {
	unsigned arena_ind;
	size_t sz;

	sz = sizeof(unsigned);
	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
	    0, "Unexpected mallctl() failure");
	return arena_ind;
}

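/*
 * Return the total number of dirty pages (huge + nonhuge) in the empty slabs
 * of the given arena's HPA shard.  The stats epoch is advanced first, since
 * stats.* mallctls report values cached as of the last epoch update.
 */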
static size_t
get_empty_ndirty(unsigned arena_ind) {
	int err;
	size_t ndirty_huge;
	size_t ndirty_nonhuge;
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	err = je_mallctl("epoch", (void *)&epoch, &sz, (void *)&epoch,
	    sizeof(epoch));
	expect_d_eq(0, err, "Unexpected mallctl() failure");

	size_t mib[6];
	size_t miblen = sizeof(mib)/sizeof(mib[0]);
	err = mallctlnametomib(
	    "stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge", mib,
	    &miblen);
	expect_d_eq(0, err, "Unexpected mallctlnametomib() failure");

	sz = sizeof(ndirty_nonhuge);
	mib[2] = arena_ind;
	err = mallctlbymib(mib, miblen, &ndirty_nonhuge, &sz, NULL, 0);
	expect_d_eq(0, err, "Unexpected mallctlbymib() failure");

	err = mallctlnametomib(
	    "stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge", mib,
	    &miblen);
	expect_d_eq(0, err, "Unexpected mallctlnametomib() failure");

	sz = sizeof(ndirty_huge);
	mib[2] = arena_ind;
	err = mallctlbymib(mib, miblen, &ndirty_huge, &sz, NULL, 0);
	expect_d_eq(0, err, "Unexpected mallctlbymib() failure");

	return ndirty_huge + ndirty_nonhuge;
}

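/*
 * Toggle the global background thread state via the background_thread
 * mallctl.
 */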
static void
set_background_thread_enabled(bool enabled) {
	int err;
	err = je_mallctl("background_thread", NULL, NULL, &enabled,
	    sizeof(enabled));
	expect_d_eq(0, err, "Unexpected mallctl failure");
}

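/*
 * Spin until the background thread serving the given arena has entered an
 * indefinite sleep, i.e. it is fully started and idle.  Acquiring and
 * releasing info->mtx synchronizes with the thread's own state updates, so
 * we observe its latest state on each iteration.
 */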
static void
wait_until_thread_is_enabled(unsigned arena_id) {
	tsd_t *tsd = tsd_fetch();

	bool sleeping = false;
	int iterations = 0;
	do {
		background_thread_info_t *info =
		    background_thread_info_get(arena_id);
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		sleeping = background_thread_indefinite_sleep(info);
		iterations++;
		assert_d_lt(iterations, 1000000,
		    "Waiting for a thread to start for too long");
	} while (!sleeping);
}

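/*
 * Allocate and free a page from the given arena, then check the dirty-page
 * accounting: with expect_deferred, the freed page should linger as dirty
 * until the background thread purges it; otherwise it should be reclaimed
 * immediately.
 */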
static void
expect_purging(unsigned arena_ind, bool expect_deferred) {
	size_t empty_ndirty;

	empty_ndirty = get_empty_ndirty(arena_ind);
	expect_zu_eq(0, empty_ndirty, "Expected arena to start unused.");

	/*
	 * It's possible that we get unlucky with our stats collection timing,
	 * and the background thread runs in between the deallocation and the
	 * stats collection.  So we retry 10 times, and see if we *ever* see
	 * deferred reclamation.
	 */
	bool observed_dirty_page = false;
	for (int i = 0; i < 10; i++) {
		void *ptr = mallocx(PAGE,
		    MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind));
		empty_ndirty = get_empty_ndirty(arena_ind);
		expect_zu_eq(0, empty_ndirty, "All pages should be active");
		dallocx(ptr, MALLOCX_TCACHE_NONE);
		empty_ndirty = get_empty_ndirty(arena_ind);
		if (expect_deferred) {
			expect_true(empty_ndirty == 0 || empty_ndirty == 1 ||
			    opt_prof, "Unexpected extra dirty page count: %zu",
			    empty_ndirty);
		} else {
			assert_zu_eq(0, empty_ndirty,
			    "Saw dirty pages without deferred purging");
		}
		if (empty_ndirty > 0) {
			observed_dirty_page = true;
			break;
		}
	}
	expect_b_eq(expect_deferred, observed_dirty_page,
	    "Should observe a dirty page iff purging is deferred");

	/*
	 * Under high concurrency / heavy test load (e.g. using run_test.sh),
	 * the background thread may not get scheduled for a long period of
	 * time.  Retry up to 100 times before bailing out.
	 */
	unsigned retry = 0;
	while ((empty_ndirty = get_empty_ndirty(arena_ind)) > 0 &&
	    expect_deferred && (retry++ < 100)) {
		sleep_for_background_thread_interval();
	}

	expect_zu_eq(0, empty_ndirty, "Should have seen a background purge");
}

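/*
 * With background threads enabled from the start, every free should leave a
 * dirty page behind that the background thread later purges.
 */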
TEST_BEGIN(test_hpa_background_thread_purges) {
	test_skip_if(!config_stats);
	test_skip_if(!hpa_supported());
	test_skip_if(!have_background_thread);
	/* Skip since guarded pages cannot be allocated from hpa. */
	test_skip_if(san_guard_enabled());

	unsigned arena_ind = create_arena();
	/*
	 * Our .sh sets dirty mult to 0, so all dirty pages should get purged
	 * any time any thread frees.
	 */
	expect_purging(arena_ind, /* expect_deferred */ true);
}
TEST_END

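/*
 * Verify that purging is synchronous while the background thread is disabled,
 * and becomes deferred again once it is re-enabled.
 */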
TEST_BEGIN(test_hpa_background_thread_enable_disable) {
	test_skip_if(!config_stats);
	test_skip_if(!hpa_supported());
	test_skip_if(!have_background_thread);
	/* Skip since guarded pages cannot be allocated from hpa. */
	test_skip_if(san_guard_enabled());

	unsigned arena_ind = create_arena();

	set_background_thread_enabled(false);
	expect_purging(arena_ind, /* expect_deferred */ false);

	set_background_thread_enabled(true);
	wait_until_thread_is_enabled(arena_ind);
	expect_purging(arena_ind, /* expect_deferred */ true);
}
TEST_END

int
main(void) {
	/*
	 * OK, this is a sort of nasty hack.  We don't want to add *another*
	 * config option for HPA (the intent is that it becomes available on
	 * more platforms over time, and we're trying to prune back config
	 * options generally).  But we'll get initialization errors on other
	 * platforms if we set hpa:true in the MALLOC_CONF (even if we set
	 * abort_conf:false as well).  So we reach into the internals and set
	 * them directly, but only if we know that we're actually going to do
	 * something nontrivial in the tests.
	 */
	if (config_stats && hpa_supported() && have_background_thread) {
		opt_hpa = true;
		opt_background_thread = true;
	}
	return test_no_reentrancy(
	    test_hpa_background_thread_purges,
	    test_hpa_background_thread_enable_disable);
}