xref: /netbsd-src/external/bsd/jemalloc/dist/test/unit/tsd.c (revision 7bdf38e5b7a28439665f2fdeff81e36913eef7dd)
1 #include "test/jemalloc_test.h"
2 
3 /*
4  * If we're e.g. in debug mode, we *never* enter the fast path, and so shouldn't
5  * be asserting that we're on one.
6  */
/* Whether tsd was on the fast path before any test induced global slowness. */
static bool originally_fast;
/* Number of times data_cleanup() has run; reset per sub-thread test. */
static int data_cleanup_count;
9 
10 void
11 data_cleanup(int *data) {
12 	if (data_cleanup_count == 0) {
13 		expect_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT,
14 		    "Argument passed into cleanup function should match tsd "
15 		    "value");
16 	}
17 	++data_cleanup_count;
18 
19 	/*
20 	 * Allocate during cleanup for two rounds, in order to assure that
21 	 * jemalloc's internal tsd reinitialization happens.
22 	 */
23 	bool reincarnate = false;
24 	switch (*data) {
25 	case MALLOC_TSD_TEST_DATA_INIT:
26 		*data = 1;
27 		reincarnate = true;
28 		break;
29 	case 1:
30 		*data = 2;
31 		reincarnate = true;
32 		break;
33 	case 2:
34 		return;
35 	default:
36 		not_reached();
37 	}
38 
39 	if (reincarnate) {
40 		void *p = mallocx(1, 0);
41 		expect_ptr_not_null(p, "Unexpeced mallocx() failure");
42 		dallocx(p, 0);
43 	}
44 }
45 
46 static void *
47 thd_start(void *arg) {
48 	int d = (int)(uintptr_t)arg;
49 	void *p;
50 
51 	/*
52 	 * Test free before tsd init -- the free fast path (which does not
53 	 * explicitly check for NULL) has to tolerate this case, and fall back
54 	 * to free_default.
55 	 */
56 	free(NULL);
57 
58 	tsd_t *tsd = tsd_fetch();
59 	expect_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT,
60 	    "Initial tsd get should return initialization value");
61 
62 	p = malloc(1);
63 	expect_ptr_not_null(p, "Unexpected malloc() failure");
64 
65 	tsd_test_data_set(tsd, d);
66 	expect_x_eq(tsd_test_data_get(tsd), d,
67 	    "After tsd set, tsd get should return value that was set");
68 
69 	d = 0;
70 	expect_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg,
71 	    "Resetting local data should have no effect on tsd");
72 
73 	tsd_test_callback_set(tsd, &data_cleanup);
74 
75 	free(p);
76 	return NULL;
77 }
78 
/* Run the tsd get/set sequence directly on the main thread. */
TEST_BEGIN(test_tsd_main_thread) {
	thd_start((void *)(uintptr_t)0xa5f3e329);
}
TEST_END
83 
/*
 * Run the tsd get/set sequence on a freshly created thread, then verify that
 * the cleanup callback installed by thd_start() fired on thread exit.
 */
TEST_BEGIN(test_tsd_sub_thread) {
	thd_t thd;

	data_cleanup_count = 0;
	thd_create(&thd, thd_start, (void *)MALLOC_TSD_TEST_DATA_INIT);
	thd_join(thd, NULL);
	/*
	 * We reincarnate twice in the data cleanup, so it should execute at
	 * least 3 times.
	 */
	expect_x_ge(data_cleanup_count, 3,
	    "Cleanup function should have executed multiple times.");
}
TEST_END
98 
99 static void *
100 thd_start_reincarnated(void *arg) {
101 	tsd_t *tsd = tsd_fetch();
102 	assert(tsd);
103 
104 	void *p = malloc(1);
105 	expect_ptr_not_null(p, "Unexpected malloc() failure");
106 
107 	/* Manually trigger reincarnation. */
108 	expect_ptr_not_null(tsd_arena_get(tsd),
109 	    "Should have tsd arena set.");
110 	tsd_cleanup((void *)tsd);
111 	expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
112 	    "TSD arena should have been cleared.");
113 	expect_u_eq(tsd_state_get(tsd), tsd_state_purgatory,
114 	    "TSD state should be purgatory\n");
115 
116 	free(p);
117 	expect_u_eq(tsd_state_get(tsd), tsd_state_reincarnated,
118 	    "TSD state should be reincarnated\n");
119 	p = mallocx(1, MALLOCX_TCACHE_NONE);
120 	expect_ptr_not_null(p, "Unexpected malloc() failure");
121 	expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
122 	    "Should not have tsd arena set after reincarnation.");
123 
124 	free(p);
125 	tsd_cleanup((void *)tsd);
126 	expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
127 	    "TSD arena should have been cleared after 2nd cleanup.");
128 
129 	return NULL;
130 }
131 
/* Exercise manual tsd cleanup/reincarnation on a dedicated thread. */
TEST_BEGIN(test_tsd_reincarnation) {
	thd_t thd;
	thd_create(&thd, thd_start_reincarnated, NULL);
	thd_join(thd, NULL);
}
TEST_END
138 
/* Shared state for the global-slow test's two lock-stepped threads. */
typedef struct {
	/* Monotonic phase counter; each thread spins until its turn. */
	atomic_u32_t phase;
	/* Set by the child thread when a fast/slow expectation fails. */
	atomic_b_t error;
} global_slow_data_t;
143 
/*
 * Child-thread half of the global-slow test.  Runs in lock-step with
 * test_tsd_global_slow: the child owns the even->odd phase transitions and
 * the parent the odd->even ones.  At each phase the child records into
 * data->error whether its tsd fast/slow state matches what the parent's
 * tsd_global_slow_inc/dec calls should have produced.
 */
static void *
thd_start_global_slow(void *arg) {
	/* PHASE 0 */
	global_slow_data_t *data = (global_slow_data_t *)arg;
	free(mallocx(1, 0));

	tsd_t *tsd = tsd_fetch();
	/*
	 * No global slowness has happened yet; there was an error if we were
	 * originally fast but aren't now.
	 */
	atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
	    ATOMIC_SEQ_CST);
	atomic_store_u32(&data->phase, 1, ATOMIC_SEQ_CST);

	/* PHASE 2: parent did one global_slow_inc; we must be slow. */
	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 2) {
	}
	free(mallocx(1, 0));
	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
	atomic_store_u32(&data->phase, 3, ATOMIC_SEQ_CST);

	/* PHASE 4: parent did a second inc; still slow. */
	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 4) {
	}
	free(mallocx(1, 0));
	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
	atomic_store_u32(&data->phase, 5, ATOMIC_SEQ_CST);

	/* PHASE 6 */
	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 6) {
	}
	free(mallocx(1, 0));
	/* Only one decrement so far. */
	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
	atomic_store_u32(&data->phase, 7, ATOMIC_SEQ_CST);

	/* PHASE 8 */
	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 8) {
	}
	free(mallocx(1, 0));
	/*
	 * Both decrements happened; we should be fast again (if we ever
	 * were)
	 */
	atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
	    ATOMIC_SEQ_CST);
	atomic_store_u32(&data->phase, 9, ATOMIC_SEQ_CST);

	return NULL;
}
195 
196 TEST_BEGIN(test_tsd_global_slow) {
197 	global_slow_data_t data = {ATOMIC_INIT(0), ATOMIC_INIT(false)};
198 	/*
199 	 * Note that the "mallocx" here (vs. malloc) is important, since the
200 	 * compiler is allowed to optimize away free(malloc(1)) but not
201 	 * free(mallocx(1)).
202 	 */
203 	free(mallocx(1, 0));
204 	tsd_t *tsd = tsd_fetch();
205 	originally_fast = tsd_fast(tsd);
206 
207 	thd_t thd;
208 	thd_create(&thd, thd_start_global_slow, (void *)&data.phase);
209 	/* PHASE 1 */
210 	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 1) {
211 		/*
212 		 * We don't have a portable condvar/semaphore mechanism.
213 		 * Spin-wait.
214 		 */
215 	}
216 	expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
217 	tsd_global_slow_inc(tsd_tsdn(tsd));
218 	free(mallocx(1, 0));
219 	expect_false(tsd_fast(tsd), "");
220 	atomic_store_u32(&data.phase, 2, ATOMIC_SEQ_CST);
221 
222 	/* PHASE 3 */
223 	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 3) {
224 	}
225 	expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
226 	/* Increase again, so that we can test multiple fast/slow changes. */
227 	tsd_global_slow_inc(tsd_tsdn(tsd));
228 	atomic_store_u32(&data.phase, 4, ATOMIC_SEQ_CST);
229 	free(mallocx(1, 0));
230 	expect_false(tsd_fast(tsd), "");
231 
232 	/* PHASE 5 */
233 	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 5) {
234 	}
235 	expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
236 	tsd_global_slow_dec(tsd_tsdn(tsd));
237 	atomic_store_u32(&data.phase, 6, ATOMIC_SEQ_CST);
238 	/* We only decreased once; things should still be slow. */
239 	free(mallocx(1, 0));
240 	expect_false(tsd_fast(tsd), "");
241 
242 	/* PHASE 7 */
243 	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 7) {
244 	}
245 	expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
246 	tsd_global_slow_dec(tsd_tsdn(tsd));
247 	atomic_store_u32(&data.phase, 8, ATOMIC_SEQ_CST);
248 	/* We incremented and then decremented twice; we should be fast now. */
249 	free(mallocx(1, 0));
250 	expect_true(!originally_fast || tsd_fast(tsd), "");
251 
252 	/* PHASE 9 */
253 	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 9) {
254 	}
255 	expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
256 
257 	thd_join(thd, NULL);
258 }
259 TEST_END
260 
261 int
262 main(void) {
263 	/* Ensure tsd bootstrapped. */
264 	if (nallocx(1, 0) == 0) {
265 		malloc_printf("Initialization error");
266 		return test_status_fail;
267 	}
268 
269 	return test_no_reentrancy(
270 	    test_tsd_main_thread,
271 	    test_tsd_sub_thread,
272 	    test_tsd_reincarnation,
273 	    test_tsd_global_slow);
274 }
275