#include "test/jemalloc_test.h"

#include "jemalloc/internal/prof_recent.h"

/* As specified in the shell script */
#define OPT_ALLOC_MAX 3
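/*
 * Presumably the companion shell script exports a MALLOC_CONF along the
 * lines of "prof:true,prof_active:true,lg_prof_sample:0,
 * prof_recent_alloc_max:3" (a hypothetical reconstruction: these are exactly
 * the settings asserted by confirm_prof_setup() below and by the
 * lg_prof_sample check in test_prof_recent_alloc_dump).
 */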

/* Invariant before and after every test (when config_prof is on) */
static void
confirm_prof_setup(void) {
	/* Options */
	assert_true(opt_prof, "opt_prof not on");
	assert_true(opt_prof_active, "opt_prof_active not on");
	assert_zd_eq(opt_prof_recent_alloc_max, OPT_ALLOC_MAX,
	    "opt_prof_recent_alloc_max not set correctly");

	/* Dynamics */
	assert_true(prof_active_state, "prof_active not on");
	assert_zd_eq(prof_recent_alloc_max_ctl_read(), OPT_ALLOC_MAX,
	    "prof_recent_alloc_max not set correctly");
}

TEST_BEGIN(test_confirm_setup) {
	test_skip_if(!config_prof);
	confirm_prof_setup();
}
TEST_END

TEST_BEGIN(test_prof_recent_off) {
	test_skip_if(config_prof);

	const ssize_t past_ref = 0, future_ref = 0;
	const size_t len_ref = sizeof(ssize_t);

	ssize_t past = past_ref, future = future_ref;
	size_t len = len_ref;

#define ASSERT_SHOULD_FAIL(opt, a, b, c, d) do {			\
	assert_d_eq(mallctl("experimental.prof_recent." opt, a, b, c,	\
	    d), ENOENT, "Should return ENOENT when config_prof is off");\
	assert_zd_eq(past, past_ref, "output was touched");		\
	assert_zu_eq(len, len_ref, "output length was touched");	\
	assert_zd_eq(future, future_ref, "input was touched");		\
} while (0)

	ASSERT_SHOULD_FAIL("alloc_max", NULL, NULL, NULL, 0);
	ASSERT_SHOULD_FAIL("alloc_max", &past, &len, NULL, 0);
	ASSERT_SHOULD_FAIL("alloc_max", NULL, NULL, &future, len);
	ASSERT_SHOULD_FAIL("alloc_max", &past, &len, &future, len);

#undef ASSERT_SHOULD_FAIL
}
TEST_END

TEST_BEGIN(test_prof_recent_on) {
	test_skip_if(!config_prof);

	ssize_t past, future;
	size_t len = sizeof(ssize_t);

	confirm_prof_setup();

	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, NULL, 0), 0, "no-op mallctl should be allowed");
	confirm_prof_setup();

	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, NULL, 0), 0, "Read error");
	expect_zd_eq(past, OPT_ALLOC_MAX, "Wrong read result");
	future = OPT_ALLOC_MAX + 1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, len), 0, "Write error");
	future = -1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, &future, len), 0, "Read/write error");
	expect_zd_eq(past, OPT_ALLOC_MAX + 1, "Wrong read result");
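	/*
	 * -2 is below the minimum accepted value of -1 (which means
	 * unlimited), so the write must fail and leave everything intact.
	 */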
	future = -2;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, &future, len), EINVAL,
	    "Invalid write should return EINVAL");
	expect_zd_eq(past, OPT_ALLOC_MAX + 1,
	    "Output should not be touched given invalid write");
	future = OPT_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, &future, len), 0, "Read/write error");
	expect_zd_eq(past, -1, "Wrong read result");
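	/* A write with a mismatched input length must likewise be rejected. */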
	future = OPT_ALLOC_MAX + 2;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, &future, len * 2), EINVAL,
	    "Invalid write should return EINVAL");
	expect_zd_eq(past, -1,
	    "Output should not be touched given invalid write");

	confirm_prof_setup();
}
TEST_END

/* Reproducible sequence of request sizes */
#define NTH_REQ_SIZE(n) ((n) * 97 + 101)
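/*
 * E.g. NTH_REQ_SIZE(0) == 101, NTH_REQ_SIZE(1) == 198, NTH_REQ_SIZE(2) == 295;
 * the sizes are pairwise distinct, so a recorded size identifies its request.
 */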
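/*
 * For a pointer fresh from malloc(), check that its edata and its recent
 * allocation record point at each other, with alloc_tctx set and dalloc_tctx
 * still NULL.
 */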
static void
confirm_malloc(void *p) {
	assert_ptr_not_null(p, "malloc failed unexpectedly");
	edata_t *e = emap_edata_lookup(TSDN_NULL, &arena_emap_global, p);
	assert_ptr_not_null(e, "NULL edata for living pointer");
	prof_recent_t *n = edata_prof_recent_alloc_get_no_lock_test(e);
	assert_ptr_not_null(n, "Record in edata should not be NULL");
	expect_ptr_not_null(n->alloc_tctx,
	    "alloc_tctx in record should not be NULL");
	expect_ptr_eq(e, prof_recent_alloc_edata_get_no_lock_test(n),
	    "edata pointer in record is not correct");
	expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
}

static void
confirm_record_size(prof_recent_t *n, unsigned kth) {
	expect_zu_eq(n->size, NTH_REQ_SIZE(kth),
	    "Recorded allocation size is wrong");
}

static void
confirm_record_living(prof_recent_t *n) {
	expect_ptr_not_null(n->alloc_tctx,
	    "alloc_tctx in record should not be NULL");
	edata_t *edata = prof_recent_alloc_edata_get_no_lock_test(n);
	assert_ptr_not_null(edata,
	    "Recorded edata should not be NULL for living pointer");
	expect_ptr_eq(n, edata_prof_recent_alloc_get_no_lock_test(edata),
	    "Record in edata is not correct");
	expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
}

static void
confirm_record_released(prof_recent_t *n) {
	expect_ptr_not_null(n->alloc_tctx,
	    "alloc_tctx in record should not be NULL");
	expect_ptr_null(prof_recent_alloc_edata_get_no_lock_test(n),
	    "Recorded edata should be NULL for released pointer");
	expect_ptr_not_null(n->dalloc_tctx,
	    "dalloc_tctx in record should not be NULL for released pointer");
}

TEST_BEGIN(test_prof_recent_alloc) {
	test_skip_if(!config_prof);

	bool b;
	unsigned i, c;
	size_t req_size;
	void *p;
	prof_recent_t *n;
	ssize_t future;

	confirm_prof_setup();

	/*
	 * First batch of 2 * OPT_ALLOC_MAX allocations.  From the
	 * (OPT_ALLOC_MAX - 1)'th allocation onward, the recorded allocations
	 * should always be the last OPT_ALLOC_MAX allocations made here.
	 */
	for (i = 0; i < 2 * OPT_ALLOC_MAX; ++i) {
		req_size = NTH_REQ_SIZE(i);
		p = malloc(req_size);
		confirm_malloc(p);
		if (i < OPT_ALLOC_MAX - 1) {
			assert_false(ql_empty(&prof_recent_alloc_list),
			    "Empty recent allocation");
			free(p);
			/*
			 * The recorded allocations may still include some
			 * other allocations from before the test run started,
			 * so keep allocating without checking anything.
			 */
			continue;
		}
		c = 0;
		ql_foreach(n, &prof_recent_alloc_list, link) {
			++c;
			confirm_record_size(n, i + c - OPT_ALLOC_MAX);
			if (c == OPT_ALLOC_MAX) {
				confirm_record_living(n);
			} else {
				confirm_record_released(n);
			}
		}
		assert_u_eq(c, OPT_ALLOC_MAX,
		    "Incorrect total number of allocations");
		free(p);
	}

	confirm_prof_setup();

	b = false;
	assert_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
	    "mallctl for turning off prof_active failed");

	/*
	 * Second batch of OPT_ALLOC_MAX allocations.  Since prof_active is
	 * turned off, this batch shouldn't be recorded.
	 */
	for (; i < 3 * OPT_ALLOC_MAX; ++i) {
		req_size = NTH_REQ_SIZE(i);
		p = malloc(req_size);
		assert_ptr_not_null(p, "malloc failed unexpectedly");
		c = 0;
		ql_foreach(n, &prof_recent_alloc_list, link) {
			confirm_record_size(n, c + OPT_ALLOC_MAX);
			confirm_record_released(n);
			++c;
		}
		assert_u_eq(c, OPT_ALLOC_MAX,
		    "Incorrect total number of allocations");
		free(p);
	}

	b = true;
	assert_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
	    "mallctl for turning on prof_active failed");

	confirm_prof_setup();

	/*
	 * Third batch of OPT_ALLOC_MAX allocations.  Since prof_active is
	 * turned back on, they should be recorded, and in the list of recorded
	 * allocations they should follow the first batch rather than the
	 * second batch.
	 */
	for (; i < 4 * OPT_ALLOC_MAX; ++i) {
		req_size = NTH_REQ_SIZE(i);
		p = malloc(req_size);
		confirm_malloc(p);
		c = 0;
		ql_foreach(n, &prof_recent_alloc_list, link) {
			++c;
			confirm_record_size(n,
			    /* Is the allocation from the third batch? */
			    i + c - OPT_ALLOC_MAX >= 3 * OPT_ALLOC_MAX ?
			    /* If yes, then it's just recorded. */
			    i + c - OPT_ALLOC_MAX :
			    /*
			     * Otherwise, it should come from the first batch
			     * instead of the second batch.
			     */
			    i + c - 2 * OPT_ALLOC_MAX);
			if (c == OPT_ALLOC_MAX) {
				confirm_record_living(n);
			} else {
				confirm_record_released(n);
			}
		}
		assert_u_eq(c, OPT_ALLOC_MAX,
		    "Incorrect total number of allocations");
		free(p);
	}

	/* Increasing the limit shouldn't alter the list of records. */
	future = OPT_ALLOC_MAX + 1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	c = 0;
	ql_foreach(n, &prof_recent_alloc_list, link) {
		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
		confirm_record_released(n);
		++c;
	}
	assert_u_eq(c, OPT_ALLOC_MAX,
	    "Incorrect total number of allocations");

	/*
	 * Decreasing the limit shouldn't alter the list of records as long as
	 * the new limit is still no less than the length of the list.
	 */
	future = OPT_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	c = 0;
	ql_foreach(n, &prof_recent_alloc_list, link) {
		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
		confirm_record_released(n);
		++c;
	}
	assert_u_eq(c, OPT_ALLOC_MAX,
	    "Incorrect total number of allocations");

	/*
	 * Decreasing the limit should shorten the list of records if the new
	 * limit is less than the length of the list.
	 */
	future = OPT_ALLOC_MAX - 1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	c = 0;
	ql_foreach(n, &prof_recent_alloc_list, link) {
		++c;
		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
		confirm_record_released(n);
	}
	assert_u_eq(c, OPT_ALLOC_MAX - 1,
	    "Incorrect total number of allocations");

	/* Setting to unlimited shouldn't alter the list of records. */
	future = -1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	c = 0;
	ql_foreach(n, &prof_recent_alloc_list, link) {
		++c;
		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
		confirm_record_released(n);
	}
	assert_u_eq(c, OPT_ALLOC_MAX - 1,
	    "Incorrect total number of allocations");

	/* Downshift to only one record. */
	future = 1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	assert_false(ql_empty(&prof_recent_alloc_list), "Recent list is empty");
	n = ql_first(&prof_recent_alloc_list);
	confirm_record_size(n, 4 * OPT_ALLOC_MAX - 1);
	confirm_record_released(n);
	n = ql_next(&prof_recent_alloc_list, n, link);
	assert_ptr_null(n, "Recent list should only contain one record");

	/* Completely turn off. */
	future = 0;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	assert_true(ql_empty(&prof_recent_alloc_list),
	    "Recent list should be empty");

	/* Restore the settings. */
	future = OPT_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	assert_true(ql_empty(&prof_recent_alloc_list),
	    "Recent list should be empty");

	confirm_prof_setup();
}
TEST_END

#undef NTH_REQ_SIZE

#define DUMP_OUT_SIZE 4096
static char dump_out[DUMP_OUT_SIZE];
static size_t dump_out_len = 0;

static void
test_dump_write_cb(void *not_used, const char *str) {
	size_t len = strlen(str);
	assert(dump_out_len + len < DUMP_OUT_SIZE);
	memcpy(dump_out + dump_out_len, str, len + 1);
	dump_out_len += len;
}

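/*
 * The alloc_dump mallctl takes its input as a (write_cb, cbopaque) pointer
 * pair and streams the dump to the callback in chunks.
 */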
static void
call_dump(void) {
	static void *in[2] = {test_dump_write_cb, NULL};
	dump_out_len = 0;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_dump",
	    NULL, NULL, in, sizeof(in)), 0, "Dump mallctl raised error");
}

typedef struct {
	size_t size;
	size_t usize;
	bool released;
} confirm_record_t;

#define DUMP_ERROR "Dump output is wrong"

static void
confirm_record(const char *template, const confirm_record_t *records,
    const size_t n_records) {
	static const char *types[2] = {"alloc", "dalloc"};
	static char buf[64];

	/*
	 * The template string is of the form:
	 * "{...,\"recent_alloc\":[]}",
	 * and dump_out is of the form:
	 * "{...,\"recent_alloc\":[...]}".
	 * Using "- 2" cuts right before the closing "]}".
	 */
	assert_d_eq(memcmp(dump_out, template, strlen(template) - 2), 0,
	    DUMP_ERROR);
	assert_d_eq(memcmp(dump_out + strlen(dump_out) - 2,
	    template + strlen(template) - 2, 2), 0, DUMP_ERROR);

	const char *start = dump_out + strlen(template) - 2;
	const char *end = dump_out + strlen(dump_out) - 2;
	const confirm_record_t *record;
	for (record = records; record < records + n_records; ++record) {

#define ASSERT_CHAR(c) do {						\
	assert_true(start < end, DUMP_ERROR);				\
	assert_c_eq(*start++, c, DUMP_ERROR);				\
} while (0)

#define ASSERT_STR(s) do {						\
	const size_t len = strlen(s);					\
	assert_true(start + len <= end, DUMP_ERROR);			\
	assert_d_eq(memcmp(start, s, len), 0, DUMP_ERROR);		\
	start += len;							\
} while (0)

#define ASSERT_FORMATTED_STR(s, ...) do {				\
	malloc_snprintf(buf, sizeof(buf), s, __VA_ARGS__);		\
	ASSERT_STR(buf);						\
} while (0)

		if (record != records) {
			ASSERT_CHAR(',');
		}

		ASSERT_CHAR('{');

		ASSERT_STR("\"size\"");
		ASSERT_CHAR(':');
		ASSERT_FORMATTED_STR("%zu", record->size);
		ASSERT_CHAR(',');

		ASSERT_STR("\"usize\"");
		ASSERT_CHAR(':');
		ASSERT_FORMATTED_STR("%zu", record->usize);
		ASSERT_CHAR(',');

		ASSERT_STR("\"released\"");
		ASSERT_CHAR(':');
		ASSERT_STR(record->released ? "true" : "false");
		ASSERT_CHAR(',');

		const char **type = types;
		while (true) {
			ASSERT_FORMATTED_STR("\"%s_thread_uid\"", *type);
			ASSERT_CHAR(':');
			while (isdigit(*start)) {
				++start;
			}
			ASSERT_CHAR(',');

			if (opt_prof_sys_thread_name) {
				ASSERT_FORMATTED_STR("\"%s_thread_name\"",
				    *type);
				ASSERT_CHAR(':');
				ASSERT_CHAR('"');
				while (*start != '"') {
					++start;
				}
				ASSERT_CHAR('"');
				ASSERT_CHAR(',');
			}

			ASSERT_FORMATTED_STR("\"%s_time\"", *type);
			ASSERT_CHAR(':');
			while (isdigit(*start)) {
				++start;
			}
			ASSERT_CHAR(',');

			ASSERT_FORMATTED_STR("\"%s_trace\"", *type);
			ASSERT_CHAR(':');
			ASSERT_CHAR('[');
			while (isdigit(*start) || *start == 'x' ||
			    (*start >= 'a' && *start <= 'f') ||
			    *start == '\"' || *start == ',') {
				++start;
			}
			ASSERT_CHAR(']');

			if (strcmp(*type, "dalloc") == 0) {
				break;
			}

			assert(strcmp(*type, "alloc") == 0);
			if (!record->released) {
				break;
			}

			ASSERT_CHAR(',');
			++type;
		}

		ASSERT_CHAR('}');

#undef ASSERT_FORMATTED_STR
#undef ASSERT_STR
#undef ASSERT_CHAR

	}
	assert_ptr_eq(record, records + n_records, DUMP_ERROR);
	assert_ptr_eq(start, end, DUMP_ERROR);
}

TEST_BEGIN(test_prof_recent_alloc_dump) {
	test_skip_if(!config_prof);

	confirm_prof_setup();

	ssize_t future;
	void *p, *q;
	confirm_record_t records[2];

	assert_zu_eq(lg_prof_sample, (size_t)0,
	    "lg_prof_sample not set correctly");
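	/*
	 * lg_prof_sample == 0 means every allocation is sampled, hence the
	 * "sample_interval":1 in the expected dumps below.
	 */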

	future = 0;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	call_dump();
	expect_str_eq(dump_out, "{\"sample_interval\":1,"
	    "\"recent_alloc_max\":0,\"recent_alloc\":[]}", DUMP_ERROR);

	future = 2;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	call_dump();
	const char *template = "{\"sample_interval\":1,"
	    "\"recent_alloc_max\":2,\"recent_alloc\":[]}";
	expect_str_eq(dump_out, template, DUMP_ERROR);

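	/*
	 * Make two allocations whose records should appear in the dump; the
	 * expected usize of each is the size class computed by sz_s2u()
	 * (plain) or sz_sa2u() (aligned).
	 */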
	p = malloc(7);
	call_dump();
	records[0].size = 7;
	records[0].usize = sz_s2u(7);
	records[0].released = false;
	confirm_record(template, records, 1);

	q = mallocx(17, MALLOCX_ALIGN(128));
	call_dump();
	records[1].size = 17;
	records[1].usize = sz_sa2u(17, 128);
	records[1].released = false;
	confirm_record(template, records, 2);

	free(q);
	call_dump();
	records[1].released = true;
	confirm_record(template, records, 2);

	free(p);
	call_dump();
	records[0].released = true;
	confirm_record(template, records, 2);

	future = OPT_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	confirm_prof_setup();
}
TEST_END

#undef DUMP_ERROR
#undef DUMP_OUT_SIZE

#define N_THREADS 8
#define N_PTRS 512
#define N_CTLS 8
#define N_ITERS 2048
#define STRESS_ALLOC_MAX 4096

typedef struct {
	thd_t thd;
	size_t id;
	void *ptrs[N_PTRS];
	size_t count;
} thd_data_t;

static thd_data_t thd_data[N_THREADS];
static ssize_t test_max;

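/*
 * Dump write callback that just sleeps for ~1ms per chunk, widening the
 * window for races between dumping and the concurrent mallocs, frees, and
 * limit updates.
 */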
static void
test_write_cb(void *cbopaque, const char *str) {
	sleep_ns(1000 * 1000);
}

static void *
f_thread(void *arg) {
	const size_t thd_id = *(size_t *)arg;
	thd_data_t *data_p = thd_data + thd_id;
	assert(data_p->id == thd_id);
	data_p->count = 0;
	uint64_t rand = (uint64_t)thd_id;
	tsd_t *tsd = tsd_fetch();
	assert(test_max > 1);
	ssize_t last_max = -1;
	for (int i = 0; i < N_ITERS; i++) {
		rand = prng_range_u64(&rand, N_PTRS + N_CTLS * 5);
		assert(data_p->count <= N_PTRS);
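		/*
		 * Free a random live pointer: swap it with the last live
		 * slot, then free the tail; the swap keeps ptrs[] dense.
		 */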
		if (rand < data_p->count) {
			assert(data_p->count > 0);
			if (rand != data_p->count - 1) {
				assert(data_p->count > 1);
				void *temp = data_p->ptrs[rand];
				data_p->ptrs[rand] =
				    data_p->ptrs[data_p->count - 1];
				data_p->ptrs[data_p->count - 1] = temp;
			}
			free(data_p->ptrs[--data_p->count]);
		} else if (rand < N_PTRS) {
			assert(data_p->count < N_PTRS);
			data_p->ptrs[data_p->count++] = malloc(1);
		} else if (rand % 5 == 0) {
			prof_recent_alloc_dump(tsd, test_write_cb, NULL);
		} else if (rand % 5 == 1) {
			last_max = prof_recent_alloc_max_ctl_read();
		} else if (rand % 5 == 2) {
			last_max =
			    prof_recent_alloc_max_ctl_write(tsd, test_max * 2);
		} else if (rand % 5 == 3) {
			last_max =
			    prof_recent_alloc_max_ctl_write(tsd, test_max);
		} else {
			assert(rand % 5 == 4);
			last_max =
			    prof_recent_alloc_max_ctl_write(tsd, test_max / 2);
		}
		assert_zd_ge(last_max, -1, "Illegal last-N max");
	}

	while (data_p->count > 0) {
		free(data_p->ptrs[--data_p->count]);
	}

	return NULL;
}

TEST_BEGIN(test_prof_recent_stress) {
	test_skip_if(!config_prof);

	confirm_prof_setup();

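	/* First round: stress with the small OPT_ALLOC_MAX record limit. */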
	test_max = OPT_ALLOC_MAX;
	for (size_t i = 0; i < N_THREADS; i++) {
		thd_data_t *data_p = thd_data + i;
		data_p->id = i;
		thd_create(&data_p->thd, &f_thread, &data_p->id);
	}
	for (size_t i = 0; i < N_THREADS; i++) {
		thd_data_t *data_p = thd_data + i;
		thd_join(data_p->thd, NULL);
	}

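	/* Second round: repeat with the much larger STRESS_ALLOC_MAX limit. */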
	test_max = STRESS_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error");
	for (size_t i = 0; i < N_THREADS; i++) {
		thd_data_t *data_p = thd_data + i;
		data_p->id = i;
		thd_create(&data_p->thd, &f_thread, &data_p->id);
	}
	for (size_t i = 0; i < N_THREADS; i++) {
		thd_data_t *data_p = thd_data + i;
		thd_join(data_p->thd, NULL);
	}

	test_max = OPT_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error");
	confirm_prof_setup();
}
TEST_END

#undef STRESS_ALLOC_MAX
#undef N_ITERS
#undef N_CTLS
#undef N_PTRS
#undef N_THREADS

int
main(void) {
	return test(
	    test_confirm_setup,
	    test_prof_recent_off,
	    test_prof_recent_on,
	    test_prof_recent_alloc,
	    test_prof_recent_alloc_dump,
	    test_prof_recent_stress);
}