#include "test/jemalloc_test.h"

#include "jemalloc/internal/prof_data.h"
#include "jemalloc/internal/prof_sys.h"

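/*
 * Redirect profile dump output to /dev/null so that running the tests does
 * not leave dump files behind; main() installs this intercept before any
 * test runs.
 */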
static int
prof_dump_open_file_intercept(const char *filename, int mode) {
	int fd;

	fd = open("/dev/null", O_WRONLY);
	assert_d_ne(fd, -1, "Unexpected open() failure");

	return fd;
}

static void
set_prof_active(bool active) {
	expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
	    sizeof(active)), 0, "Unexpected mallctl failure");
}

static size_t
get_lg_prof_sample(void) {
	size_t ret;
	size_t sz = sizeof(size_t);

	expect_d_eq(mallctl("prof.lg_sample", (void *)&ret, &sz, NULL, 0), 0,
	    "Unexpected mallctl failure while reading profiling sample rate");
	return ret;
}

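/*
 * Reset all profile data via the "prof.reset" mallctl, simultaneously
 * setting a new sample rate, then verify that the new rate took effect.
 */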
static void
do_prof_reset(size_t lg_prof_sample_input) {
	expect_d_eq(mallctl("prof.reset", NULL, NULL,
	    (void *)&lg_prof_sample_input, sizeof(size_t)), 0,
	    "Unexpected mallctl failure while resetting profile data");
	expect_zu_eq(lg_prof_sample_input, get_lg_prof_sample(),
	    "Expected profile sample rate change");
}

TEST_BEGIN(test_prof_reset_basic) {
	size_t lg_prof_sample_orig, lg_prof_sample_cur, lg_prof_sample_next;
	size_t sz;
	unsigned i;

	test_skip_if(!config_prof);

	sz = sizeof(size_t);
	expect_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig,
	    &sz, NULL, 0), 0,
	    "Unexpected mallctl failure while reading profiling sample rate");
	expect_zu_eq(lg_prof_sample_orig, 0,
	    "Unexpected profiling sample rate");
	lg_prof_sample_cur = get_lg_prof_sample();
	expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
	    "\"prof.lg_sample\"");

	/* Test simple resets. */
	for (i = 0; i < 2; i++) {
		expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
		    "Unexpected mallctl failure while resetting profile data");
		lg_prof_sample_cur = get_lg_prof_sample();
		expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
		    "Unexpected profile sample rate change");
	}

	/* Test resets with prof.lg_sample changes. */
	lg_prof_sample_next = 1;
	for (i = 0; i < 2; i++) {
		do_prof_reset(lg_prof_sample_next);
		lg_prof_sample_cur = get_lg_prof_sample();
		expect_zu_eq(lg_prof_sample_cur, lg_prof_sample_next,
		    "Expected profile sample rate change");
		lg_prof_sample_next = lg_prof_sample_orig;
	}

	/* Make sure the test code restored prof.lg_sample. */
	lg_prof_sample_cur = get_lg_prof_sample();
	expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
	    "\"prof.lg_sample\"");
}
TEST_END

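/*
 * Verify that "prof.reset" zeroes the allocation counters while live
 * allocations retain their backtraces until they are freed.
 */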
TEST_BEGIN(test_prof_reset_cleanup) {
	test_skip_if(!config_prof);

	set_prof_active(true);

	expect_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
	void *p = mallocx(1, 0);
	expect_ptr_not_null(p, "Unexpected mallocx() failure");
	expect_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");

	prof_cnt_t cnt_all;
	prof_cnt_all(&cnt_all);
	expect_u64_eq(cnt_all.curobjs, 1, "Expected 1 allocation");

	expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
	    "Unexpected error while resetting heap profile data");
	prof_cnt_all(&cnt_all);
	expect_u64_eq(cnt_all.curobjs, 0, "Expected 0 allocations");
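	/* The backtrace persists because p is still allocated. */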
	expect_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");

	dallocx(p, 0);
	expect_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");

	set_prof_active(false);
}
TEST_END

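/*
 * Stress test: NTHREADS threads each cycle NALLOCS_PER_THREAD allocations
 * through a ring buffer while periodically resetting and dumping the heap
 * profile.
 */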
#define NTHREADS		4
#define NALLOCS_PER_THREAD	(1U << 13)
#define OBJ_RING_BUF_COUNT	1531
#define RESET_INTERVAL		(1U << 10)
#define DUMP_INTERVAL		3677
static void *
thd_start(void *varg) {
	unsigned thd_ind = *(unsigned *)varg;
	unsigned i;
	void *objs[OBJ_RING_BUF_COUNT];

	memset(objs, 0, sizeof(objs));

	for (i = 0; i < NALLOCS_PER_THREAD; i++) {
		if (i % RESET_INTERVAL == 0) {
			expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
			    0, "Unexpected error while resetting heap profile "
			    "data");
		}

		if (i % DUMP_INTERVAL == 0) {
			expect_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
			    0, "Unexpected error while dumping heap profile");
		}

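		/*
		 * Replace the oldest object in the ring buffer with a fresh
		 * allocation whose btalloc() argument, and therefore
		 * backtrace, is unique to this (thread, iteration) pair.
		 */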
		{
			void **pp = &objs[i % OBJ_RING_BUF_COUNT];
			if (*pp != NULL) {
				dallocx(*pp, 0);
				*pp = NULL;
			}
			*pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i);
			expect_ptr_not_null(*pp,
			    "Unexpected btalloc() failure");
		}
	}

	/* Clean up any remaining objects. */
	for (i = 0; i < OBJ_RING_BUF_COUNT; i++) {
		void **pp = &objs[i % OBJ_RING_BUF_COUNT];
		if (*pp != NULL) {
			dallocx(*pp, 0);
			*pp = NULL;
		}
	}

	return NULL;
}

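/*
 * Verify that concurrent resets, dumps, and allocations leave no residual
 * backtraces or per-thread tdata structures once the threads exit.
 */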
TEST_BEGIN(test_prof_reset) {
	size_t lg_prof_sample_orig;
	thd_t thds[NTHREADS];
	unsigned thd_args[NTHREADS];
	unsigned i;
	size_t bt_count, tdata_count;

	test_skip_if(!config_prof);

	bt_count = prof_bt_count();
	expect_zu_eq(bt_count, 0,
	    "Unexpected pre-existing backtraces");
	tdata_count = prof_tdata_count();

	lg_prof_sample_orig = get_lg_prof_sample();
	do_prof_reset(5);

	set_prof_active(true);

	for (i = 0; i < NTHREADS; i++) {
		thd_args[i] = i;
		thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
	}
	for (i = 0; i < NTHREADS; i++) {
		thd_join(thds[i], NULL);
	}

	expect_zu_eq(prof_bt_count(), bt_count,
	    "Unexpected backtrace count change");
	expect_zu_eq(prof_tdata_count(), tdata_count,
	    "Unexpected remaining tdata structures");

	set_prof_active(false);

	do_prof_reset(lg_prof_sample_orig);
}
TEST_END
#undef NTHREADS
#undef NALLOCS_PER_THREAD
#undef OBJ_RING_BUF_COUNT
#undef RESET_INTERVAL
#undef DUMP_INTERVAL

/* Test sampling at the same allocation site across resets. */
#define NITER 10
TEST_BEGIN(test_xallocx) {
	size_t lg_prof_sample_orig;
	unsigned i;
	void *ptrs[NITER];

	test_skip_if(!config_prof);

	lg_prof_sample_orig = get_lg_prof_sample();
	set_prof_active(true);

	/* Reset profiling. */
	do_prof_reset(0);

	for (i = 0; i < NITER; i++) {
		void *p;
		size_t sz, nsz;

		/* Reset profiling. */
		do_prof_reset(0);

		/* Allocate small object (which will be promoted). */
		p = ptrs[i] = mallocx(1, 0);
		expect_ptr_not_null(p, "Unexpected mallocx() failure");

		/* Reset profiling. */
		do_prof_reset(0);

		/* Perform successful xallocx(). */
		sz = sallocx(p, 0);
		expect_zu_eq(xallocx(p, sz, 0, 0), sz,
		    "Unexpected xallocx() failure");

		/* Perform unsuccessful xallocx(). */
		nsz = nallocx(sz+1, 0);
		expect_zu_eq(xallocx(p, nsz, 0, 0), sz,
		    "Unexpected xallocx() success");
	}

	for (i = 0; i < NITER; i++) {
		/* dallocx. */
		dallocx(ptrs[i], 0);
	}

	set_prof_active(false);
	do_prof_reset(lg_prof_sample_orig);
}
TEST_END
#undef NITER

int
main(void) {
	/* Intercept dumping prior to running any tests. */
	prof_dump_open_file = prof_dump_open_file_intercept;

	return test_no_reentrancy(
	    test_prof_reset_basic,
	    test_prof_reset_cleanup,
	    test_prof_reset,
	    test_xallocx);
}