xref: /netbsd-src/external/bsd/jemalloc/dist/src/stats.c (revision 3117ece4fc4a4ca4489ba793710b60b0d26bab6c)
1 #include "jemalloc/internal/jemalloc_preamble.h"
2 #include "jemalloc/internal/jemalloc_internal_includes.h"
3 
4 #include "jemalloc/internal/assert.h"
5 #include "jemalloc/internal/ctl.h"
6 #include "jemalloc/internal/emitter.h"
7 #include "jemalloc/internal/fxp.h"
8 #include "jemalloc/internal/mutex.h"
9 #include "jemalloc/internal/mutex_prof.h"
10 #include "jemalloc/internal/prof_stats.h"
11 
/*
 * Human-readable names of the global mutexes, generated from the
 * MUTEX_PROF_GLOBAL_MUTEXES x-macro so the array stays in sync with the
 * mutex_prof_num_global_mutexes index space.
 */
const char *global_mutex_names[mutex_prof_num_global_mutexes] = {
#define OP(mtx) #mtx,
	MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
};

/* Per-arena mutex names, generated the same way from MUTEX_PROF_ARENA_MUTEXES. */
const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
#define OP(mtx) #mtx,
	MUTEX_PROF_ARENA_MUTEXES
#undef OP
};
23 
/* Read the mallctl value named n into *v, which must have type t. */
#define CTL_GET(n, v, t) do {						\
	size_t sz = sizeof(t);						\
	xmallctl(n, (void *)v, &sz, NULL, 0);				\
} while (0)

/*
 * Extend a partially-filled mib (first miblen entries valid) with the
 * components of "name", so that CTL_LEAF can later resolve leaves below
 * that node without repeating the full name lookup.
 */
#define CTL_LEAF_PREPARE(mib, miblen, name) do {			\
	assert(miblen < CTL_MAX_DEPTH);					\
	size_t miblen_new = CTL_MAX_DEPTH;				\
	xmallctlmibnametomib(mib, miblen, name, &miblen_new);		\
	assert(miblen_new > miblen);					\
} while (0)

/*
 * Read the leaf named "leaf" directly under the prepared mib prefix
 * (first miblen entries valid) into *v of type t.
 */
#define CTL_LEAF(mib, miblen, leaf, v, t) do {			\
	assert(miblen < CTL_MAX_DEPTH);					\
	size_t miblen_new = CTL_MAX_DEPTH;				\
	size_t sz = sizeof(t);						\
	xmallctlbymibname(mib, miblen, leaf, &miblen_new, (void *)v,	\
	    &sz, NULL, 0);						\
	assert(miblen_new == miblen + 1);				\
} while (0)

/*
 * Read the mallctl value named n, with the arena index i substituted
 * into mib slot 2 (e.g. "stats.arenas.0.<leaf>" with 0 replaced by i),
 * into *v of type t.
 */
#define CTL_M2_GET(n, i, v, t) do {					\
	size_t mib[CTL_MAX_DEPTH];					\
	size_t miblen = sizeof(mib) / sizeof(size_t);			\
	size_t sz = sizeof(t);						\
	xmallctlnametomib(n, mib, &miblen);				\
	mib[2] = (i);							\
	xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0);		\
} while (0)
53 
/******************************************************************************/
/* Data. */

/* Whether to print allocator stats at exit (the "stats_print" option). */
bool opt_stats_print = false;
/* Option characters selecting which sections stats printing emits. */
char opt_stats_print_opts[stats_print_tot_num_options+1] = "";

/* Interval between periodic stats dumps; see STATS_INTERVAL_DEFAULT. */
int64_t opt_stats_interval = STATS_INTERVAL_DEFAULT;
/* Section-selection option string for interval-triggered stats output. */
char opt_stats_interval_opts[stats_print_tot_num_options+1] = "";

/* Global accumulator driving interval-triggered stats output. */
static counter_accum_t stats_interval_accumulated;
/* Per thread batch accum size for stats_interval. */
static uint64_t stats_interval_accum_batch;
66 
67 /******************************************************************************/
68 
/*
 * Convert a cumulative counter into an approximate events-per-second
 * rate.  Returns 0 for a zero counter or zero uptime; for uptimes below
 * one second the raw counter value is returned unchanged (avoids a
 * divide by a zero whole-second count).
 */
static uint64_t
rate_per_second(uint64_t value, uint64_t uptime_ns) {
	const uint64_t ns_per_s = 1000000000;

	if (value == 0 || uptime_ns == 0) {
		return 0;
	}
	if (uptime_ns < ns_per_s) {
		return value;
	}
	return value / (uptime_ns / ns_per_s);
}
82 
/*
 * Calculate x.yyy and output a string (takes a fixed sized char array).
 * The ratio dividend/divisor is rendered with three decimal places into
 * str.  Returns true (without writing str) when the ratio is undefined
 * (divisor == 0) or greater than 1; returns false on success.
 */
static bool
get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) {
	if (divisor == 0 || dividend > divisor) {
		/* The rate is not supposed to be greater than 1. */
		return true;
	}
	if (dividend > 0) {
		/* Guard the permille computation against overflow. */
		assert(UINT64_MAX / dividend >= 1000);
	}

	unsigned permille = (unsigned)((dividend * 1000) / divisor);
	if (permille >= 1000) {
		malloc_snprintf(str, 6, "1");
	} else if (permille >= 100) {
		malloc_snprintf(str, 6, "0.%u", permille);
	} else if (permille >= 10) {
		malloc_snprintf(str, 6, "0.0%u", permille);
	} else {
		malloc_snprintf(str, 6, "0.00%u", permille);
	}

	return false;
}
107 
/*
 * Initialize the table columns for mutex profiling output: an optional
 * leading name column (skipped when name is NULL), plus one column per
 * mutex counter.  uint64_t and uint32_t counters live in separate
 * arrays, filled in MUTEX_PROF_COUNTERS declaration order.
 */
static void
mutex_stats_init_cols(emitter_row_t *row, const char *table_name,
    emitter_col_t *name,
    emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
    emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) {
	mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0;
	mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0;

	emitter_col_t *col;

	if (name != NULL) {
		emitter_col_init(name, row);
		name->justify = emitter_justify_left;
		name->width = 21;
		name->type = emitter_type_title;
		name->str_val = table_name;
	}

	/* Derived (rate) columns are fixed at width 8; raw ones per type. */
#define WIDTH_uint32_t 12
#define WIDTH_uint64_t 16
#define OP(counter, counter_type, human, derived, base_counter)		\
	col = &col_##counter_type[k_##counter_type];			\
	++k_##counter_type;						\
	emitter_col_init(col, row);					\
	col->justify = emitter_justify_right;				\
	col->width = derived ? 8 : WIDTH_##counter_type;		\
	col->type = emitter_type_title;					\
	col->str_val = human;
	MUTEX_PROF_COUNTERS
#undef OP
#undef WIDTH_uint32_t
#undef WIDTH_uint64_t
	/* Override: the total wait time column uses a narrower width. */
	col_uint64_t[mutex_counter_total_wait_time_ps].width = 10;
}
142 
/*
 * Read every counter for the global mutex "name" into the column
 * arrays.  mib/miblen give the prepared mallctl prefix containing the
 * mutex node; raw counters are fetched via mallctl, while derived
 * counters are computed locally from their base counter and uptime.
 */
static void
mutex_stats_read_global(size_t mib[], size_t miblen, const char *name,
    emitter_col_t *col_name,
    emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
    emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
    uint64_t uptime) {
	CTL_LEAF_PREPARE(mib, miblen, name);
	size_t miblen_name = miblen + 1;

	col_name->str_val = name;

	emitter_col_t *dst;
	/*
	 * NOTE(review): counters are written through &dst->bool_val cast to
	 * the counter's type — presumably bool_val is the first member of the
	 * emitter_col_t value union, so its address aliases the whole union;
	 * confirm against emitter.h.
	 */
#define EMITTER_TYPE_uint32_t emitter_type_uint32
#define EMITTER_TYPE_uint64_t emitter_type_uint64
#define OP(counter, counter_type, human, derived, base_counter)		\
	dst = &col_##counter_type[mutex_counter_##counter];		\
	dst->type = EMITTER_TYPE_##counter_type;			\
	if (!derived) {							\
		CTL_LEAF(mib, miblen_name, #counter,			\
		    (counter_type *)&dst->bool_val, counter_type);	\
	} else {							\
		emitter_col_t *base =					\
		    &col_##counter_type[mutex_counter_##base_counter];	\
		dst->counter_type##_val =				\
		    (counter_type)rate_per_second(			\
		    base->counter_type##_val, uptime);			\
	}
	MUTEX_PROF_COUNTERS
#undef OP
#undef EMITTER_TYPE_uint32_t
#undef EMITTER_TYPE_uint64_t
}
175 
176 static void
177 mutex_stats_read_arena(size_t mib[], size_t miblen, const char *name,
178     emitter_col_t *col_name,
179     emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
180     emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
181     uint64_t uptime) {
182 	CTL_LEAF_PREPARE(mib, miblen, name);
183 	size_t miblen_name = miblen + 1;
184 
185 	col_name->str_val = name;
186 
187 	emitter_col_t *dst;
188 #define EMITTER_TYPE_uint32_t emitter_type_uint32
189 #define EMITTER_TYPE_uint64_t emitter_type_uint64
190 #define OP(counter, counter_type, human, derived, base_counter)		\
191 	dst = &col_##counter_type[mutex_counter_##counter];		\
192 	dst->type = EMITTER_TYPE_##counter_type;			\
193 	if (!derived) {							\
194 		CTL_LEAF(mib, miblen_name, #counter,			\
195 		    (counter_type *)&dst->bool_val, counter_type);	\
196 	} else {							\
197 		emitter_col_t *base =					\
198 		    &col_##counter_type[mutex_counter_##base_counter];	\
199 		dst->counter_type##_val =				\
200 		    (counter_type)rate_per_second(			\
201 		    base->counter_type##_val, uptime);			\
202 	}
203 	MUTEX_PROF_COUNTERS
204 #undef OP
205 #undef EMITTER_TYPE_uint32_t
206 #undef EMITTER_TYPE_uint64_t
207 }
208 
/*
 * Read every counter for a bin's "mutex" node (under the prepared
 * stats.arenas.<i>.bins.<j> prefix in mib/miblen) into the column
 * arrays.  Raw counters are fetched via mallctl; derived counters are
 * computed from their base counter and uptime.
 */
static void
mutex_stats_read_arena_bin(size_t mib[], size_t miblen,
    emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
    emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
    uint64_t uptime) {
	CTL_LEAF_PREPARE(mib, miblen, "mutex");
	size_t miblen_mutex = miblen + 1;

	emitter_col_t *dst;

	/*
	 * NOTE(review): as in mutex_stats_read_global(), counters are written
	 * through &dst->bool_val cast to the counter's type — presumably
	 * aliasing the emitter value union; confirm against emitter.h.
	 */
#define EMITTER_TYPE_uint32_t emitter_type_uint32
#define EMITTER_TYPE_uint64_t emitter_type_uint64
#define OP(counter, counter_type, human, derived, base_counter)		\
	dst = &col_##counter_type[mutex_counter_##counter];		\
	dst->type = EMITTER_TYPE_##counter_type;			\
	if (!derived) {							\
		CTL_LEAF(mib, miblen_mutex, #counter,			\
		    (counter_type *)&dst->bool_val, counter_type);	\
	} else {							\
		emitter_col_t *base =					\
		    &col_##counter_type[mutex_counter_##base_counter];	\
		dst->counter_type##_val =				\
		    (counter_type)rate_per_second(			\
		    base->counter_type##_val, uptime);			\
	}
	MUTEX_PROF_COUNTERS
#undef OP
#undef EMITTER_TYPE_uint32_t
#undef EMITTER_TYPE_uint64_t
}
239 
/* "row" can be NULL to avoid emitting in table mode. */
static void
mutex_stats_emit(emitter_t *emitter, emitter_row_t *row,
    emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
    emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) {
	if (row != NULL) {
		emitter_table_row(emitter, row);
	}

	mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0;
	mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0;

	emitter_col_t *col;

	/*
	 * JSON output includes only the raw counters; derived (per-second)
	 * columns are table-only.
	 *
	 * NOTE(review): the running k_* index advances only over raw
	 * counters, while the read side stores values at absolute
	 * mutex_counter_* indices (which include derived counters).  If
	 * MUTEX_PROF_COUNTERS interleaves derived counters with raw ones,
	 * these indexings disagree — confirm the ordering in mutex_prof.h.
	 */
#define EMITTER_TYPE_uint32_t emitter_type_uint32
#define EMITTER_TYPE_uint64_t emitter_type_uint64
#define OP(counter, type, human, derived, base_counter)		\
	if (!derived) {                    \
		col = &col_##type[k_##type];                        \
		++k_##type;                            \
		emitter_json_kv(emitter, #counter, EMITTER_TYPE_##type,        \
		    (const void *)&col->bool_val); \
	}
	MUTEX_PROF_COUNTERS;
#undef OP
#undef EMITTER_TYPE_uint32_t
#undef EMITTER_TYPE_uint64_t
}
268 
/* Declare a table column object for column_name. */
#define COL_DECLARE(column_name)					\
	emitter_col_t col_##column_name;

/* Attach col_##column_name to row_name and set its layout and type. */
#define COL_INIT(row_name, column_name, left_or_right, col_width, etype)\
	emitter_col_init(&col_##column_name, &row_name);		\
	col_##column_name.justify = emitter_justify_##left_or_right;	\
	col_##column_name.width = col_width;				\
	col_##column_name.type = emitter_type_##etype;

/* Declare and initialize a data column in one step. */
#define COL(row_name, column_name, left_or_right, col_width, etype)	\
	COL_DECLARE(column_name);					\
	COL_INIT(row_name, column_name, left_or_right, col_width, etype)

/* Declare a data column together with its matching header column. */
#define COL_HDR_DECLARE(column_name)					\
	COL_DECLARE(column_name);					\
	emitter_col_t header_##column_name;

/*
 * Initialize a data column and its header column; the header title is
 * "human" when non-NULL, otherwise the stringified column name.
 */
#define COL_HDR_INIT(row_name, column_name, human, left_or_right,	\
	col_width, etype)						\
	COL_INIT(row_name, column_name, left_or_right, col_width, etype)\
	emitter_col_init(&header_##column_name, &header_##row_name);	\
	header_##column_name.justify = emitter_justify_##left_or_right;	\
	header_##column_name.width = col_width;				\
	header_##column_name.type = emitter_type_title;			\
	header_##column_name.str_val = human ? human : #column_name;

/* Declare + initialize a data column and its header column together. */
#define COL_HDR(row_name, column_name, human, left_or_right, col_width,	\
    etype)								\
	COL_HDR_DECLARE(column_name)					\
	COL_HDR_INIT(row_name, column_name, human, left_or_right,	\
	    col_width, etype)
300 
/*
 * Emit per-size-class statistics for the small ("bins") size classes of
 * arena i, in both table and JSON form.  When mutex is true, per-bin
 * mutex profiling counters are included.  uptime is used to derive
 * per-second rates.  Runs of inactive bins are collapsed into "---" gap
 * markers in table output (but all bins are still emitted to JSON).
 */
JEMALLOC_COLD
static void
stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i,
    uint64_t uptime) {
	size_t page;
	bool in_gap, in_gap_prev;
	unsigned nbins, j;

	CTL_GET("arenas.page", &page, size_t);

	CTL_GET("arenas.nbins", &nbins, unsigned);

	emitter_row_t header_row;
	emitter_row_init(&header_row);

	emitter_row_t row;
	emitter_row_init(&row);

	/* Profiling columns are shown only for the merged-arenas view. */
	bool prof_stats_on = config_prof && opt_prof && opt_prof_stats
	    && i == MALLCTL_ARENAS_ALL;

	COL_HDR(row, size, NULL, right, 20, size)
	COL_HDR(row, ind, NULL, right, 4, unsigned)
	COL_HDR(row, allocated, NULL, right, 13, uint64)
	COL_HDR(row, nmalloc, NULL, right, 13, uint64)
	COL_HDR(row, nmalloc_ps, "(#/sec)", right, 8, uint64)
	COL_HDR(row, ndalloc, NULL, right, 13, uint64)
	COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64)
	COL_HDR(row, nrequests, NULL, right, 13, uint64)
	COL_HDR(row, nrequests_ps, "(#/sec)", right, 10, uint64)
	COL_HDR_DECLARE(prof_live_requested);
	COL_HDR_DECLARE(prof_live_count);
	COL_HDR_DECLARE(prof_accum_requested);
	COL_HDR_DECLARE(prof_accum_count);
	if (prof_stats_on) {
		COL_HDR_INIT(row, prof_live_requested, NULL, right, 21, uint64)
		COL_HDR_INIT(row, prof_live_count, NULL, right, 17, uint64)
		COL_HDR_INIT(row, prof_accum_requested, NULL, right, 21, uint64)
		COL_HDR_INIT(row, prof_accum_count, NULL, right, 17, uint64)
	}
	COL_HDR(row, nshards, NULL, right, 9, unsigned)
	COL_HDR(row, curregs, NULL, right, 13, size)
	COL_HDR(row, curslabs, NULL, right, 13, size)
	COL_HDR(row, nonfull_slabs, NULL, right, 15, size)
	COL_HDR(row, regs, NULL, right, 5, unsigned)
	COL_HDR(row, pgs, NULL, right, 4, size)
	/* To buffer a right- and left-justified column. */
	COL_HDR(row, justify_spacer, NULL, right, 1, title)
	COL_HDR(row, util, NULL, right, 6, title)
	COL_HDR(row, nfills, NULL, right, 13, uint64)
	COL_HDR(row, nfills_ps, "(#/sec)", right, 8, uint64)
	COL_HDR(row, nflushes, NULL, right, 13, uint64)
	COL_HDR(row, nflushes_ps, "(#/sec)", right, 8, uint64)
	COL_HDR(row, nslabs, NULL, right, 13, uint64)
	COL_HDR(row, nreslabs, NULL, right, 13, uint64)
	COL_HDR(row, nreslabs_ps, "(#/sec)", right, 8, uint64)

	/* Don't want to actually print the name. */
	header_justify_spacer.str_val = " ";
	col_justify_spacer.str_val = " ";

	emitter_col_t col_mutex64[mutex_prof_num_uint64_t_counters];
	emitter_col_t col_mutex32[mutex_prof_num_uint32_t_counters];

	emitter_col_t header_mutex64[mutex_prof_num_uint64_t_counters];
	emitter_col_t header_mutex32[mutex_prof_num_uint32_t_counters];

	if (mutex) {
		mutex_stats_init_cols(&row, NULL, NULL, col_mutex64,
		    col_mutex32);
		mutex_stats_init_cols(&header_row, NULL, NULL, header_mutex64,
		    header_mutex32);
	}

	/*
	 * We print a "bins:" header as part of the table row; we need to adjust
	 * the header size column to compensate.
	 */
	header_size.width -=5;
	emitter_table_printf(emitter, "bins:");
	emitter_table_row(emitter, &header_row);
	emitter_json_array_kv_begin(emitter, "bins");

	/* Prepare mib prefixes reused for every per-bin leaf read below. */
	size_t stats_arenas_mib[CTL_MAX_DEPTH];
	CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
	stats_arenas_mib[2] = i;
	CTL_LEAF_PREPARE(stats_arenas_mib, 3, "bins");

	size_t arenas_bin_mib[CTL_MAX_DEPTH];
	CTL_LEAF_PREPARE(arenas_bin_mib, 0, "arenas.bin");

	size_t prof_stats_mib[CTL_MAX_DEPTH];
	if (prof_stats_on) {
		CTL_LEAF_PREPARE(prof_stats_mib, 0, "prof.stats.bins");
	}

	for (j = 0, in_gap = false; j < nbins; j++) {
		uint64_t nslabs;
		size_t reg_size, slab_size, curregs;
		size_t curslabs;
		size_t nonfull_slabs;
		uint32_t nregs, nshards;
		uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
		uint64_t nreslabs;
		prof_stats_t prof_live;
		prof_stats_t prof_accum;

		stats_arenas_mib[4] = j;
		arenas_bin_mib[2] = j;

		CTL_LEAF(stats_arenas_mib, 5, "nslabs", &nslabs, uint64_t);

		if (prof_stats_on) {
			prof_stats_mib[3] = j;
			CTL_LEAF(prof_stats_mib, 4, "live", &prof_live,
			    prof_stats_t);
			CTL_LEAF(prof_stats_mib, 4, "accum", &prof_accum,
			    prof_stats_t);
		}

		/* A bin with no slabs (and no profiled activity) is a gap. */
		in_gap_prev = in_gap;
		if (prof_stats_on) {
			in_gap = (nslabs == 0 && prof_accum.count == 0);
		} else {
			in_gap = (nslabs == 0);
		}

		if (in_gap_prev && !in_gap) {
			emitter_table_printf(emitter,
			    "                     ---\n");
		}

		/* In JSON mode even gap bins must be emitted. */
		if (in_gap && !emitter_outputs_json(emitter)) {
			continue;
		}

		CTL_LEAF(arenas_bin_mib, 3, "size", &reg_size, size_t);
		CTL_LEAF(arenas_bin_mib, 3, "nregs", &nregs, uint32_t);
		CTL_LEAF(arenas_bin_mib, 3, "slab_size", &slab_size, size_t);
		CTL_LEAF(arenas_bin_mib, 3, "nshards", &nshards, uint32_t);
		CTL_LEAF(stats_arenas_mib, 5, "nmalloc", &nmalloc, uint64_t);
		CTL_LEAF(stats_arenas_mib, 5, "ndalloc", &ndalloc, uint64_t);
		CTL_LEAF(stats_arenas_mib, 5, "curregs", &curregs, size_t);
		CTL_LEAF(stats_arenas_mib, 5, "nrequests", &nrequests,
		    uint64_t);
		CTL_LEAF(stats_arenas_mib, 5, "nfills", &nfills, uint64_t);
		CTL_LEAF(stats_arenas_mib, 5, "nflushes", &nflushes, uint64_t);
		CTL_LEAF(stats_arenas_mib, 5, "nreslabs", &nreslabs, uint64_t);
		CTL_LEAF(stats_arenas_mib, 5, "curslabs", &curslabs, size_t);
		CTL_LEAF(stats_arenas_mib, 5, "nonfull_slabs", &nonfull_slabs,
		    size_t);

		if (mutex) {
			mutex_stats_read_arena_bin(stats_arenas_mib, 5,
			    col_mutex64, col_mutex32, uptime);
		}

		emitter_json_object_begin(emitter);
		emitter_json_kv(emitter, "nmalloc", emitter_type_uint64,
		    &nmalloc);
		emitter_json_kv(emitter, "ndalloc", emitter_type_uint64,
		    &ndalloc);
		emitter_json_kv(emitter, "curregs", emitter_type_size,
		    &curregs);
		emitter_json_kv(emitter, "nrequests", emitter_type_uint64,
		    &nrequests);
		if (prof_stats_on) {
			emitter_json_kv(emitter, "prof_live_requested",
			    emitter_type_uint64, &prof_live.req_sum);
			emitter_json_kv(emitter, "prof_live_count",
			    emitter_type_uint64, &prof_live.count);
			emitter_json_kv(emitter, "prof_accum_requested",
			    emitter_type_uint64, &prof_accum.req_sum);
			emitter_json_kv(emitter, "prof_accum_count",
			    emitter_type_uint64, &prof_accum.count);
		}
		emitter_json_kv(emitter, "nfills", emitter_type_uint64,
		    &nfills);
		emitter_json_kv(emitter, "nflushes", emitter_type_uint64,
		    &nflushes);
		emitter_json_kv(emitter, "nreslabs", emitter_type_uint64,
		    &nreslabs);
		emitter_json_kv(emitter, "curslabs", emitter_type_size,
		    &curslabs);
		emitter_json_kv(emitter, "nonfull_slabs", emitter_type_size,
		    &nonfull_slabs);
		if (mutex) {
			emitter_json_object_kv_begin(emitter, "mutex");
			mutex_stats_emit(emitter, NULL, col_mutex64,
			    col_mutex32);
			emitter_json_object_end(emitter);
		}
		emitter_json_object_end(emitter);

		/* Utilization = occupied regions / available regions. */
		size_t availregs = nregs * curslabs;
		char util[6];
		if (get_rate_str((uint64_t)curregs, (uint64_t)availregs, util))
		{
			if (availregs == 0) {
				malloc_snprintf(util, sizeof(util), "1");
			} else if (curregs > availregs) {
				/*
				 * Race detected: the counters were read in
				 * separate mallctl calls and concurrent
				 * operations happened in between.  In this case
				 * no meaningful utilization can be computed.
				 */
				malloc_snprintf(util, sizeof(util), " race");
			} else {
				not_reached();
			}
		}

		col_size.size_val = reg_size;
		col_ind.unsigned_val = j;
		col_allocated.size_val = curregs * reg_size;
		col_nmalloc.uint64_val = nmalloc;
		col_nmalloc_ps.uint64_val = rate_per_second(nmalloc, uptime);
		col_ndalloc.uint64_val = ndalloc;
		col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime);
		col_nrequests.uint64_val = nrequests;
		col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime);
		if (prof_stats_on) {
			col_prof_live_requested.uint64_val = prof_live.req_sum;
			col_prof_live_count.uint64_val = prof_live.count;
			col_prof_accum_requested.uint64_val =
			    prof_accum.req_sum;
			col_prof_accum_count.uint64_val = prof_accum.count;
		}
		col_nshards.unsigned_val = nshards;
		col_curregs.size_val = curregs;
		col_curslabs.size_val = curslabs;
		col_nonfull_slabs.size_val = nonfull_slabs;
		col_regs.unsigned_val = nregs;
		col_pgs.size_val = slab_size / page;
		col_util.str_val = util;
		col_nfills.uint64_val = nfills;
		col_nfills_ps.uint64_val = rate_per_second(nfills, uptime);
		col_nflushes.uint64_val = nflushes;
		col_nflushes_ps.uint64_val = rate_per_second(nflushes, uptime);
		col_nslabs.uint64_val = nslabs;
		col_nreslabs.uint64_val = nreslabs;
		col_nreslabs_ps.uint64_val = rate_per_second(nreslabs, uptime);

		/*
		 * Note that mutex columns were initialized above, if mutex ==
		 * true.
		 */

		emitter_table_row(emitter, &row);
	}
	emitter_json_array_end(emitter); /* Close "bins". */

	if (in_gap) {
		emitter_table_printf(emitter, "                     ---\n");
	}
}
558 
/*
 * Emit per-size-class statistics for the large ("lextents") size
 * classes of arena i.  Size-class indices continue after the nbins
 * small classes.  Rows with zero requests are collapsed into "---" gap
 * markers in table output; JSON output includes every class.
 */
JEMALLOC_COLD
static void
stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
	unsigned nbins, nlextents, j;
	bool in_gap, in_gap_prev;

	CTL_GET("arenas.nbins", &nbins, unsigned);
	CTL_GET("arenas.nlextents", &nlextents, unsigned);

	emitter_row_t header_row;
	emitter_row_init(&header_row);
	emitter_row_t row;
	emitter_row_init(&row);

	/* Profiling columns are shown only for the merged-arenas view. */
	bool prof_stats_on = config_prof && opt_prof && opt_prof_stats
	    && i == MALLCTL_ARENAS_ALL;

	COL_HDR(row, size, NULL, right, 20, size)
	COL_HDR(row, ind, NULL, right, 4, unsigned)
	COL_HDR(row, allocated, NULL, right, 13, size)
	COL_HDR(row, nmalloc, NULL, right, 13, uint64)
	COL_HDR(row, nmalloc_ps, "(#/sec)", right, 8, uint64)
	COL_HDR(row, ndalloc, NULL, right, 13, uint64)
	COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64)
	COL_HDR(row, nrequests, NULL, right, 13, uint64)
	COL_HDR(row, nrequests_ps, "(#/sec)", right, 8, uint64)
	COL_HDR_DECLARE(prof_live_requested)
	COL_HDR_DECLARE(prof_live_count)
	COL_HDR_DECLARE(prof_accum_requested)
	COL_HDR_DECLARE(prof_accum_count)
	if (prof_stats_on) {
		COL_HDR_INIT(row, prof_live_requested, NULL, right, 21, uint64)
		COL_HDR_INIT(row, prof_live_count, NULL, right, 17, uint64)
		COL_HDR_INIT(row, prof_accum_requested, NULL, right, 21, uint64)
		COL_HDR_INIT(row, prof_accum_count, NULL, right, 17, uint64)
	}
	COL_HDR(row, curlextents, NULL, right, 13, size)

	/* As with bins, we label the large extents table. */
	header_size.width -= 6;
	emitter_table_printf(emitter, "large:");
	emitter_table_row(emitter, &header_row);
	emitter_json_array_kv_begin(emitter, "lextents");

	/* Prepare mib prefixes reused for every per-class leaf read below. */
	size_t stats_arenas_mib[CTL_MAX_DEPTH];
	CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
	stats_arenas_mib[2] = i;
	CTL_LEAF_PREPARE(stats_arenas_mib, 3, "lextents");

	size_t arenas_lextent_mib[CTL_MAX_DEPTH];
	CTL_LEAF_PREPARE(arenas_lextent_mib, 0, "arenas.lextent");

	size_t prof_stats_mib[CTL_MAX_DEPTH];
	if (prof_stats_on) {
		CTL_LEAF_PREPARE(prof_stats_mib, 0, "prof.stats.lextents");
	}

	for (j = 0, in_gap = false; j < nlextents; j++) {
		uint64_t nmalloc, ndalloc, nrequests;
		size_t lextent_size, curlextents;
		prof_stats_t prof_live;
		prof_stats_t prof_accum;

		stats_arenas_mib[4] = j;
		arenas_lextent_mib[2] = j;

		CTL_LEAF(stats_arenas_mib, 5, "nmalloc", &nmalloc, uint64_t);
		CTL_LEAF(stats_arenas_mib, 5, "ndalloc", &ndalloc, uint64_t);
		CTL_LEAF(stats_arenas_mib, 5, "nrequests", &nrequests,
		    uint64_t);

		/* A class with no requests yet is rendered as a gap. */
		in_gap_prev = in_gap;
		in_gap = (nrequests == 0);

		if (in_gap_prev && !in_gap) {
			emitter_table_printf(emitter,
			    "                     ---\n");
		}

		CTL_LEAF(arenas_lextent_mib, 3, "size", &lextent_size, size_t);
		CTL_LEAF(stats_arenas_mib, 5, "curlextents", &curlextents,
		    size_t);

		if (prof_stats_on) {
			prof_stats_mib[3] = j;
			CTL_LEAF(prof_stats_mib, 4, "live", &prof_live,
			    prof_stats_t);
			CTL_LEAF(prof_stats_mib, 4, "accum", &prof_accum,
			    prof_stats_t);
		}

		emitter_json_object_begin(emitter);
		if (prof_stats_on) {
			emitter_json_kv(emitter, "prof_live_requested",
			    emitter_type_uint64, &prof_live.req_sum);
			emitter_json_kv(emitter, "prof_live_count",
			    emitter_type_uint64, &prof_live.count);
			emitter_json_kv(emitter, "prof_accum_requested",
			    emitter_type_uint64, &prof_accum.req_sum);
			emitter_json_kv(emitter, "prof_accum_count",
			    emitter_type_uint64, &prof_accum.count);
		}
		emitter_json_kv(emitter, "curlextents", emitter_type_size,
		    &curlextents);
		emitter_json_object_end(emitter);

		col_size.size_val = lextent_size;
		col_ind.unsigned_val = nbins + j;
		col_allocated.size_val = curlextents * lextent_size;
		col_nmalloc.uint64_val = nmalloc;
		col_nmalloc_ps.uint64_val = rate_per_second(nmalloc, uptime);
		col_ndalloc.uint64_val = ndalloc;
		col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime);
		col_nrequests.uint64_val = nrequests;
		col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime);
		if (prof_stats_on) {
			col_prof_live_requested.uint64_val = prof_live.req_sum;
			col_prof_live_count.uint64_val = prof_live.count;
			col_prof_accum_requested.uint64_val =
			    prof_accum.req_sum;
			col_prof_accum_count.uint64_val = prof_accum.count;
		}
		col_curlextents.size_val = curlextents;

		if (!in_gap) {
			emitter_table_row(emitter, &row);
		}
	}
	emitter_json_array_end(emitter); /* Close "lextents". */
	if (in_gap) {
		emitter_table_printf(emitter, "                     ---\n");
	}
}
692 
/*
 * Emit per-page-size-class extent statistics for arena i: counts and
 * byte totals of dirty, muzzy, and retained extents.  Classes with no
 * extents at all are collapsed into "---" gap markers in table output;
 * JSON output includes every class.
 */
JEMALLOC_COLD
static void
stats_arena_extents_print(emitter_t *emitter, unsigned i) {
	unsigned j;
	bool in_gap, in_gap_prev;
	emitter_row_t header_row;
	emitter_row_init(&header_row);
	emitter_row_t row;
	emitter_row_init(&row);

	COL_HDR(row, size, NULL, right, 20, size)
	COL_HDR(row, ind, NULL, right, 4, unsigned)
	COL_HDR(row, ndirty, NULL, right, 13, size)
	COL_HDR(row, dirty, NULL, right, 13, size)
	COL_HDR(row, nmuzzy, NULL, right, 13, size)
	COL_HDR(row, muzzy, NULL, right, 13, size)
	COL_HDR(row, nretained, NULL, right, 13, size)
	COL_HDR(row, retained, NULL, right, 13, size)
	COL_HDR(row, ntotal, NULL, right, 13, size)
	COL_HDR(row, total, NULL, right, 13, size)

	/* Label this section. */
	header_size.width -= 8;
	emitter_table_printf(emitter, "extents:");
	emitter_table_row(emitter, &header_row);
	emitter_json_array_kv_begin(emitter, "extents");

	/* Prepare the mib prefix reused for every per-class leaf read. */
	size_t stats_arenas_mib[CTL_MAX_DEPTH];
	CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
	stats_arenas_mib[2] = i;
	CTL_LEAF_PREPARE(stats_arenas_mib, 3, "extents");

	in_gap = false;
	for (j = 0; j < SC_NPSIZES; j++) {
		size_t ndirty, nmuzzy, nretained, total, dirty_bytes,
		    muzzy_bytes, retained_bytes, total_bytes;
		stats_arenas_mib[4] = j;

		CTL_LEAF(stats_arenas_mib, 5, "ndirty", &ndirty, size_t);
		CTL_LEAF(stats_arenas_mib, 5, "nmuzzy", &nmuzzy, size_t);
		CTL_LEAF(stats_arenas_mib, 5, "nretained", &nretained, size_t);
		CTL_LEAF(stats_arenas_mib, 5, "dirty_bytes", &dirty_bytes,
		    size_t);
		CTL_LEAF(stats_arenas_mib, 5, "muzzy_bytes", &muzzy_bytes,
		    size_t);
		CTL_LEAF(stats_arenas_mib, 5, "retained_bytes",
		    &retained_bytes, size_t);

		total = ndirty + nmuzzy + nretained;
		total_bytes = dirty_bytes + muzzy_bytes + retained_bytes;

		in_gap_prev = in_gap;
		in_gap = (total == 0);

		if (in_gap_prev && !in_gap) {
			emitter_table_printf(emitter,
			    "                     ---\n");
		}

		emitter_json_object_begin(emitter);
		emitter_json_kv(emitter, "ndirty", emitter_type_size, &ndirty);
		emitter_json_kv(emitter, "nmuzzy", emitter_type_size, &nmuzzy);
		emitter_json_kv(emitter, "nretained", emitter_type_size,
		    &nretained);

		emitter_json_kv(emitter, "dirty_bytes", emitter_type_size,
		    &dirty_bytes);
		emitter_json_kv(emitter, "muzzy_bytes", emitter_type_size,
		    &muzzy_bytes);
		emitter_json_kv(emitter, "retained_bytes", emitter_type_size,
		    &retained_bytes);
		emitter_json_object_end(emitter);

		col_size.size_val = sz_pind2sz(j);
		col_ind.size_val = j;
		col_ndirty.size_val = ndirty;
		col_dirty.size_val = dirty_bytes;
		col_nmuzzy.size_val = nmuzzy;
		col_muzzy.size_val = muzzy_bytes;
		col_nretained.size_val = nretained;
		col_retained.size_val = retained_bytes;
		col_ntotal.size_val = total;
		col_total.size_val = total_bytes;

		if (!in_gap) {
			emitter_table_row(emitter, &row);
		}
	}
	emitter_json_array_end(emitter); /* Close "extents". */
	if (in_gap) {
		emitter_table_printf(emitter, "                     ---\n");
	}
}
786 
787 static void
788 stats_arena_hpa_shard_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
789 	emitter_row_t header_row;
790 	emitter_row_init(&header_row);
791 	emitter_row_t row;
792 	emitter_row_init(&row);
793 
794 	uint64_t npurge_passes;
795 	uint64_t npurges;
796 	uint64_t nhugifies;
797 	uint64_t ndehugifies;
798 
799 	CTL_M2_GET("stats.arenas.0.hpa_shard.npurge_passes",
800 	    i, &npurge_passes, uint64_t);
801 	CTL_M2_GET("stats.arenas.0.hpa_shard.npurges",
802 	    i, &npurges, uint64_t);
803 	CTL_M2_GET("stats.arenas.0.hpa_shard.nhugifies",
804 	    i, &nhugifies, uint64_t);
805 	CTL_M2_GET("stats.arenas.0.hpa_shard.ndehugifies",
806 	    i, &ndehugifies, uint64_t);
807 
808 	size_t npageslabs_huge;
809 	size_t nactive_huge;
810 	size_t ndirty_huge;
811 
812 	size_t npageslabs_nonhuge;
813 	size_t nactive_nonhuge;
814 	size_t ndirty_nonhuge;
815 	size_t nretained_nonhuge;
816 
817 	size_t sec_bytes;
818 	CTL_M2_GET("stats.arenas.0.hpa_sec_bytes", i, &sec_bytes, size_t);
819 	emitter_kv(emitter, "sec_bytes", "Bytes in small extent cache",
820 	    emitter_type_size, &sec_bytes);
821 
822 	/* First, global stats. */
823 	emitter_table_printf(emitter,
824 	    "HPA shard stats:\n"
825 	    "  Purge passes: %" FMTu64 " (%" FMTu64 " / sec)\n"
826 	    "  Purges: %" FMTu64 " (%" FMTu64 " / sec)\n"
827 	    "  Hugeifies: %" FMTu64 " (%" FMTu64 " / sec)\n"
828 	    "  Dehugifies: %" FMTu64 " (%" FMTu64 " / sec)\n"
829 	    "\n",
830 	    npurge_passes, rate_per_second(npurge_passes, uptime),
831 	    npurges, rate_per_second(npurges, uptime),
832 	    nhugifies, rate_per_second(nhugifies, uptime),
833 	    ndehugifies, rate_per_second(ndehugifies, uptime));
834 
835 	emitter_json_object_kv_begin(emitter, "hpa_shard");
836 	emitter_json_kv(emitter, "npurge_passes", emitter_type_uint64,
837 	    &npurge_passes);
838 	emitter_json_kv(emitter, "npurges", emitter_type_uint64,
839 	    &npurges);
840 	emitter_json_kv(emitter, "nhugifies", emitter_type_uint64,
841 	    &nhugifies);
842 	emitter_json_kv(emitter, "ndehugifies", emitter_type_uint64,
843 	    &ndehugifies);
844 
845 	/* Next, full slab stats. */
846 	CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_huge",
847 	    i, &npageslabs_huge, size_t);
848 	CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_huge",
849 	    i, &nactive_huge, size_t);
850 	CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_huge",
851 	    i, &ndirty_huge, size_t);
852 
853 	CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_nonhuge",
854 	    i, &npageslabs_nonhuge, size_t);
855 	CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_nonhuge",
856 	    i, &nactive_nonhuge, size_t);
857 	CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_nonhuge",
858 	    i, &ndirty_nonhuge, size_t);
859 	nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
860 	    - nactive_nonhuge - ndirty_nonhuge;
861 
862 	emitter_table_printf(emitter,
863 	    "  In full slabs:\n"
864 	    "      npageslabs: %zu huge, %zu nonhuge\n"
865 	    "      nactive: %zu huge, %zu nonhuge \n"
866 	    "      ndirty: %zu huge, %zu nonhuge \n"
867 	    "      nretained: 0 huge, %zu nonhuge \n",
868 	    npageslabs_huge, npageslabs_nonhuge,
869 	    nactive_huge, nactive_nonhuge,
870 	    ndirty_huge, ndirty_nonhuge,
871 	    nretained_nonhuge);
872 
873 	emitter_json_object_kv_begin(emitter, "full_slabs");
874 	emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
875 	    &npageslabs_huge);
876 	emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
877 	    &nactive_huge);
878 	emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
879 	    &nactive_huge);
880 	emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
881 	    &npageslabs_nonhuge);
882 	emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
883 	    &nactive_nonhuge);
884 	emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
885 	    &ndirty_nonhuge);
886 	emitter_json_object_end(emitter); /* End "full_slabs" */
887 
888 	/* Next, empty slab stats. */
889 	CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_huge",
890 	    i, &npageslabs_huge, size_t);
891 	CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_huge",
892 	    i, &nactive_huge, size_t);
893 	CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge",
894 	    i, &ndirty_huge, size_t);
895 
896 	CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_nonhuge",
897 	    i, &npageslabs_nonhuge, size_t);
898 	CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_nonhuge",
899 	    i, &nactive_nonhuge, size_t);
900 	CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge",
901 	    i, &ndirty_nonhuge, size_t);
902 	nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
903 	    - nactive_nonhuge - ndirty_nonhuge;
904 
905 	emitter_table_printf(emitter,
906 	    "  In empty slabs:\n"
907 	    "      npageslabs: %zu huge, %zu nonhuge\n"
908 	    "      nactive: %zu huge, %zu nonhuge \n"
909 	    "      ndirty: %zu huge, %zu nonhuge \n"
910 	    "      nretained: 0 huge, %zu nonhuge \n"
911 	    "\n",
912 	    npageslabs_huge, npageslabs_nonhuge,
913 	    nactive_huge, nactive_nonhuge,
914 	    ndirty_huge, ndirty_nonhuge,
915 	    nretained_nonhuge);
916 
917 	emitter_json_object_kv_begin(emitter, "empty_slabs");
918 	emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
919 	    &npageslabs_huge);
920 	emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
921 	    &nactive_huge);
922 	emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
923 	    &nactive_huge);
924 	emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
925 	    &npageslabs_nonhuge);
926 	emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
927 	    &nactive_nonhuge);
928 	emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
929 	    &ndirty_nonhuge);
930 	emitter_json_object_end(emitter); /* End "empty_slabs" */
931 
932 	COL_HDR(row, size, NULL, right, 20, size)
933 	COL_HDR(row, ind, NULL, right, 4, unsigned)
934 	COL_HDR(row, npageslabs_huge, NULL, right, 16, size)
935 	COL_HDR(row, nactive_huge, NULL, right, 16, size)
936 	COL_HDR(row, ndirty_huge, NULL, right, 16, size)
937 	COL_HDR(row, npageslabs_nonhuge, NULL, right, 20, size)
938 	COL_HDR(row, nactive_nonhuge, NULL, right, 20, size)
939 	COL_HDR(row, ndirty_nonhuge, NULL, right, 20, size)
940 	COL_HDR(row, nretained_nonhuge, NULL, right, 20, size)
941 
942 	size_t stats_arenas_mib[CTL_MAX_DEPTH];
943 	CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
944 	stats_arenas_mib[2] = i;
945 	CTL_LEAF_PREPARE(stats_arenas_mib, 3, "hpa_shard.nonfull_slabs");
946 
947 	emitter_table_row(emitter, &header_row);
948 	emitter_json_array_kv_begin(emitter, "nonfull_slabs");
949 	bool in_gap = false;
950 	for (pszind_t j = 0; j < PSSET_NPSIZES && j < SC_NPSIZES; j++) {
951 		stats_arenas_mib[5] = j;
952 
953 		CTL_LEAF(stats_arenas_mib, 6, "npageslabs_huge",
954 		    &npageslabs_huge, size_t);
955 		CTL_LEAF(stats_arenas_mib, 6, "nactive_huge",
956 		    &nactive_huge, size_t);
957 		CTL_LEAF(stats_arenas_mib, 6, "ndirty_huge",
958 		    &ndirty_huge, size_t);
959 
960 		CTL_LEAF(stats_arenas_mib, 6, "npageslabs_nonhuge",
961 		    &npageslabs_nonhuge, size_t);
962 		CTL_LEAF(stats_arenas_mib, 6, "nactive_nonhuge",
963 		    &nactive_nonhuge, size_t);
964 		CTL_LEAF(stats_arenas_mib, 6, "ndirty_nonhuge",
965 		    &ndirty_nonhuge, size_t);
966 		nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
967 		    - nactive_nonhuge - ndirty_nonhuge;
968 
969 		bool in_gap_prev = in_gap;
970 		in_gap = (npageslabs_huge == 0 && npageslabs_nonhuge == 0);
971 		if (in_gap_prev && !in_gap) {
972 			emitter_table_printf(emitter,
973 			    "                     ---\n");
974 		}
975 
976 		col_size.size_val = sz_pind2sz(j);
977 		col_ind.size_val = j;
978 		col_npageslabs_huge.size_val = npageslabs_huge;
979 		col_nactive_huge.size_val = nactive_huge;
980 		col_ndirty_huge.size_val = ndirty_huge;
981 		col_npageslabs_nonhuge.size_val = npageslabs_nonhuge;
982 		col_nactive_nonhuge.size_val = nactive_nonhuge;
983 		col_ndirty_nonhuge.size_val = ndirty_nonhuge;
984 		col_nretained_nonhuge.size_val = nretained_nonhuge;
985 		if (!in_gap) {
986 			emitter_table_row(emitter, &row);
987 		}
988 
989 		emitter_json_object_begin(emitter);
990 		emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
991 		    &npageslabs_huge);
992 		emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
993 		    &nactive_huge);
994 		emitter_json_kv(emitter, "ndirty_huge", emitter_type_size,
995 		    &ndirty_huge);
996 		emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
997 		    &npageslabs_nonhuge);
998 		emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
999 		    &nactive_nonhuge);
1000 		emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
1001 		    &ndirty_nonhuge);
1002 		emitter_json_object_end(emitter);
1003 	}
1004 	emitter_json_array_end(emitter); /* End "nonfull_slabs" */
1005 	emitter_json_object_end(emitter); /* End "hpa_shard" */
1006 	if (in_gap) {
1007 		emitter_table_printf(emitter, "                     ---\n");
1008 	}
1009 }
1010 
1011 static void
1012 stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptime) {
1013 	emitter_row_t row;
1014 	emitter_col_t col_name;
1015 	emitter_col_t col64[mutex_prof_num_uint64_t_counters];
1016 	emitter_col_t col32[mutex_prof_num_uint32_t_counters];
1017 
1018 	emitter_row_init(&row);
1019 	mutex_stats_init_cols(&row, "", &col_name, col64, col32);
1020 
1021 	emitter_json_object_kv_begin(emitter, "mutexes");
1022 	emitter_table_row(emitter, &row);
1023 
1024 	size_t stats_arenas_mib[CTL_MAX_DEPTH];
1025 	CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
1026 	stats_arenas_mib[2] = arena_ind;
1027 	CTL_LEAF_PREPARE(stats_arenas_mib, 3, "mutexes");
1028 
1029 	for (mutex_prof_arena_ind_t i = 0; i < mutex_prof_num_arena_mutexes;
1030 	    i++) {
1031 		const char *name = arena_mutex_names[i];
1032 		emitter_json_object_kv_begin(emitter, name);
1033 		mutex_stats_read_arena(stats_arenas_mib, 4, name, &col_name,
1034 		    col64, col32, uptime);
1035 		mutex_stats_emit(emitter, &row, col64, col32);
1036 		emitter_json_object_end(emitter); /* Close the mutex dict. */
1037 	}
1038 	emitter_json_object_end(emitter); /* End "mutexes". */
1039 }
1040 
/*
 * Print all statistics for a single arena.
 *
 * emitter: output sink; most values are emitted both as JSON key/values and
 *          as formatted table rows.
 * i:       arena index, substituted into "stats.arenas.<i>.*" mallctl names
 *          via CTL_M2_GET.
 * bins/large/mutex/extents/hpa: which optional per-arena sections to append.
 */
JEMALLOC_COLD
static void
stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
    bool mutex, bool extents, bool hpa) {
	unsigned nthreads;
	const char *dss;
	ssize_t dirty_decay_ms, muzzy_decay_ms;
	size_t page, pactive, pdirty, pmuzzy, mapped, retained;
	size_t base, internal, resident, metadata_thp, extent_avail;
	uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
	uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
	size_t small_allocated;
	uint64_t small_nmalloc, small_ndalloc, small_nrequests, small_nfills,
	    small_nflushes;
	size_t large_allocated;
	uint64_t large_nmalloc, large_ndalloc, large_nrequests, large_nfills,
	    large_nflushes;
	size_t tcache_bytes, tcache_stashed_bytes, abandoned_vm;
	uint64_t uptime;

	/* Page size, used below to convert the active page count to bytes. */
	CTL_GET("arenas.page", &page, size_t);

	CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
	emitter_kv(emitter, "nthreads", "assigned threads",
	    emitter_type_unsigned, &nthreads);

	CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t);
	emitter_kv(emitter, "uptime_ns", "uptime", emitter_type_uint64,
	    &uptime);

	CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
	emitter_kv(emitter, "dss", "dss allocation precedence",
	    emitter_type_string, &dss);

	/* Decay settings plus dirty/muzzy page counts and purge counters. */
	CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms,
	    ssize_t);
	CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms,
	    ssize_t);
	CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
	CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
	CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t);
	CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t);
	CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise,
	    uint64_t);
	CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t);
	CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t);
	CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise,
	    uint64_t);
	CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t);

	emitter_row_t decay_row;
	emitter_row_init(&decay_row);

	/* JSON-style emission. */
	emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize,
	    &dirty_decay_ms);
	emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize,
	    &muzzy_decay_ms);

	emitter_json_kv(emitter, "pactive", emitter_type_size, &pactive);
	emitter_json_kv(emitter, "pdirty", emitter_type_size, &pdirty);
	emitter_json_kv(emitter, "pmuzzy", emitter_type_size, &pmuzzy);

	emitter_json_kv(emitter, "dirty_npurge", emitter_type_uint64,
	    &dirty_npurge);
	emitter_json_kv(emitter, "dirty_nmadvise", emitter_type_uint64,
	    &dirty_nmadvise);
	emitter_json_kv(emitter, "dirty_purged", emitter_type_uint64,
	    &dirty_purged);

	emitter_json_kv(emitter, "muzzy_npurge", emitter_type_uint64,
	    &muzzy_npurge);
	emitter_json_kv(emitter, "muzzy_nmadvise", emitter_type_uint64,
	    &muzzy_nmadvise);
	emitter_json_kv(emitter, "muzzy_purged", emitter_type_uint64,
	    &muzzy_purged);

	/*
	 * Table-style emission.  The COL() macro declares col_<name> and
	 * registers it with decay_row; the same row object is then re-emitted
	 * three times (title, dirty, muzzy) with refilled column values.
	 */
	COL(decay_row, decay_type, right, 9, title);
	col_decay_type.str_val = "decaying:";

	COL(decay_row, decay_time, right, 6, title);
	col_decay_time.str_val = "time";

	COL(decay_row, decay_npages, right, 13, title);
	col_decay_npages.str_val = "npages";

	COL(decay_row, decay_sweeps, right, 13, title);
	col_decay_sweeps.str_val = "sweeps";

	COL(decay_row, decay_madvises, right, 13, title);
	col_decay_madvises.str_val = "madvises";

	COL(decay_row, decay_purged, right, 13, title);
	col_decay_purged.str_val = "purged";

	/* Title row. */
	emitter_table_row(emitter, &decay_row);

	/* Dirty row. */
	col_decay_type.str_val = "dirty:";

	/* A negative decay time means decay is disabled; print "N/A". */
	if (dirty_decay_ms >= 0) {
		col_decay_time.type = emitter_type_ssize;
		col_decay_time.ssize_val = dirty_decay_ms;
	} else {
		col_decay_time.type = emitter_type_title;
		col_decay_time.str_val = "N/A";
	}

	col_decay_npages.type = emitter_type_size;
	col_decay_npages.size_val = pdirty;

	col_decay_sweeps.type = emitter_type_uint64;
	col_decay_sweeps.uint64_val = dirty_npurge;

	col_decay_madvises.type = emitter_type_uint64;
	col_decay_madvises.uint64_val = dirty_nmadvise;

	col_decay_purged.type = emitter_type_uint64;
	col_decay_purged.uint64_val = dirty_purged;

	emitter_table_row(emitter, &decay_row);

	/* Muzzy row. */
	col_decay_type.str_val = "muzzy:";

	if (muzzy_decay_ms >= 0) {
		col_decay_time.type = emitter_type_ssize;
		col_decay_time.ssize_val = muzzy_decay_ms;
	} else {
		col_decay_time.type = emitter_type_title;
		col_decay_time.str_val = "N/A";
	}

	col_decay_npages.type = emitter_type_size;
	col_decay_npages.size_val = pmuzzy;

	col_decay_sweeps.type = emitter_type_uint64;
	col_decay_sweeps.uint64_val = muzzy_npurge;

	col_decay_madvises.type = emitter_type_uint64;
	col_decay_madvises.uint64_val = muzzy_nmadvise;

	col_decay_purged.type = emitter_type_uint64;
	col_decay_purged.uint64_val = muzzy_purged;

	emitter_table_row(emitter, &decay_row);

	/* Small / large / total allocation counts. */
	emitter_row_t alloc_count_row;
	emitter_row_init(&alloc_count_row);

	COL(alloc_count_row, count_title, left, 21, title);
	col_count_title.str_val = "";

	COL(alloc_count_row, count_allocated, right, 16, title);
	col_count_allocated.str_val = "allocated";

	COL(alloc_count_row, count_nmalloc, right, 16, title);
	col_count_nmalloc.str_val = "nmalloc";
	COL(alloc_count_row, count_nmalloc_ps, right, 10, title);
	col_count_nmalloc_ps.str_val = "(#/sec)";

	COL(alloc_count_row, count_ndalloc, right, 16, title);
	col_count_ndalloc.str_val = "ndalloc";
	COL(alloc_count_row, count_ndalloc_ps, right, 10, title);
	col_count_ndalloc_ps.str_val = "(#/sec)";

	COL(alloc_count_row, count_nrequests, right, 16, title);
	col_count_nrequests.str_val = "nrequests";
	COL(alloc_count_row, count_nrequests_ps, right, 10, title);
	col_count_nrequests_ps.str_val = "(#/sec)";

	COL(alloc_count_row, count_nfills, right, 16, title);
	col_count_nfills.str_val = "nfill";
	COL(alloc_count_row, count_nfills_ps, right, 10, title);
	col_count_nfills_ps.str_val = "(#/sec)";

	COL(alloc_count_row, count_nflushes, right, 16, title);
	col_count_nflushes.str_val = "nflush";
	COL(alloc_count_row, count_nflushes_ps, right, 10, title);
	col_count_nflushes_ps.str_val = "(#/sec)";

	/* Header row for the allocation-count table. */
	emitter_table_row(emitter, &alloc_count_row);

	/* The rate columns always hold uint64 values from here on. */
	col_count_nmalloc_ps.type = emitter_type_uint64;
	col_count_ndalloc_ps.type = emitter_type_uint64;
	col_count_nrequests_ps.type = emitter_type_uint64;
	col_count_nfills_ps.type = emitter_type_uint64;
	col_count_nflushes_ps.type = emitter_type_uint64;

/*
 * Fetch "stats.arenas.<i>.<small_or_large>.<name>" into the local variable
 * <small_or_large>_<name>, emit it as JSON, and load it into the matching
 * table column (col_count_<name>).
 */
#define GET_AND_EMIT_ALLOC_STAT(small_or_large, name, valtype)		\
	CTL_M2_GET("stats.arenas.0." #small_or_large "." #name, i,	\
	    &small_or_large##_##name, valtype##_t);			\
	emitter_json_kv(emitter, #name, emitter_type_##valtype,		\
	    &small_or_large##_##name);					\
	col_count_##name.type = emitter_type_##valtype;		\
	col_count_##name.valtype##_val = small_or_large##_##name;

	emitter_json_object_kv_begin(emitter, "small");
	col_count_title.str_val = "small:";

	GET_AND_EMIT_ALLOC_STAT(small, allocated, size)
	GET_AND_EMIT_ALLOC_STAT(small, nmalloc, uint64)
	col_count_nmalloc_ps.uint64_val =
	    rate_per_second(col_count_nmalloc.uint64_val, uptime);
	GET_AND_EMIT_ALLOC_STAT(small, ndalloc, uint64)
	col_count_ndalloc_ps.uint64_val =
	    rate_per_second(col_count_ndalloc.uint64_val, uptime);
	GET_AND_EMIT_ALLOC_STAT(small, nrequests, uint64)
	col_count_nrequests_ps.uint64_val =
	    rate_per_second(col_count_nrequests.uint64_val, uptime);
	GET_AND_EMIT_ALLOC_STAT(small, nfills, uint64)
	col_count_nfills_ps.uint64_val =
	    rate_per_second(col_count_nfills.uint64_val, uptime);
	GET_AND_EMIT_ALLOC_STAT(small, nflushes, uint64)
	col_count_nflushes_ps.uint64_val =
	    rate_per_second(col_count_nflushes.uint64_val, uptime);

	emitter_table_row(emitter, &alloc_count_row);
	emitter_json_object_end(emitter); /* Close "small". */

	emitter_json_object_kv_begin(emitter, "large");
	col_count_title.str_val = "large:";

	GET_AND_EMIT_ALLOC_STAT(large, allocated, size)
	GET_AND_EMIT_ALLOC_STAT(large, nmalloc, uint64)
	col_count_nmalloc_ps.uint64_val =
	    rate_per_second(col_count_nmalloc.uint64_val, uptime);
	GET_AND_EMIT_ALLOC_STAT(large, ndalloc, uint64)
	col_count_ndalloc_ps.uint64_val =
	    rate_per_second(col_count_ndalloc.uint64_val, uptime);
	GET_AND_EMIT_ALLOC_STAT(large, nrequests, uint64)
	col_count_nrequests_ps.uint64_val =
	    rate_per_second(col_count_nrequests.uint64_val, uptime);
	GET_AND_EMIT_ALLOC_STAT(large, nfills, uint64)
	col_count_nfills_ps.uint64_val =
	    rate_per_second(col_count_nfills.uint64_val, uptime);
	GET_AND_EMIT_ALLOC_STAT(large, nflushes, uint64)
	col_count_nflushes_ps.uint64_val =
	    rate_per_second(col_count_nflushes.uint64_val, uptime);

	emitter_table_row(emitter, &alloc_count_row);
	emitter_json_object_end(emitter); /* Close "large". */

#undef GET_AND_EMIT_ALLOC_STAT

	/* Aggregated small + large stats are emitted only in table mode. */
	col_count_title.str_val = "total:";
	col_count_allocated.size_val = small_allocated + large_allocated;
	col_count_nmalloc.uint64_val = small_nmalloc + large_nmalloc;
	col_count_ndalloc.uint64_val = small_ndalloc + large_ndalloc;
	col_count_nrequests.uint64_val = small_nrequests + large_nrequests;
	col_count_nfills.uint64_val = small_nfills + large_nfills;
	col_count_nflushes.uint64_val = small_nflushes + large_nflushes;
	col_count_nmalloc_ps.uint64_val =
	    rate_per_second(col_count_nmalloc.uint64_val, uptime);
	col_count_ndalloc_ps.uint64_val =
	    rate_per_second(col_count_ndalloc.uint64_val, uptime);
	col_count_nrequests_ps.uint64_val =
	    rate_per_second(col_count_nrequests.uint64_val, uptime);
	col_count_nfills_ps.uint64_val =
	    rate_per_second(col_count_nfills.uint64_val, uptime);
	col_count_nflushes_ps.uint64_val =
	    rate_per_second(col_count_nflushes.uint64_val, uptime);
	emitter_table_row(emitter, &alloc_count_row);

	/* Two-column (label, value) rows for byte-count statistics. */
	emitter_row_t mem_count_row;
	emitter_row_init(&mem_count_row);

	emitter_col_t mem_count_title;
	emitter_col_init(&mem_count_title, &mem_count_row);
	mem_count_title.justify = emitter_justify_left;
	mem_count_title.width = 21;
	mem_count_title.type = emitter_type_title;
	mem_count_title.str_val = "";

	emitter_col_t mem_count_val;
	emitter_col_init(&mem_count_val, &mem_count_row);
	mem_count_val.justify = emitter_justify_right;
	mem_count_val.width = 16;
	mem_count_val.type = emitter_type_title;
	mem_count_val.str_val = "";

	/* Blank spacer row, then switch the value column to sizes. */
	emitter_table_row(emitter, &mem_count_row);
	mem_count_val.type = emitter_type_size;

	/* Active count in bytes is emitted only in table mode. */
	mem_count_title.str_val = "active:";
	mem_count_val.size_val = pactive * page;
	emitter_table_row(emitter, &mem_count_row);

/*
 * Fetch "stats.arenas.<i>.<stat>" into the like-named local variable, emit
 * it as JSON, and print a "<stat>: <value>" table row.
 */
#define GET_AND_EMIT_MEM_STAT(stat)					\
	CTL_M2_GET("stats.arenas.0."#stat, i, &stat, size_t);		\
	emitter_json_kv(emitter, #stat, emitter_type_size, &stat);	\
	mem_count_title.str_val = #stat":";				\
	mem_count_val.size_val = stat;					\
	emitter_table_row(emitter, &mem_count_row);

	GET_AND_EMIT_MEM_STAT(mapped)
	GET_AND_EMIT_MEM_STAT(retained)
	GET_AND_EMIT_MEM_STAT(base)
	GET_AND_EMIT_MEM_STAT(internal)
	GET_AND_EMIT_MEM_STAT(metadata_thp)
	GET_AND_EMIT_MEM_STAT(tcache_bytes)
	GET_AND_EMIT_MEM_STAT(tcache_stashed_bytes)
	GET_AND_EMIT_MEM_STAT(resident)
	GET_AND_EMIT_MEM_STAT(abandoned_vm)
	GET_AND_EMIT_MEM_STAT(extent_avail)
#undef GET_AND_EMIT_MEM_STAT

	/* Optional detailed sections, gated by the caller's flags. */
	if (mutex) {
		stats_arena_mutexes_print(emitter, i, uptime);
	}
	if (bins) {
		stats_arena_bins_print(emitter, mutex, i, uptime);
	}
	if (large) {
		stats_arena_lextents_print(emitter, i, uptime);
	}
	if (extents) {
		stats_arena_extents_print(emitter, i);
	}
	if (hpa) {
		stats_arena_hpa_shard_print(emitter, i, uptime);
	}
}
1369 
1370 JEMALLOC_COLD
1371 static void
1372 stats_general_print(emitter_t *emitter) {
1373 	const char *cpv;
1374 	bool bv, bv2;
1375 	unsigned uv;
1376 	uint32_t u32v;
1377 	uint64_t u64v;
1378 	int64_t i64v;
1379 	ssize_t ssv, ssv2;
1380 	size_t sv, bsz, usz, u32sz, u64sz, i64sz, ssz, sssz, cpsz;
1381 
1382 	bsz = sizeof(bool);
1383 	usz = sizeof(unsigned);
1384 	ssz = sizeof(size_t);
1385 	sssz = sizeof(ssize_t);
1386 	cpsz = sizeof(const char *);
1387 	u32sz = sizeof(uint32_t);
1388 	i64sz = sizeof(int64_t);
1389 	u64sz = sizeof(uint64_t);
1390 
1391 	CTL_GET("version", &cpv, const char *);
1392 	emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv);
1393 
1394 	/* config. */
1395 	emitter_dict_begin(emitter, "config", "Build-time option settings");
1396 #define CONFIG_WRITE_BOOL(name)						\
1397 	do {								\
1398 		CTL_GET("config."#name, &bv, bool);			\
1399 		emitter_kv(emitter, #name, "config."#name,		\
1400 		    emitter_type_bool, &bv);				\
1401 	} while (0)
1402 
1403 	CONFIG_WRITE_BOOL(cache_oblivious);
1404 	CONFIG_WRITE_BOOL(debug);
1405 	CONFIG_WRITE_BOOL(fill);
1406 	CONFIG_WRITE_BOOL(lazy_lock);
1407 	emitter_kv(emitter, "malloc_conf", "config.malloc_conf",
1408 	    emitter_type_string, &config_malloc_conf);
1409 
1410 	CONFIG_WRITE_BOOL(opt_safety_checks);
1411 	CONFIG_WRITE_BOOL(prof);
1412 	CONFIG_WRITE_BOOL(prof_libgcc);
1413 	CONFIG_WRITE_BOOL(prof_libunwind);
1414 	CONFIG_WRITE_BOOL(stats);
1415 	CONFIG_WRITE_BOOL(utrace);
1416 	CONFIG_WRITE_BOOL(xmalloc);
1417 #undef CONFIG_WRITE_BOOL
1418 	emitter_dict_end(emitter); /* Close "config" dict. */
1419 
1420 	/* opt. */
1421 #define OPT_WRITE(name, var, size, emitter_type)			\
1422 	if (je_mallctl("opt."name, (void *)&var, &size, NULL, 0) ==	\
1423 	    0) {							\
1424 		emitter_kv(emitter, name, "opt."name, emitter_type,	\
1425 		    &var);						\
1426 	}
1427 
1428 #define OPT_WRITE_MUTABLE(name, var1, var2, size, emitter_type,		\
1429     altname)								\
1430 	if (je_mallctl("opt."name, (void *)&var1, &size, NULL, 0) ==	\
1431 	    0 && je_mallctl(altname, (void *)&var2, &size, NULL, 0)	\
1432 	    == 0) {							\
1433 		emitter_kv_note(emitter, name, "opt."name,		\
1434 		    emitter_type, &var1, altname, emitter_type,		\
1435 		    &var2);						\
1436 	}
1437 
1438 #define OPT_WRITE_BOOL(name) OPT_WRITE(name, bv, bsz, emitter_type_bool)
1439 #define OPT_WRITE_BOOL_MUTABLE(name, altname)				\
1440 	OPT_WRITE_MUTABLE(name, bv, bv2, bsz, emitter_type_bool, altname)
1441 
1442 #define OPT_WRITE_UNSIGNED(name)					\
1443 	OPT_WRITE(name, uv, usz, emitter_type_unsigned)
1444 
1445 #define OPT_WRITE_INT64(name)						\
1446 	OPT_WRITE(name, i64v, i64sz, emitter_type_int64)
1447 #define OPT_WRITE_UINT64(name)						\
1448 	OPT_WRITE(name, u64v, u64sz, emitter_type_uint64)
1449 
1450 #define OPT_WRITE_SIZE_T(name)						\
1451 	OPT_WRITE(name, sv, ssz, emitter_type_size)
1452 #define OPT_WRITE_SSIZE_T(name)						\
1453 	OPT_WRITE(name, ssv, sssz, emitter_type_ssize)
1454 #define OPT_WRITE_SSIZE_T_MUTABLE(name, altname)			\
1455 	OPT_WRITE_MUTABLE(name, ssv, ssv2, sssz, emitter_type_ssize,	\
1456 	    altname)
1457 
1458 #define OPT_WRITE_CHAR_P(name)						\
1459 	OPT_WRITE(name, cpv, cpsz, emitter_type_string)
1460 
1461 	emitter_dict_begin(emitter, "opt", "Run-time option settings");
1462 
1463 	OPT_WRITE_BOOL("abort")
1464 	OPT_WRITE_BOOL("abort_conf")
1465 	OPT_WRITE_BOOL("cache_oblivious")
1466 	OPT_WRITE_BOOL("confirm_conf")
1467 	OPT_WRITE_BOOL("retain")
1468 	OPT_WRITE_CHAR_P("dss")
1469 	OPT_WRITE_UNSIGNED("narenas")
1470 	OPT_WRITE_CHAR_P("percpu_arena")
1471 	OPT_WRITE_SIZE_T("oversize_threshold")
1472 	OPT_WRITE_BOOL("hpa")
1473 	OPT_WRITE_SIZE_T("hpa_slab_max_alloc")
1474 	OPT_WRITE_SIZE_T("hpa_hugification_threshold")
1475 	OPT_WRITE_UINT64("hpa_hugify_delay_ms")
1476 	OPT_WRITE_UINT64("hpa_min_purge_interval_ms")
1477 	if (je_mallctl("opt.hpa_dirty_mult", (void *)&u32v, &u32sz, NULL, 0)
1478 	    == 0) {
1479 		/*
1480 		 * We cheat a little and "know" the secret meaning of this
1481 		 * representation.
1482 		 */
1483 		if (u32v == (uint32_t)-1) {
1484 			const char *neg1 = "-1";
1485 			emitter_kv(emitter, "hpa_dirty_mult",
1486 			    "opt.hpa_dirty_mult", emitter_type_string, &neg1);
1487 		} else {
1488 			char buf[FXP_BUF_SIZE];
1489 			fxp_print(u32v, buf);
1490 			const char *bufp = buf;
1491 			emitter_kv(emitter, "hpa_dirty_mult",
1492 			    "opt.hpa_dirty_mult", emitter_type_string, &bufp);
1493 		}
1494 	}
1495 	OPT_WRITE_SIZE_T("hpa_sec_nshards")
1496 	OPT_WRITE_SIZE_T("hpa_sec_max_alloc")
1497 	OPT_WRITE_SIZE_T("hpa_sec_max_bytes")
1498 	OPT_WRITE_SIZE_T("hpa_sec_bytes_after_flush")
1499 	OPT_WRITE_SIZE_T("hpa_sec_batch_fill_extra")
1500 	OPT_WRITE_CHAR_P("metadata_thp")
1501 	OPT_WRITE_INT64("mutex_max_spin")
1502 	OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread")
1503 	OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms")
1504 	OPT_WRITE_SSIZE_T_MUTABLE("muzzy_decay_ms", "arenas.muzzy_decay_ms")
1505 	OPT_WRITE_SIZE_T("lg_extent_max_active_fit")
1506 	OPT_WRITE_CHAR_P("junk")
1507 	OPT_WRITE_BOOL("zero")
1508 	OPT_WRITE_BOOL("utrace")
1509 	OPT_WRITE_BOOL("xmalloc")
1510 	OPT_WRITE_BOOL("experimental_infallible_new")
1511 	OPT_WRITE_BOOL("tcache")
1512 	OPT_WRITE_SIZE_T("tcache_max")
1513 	OPT_WRITE_UNSIGNED("tcache_nslots_small_min")
1514 	OPT_WRITE_UNSIGNED("tcache_nslots_small_max")
1515 	OPT_WRITE_UNSIGNED("tcache_nslots_large")
1516 	OPT_WRITE_SSIZE_T("lg_tcache_nslots_mul")
1517 	OPT_WRITE_SIZE_T("tcache_gc_incr_bytes")
1518 	OPT_WRITE_SIZE_T("tcache_gc_delay_bytes")
1519 	OPT_WRITE_UNSIGNED("lg_tcache_flush_small_div")
1520 	OPT_WRITE_UNSIGNED("lg_tcache_flush_large_div")
1521 	OPT_WRITE_CHAR_P("thp")
1522 	OPT_WRITE_BOOL("prof")
1523 	OPT_WRITE_CHAR_P("prof_prefix")
1524 	OPT_WRITE_BOOL_MUTABLE("prof_active", "prof.active")
1525 	OPT_WRITE_BOOL_MUTABLE("prof_thread_active_init",
1526 	    "prof.thread_active_init")
1527 	OPT_WRITE_SSIZE_T_MUTABLE("lg_prof_sample", "prof.lg_sample")
1528 	OPT_WRITE_BOOL("prof_accum")
1529 	OPT_WRITE_SSIZE_T("lg_prof_interval")
1530 	OPT_WRITE_BOOL("prof_gdump")
1531 	OPT_WRITE_BOOL("prof_final")
1532 	OPT_WRITE_BOOL("prof_leak")
1533 	OPT_WRITE_BOOL("prof_leak_error")
1534 	OPT_WRITE_BOOL("stats_print")
1535 	OPT_WRITE_CHAR_P("stats_print_opts")
1536 	OPT_WRITE_BOOL("stats_print")
1537 	OPT_WRITE_CHAR_P("stats_print_opts")
1538 	OPT_WRITE_INT64("stats_interval")
1539 	OPT_WRITE_CHAR_P("stats_interval_opts")
1540 	OPT_WRITE_CHAR_P("zero_realloc")
1541 
1542 	emitter_dict_end(emitter);
1543 
1544 #undef OPT_WRITE
1545 #undef OPT_WRITE_MUTABLE
1546 #undef OPT_WRITE_BOOL
1547 #undef OPT_WRITE_BOOL_MUTABLE
1548 #undef OPT_WRITE_UNSIGNED
1549 #undef OPT_WRITE_SSIZE_T
1550 #undef OPT_WRITE_SSIZE_T_MUTABLE
1551 #undef OPT_WRITE_CHAR_P
1552 
1553 	/* prof. */
1554 	if (config_prof) {
1555 		emitter_dict_begin(emitter, "prof", "Profiling settings");
1556 
1557 		CTL_GET("prof.thread_active_init", &bv, bool);
1558 		emitter_kv(emitter, "thread_active_init",
1559 		    "prof.thread_active_init", emitter_type_bool, &bv);
1560 
1561 		CTL_GET("prof.active", &bv, bool);
1562 		emitter_kv(emitter, "active", "prof.active", emitter_type_bool,
1563 		    &bv);
1564 
1565 		CTL_GET("prof.gdump", &bv, bool);
1566 		emitter_kv(emitter, "gdump", "prof.gdump", emitter_type_bool,
1567 		    &bv);
1568 
1569 		CTL_GET("prof.interval", &u64v, uint64_t);
1570 		emitter_kv(emitter, "interval", "prof.interval",
1571 		    emitter_type_uint64, &u64v);
1572 
1573 		CTL_GET("prof.lg_sample", &ssv, ssize_t);
1574 		emitter_kv(emitter, "lg_sample", "prof.lg_sample",
1575 		    emitter_type_ssize, &ssv);
1576 
1577 		emitter_dict_end(emitter); /* Close "prof". */
1578 	}
1579 
1580 	/* arenas. */
1581 	/*
1582 	 * The json output sticks arena info into an "arenas" dict; the table
1583 	 * output puts them at the top-level.
1584 	 */
1585 	emitter_json_object_kv_begin(emitter, "arenas");
1586 
1587 	CTL_GET("arenas.narenas", &uv, unsigned);
1588 	emitter_kv(emitter, "narenas", "Arenas", emitter_type_unsigned, &uv);
1589 
1590 	/*
1591 	 * Decay settings are emitted only in json mode; in table mode, they're
1592 	 * emitted as notes with the opt output, above.
1593 	 */
1594 	CTL_GET("arenas.dirty_decay_ms", &ssv, ssize_t);
1595 	emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize, &ssv);
1596 
1597 	CTL_GET("arenas.muzzy_decay_ms", &ssv, ssize_t);
1598 	emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize, &ssv);
1599 
1600 	CTL_GET("arenas.quantum", &sv, size_t);
1601 	emitter_kv(emitter, "quantum", "Quantum size", emitter_type_size, &sv);
1602 
1603 	CTL_GET("arenas.page", &sv, size_t);
1604 	emitter_kv(emitter, "page", "Page size", emitter_type_size, &sv);
1605 
1606 	if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
1607 		emitter_kv(emitter, "tcache_max",
1608 		    "Maximum thread-cached size class", emitter_type_size, &sv);
1609 	}
1610 
1611 	unsigned arenas_nbins;
1612 	CTL_GET("arenas.nbins", &arenas_nbins, unsigned);
1613 	emitter_kv(emitter, "nbins", "Number of bin size classes",
1614 	    emitter_type_unsigned, &arenas_nbins);
1615 
1616 	unsigned arenas_nhbins;
1617 	CTL_GET("arenas.nhbins", &arenas_nhbins, unsigned);
1618 	emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes",
1619 	    emitter_type_unsigned, &arenas_nhbins);
1620 
1621 	/*
1622 	 * We do enough mallctls in a loop that we actually want to omit them
1623 	 * (not just omit the printing).
1624 	 */
1625 	if (emitter_outputs_json(emitter)) {
1626 		emitter_json_array_kv_begin(emitter, "bin");
1627 		size_t arenas_bin_mib[CTL_MAX_DEPTH];
1628 		CTL_LEAF_PREPARE(arenas_bin_mib, 0, "arenas.bin");
1629 		for (unsigned i = 0; i < arenas_nbins; i++) {
1630 			arenas_bin_mib[2] = i;
1631 			emitter_json_object_begin(emitter);
1632 
1633 			CTL_LEAF(arenas_bin_mib, 3, "size", &sv, size_t);
1634 			emitter_json_kv(emitter, "size", emitter_type_size,
1635 			    &sv);
1636 
1637 			CTL_LEAF(arenas_bin_mib, 3, "nregs", &u32v, uint32_t);
1638 			emitter_json_kv(emitter, "nregs", emitter_type_uint32,
1639 			    &u32v);
1640 
1641 			CTL_LEAF(arenas_bin_mib, 3, "slab_size", &sv, size_t);
1642 			emitter_json_kv(emitter, "slab_size", emitter_type_size,
1643 			    &sv);
1644 
1645 			CTL_LEAF(arenas_bin_mib, 3, "nshards", &u32v, uint32_t);
1646 			emitter_json_kv(emitter, "nshards", emitter_type_uint32,
1647 			    &u32v);
1648 
1649 			emitter_json_object_end(emitter);
1650 		}
1651 		emitter_json_array_end(emitter); /* Close "bin". */
1652 	}
1653 
1654 	unsigned nlextents;
1655 	CTL_GET("arenas.nlextents", &nlextents, unsigned);
1656 	emitter_kv(emitter, "nlextents", "Number of large size classes",
1657 	    emitter_type_unsigned, &nlextents);
1658 
1659 	if (emitter_outputs_json(emitter)) {
1660 		emitter_json_array_kv_begin(emitter, "lextent");
1661 		size_t arenas_lextent_mib[CTL_MAX_DEPTH];
1662 		CTL_LEAF_PREPARE(arenas_lextent_mib, 0, "arenas.lextent");
1663 		for (unsigned i = 0; i < nlextents; i++) {
1664 			arenas_lextent_mib[2] = i;
1665 			emitter_json_object_begin(emitter);
1666 
1667 			CTL_LEAF(arenas_lextent_mib, 3, "size", &sv, size_t);
1668 			emitter_json_kv(emitter, "size", emitter_type_size,
1669 			    &sv);
1670 
1671 			emitter_json_object_end(emitter);
1672 		}
1673 		emitter_json_array_end(emitter); /* Close "lextent". */
1674 	}
1675 
1676 	emitter_json_object_end(emitter); /* Close "arenas" */
1677 }
1678 
/*
 * Emit the process-wide statistics: the global "stats.*" mallctl values,
 * background-thread activity, optionally the global mutex profiling data
 * (mutex), and then per-arena statistics for the merged, destroyed, and/or
 * unmerged views as selected by the corresponding flags.  The bins, large,
 * extents, and hpa flags are forwarded to stats_arena_print().  Output is
 * produced through the emitter in both table and JSON form.
 */
JEMALLOC_COLD
static void
stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
    bool unmerged, bool bins, bool large, bool mutex, bool extents, bool hpa) {
	/*
	 * These should be deleted.  We keep them around for a while, to aid in
	 * the transition to the emitter code.
	 */
	size_t allocated, active, metadata, metadata_thp, resident, mapped,
	    retained;
	size_t num_background_threads;
	size_t zero_reallocs;
	uint64_t background_thread_num_runs, background_thread_run_interval;

	CTL_GET("stats.allocated", &allocated, size_t);
	CTL_GET("stats.active", &active, size_t);
	CTL_GET("stats.metadata", &metadata, size_t);
	CTL_GET("stats.metadata_thp", &metadata_thp, size_t);
	CTL_GET("stats.resident", &resident, size_t);
	CTL_GET("stats.mapped", &mapped, size_t);
	CTL_GET("stats.retained", &retained, size_t);

	CTL_GET("stats.zero_reallocs", &zero_reallocs, size_t);

	if (have_background_thread) {
		CTL_GET("stats.background_thread.num_threads",
		    &num_background_threads, size_t);
		CTL_GET("stats.background_thread.num_runs",
		    &background_thread_num_runs, uint64_t);
		CTL_GET("stats.background_thread.run_interval",
		    &background_thread_run_interval, uint64_t);
	} else {
		/* No background-thread support; report zeros. */
		num_background_threads = 0;
		background_thread_num_runs = 0;
		background_thread_run_interval = 0;
	}

	/* Generic global stats. */
	emitter_json_object_kv_begin(emitter, "stats");
	emitter_json_kv(emitter, "allocated", emitter_type_size, &allocated);
	emitter_json_kv(emitter, "active", emitter_type_size, &active);
	emitter_json_kv(emitter, "metadata", emitter_type_size, &metadata);
	emitter_json_kv(emitter, "metadata_thp", emitter_type_size,
	    &metadata_thp);
	emitter_json_kv(emitter, "resident", emitter_type_size, &resident);
	emitter_json_kv(emitter, "mapped", emitter_type_size, &mapped);
	emitter_json_kv(emitter, "retained", emitter_type_size, &retained);
	emitter_json_kv(emitter, "zero_reallocs", emitter_type_size,
	    &zero_reallocs);

	emitter_table_printf(emitter, "Allocated: %zu, active: %zu, "
	    "metadata: %zu (n_thp %zu), resident: %zu, mapped: %zu, "
	    "retained: %zu\n", allocated, active, metadata, metadata_thp,
	    resident, mapped, retained);

	/* Strange behaviors */
	emitter_table_printf(emitter,
	    "Count of realloc(non-null-ptr, 0) calls: %zu\n", zero_reallocs);

	/* Background thread stats. */
	emitter_json_object_kv_begin(emitter, "background_thread");
	emitter_json_kv(emitter, "num_threads", emitter_type_size,
	    &num_background_threads);
	emitter_json_kv(emitter, "num_runs", emitter_type_uint64,
	    &background_thread_num_runs);
	emitter_json_kv(emitter, "run_interval", emitter_type_uint64,
	    &background_thread_run_interval);
	emitter_json_object_end(emitter); /* Close "background_thread". */

	emitter_table_printf(emitter, "Background threads: %zu, "
	    "num_runs: %"FMTu64", run_interval: %"FMTu64" ns\n",
	    num_background_threads, background_thread_num_runs,
	    background_thread_run_interval);

	if (mutex) {
		/* Global mutex profiling data, one row per global mutex. */
		emitter_row_t row;
		emitter_col_t name;
		emitter_col_t col64[mutex_prof_num_uint64_t_counters];
		emitter_col_t col32[mutex_prof_num_uint32_t_counters];
		uint64_t uptime;

		emitter_row_init(&row);
		mutex_stats_init_cols(&row, "", &name, col64, col32);

		emitter_table_row(emitter, &row);
		emitter_json_object_kv_begin(emitter, "mutexes");

		/* Arena 0's uptime serves as the uptime for global mutexes. */
		CTL_M2_GET("stats.arenas.0.uptime", 0, &uptime, uint64_t);

		size_t stats_mutexes_mib[CTL_MAX_DEPTH];
		CTL_LEAF_PREPARE(stats_mutexes_mib, 0, "stats.mutexes");
		for (int i = 0; i < mutex_prof_num_global_mutexes; i++) {
			mutex_stats_read_global(stats_mutexes_mib, 2,
			    global_mutex_names[i], &name, col64, col32, uptime);
			emitter_json_object_kv_begin(emitter, global_mutex_names[i]);
			mutex_stats_emit(emitter, &row, col64, col32);
			emitter_json_object_end(emitter);
		}

		emitter_json_object_end(emitter); /* Close "mutexes". */
	}

	emitter_json_object_end(emitter); /* Close "stats". */

	if (merged || destroyed || unmerged) {
		unsigned narenas;

		emitter_json_object_kv_begin(emitter, "stats.arenas");

		CTL_GET("arenas.narenas", &narenas, unsigned);
		size_t mib[3];
		size_t miblen = sizeof(mib) / sizeof(size_t);
		size_t sz;
		VARIABLE_ARRAY(bool, initialized, narenas);
		bool destroyed_initialized;
		unsigned i, j, ninitialized;

		/*
		 * Record which arenas are initialized, and whether the
		 * special "destroyed" pseudo-arena has any stats, by querying
		 * "arena.<i>.initialized" through a shared mib.
		 */
		xmallctlnametomib("arena.0.initialized", mib, &miblen);
		for (i = ninitialized = 0; i < narenas; i++) {
			mib[1] = i;
			sz = sizeof(bool);
			xmallctlbymib(mib, miblen, &initialized[i], &sz,
			    NULL, 0);
			if (initialized[i]) {
				ninitialized++;
			}
		}
		mib[1] = MALLCTL_ARENAS_DESTROYED;
		sz = sizeof(bool);
		xmallctlbymib(mib, miblen, &destroyed_initialized, &sz,
		    NULL, 0);

		/* Merged stats. */
		if (merged && (ninitialized > 1 || !unmerged)) {
			/*
			 * Print merged arena stats.  Skipped when unmerged
			 * output of a single initialized arena would carry
			 * the same information.
			 */
			emitter_table_printf(emitter, "Merged arenas stats:\n");
			emitter_json_object_kv_begin(emitter, "merged");
			stats_arena_print(emitter, MALLCTL_ARENAS_ALL, bins,
			    large, mutex, extents, hpa);
			emitter_json_object_end(emitter); /* Close "merged". */
		}

		/* Destroyed stats. */
		if (destroyed_initialized && destroyed) {
			/* Print destroyed arena stats. */
			emitter_table_printf(emitter,
			    "Destroyed arenas stats:\n");
			emitter_json_object_kv_begin(emitter, "destroyed");
			stats_arena_print(emitter, MALLCTL_ARENAS_DESTROYED,
			    bins, large, mutex, extents, hpa);
			emitter_json_object_end(emitter); /* Close "destroyed". */
		}

		/* Unmerged stats. */
		if (unmerged) {
			/* One JSON object per initialized arena, keyed by index. */
			for (i = j = 0; i < narenas; i++) {
				if (initialized[i]) {
					char arena_ind_str[20];
					malloc_snprintf(arena_ind_str,
					    sizeof(arena_ind_str), "%u", i);
					emitter_json_object_kv_begin(emitter,
					    arena_ind_str);
					emitter_table_printf(emitter,
					    "arenas[%s]:\n", arena_ind_str);
					stats_arena_print(emitter, i, bins,
					    large, mutex, extents, hpa);
					/* Close "<arena-ind>". */
					emitter_json_object_end(emitter);
				}
			}
		}
		emitter_json_object_end(emitter); /* Close "stats.arenas". */
	}
}
1853 
/*
 * Top-level statistics printer (backing malloc_stats_print()).  Refreshes
 * the cached ctl statistics via the "epoch" mallctl, parses the option
 * string, and drives the emitter over the general and per-arena sections.
 *
 * write_cb/cbopaque: output callback and its opaque argument, forwarded to
 * the emitter.  opts: option characters defined by STATS_PRINT_OPTIONS
 * (may be NULL, meaning all defaults).
 */
void
stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts) {
	int err;
	uint64_t epoch;
	size_t u64sz;
/* Declare one local bool per option, initialized to its default value. */
#define OPTION(o, v, d, s) bool v = d;
	STATS_PRINT_OPTIONS
#undef OPTION

	/*
	 * Refresh stats, in case mallctl() was called by the application.
	 *
	 * Check for OOM here, since refreshing the ctl cache can trigger
	 * allocation.  In practice, none of the subsequent mallctl()-related
	 * calls in this function will cause OOM if this one succeeds.
	 */
	epoch = 1;
	u64sz = sizeof(uint64_t);
	err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
	    sizeof(uint64_t));
	if (err != 0) {
		if (err == EAGAIN) {
			/* OOM while refreshing: report and bail out quietly. */
			malloc_write("<jemalloc>: Memory allocation failure in "
			    "mallctl(\"epoch\", ...)\n");
			return;
		}
		/* Any other failure here indicates internal breakage. */
		malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
		    "...)\n");
		abort();
	}

	if (opts != NULL) {
		/* Each recognized option character sets its flag to s. */
		for (unsigned i = 0; opts[i] != '\0'; i++) {
			switch (opts[i]) {
#define OPTION(o, v, d, s) case o: v = s; break;
				STATS_PRINT_OPTIONS
#undef OPTION
			default:;
			}
		}
	}

	emitter_t emitter;
	/* The 'J' option selects compact JSON output over the table form. */
	emitter_init(&emitter,
	    json ? emitter_output_json_compact : emitter_output_table,
	    write_cb, cbopaque);
	emitter_begin(&emitter);
	emitter_table_printf(&emitter, "___ Begin jemalloc statistics ___\n");
	emitter_json_object_kv_begin(&emitter, "jemalloc");

	if (general) {
		stats_general_print(&emitter);
	}
	/* Per-arena/global counters exist only when stats are compiled in. */
	if (config_stats) {
		stats_print_helper(&emitter, merged, destroyed, unmerged,
		    bins, large, mutex, extents, hpa);
	}

	emitter_json_object_end(&emitter); /* Closes the "jemalloc" dict. */
	emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n");
	emitter_end(&emitter);
}
1916 
1917 uint64_t
1918 stats_interval_new_event_wait(tsd_t *tsd) {
1919 	return stats_interval_accum_batch;
1920 }
1921 
1922 uint64_t
1923 stats_interval_postponed_event_wait(tsd_t *tsd) {
1924 	return TE_MIN_START_WAIT;
1925 }
1926 
1927 void
1928 stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed) {
1929 	assert(elapsed > 0 && elapsed != TE_INVALID_ELAPSED);
1930 	if (counter_accum(tsd_tsdn(tsd), &stats_interval_accumulated,
1931 	    elapsed)) {
1932 		je_malloc_stats_print(NULL, NULL, opt_stats_interval_opts);
1933 	}
1934 }
1935 
1936 bool
1937 stats_boot(void) {
1938 	uint64_t stats_interval;
1939 	if (opt_stats_interval < 0) {
1940 		assert(opt_stats_interval == -1);
1941 		stats_interval = 0;
1942 		stats_interval_accum_batch = 0;
1943 	} else{
1944 		/* See comments in stats.h */
1945 		stats_interval = (opt_stats_interval > 0) ?
1946 		    opt_stats_interval : 1;
1947 		uint64_t batch = stats_interval >>
1948 		    STATS_INTERVAL_ACCUM_LG_BATCH_SIZE;
1949 		if (batch > STATS_INTERVAL_ACCUM_BATCH_MAX) {
1950 			batch = STATS_INTERVAL_ACCUM_BATCH_MAX;
1951 		} else if (batch == 0) {
1952 			batch = 1;
1953 		}
1954 		stats_interval_accum_batch = batch;
1955 	}
1956 
1957 	return counter_accum_init(&stats_interval_accumulated, stats_interval);
1958 }
1959 
/* Pre-fork hook: delegate to the stats-interval accumulator's handler. */
void
stats_prefork(tsdn_t *tsdn) {
	counter_prefork(tsdn, &stats_interval_accumulated);
}
1964 
/* Parent-side post-fork hook for the stats-interval accumulator. */
void
stats_postfork_parent(tsdn_t *tsdn) {
	counter_postfork_parent(tsdn, &stats_interval_accumulated);
}
1969 
/* Child-side post-fork hook for the stats-interval accumulator. */
void
stats_postfork_child(tsdn_t *tsdn) {
	counter_postfork_child(tsdn, &stats_interval_accumulated);
}
1974