#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"

/******************************************************************************/
/* Data. */

bool	opt_tcache = true;
ssize_t	opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

cache_bin_info_t	*tcache_bin_info;
static unsigned		stack_nelms; /* Total stack elms per tcache. */

unsigned		nhbins;
size_t			tcache_maxclass;

tcaches_t		*tcaches;

/* Index of first element within tcaches that has never been used. */
static unsigned		tcaches_past;

/* Head of singly linked list tracking available tcaches elements. */
static tcaches_t	*tcaches_avail;

/* Protects tcaches{,_past,_avail}. */
static malloc_mutex_t	tcaches_mtx;

/******************************************************************************/

size_t
tcache_salloc(tsdn_t *tsdn, const void *ptr) {
	return arena_salloc(tsdn, ptr);
}

void
tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
	szind_t binind = tcache->next_gc_bin;

	cache_bin_t *tbin;
	if (binind < NBINS) {
		tbin = tcache_small_bin_get(tcache, binind);
	} else {
		tbin = tcache_large_bin_get(tcache, binind);
	}
	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
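		/*
		 * For example, with ncached = 20 and low_water = 8, the
		 * retained count is 20 - 8 + (8 >> 2) = 14, so 6 objects
		 * (ceil(3/4 * 8)) are flushed and 14 remain cached.
		 */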
		if (binind < NBINS) {
			tcache_bin_flush_small(tsd, tcache, tbin, binind,
			    tbin->ncached - tbin->low_water + (tbin->low_water
			    >> 2));
			/*
			 * Reduce fill count by 2X.  Limit lg_fill_div such that
			 * the fill count is always at least 1.
			 */
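			/*
			 * For example, with ncached_max = 16 and lg_fill_div
			 * currently 1, a fill brings in 16 >> 1 = 8 objects;
			 * after the increment below, it brings in 4.
			 */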
			cache_bin_info_t *tbin_info = &tcache_bin_info[binind];
			if ((tbin_info->ncached_max >>
			     (tcache->lg_fill_div[binind] + 1)) >= 1) {
				tcache->lg_fill_div[binind]++;
			}
		} else {
			tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
			    - tbin->low_water + (tbin->low_water >> 2), tcache);
		}
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X for small bins.  Make sure
		 * lg_fill_div stays greater than 0.
		 */
		if (binind < NBINS && tcache->lg_fill_div[binind] > 1) {
			tcache->lg_fill_div[binind]--;
		}
	}
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins) {
		tcache->next_gc_bin = 0;
	}
}

void *
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, bool *tcache_success) {
	void *ret;

	assert(tcache->arena != NULL);
	arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
	    config_prof ? tcache->prof_accumbytes : 0);
	if (config_prof) {
		tcache->prof_accumbytes = 0;
	}
	ret = cache_bin_alloc_easy(tbin, tcache_success);

	return ret;
}

void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
    szind_t binind, unsigned rem) {
	bool merged_stats = false;

	assert(binind < NBINS);
	assert((cache_bin_sz_t)rem <= tbin->ncached);

	arena_t *arena = tcache->arena;
	assert(arena != NULL);
	unsigned nflush = tbin->ncached - rem;
	/* Variable length array must have > 0 length. */
	VARIABLE_ARRAY(extent_t *, item_extent, nflush + 1);
	/* Look up extent once per item. */
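	/*
	 * The avail stack grows toward lower addresses, so the most recently
	 * cached object sits at *(tbin->avail - ncached) and the oldest at
	 * *(tbin->avail - 1).  The loop below therefore visits the nflush
	 * oldest objects, and the final memmove keeps the rem newest ones.
	 */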
	for (unsigned i = 0; i < nflush; i++) {
		item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
	}

	while (nflush > 0) {
		/* Lock the arena bin associated with the first object. */
		extent_t *extent = item_extent[0];
		arena_t *bin_arena = extent_arena_get(extent);
		bin_t *bin = &bin_arena->bins[binind];

		if (config_prof && bin_arena == arena) {
			if (arena_prof_accum(tsd_tsdn(tsd), arena,
			    tcache->prof_accumbytes)) {
				prof_idump(tsd_tsdn(tsd));
			}
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		if (config_stats && bin_arena == arena) {
			assert(!merged_stats);
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		unsigned ndeferred = 0;
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			extent = item_extent[i];
			assert(ptr != NULL && extent != NULL);

			if (extent_arena_get(extent) == bin_arena) {
				arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
				    bin_arena, extent, ptr);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				*(tbin->avail - 1 - ndeferred) = ptr;
				item_extent[ndeferred] = extent;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
		nflush = ndeferred;
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		bin_t *bin = &arena->bins[binind];
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
	}

	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
	    sizeof(void *));
	tbin->ncached = rem;
	if (tbin->ncached < tbin->low_water) {
		tbin->low_water = tbin->ncached;
	}
}

void
tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache) {
	bool merged_stats = false;

	assert(binind < nhbins);
	assert((cache_bin_sz_t)rem <= tbin->ncached);

	arena_t *arena = tcache->arena;
	assert(arena != NULL);
	unsigned nflush = tbin->ncached - rem;
	/* Variable length array must have > 0 length. */
	VARIABLE_ARRAY(extent_t *, item_extent, nflush + 1);
	/* Look up extent once per item. */
	for (unsigned i = 0; i < nflush; i++) {
		item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
	}

	while (nflush > 0) {
		/* Lock the arena associated with the first object. */
		extent_t *extent = item_extent[0];
		arena_t *locked_arena = extent_arena_get(extent);
		UNUSED bool idump;

		if (config_prof) {
			idump = false;
		}

		malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			assert(ptr != NULL);
			extent = item_extent[i];
			if (extent_arena_get(extent) == locked_arena) {
				large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
				    extent);
			}
		}
		if ((config_prof || config_stats) && locked_arena == arena) {
			if (config_prof) {
				idump = arena_prof_accum(tsd_tsdn(tsd), arena,
				    tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
			if (config_stats) {
				merged_stats = true;
				arena_stats_large_nrequests_add(tsd_tsdn(tsd),
				    &arena->stats, binind,
				    tbin->tstats.nrequests);
				tbin->tstats.nrequests = 0;
			}
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);

		unsigned ndeferred = 0;
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			extent = item_extent[i];
			assert(ptr != NULL && extent != NULL);

			if (extent_arena_get(extent) == locked_arena) {
				large_dalloc_finish(tsd_tsdn(tsd), extent);
			} else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				*(tbin->avail - 1 - ndeferred) = ptr;
				item_extent[ndeferred] = extent;
				ndeferred++;
			}
		}
		if (config_prof && idump) {
			prof_idump(tsd_tsdn(tsd));
		}
		arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
		    ndeferred);
		nflush = ndeferred;
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_stats_large_nrequests_add(tsd_tsdn(tsd), &arena->stats,
		    binind, tbin->tstats.nrequests);
		tbin->tstats.nrequests = 0;
	}

	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
	    sizeof(void *));
	tbin->ncached = rem;
	if (tbin->ncached < tbin->low_water) {
		tbin->low_water = tbin->ncached;
	}
}

void
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	assert(tcache->arena == NULL);
	tcache->arena = arena;

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);

		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		cache_bin_array_descriptor_init(
		    &tcache->cache_bin_array_descriptor, tcache->bins_small,
		    tcache->bins_large);
		ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
		    &tcache->cache_bin_array_descriptor, link);

		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
	}
}

static void
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
	arena_t *arena = tcache->arena;
	assert(arena != NULL);
	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
		if (config_debug) {
			bool in_ql = false;
			tcache_t *iter;
			ql_foreach(iter, &arena->tcache_ql, link) {
				if (iter == tcache) {
					in_ql = true;
					break;
				}
			}
			assert(in_ql);
		}
		ql_remove(&arena->tcache_ql, tcache, link);
		ql_remove(&arena->cache_bin_array_descriptor_ql,
		    &tcache->cache_bin_array_descriptor, link);
		tcache_stats_merge(tsdn, tcache, arena);
		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
	}
	tcache->arena = NULL;
}

void
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	tcache_arena_dissociate(tsdn, tcache);
	tcache_arena_associate(tsdn, tcache, arena);
}

bool
tsd_tcache_enabled_data_init(tsd_t *tsd) {
	/* Called upon tsd initialization. */
	tsd_tcache_enabled_set(tsd, opt_tcache);
	tsd_slow_update(tsd);

	if (opt_tcache) {
		/* Trigger tcache init. */
		tsd_tcache_data_init(tsd);
	}

	return false;
}

/* Initialize auto tcache (embedded in TSD). */
static void
tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
	memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
	tcache->prof_accumbytes = 0;
	tcache->next_gc_bin = 0;
	tcache->arena = NULL;

	ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);

	size_t stack_offset = 0;
	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	memset(tcache->bins_small, 0, sizeof(cache_bin_t) * NBINS);
	memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - NBINS));
	unsigned i = 0;
	for (; i < NBINS; i++) {
		tcache->lg_fill_div[i] = 1;
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
		/*
		 * avail points past the available space.  Allocations will
		 * access the slots toward higher addresses (for the benefit of
		 * prefetch).
		 */
		tcache_small_bin_get(tcache, i)->avail =
		    (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
	}
	for (; i < nhbins; i++) {
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
		tcache_large_bin_get(tcache, i)->avail =
		    (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
	}
	assert(stack_offset == stack_nelms * sizeof(void *));
}

/* Initialize auto tcache (embedded in TSD). */
bool
tsd_tcache_data_init(tsd_t *tsd) {
	tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
	assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
	size_t size = stack_nelms * sizeof(void *);
	/* Avoid false cacheline sharing. */
	size = sz_sa2u(size, CACHELINE);

	void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
	    NULL, true, arena_get(TSDN_NULL, 0, true));
	if (avail_array == NULL) {
		return true;
	}

	tcache_init(tsd, tcache, avail_array);
	/*
	 * Initialization is a bit tricky here.  After malloc init is done, all
	 * threads can rely on arena_choose and associate tcache accordingly.
	 * However, the thread that does actual malloc bootstrapping relies on
	 * functional tsd, and it can only rely on a0.  In that case, we
	 * associate its tcache to a0 temporarily, and later on
	 * arena_choose_hard() will re-associate properly.
	 */
	tcache->arena = NULL;
	arena_t *arena;
	if (!malloc_initialized()) {
		/* If in initialization, assign to a0. */
		arena = arena_get(tsd_tsdn(tsd), 0, false);
		tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
	} else {
		arena = arena_choose(tsd, NULL);
		/* This may happen if thread.tcache.enabled is used. */
		if (tcache->arena == NULL) {
			tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
		}
	}
	assert(arena == tcache->arena);

	return false;
}

/* Create a manual tcache for the tcache.create mallctl. */
tcache_t *
tcache_create_explicit(tsd_t *tsd) {
	tcache_t *tcache;
	size_t size, stack_offset;

	size = sizeof(tcache_t);
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/* Avoid false cacheline sharing. */
	size = sz_sa2u(size, CACHELINE);
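	/*
	 * Resulting layout: the tcache_t itself, padding up to pointer
	 * alignment, then the stack_nelms avail slots; stack_offset records
	 * where the slots begin within the single allocation.
	 */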

	tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
	    arena_get(TSDN_NULL, 0, true));
	if (tcache == NULL) {
		return NULL;
	}

	tcache_init(tsd, tcache,
	    (void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
	tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));

	return tcache;
}

static void
tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
	assert(tcache->arena != NULL);

	for (unsigned i = 0; i < NBINS; i++) {
		cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
		tcache_bin_flush_small(tsd, tcache, tbin, i, 0);

		if (config_stats) {
			assert(tbin->tstats.nrequests == 0);
		}
	}
	for (unsigned i = NBINS; i < nhbins; i++) {
		cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);

		if (config_stats) {
			assert(tbin->tstats.nrequests == 0);
		}
	}

	if (config_prof && tcache->prof_accumbytes > 0 &&
	    arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
	    tcache->prof_accumbytes)) {
		prof_idump(tsd_tsdn(tsd));
	}
}

void
tcache_flush(tsd_t *tsd) {
	assert(tcache_available(tsd));
	tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
}

static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
	tcache_flush_cache(tsd, tcache);
	tcache_arena_dissociate(tsd_tsdn(tsd), tcache);

	if (tsd_tcache) {
		/* Release the avail array for the TSD embedded auto tcache. */
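		/*
		 * Bin 0's avail pointer points one past its slots, which
		 * begin at the base of the array passed to tcache_init(), so
		 * subtracting ncached_max slots recovers the original
		 * allocation address.
		 */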
		void *avail_array =
		    (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
		    (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
		idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
	} else {
		/* Release both the tcache struct and avail array. */
		idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
	}
}

/* For auto tcache (embedded in TSD) only. */
void
tcache_cleanup(tsd_t *tsd) {
	tcache_t *tcache = tsd_tcachep_get(tsd);
	if (!tcache_available(tsd)) {
		assert(tsd_tcache_enabled_get(tsd) == false);
		if (config_debug) {
			assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
		}
		return;
	}
	assert(tsd_tcache_enabled_get(tsd));
	assert(tcache_small_bin_get(tcache, 0)->avail != NULL);

	tcache_destroy(tsd, tcache, true);
	if (config_debug) {
		tcache_small_bin_get(tcache, 0)->avail = NULL;
	}
}

void
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	unsigned i;

	cassert(config_stats);

	/* Merge and reset tcache stats. */
	for (i = 0; i < NBINS; i++) {
		bin_t *bin = &arena->bins[i];
		cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
		malloc_mutex_lock(tsdn, &bin->lock);
		bin->stats.nrequests += tbin->tstats.nrequests;
		malloc_mutex_unlock(tsdn, &bin->lock);
		tbin->tstats.nrequests = 0;
	}

	for (; i < nhbins; i++) {
		cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
		arena_stats_large_nrequests_add(tsdn, &arena->stats, i,
		    tbin->tstats.nrequests);
		tbin->tstats.nrequests = 0;
	}
}

static bool
tcaches_create_prep(tsd_t *tsd) {
	bool err;

	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);

	if (tcaches == NULL) {
		tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
		    * (MALLOCX_TCACHE_MAX+1), CACHELINE);
		if (tcaches == NULL) {
			err = true;
			goto label_return;
		}
	}

	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) {
		err = true;
		goto label_return;
	}

	err = false;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
	return err;
}

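/*
 * A minimal usage sketch for the explicit-tcache API below, assuming the
 * standard "tcache.create"/"tcache.flush"/"tcache.destroy" mallctl entry
 * points (of these, only tcache.create is named elsewhere in this file):
 *
 *	unsigned ind;
 *	size_t sz = sizeof(ind);
 *	mallctl("tcache.create", &ind, &sz, NULL, 0);     // tcaches_create()
 *	mallctl("tcache.flush", NULL, NULL, &ind, sz);    // tcaches_flush()
 *	mallctl("tcache.destroy", NULL, NULL, &ind, sz);  // tcaches_destroy()
 */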
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind) {
	witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);

	bool err;

	if (tcaches_create_prep(tsd)) {
		err = true;
		goto label_return;
	}

	tcache_t *tcache = tcache_create_explicit(tsd);
	if (tcache == NULL) {
		err = true;
		goto label_return;
	}

	tcaches_t *elm;
	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
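	/*
	 * Prefer reusing a slot from the free list; otherwise claim the next
	 * never-used element and advance tcaches_past.
	 */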
	if (tcaches_avail != NULL) {
		elm = tcaches_avail;
		tcaches_avail = tcaches_avail->next;
		elm->tcache = tcache;
		*r_ind = (unsigned)(elm - tcaches);
	} else {
		elm = &tcaches[tcaches_past];
		elm->tcache = tcache;
		*r_ind = tcaches_past;
		tcaches_past++;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);

	err = false;
label_return:
	witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
	return err;
}

static tcache_t *
tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);

	if (elm->tcache == NULL) {
		return NULL;
	}
	tcache_t *tcache = elm->tcache;
	elm->tcache = NULL;
	return tcache;
}

void
tcaches_flush(tsd_t *tsd, unsigned ind) {
	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
	tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind]);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
	if (tcache != NULL) {
		tcache_destroy(tsd, tcache, false);
	}
}

void
tcaches_destroy(tsd_t *tsd, unsigned ind) {
	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
	tcaches_t *elm = &tcaches[ind];
	tcache_t *tcache = tcaches_elm_remove(tsd, elm);
	elm->next = tcaches_avail;
	tcaches_avail = elm;
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
	if (tcache != NULL) {
		tcache_destroy(tsd, tcache, false);
	}
}

bool
tcache_boot(tsdn_t *tsdn) {
	/* If necessary, clamp opt_lg_tcache_max. */
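	/*
	 * For example, with the default lg_tcache_max of 15, tcache_maxclass
	 * becomes 32 KiB (assuming SMALL_MAXCLASS is below that), so objects
	 * up to 32 KiB are cacheable.
	 */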
	if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
	    SMALL_MAXCLASS) {
		tcache_maxclass = SMALL_MAXCLASS;
	} else {
		tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
	}

	if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	nhbins = sz_size2index(tcache_maxclass) + 1;

	/* Initialize tcache_bin_info. */
	tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
	    * sizeof(cache_bin_info_t), CACHELINE);
	if (tcache_bin_info == NULL) {
		return true;
	}
	stack_nelms = 0;
	unsigned i;
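	/*
	 * Small bins cache up to twice the number of regions in the
	 * corresponding slab, clamped to the range
	 * [TCACHE_NSLOTS_SMALL_MIN, TCACHE_NSLOTS_SMALL_MAX].
	 */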
	for (i = 0; i < NBINS; i++) {
		if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MIN;
		} else if ((bin_infos[i].nregs << 1) <=
		    TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (bin_infos[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return false;
}

void
tcache_prefork(tsdn_t *tsdn) {
	if (!config_prof && opt_tcache) {
		malloc_mutex_prefork(tsdn, &tcaches_mtx);
	}
}

void
tcache_postfork_parent(tsdn_t *tsdn) {
	if (!config_prof && opt_tcache) {
		malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
	}
}

void
tcache_postfork_child(tsdn_t *tsdn) {
	if (!config_prof && opt_tcache) {
		malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
	}
}