xref: /netbsd-src/external/bsd/jemalloc.old/include/jemalloc/internal/cache_bin.h (revision 8e33eff89e26cf71871ead62f0d5063e1313c33a)
1*8e33eff8Schristos #ifndef JEMALLOC_INTERNAL_CACHE_BIN_H
2*8e33eff8Schristos #define JEMALLOC_INTERNAL_CACHE_BIN_H
3*8e33eff8Schristos 
4*8e33eff8Schristos #include "jemalloc/internal/ql.h"
5*8e33eff8Schristos 
6*8e33eff8Schristos /*
7*8e33eff8Schristos  * The cache_bins are the mechanism that the tcache and the arena use to
8*8e33eff8Schristos  * communicate.  The tcache fills from and flushes to the arena by passing a
9*8e33eff8Schristos  * cache_bin_t to fill/flush.  When the arena needs to pull stats from the
10*8e33eff8Schristos  * tcaches associated with it, it does so by iterating over its
11*8e33eff8Schristos  * cache_bin_array_descriptor_t objects and reading out per-bin stats it
12*8e33eff8Schristos  * contains.  This makes it so that the arena need not know about the existence
13*8e33eff8Schristos  * of the tcache at all.
14*8e33eff8Schristos  */
15*8e33eff8Schristos 
16*8e33eff8Schristos 
/*
 * The count of the number of cached allocations in a bin.  We make this signed
 * (int32_t) so that negative numbers can encode "invalid" states (e.g. a low
 * water mark of -1 for a cache that has been depleted since the last GC pass).
 */
typedef int32_t cache_bin_sz_t;
23*8e33eff8Schristos 
typedef struct cache_bin_stats_s cache_bin_stats_t;
/* Per-bin statistics, merged into arena stats when the tcache flushes. */
struct cache_bin_stats_s {
	/*
	 * Number of allocation requests that corresponded to the size of this
	 * bin.
	 */
	uint64_t nrequests;
};
32*8e33eff8Schristos 
/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.  (All cache_bin_t's of
 * the same index share one cache_bin_info_t.)
 */
typedef struct cache_bin_info_s cache_bin_info_t;
struct cache_bin_info_s {
	/* Upper limit on ncached. */
	cache_bin_sz_t ncached_max;
};
42*8e33eff8Schristos 
typedef struct cache_bin_s cache_bin_t;
/*
 * A single thread-cache bin: a stack of cached objects of one size class.
 * NOTE(review): field order is intentional for cacheline locality — do not
 * reorder.
 */
struct cache_bin_s {
	/* Min # cached since last GC; -1 encodes "bin was depleted". */
	cache_bin_sz_t low_water;
	/* # of cached objects. */
	cache_bin_sz_t ncached;
	/*
	 * ncached and stats are both modified frequently.  Let's keep them
	 * close so that they have a higher chance of being on the same
	 * cacheline, thus less write-backs.
	 */
	cache_bin_stats_t tstats;
	/*
	 * Stack of available objects.
	 *
	 * To make use of adjacent cacheline prefetch, the items in the avail
	 * stack goes to higher address for newer allocations.  avail points
	 * just above the available space, which means that
	 * avail[-ncached, ... -1] are available items and the lowest item will
	 * be allocated first.
	 */
	void **avail;
};
66*8e33eff8Schristos 
typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t;
/*
 * Lets the arena walk every tcache's bins for stats collection without
 * knowing about tcache_t itself (see the file-header comment).
 */
struct cache_bin_array_descriptor_s {
	/*
	 * The arena keeps a list of the cache bins associated with it, for
	 * stats collection.
	 */
	ql_elm(cache_bin_array_descriptor_t) link;
	/* Pointers to the tcache bins. */
	cache_bin_t *bins_small;
	cache_bin_t *bins_large;
};
78*8e33eff8Schristos 
79*8e33eff8Schristos static inline void
80*8e33eff8Schristos cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
81*8e33eff8Schristos     cache_bin_t *bins_small, cache_bin_t *bins_large) {
82*8e33eff8Schristos 	ql_elm_new(descriptor, link);
83*8e33eff8Schristos 	descriptor->bins_small = bins_small;
84*8e33eff8Schristos 	descriptor->bins_large = bins_large;
85*8e33eff8Schristos }
86*8e33eff8Schristos 
87*8e33eff8Schristos JEMALLOC_ALWAYS_INLINE void *
88*8e33eff8Schristos cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
89*8e33eff8Schristos 	void *ret;
90*8e33eff8Schristos 
91*8e33eff8Schristos 	if (unlikely(bin->ncached == 0)) {
92*8e33eff8Schristos 		bin->low_water = -1;
93*8e33eff8Schristos 		*success = false;
94*8e33eff8Schristos 		return NULL;
95*8e33eff8Schristos 	}
96*8e33eff8Schristos 	/*
97*8e33eff8Schristos 	 * success (instead of ret) should be checked upon the return of this
98*8e33eff8Schristos 	 * function.  We avoid checking (ret == NULL) because there is never a
99*8e33eff8Schristos 	 * null stored on the avail stack (which is unknown to the compiler),
100*8e33eff8Schristos 	 * and eagerly checking ret would cause pipeline stall (waiting for the
101*8e33eff8Schristos 	 * cacheline).
102*8e33eff8Schristos 	 */
103*8e33eff8Schristos 	*success = true;
104*8e33eff8Schristos 	ret = *(bin->avail - bin->ncached);
105*8e33eff8Schristos 	bin->ncached--;
106*8e33eff8Schristos 
107*8e33eff8Schristos 	if (unlikely(bin->ncached < bin->low_water)) {
108*8e33eff8Schristos 		bin->low_water = bin->ncached;
109*8e33eff8Schristos 	}
110*8e33eff8Schristos 
111*8e33eff8Schristos 	return ret;
112*8e33eff8Schristos }
113*8e33eff8Schristos 
114*8e33eff8Schristos #endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
115