#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H

#include "jemalloc/internal/arena_stats.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/ticker.h"

struct arena_decay_s {
	/* Synchronizes all non-atomic fields. */
	malloc_mutex_t		mtx;
	/*
	 * True if a thread is currently purging the extents associated with
	 * this decay structure.
	 */
	bool			purging;
	/*
	 * Approximate time in milliseconds from the creation of a set of unused
	 * dirty pages until an equivalent set of unused dirty pages is purged
	 * and/or reused.
	 */
	atomic_zd_t		time_ms;
	/* time / SMOOTHSTEP_NSTEPS. */
	nstime_t		interval;
	/*
	 * Time at which the current decay interval logically started.  We do
	 * not actually advance to a new epoch until sometime after it starts
	 * because of scheduling and computation delays, and it is even possible
	 * to completely skip epochs.  In all cases, during epoch advancement we
	 * merge all relevant activity into the most recently recorded epoch.
	 */
	nstime_t		epoch;
	/* Deadline randomness generator. */
	uint64_t		jitter_state;
	/*
	 * Deadline for the current epoch.  This is the sum of interval and
	 * per-epoch jitter, which is a uniform random variable in
	 * [0..interval).  Epochs always advance by precise multiples of
	 * interval, but we randomize the deadline to reduce the likelihood of
	 * arenas purging in lockstep.  (An illustrative sketch of this
	 * epoch-advance arithmetic follows this struct definition.)
	 */
	nstime_t		deadline;
	/*
	 * Number of unpurged pages at beginning of current epoch.  During epoch
	 * advancement we use the delta between arena->decay_*.nunpurged and
	 * extents_npages_get(&arena->extents_*) to determine how many dirty
	 * pages, if any, were generated.
	 */
	size_t			nunpurged;
	/*
	 * Trailing log of how many unused dirty pages were generated during
	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
	 * element is the most recent epoch.  Corresponding epoch times are
	 * relative to epoch.
	 */
	size_t			backlog[SMOOTHSTEP_NSTEPS];

	/*
	 * Pointer to associated stats.  These stats are embedded directly in
	 * the arena's stats due to how stats structures are shared between the
	 * arena and ctl code.
	 *
	 * Synchronization: Same as associated arena's stats field.
	 */
	arena_stats_decay_t	*stats;
	/* Peak number of pages in associated extents.  Used for debug only. */
	uint64_t		ceil_npages;
};
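
/*
 * Illustrative sketch of the epoch-advance bookkeeping described by the field
 * comments above.  This is not the actual implementation: the real code works
 * on nstime_t values and jemalloc's PRNG under decay->mtx.  The function name,
 * the nanosecond parameters, and the xorshift64 jitter step below are
 * assumptions made purely for illustration.
 */
static inline void
decay_sketch_epoch_advance(uint64_t *epoch_ns, uint64_t *deadline_ns,
    uint64_t interval_ns, uint64_t *jitter_state, size_t *backlog,
    size_t nsteps, size_t ndirty_new) {
	/* Shift the trailing log by one epoch; the newest entry is last. */
	for (size_t i = 1; i < nsteps; i++) {
		backlog[i - 1] = backlog[i];
	}
	backlog[nsteps - 1] = ndirty_new;	/* Assumes nsteps >= 1. */

	/* Epochs advance by a precise multiple of the interval... */
	*epoch_ns += interval_ns;

	/*
	 * ...but the deadline also gets uniform jitter in [0..interval) so
	 * that arenas sharing one interval do not purge in lockstep.  A plain
	 * xorshift64 step stands in for the real generator here.
	 */
	uint64_t x = *jitter_state;
	x ^= x << 13;
	x ^= x >> 7;
	x ^= x << 17;
	*jitter_state = x;
	*deadline_ns = *epoch_ns + interval_ns +
	    (interval_ns != 0 ? x % interval_ns : 0);
}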

struct arena_s {
	/*
	 * Number of threads currently assigned to this arena.  Each thread has
	 * two distinct assignments, one for application-serving allocation, and
	 * the other for internal metadata allocation.  Internal metadata must
	 * not be allocated from arenas explicitly created via the arenas.create
	 * mallctl, because the arena.<i>.reset mallctl indiscriminately
	 * discards all allocations for the affected arena.
	 *
	 *   0: Application allocation.
	 *   1: Internal metadata allocation.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t		nthreads[2];

	/*
	 * When percpu_arena is enabled, track the most recent thread that
	 * accessed this arena, and only read the current CPU id when there is
	 * a mismatch.  This amortizes the cost of reading / updating the CPU
	 * id.
	 */
	tsdn_t		*last_thd;

	/* Synchronization: internal. */
	arena_stats_t		stats;

	/*
	 * Lists of tcaches and cache_bin_array_descriptors for extant threads
	 * associated with this arena.  Stats from these are merged
	 * incrementally, and at exit if opt_stats_print is enabled.
	 *
	 * Synchronization: tcache_ql_mtx.
	 */
	ql_head(tcache_t)			tcache_ql;
	ql_head(cache_bin_array_descriptor_t)	cache_bin_array_descriptor_ql;
	malloc_mutex_t				tcache_ql_mtx;

	/* Synchronization: internal. */
	prof_accum_t		prof_accum;
	uint64_t		prof_accumbytes;

	/*
	 * PRNG state for cache index randomization of large allocation base
	 * pointers.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		offset_state;
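	/*
	 * (The randomization nudges large allocation base pointers by a
	 * cache-line-granular amount so that successive large allocations do
	 * not all map to the same cache index.)
	 */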

	/*
	 * Extent serial number generator state.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		extent_sn_next;

	/*
	 * Represents a dss_prec_t, but atomically.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t		dss_prec;
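	/*
	 * (Readers are expected to cast the loaded value back, e.g.
	 * (dss_prec_t)atomic_load_u(&dss_prec, ATOMIC_ACQUIRE).)
	 */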

	/*
	 * Number of pages in active extents.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		nactive;

	/*
	 * Extant large allocations.
	 *
	 * Synchronization: large_mtx.
	 */
	extent_list_t		large;
	/* Synchronizes all large allocation/update/deallocation. */
	malloc_mutex_t		large_mtx;

	/*
	 * Collections of extents that were previously allocated.  These are
	 * used when allocating extents, in an attempt to re-use address space.
	 *
	 * Synchronization: internal.
	 */
	extents_t		extents_dirty;
	extents_t		extents_muzzy;
	extents_t		extents_retained;

	/*
	 * Decay-based purging state, responsible for scheduling extent state
	 * transitions.
	 *
	 * Synchronization: internal.
	 */
	arena_decay_t		decay_dirty; /* dirty --> muzzy */
	arena_decay_t		decay_muzzy; /* muzzy --> retained */

	/*
	 * Next extent size class in a growing series to use when satisfying a
	 * request via the extent hooks (only if opt_retain).  This limits the
	 * number of disjoint virtual memory ranges so that extent merging can
	 * be effective even if multiple arenas' extent allocation requests are
	 * highly interleaved.  (A simplified sketch of this growth series
	 * follows this struct definition.)
	 *
	 * retain_grow_limit is the maximum size index that the series is
	 * allowed to reach (unless the required size is greater).  The default
	 * is no limit, and it can be changed only through mallctl.
	 *
	 * Synchronization: extent_grow_mtx.
	 */
	pszind_t		extent_grow_next;
	pszind_t		retain_grow_limit;
	malloc_mutex_t		extent_grow_mtx;

	/*
	 * Available extent structures that were allocated via
	 * base_alloc_extent().
	 *
	 * Synchronization: extent_avail_mtx.
	 */
	extent_tree_t		extent_avail;
	malloc_mutex_t		extent_avail_mtx;

	/*
	 * bins is used to store heaps of free regions.
	 *
	 * Synchronization: internal.
	 */
	bin_t			bins[NBINS];

	/*
	 * Base allocator, from which arena metadata are allocated.
	 *
	 * Synchronization: internal.
	 */
	base_t			*base;
	/* Used to determine uptime.  Read-only after initialization. */
	nstime_t		create_time;
};
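
/*
 * Illustrative sketch of the extent_grow_next / retain_grow_limit policy
 * described in struct arena_s above.  This is not the actual implementation:
 * the real code walks pszind_t size classes under extent_grow_mtx, whereas the
 * toy below uses plain power-of-two "classes" and hypothetical names
 * (grow_sketch_alloc_size, *_lg parameters) purely to show the shape of the
 * policy.
 */
static inline size_t
grow_sketch_alloc_size(size_t request, size_t *grow_next_lg,
    size_t grow_limit_lg) {
	/* Start from the current position in the growing series... */
	size_t lg = *grow_next_lg;
	/* ...but skip ahead until the class can satisfy the request. */
	while (((size_t)1 << lg) < request && lg < grow_limit_lg) {
		lg++;
	}
	size_t alloc_size = (size_t)1 << lg;
	if (alloc_size < request) {
		/* The limit caps series growth, not oversized requests. */
		alloc_size = request;
	}
	/*
	 * Advance the series so the next fresh mapping is larger still,
	 * keeping the number of disjoint mappings small.
	 */
	if (lg < grow_limit_lg) {
		*grow_next_lg = lg + 1;
	}
	return alloc_size;
}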

/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
	ticker_t		decay_ticker;
};

/* Used to pass rtree lookup context down the path. */
struct alloc_ctx_s {
	szind_t szind;
	bool slab;
};
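/*
 * Typical usage, sketched rather than prescribed: a deallocation or sizing
 * path performs one rtree lookup for a pointer, records the resulting size
 * class index and slab flag in a stack-allocated alloc_ctx_t, and passes the
 * struct down so deeper layers can branch on slab (small, bin-managed region)
 * versus large extent without repeating the lookup.
 */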

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */