xref: /netbsd-src/external/bsd/jemalloc.old/include/jemalloc/internal/arena_stats.h (revision 8e33eff89e26cf71871ead62f0d5063e1313c33a)
1*8e33eff8Schristos #ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
2*8e33eff8Schristos #define JEMALLOC_INTERNAL_ARENA_STATS_H
3*8e33eff8Schristos 
4*8e33eff8Schristos #include "jemalloc/internal/atomic.h"
5*8e33eff8Schristos #include "jemalloc/internal/mutex.h"
6*8e33eff8Schristos #include "jemalloc/internal/mutex_prof.h"
7*8e33eff8Schristos #include "jemalloc/internal/size_classes.h"
8*8e33eff8Schristos 
9*8e33eff8Schristos /*
10*8e33eff8Schristos  * In those architectures that support 64-bit atomics, we use atomic updates for
11*8e33eff8Schristos  * our 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
12*8e33eff8Schristos  * externally.
13*8e33eff8Schristos  */
14*8e33eff8Schristos #ifdef JEMALLOC_ATOMIC_U64
15*8e33eff8Schristos typedef atomic_u64_t arena_stats_u64_t;
16*8e33eff8Schristos #else
17*8e33eff8Schristos /* Must hold the arena stats mutex while reading atomically. */
18*8e33eff8Schristos typedef uint64_t arena_stats_u64_t;
19*8e33eff8Schristos #endif
20*8e33eff8Schristos 
/*
 * Per-size-class statistics for large allocations.  The arena_stats_u64_t
 * fields follow the scheme described above: atomic where 64-bit atomics are
 * available, otherwise synchronized by the arena stats mutex.
 */
typedef struct arena_stats_large_s arena_stats_large_t;
struct arena_stats_large_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.
	 */
	arena_stats_u64_t	nmalloc;
	arena_stats_u64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	arena_stats_u64_t	nrequests; /* Partially derived. */

	/* Current number of allocations of this size class. */
	size_t		curlextents; /* Derived. */
};
40*8e33eff8Schristos 
/*
 * Purging (decay) statistics.  arena_stats_s keeps one instance for dirty
 * decay and one for muzzy decay.
 */
typedef struct arena_stats_decay_s arena_stats_decay_t;
struct arena_stats_decay_s {
	/* Total number of purge sweeps. */
	arena_stats_u64_t	npurge;
	/* Total number of madvise calls made. */
	arena_stats_u64_t	nmadvise;
	/* Total number of pages purged. */
	arena_stats_u64_t	purged;
};
50*8e33eff8Schristos 
/*
 * Arena stats.  Note that fields marked "derived" are not directly maintained
 * within the arena code; rather their values are derived during stats merge
 * requests.
 */
typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
#ifndef JEMALLOC_ATOMIC_U64
	/* Synchronizes the 64-bit counters when 64-bit atomics are absent. */
	malloc_mutex_t		mtx;
#endif

	/* Number of bytes currently mapped, excluding retained memory. */
	atomic_zu_t		mapped; /* Partially derived. */

	/*
	 * Number of unused virtual memory bytes currently retained.  Retained
	 * bytes are technically mapped (though always decommitted or purged),
	 * but they are excluded from the mapped statistic (above).
	 */
	atomic_zu_t		retained; /* Derived. */

	/* Purge statistics for the dirty and muzzy decay mechanisms. */
	arena_stats_decay_t	decay_dirty;
	arena_stats_decay_t	decay_muzzy;

	atomic_zu_t		base; /* Derived. */
	atomic_zu_t		internal;
	atomic_zu_t		resident; /* Derived. */
	atomic_zu_t		metadata_thp;

	atomic_zu_t		allocated_large; /* Derived. */
	arena_stats_u64_t	nmalloc_large; /* Derived. */
	arena_stats_u64_t	ndalloc_large; /* Derived. */
	arena_stats_u64_t	nrequests_large; /* Derived. */

	/* Number of bytes cached in tcache associated with this arena. */
	atomic_zu_t		tcache_bytes; /* Derived. */

	/* Per-mutex profiling data, indexed by arena mutex kind. */
	mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];

	/* One element for each large size class. */
	arena_stats_large_t	lstats[NSIZES - NBINS];

	/* Arena uptime. */
	nstime_t		uptime;
};
96*8e33eff8Schristos 
97*8e33eff8Schristos static inline bool
98*8e33eff8Schristos arena_stats_init(UNUSED tsdn_t *tsdn, arena_stats_t *arena_stats) {
99*8e33eff8Schristos 	if (config_debug) {
100*8e33eff8Schristos 		for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
101*8e33eff8Schristos 			assert(((char *)arena_stats)[i] == 0);
102*8e33eff8Schristos 		}
103*8e33eff8Schristos 	}
104*8e33eff8Schristos #ifndef JEMALLOC_ATOMIC_U64
105*8e33eff8Schristos 	if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
106*8e33eff8Schristos 	    WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
107*8e33eff8Schristos 		return true;
108*8e33eff8Schristos 	}
109*8e33eff8Schristos #endif
110*8e33eff8Schristos 	/* Memory is zeroed, so there is no need to clear stats. */
111*8e33eff8Schristos 	return false;
112*8e33eff8Schristos }
113*8e33eff8Schristos 
/*
 * Acquire the stats mutex.  A no-op on platforms with 64-bit atomics, where
 * counters are updated atomically instead of under the lock.
 */
static inline void
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_lock(tsdn, &arena_stats->mtx);
#endif
}
120*8e33eff8Schristos 
/* Release the stats mutex; no-op counterpart to arena_stats_lock(). */
static inline void
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_unlock(tsdn, &arena_stats->mtx);
#endif
}
127*8e33eff8Schristos 
/*
 * Read a 64-bit stats counter.  Without native 64-bit atomics the caller
 * must hold the stats mutex (asserted below).
 */
static inline uint64_t
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return *p;
#endif
}
138*8e33eff8Schristos 
/*
 * Add x to a 64-bit stats counter.  Without native 64-bit atomics the caller
 * must hold the stats mutex (asserted below).
 */
static inline void
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	*p += x;
#endif
}
149*8e33eff8Schristos 
150*8e33eff8Schristos UNUSED static inline void
151*8e33eff8Schristos arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
152*8e33eff8Schristos     arena_stats_u64_t *p, uint64_t x) {
153*8e33eff8Schristos #ifdef JEMALLOC_ATOMIC_U64
154*8e33eff8Schristos 	UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
155*8e33eff8Schristos 	assert(r - x <= r);
156*8e33eff8Schristos #else
157*8e33eff8Schristos 	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
158*8e33eff8Schristos 	*p -= x;
159*8e33eff8Schristos 	assert(*p + x >= *p);
160*8e33eff8Schristos #endif
161*8e33eff8Schristos }
162*8e33eff8Schristos 
163*8e33eff8Schristos /*
164*8e33eff8Schristos  * Non-atomically sets *dst += src.  *dst needs external synchronization.
165*8e33eff8Schristos  * This lets us avoid the cost of a fetch_add when its unnecessary (note that
166*8e33eff8Schristos  * the types here are atomic).
167*8e33eff8Schristos  */
168*8e33eff8Schristos static inline void
169*8e33eff8Schristos arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
170*8e33eff8Schristos #ifdef JEMALLOC_ATOMIC_U64
171*8e33eff8Schristos 	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
172*8e33eff8Schristos 	atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
173*8e33eff8Schristos #else
174*8e33eff8Schristos 	*dst += src;
175*8e33eff8Schristos #endif
176*8e33eff8Schristos }
177*8e33eff8Schristos 
/*
 * Read a size_t stats counter.  The load itself is atomic in both
 * configurations; without 64-bit atomics the caller must additionally hold
 * the stats mutex (presumably so zu counters stay consistent with the
 * mutex-protected u64 counters read alongside them -- confirm with callers).
 */
static inline size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_zu(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return atomic_load_zu(p, ATOMIC_RELAXED);
#endif
}
187*8e33eff8Schristos 
188*8e33eff8Schristos static inline void
189*8e33eff8Schristos arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
190*8e33eff8Schristos     size_t x) {
191*8e33eff8Schristos #ifdef JEMALLOC_ATOMIC_U64
192*8e33eff8Schristos 	atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
193*8e33eff8Schristos #else
194*8e33eff8Schristos 	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
195*8e33eff8Schristos 	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
196*8e33eff8Schristos 	atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
197*8e33eff8Schristos #endif
198*8e33eff8Schristos }
199*8e33eff8Schristos 
/*
 * Subtract x from a size_t stats counter.  With 64-bit atomics, underflow is
 * asserted in debug builds; without them, the caller must hold the stats
 * mutex and the update is a non-atomic load/store pair.
 */
static inline void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
    size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	/* r - x wraps past r iff x > r, i.e. iff the counter underflowed. */
	UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
	assert(r - x <= r);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
	atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
#endif
}
212*8e33eff8Schristos 
/*
 * Like the _u64 variant, needs an externally synchronized *dst: the
 * load/store pair below is not an atomic read-modify-write.
 */
static inline void
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
	atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
}
219*8e33eff8Schristos 
/*
 * Merge a batch of nrequests into the stats for large size class szind.
 * szind must be >= NBINS, since lstats is indexed by (szind - NBINS).
 */
static inline void
arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
    szind_t szind, uint64_t nrequests) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
	    NBINS].nrequests, nrequests);
	arena_stats_unlock(tsdn, arena_stats);
}
228*8e33eff8Schristos 
/* Add size bytes to the arena's mapped statistic, taking the stats lock. */
static inline void
arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
	arena_stats_unlock(tsdn, arena_stats);
}
235*8e33eff8Schristos 
236*8e33eff8Schristos 
237*8e33eff8Schristos #endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
238