/* netbsd-src/external/bsd/jemalloc/dist/include/jemalloc/internal/arena_structs.h */
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_H

#include "jemalloc/internal/arena_stats.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/counter.h"
#include "jemalloc/internal/ecache.h"
#include "jemalloc/internal/edata_cache.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/pa.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/ticker.h"

struct arena_s {
	/*
	 * Number of threads currently assigned to this arena.  Each thread has
	 * two distinct assignments, one for application-serving allocation, and
	 * the other for internal metadata allocation.  Internal metadata must
	 * not be allocated from arenas explicitly created via the arenas.create
	 * mallctl, because the arena.<i>.reset mallctl indiscriminately
	 * discards all allocations for the affected arena.
	 *
	 *   0: Application allocation.
	 *   1: Internal metadata allocation.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t		nthreads[2];
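
	/*
	 * A rough sketch of how this bookkeeping is typically driven (the
	 * helper shape below is an assumption, not necessarily the real
	 * accessor): the counter for the matching assignment is bumped with
	 * a relaxed atomic.
	 *
	 *   void
	 *   arena_nthreads_inc(arena_t *arena, bool internal) {
	 *       atomic_fetch_add_u(&arena->nthreads[internal], 1,
	 *           ATOMIC_RELAXED);
	 *   }
	 */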

	/* Next bin shard for binding new threads. Synchronization: atomic. */
	atomic_u_t		binshard_next;
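
	/*
	 * Sketch (variable names are assumptions): a newly bound thread could
	 * pick its shard by post-incrementing this counter and wrapping
	 * around the configured number of shards.
	 *
	 *   unsigned shard = atomic_fetch_add_u(&arena->binshard_next, 1,
	 *       ATOMIC_RELAXED) % nshards;
	 */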

	/*
	 * When percpu_arena is enabled, to amortize the cost of reading /
	 * updating the current CPU id, track the most recent thread accessing
	 * this arena, and only read the CPU id if there is a mismatch.
	 */
	tsdn_t		*last_thd;
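
	/*
	 * A minimal sketch of the check described above (the fast-path shape
	 * is an assumption, and malloc_getcpu is assumed to be the CPU-id
	 * helper):
	 *
	 *   if (arena->last_thd != tsdn) {
	 *       unsigned cpu = malloc_getcpu();
	 *       ... rebind to the matching per-CPU arena if needed ...
	 *       arena->last_thd = tsdn;
	 *   }
	 */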

	/* Synchronization: internal. */
	arena_stats_t		stats;

	/*
	 * Lists of tcaches and cache_bin_array_descriptors for extant threads
	 * associated with this arena.  Stats from these are merged
	 * incrementally, and at exit if opt_stats_print is enabled.
	 *
	 * Synchronization: tcache_ql_mtx.
	 */
	ql_head(tcache_slow_t)			tcache_ql;
	ql_head(cache_bin_array_descriptor_t)	cache_bin_array_descriptor_ql;
	malloc_mutex_t				tcache_ql_mtx;
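
	/*
	 * Sketch of how a thread's tcache might be linked in (the "link"
	 * field name is an assumption; the real code also registers the
	 * thread's cache_bin_array_descriptor_t):
	 *
	 *   malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
	 *   ql_elm_new(tcache_slow, link);
	 *   ql_tail_insert(&arena->tcache_ql, tcache_slow, link);
	 *   malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
	 */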

	/*
	 * Represents a dss_prec_t, but atomically.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t		dss_prec;
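
	/*
	 * Reads and writes cast between dss_prec_t and unsigned; a sketch
	 * (the memory orders here are an assumption):
	 *
	 *   dss_prec_t prec =
	 *       (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
	 *   atomic_store_u(&arena->dss_prec, (unsigned)prec, ATOMIC_RELEASE);
	 */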

	/*
	 * Extant large allocations.
	 *
	 * Synchronization: large_mtx.
	 */
	edata_list_active_t	large;
	/* Synchronizes all large allocation/update/deallocation. */
	malloc_mutex_t		large_mtx;
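
	/*
	 * Sketch of the locking pattern described above (the list-API name is
	 * an assumption): tracking a new large allocation appends its edata
	 * while holding large_mtx.
	 *
	 *   malloc_mutex_lock(tsdn, &arena->large_mtx);
	 *   edata_list_active_append(&arena->large, edata);
	 *   malloc_mutex_unlock(tsdn, &arena->large_mtx);
	 */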

	/* The page-level allocator shard this arena uses. */
	pa_shard_t		pa_shard;

	/*
	 * A cached copy of base->ind.  This can get accessed on hot paths;
	 * looking it up in base requires an extra pointer hop / cache miss.
	 */
	unsigned ind;

	/*
	 * Base allocator, from which arena metadata are allocated.
	 *
	 * Synchronization: internal.
	 */
	base_t			*base;
	/* Used to determine uptime.  Read-only after initialization. */
	nstime_t		create_time;
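
	/*
	 * Uptime sketch (assumed nstime helpers): elapsed time is the current
	 * time minus create_time.
	 *
	 *   nstime_t uptime;
	 *   nstime_init_update(&uptime);
	 *   nstime_subtract(&uptime, &arena->create_time);
	 */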

	/*
	 * The arena is allocated alongside its bins; really this is a
	 * dynamically sized array determined by the binshard settings.
	 */
	bin_t			bins[0];
};
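
/*
 * Because bins[] is a trailing, dynamically sized array, an arena and its
 * bins come from one base allocation sized for both; a sketch (variable
 * names are assumptions, and the real sizing also accounts for per-size-class
 * shard counts):
 *
 *   size_t arena_size = sizeof(arena_t) + nbins_total * sizeof(bin_t);
 *   arena_t *arena = (arena_t *)base_alloc(tsdn, base, arena_size,
 *       CACHELINE);
 */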

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_H */