xref: /netbsd-src/external/bsd/jemalloc.old/include/jemalloc/internal/mutex_prof.h (revision 8e33eff89e26cf71871ead62f0d5063e1313c33a)
1*8e33eff8Schristos #ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
2*8e33eff8Schristos #define JEMALLOC_INTERNAL_MUTEX_PROF_H
3*8e33eff8Schristos 
4*8e33eff8Schristos #include "jemalloc/internal/atomic.h"
5*8e33eff8Schristos #include "jemalloc/internal/nstime.h"
6*8e33eff8Schristos #include "jemalloc/internal/tsd_types.h"
7*8e33eff8Schristos 
/*
 * X-macro list of the profiled global (non-arena) mutexes.  Consumers
 * define OP(name) and expand this macro to generate per-mutex code; see
 * mutex_prof_global_ind_t below for the matching index enum.
 */
8*8e33eff8Schristos #define MUTEX_PROF_GLOBAL_MUTEXES					\
9*8e33eff8Schristos     OP(background_thread)						\
10*8e33eff8Schristos     OP(ctl)								\
11*8e33eff8Schristos     OP(prof)
12*8e33eff8Schristos 
/*
 * Index type for the profiled global mutexes: expands
 * MUTEX_PROF_GLOBAL_MUTEXES through OP() into one
 * global_prof_mutex_<name> enumerator per mutex.  The trailing
 * enumerator doubles as the total count.
 */
13*8e33eff8Schristos typedef enum {
14*8e33eff8Schristos #define OP(mtx) global_prof_mutex_##mtx,
15*8e33eff8Schristos 	MUTEX_PROF_GLOBAL_MUTEXES
16*8e33eff8Schristos #undef OP
17*8e33eff8Schristos 	mutex_prof_num_global_mutexes
18*8e33eff8Schristos } mutex_prof_global_ind_t;
19*8e33eff8Schristos 
/*
 * X-macro list of the profiled per-arena mutexes.  Consumers define
 * OP(name) and expand this macro to generate per-mutex code; see
 * mutex_prof_arena_ind_t below for the matching index enum.
 */
20*8e33eff8Schristos #define MUTEX_PROF_ARENA_MUTEXES					\
21*8e33eff8Schristos     OP(large)								\
22*8e33eff8Schristos     OP(extent_avail)							\
23*8e33eff8Schristos     OP(extents_dirty)							\
24*8e33eff8Schristos     OP(extents_muzzy)							\
25*8e33eff8Schristos     OP(extents_retained)						\
26*8e33eff8Schristos     OP(decay_dirty)							\
27*8e33eff8Schristos     OP(decay_muzzy)							\
28*8e33eff8Schristos     OP(base)								\
29*8e33eff8Schristos     OP(tcache_list)
30*8e33eff8Schristos 
/*
 * Index type for the profiled per-arena mutexes: expands
 * MUTEX_PROF_ARENA_MUTEXES through OP() into one
 * arena_prof_mutex_<name> enumerator per mutex.  The trailing
 * enumerator doubles as the total count.
 */
31*8e33eff8Schristos typedef enum {
32*8e33eff8Schristos #define OP(mtx) arena_prof_mutex_##mtx,
33*8e33eff8Schristos 	MUTEX_PROF_ARENA_MUTEXES
34*8e33eff8Schristos #undef OP
35*8e33eff8Schristos 	mutex_prof_num_arena_mutexes
36*8e33eff8Schristos } mutex_prof_arena_ind_t;
37*8e33eff8Schristos 
/*
 * X-macro lists of the per-mutex profiling counters, split by C type.
 * Each entry is OP(counter, type, human), where "human" is the
 * human-readable name used when the stats are reported.
 * MUTEX_PROF_COUNTERS concatenates both lists for consumers that want
 * every counter regardless of width.
 */
38*8e33eff8Schristos #define MUTEX_PROF_UINT64_COUNTERS					\
39*8e33eff8Schristos     OP(num_ops, uint64_t, "n_lock_ops")					\
40*8e33eff8Schristos     OP(num_wait, uint64_t, "n_waiting")					\
41*8e33eff8Schristos     OP(num_spin_acq, uint64_t, "n_spin_acq")				\
42*8e33eff8Schristos     OP(num_owner_switch, uint64_t, "n_owner_switch")			\
43*8e33eff8Schristos     OP(total_wait_time, uint64_t, "total_wait_ns")			\
44*8e33eff8Schristos     OP(max_wait_time, uint64_t, "max_wait_ns")
45*8e33eff8Schristos 
46*8e33eff8Schristos #define MUTEX_PROF_UINT32_COUNTERS					\
47*8e33eff8Schristos     OP(max_num_thds, uint32_t, "max_n_thds")
48*8e33eff8Schristos 
49*8e33eff8Schristos #define MUTEX_PROF_COUNTERS						\
50*8e33eff8Schristos 		MUTEX_PROF_UINT64_COUNTERS				\
51*8e33eff8Schristos 		MUTEX_PROF_UINT32_COUNTERS
52*8e33eff8Schristos 
/*
 * Generate index enums for the counter lists above.  OP maps each
 * counter entry to a mutex_counter_<name> enumerator, and
 * COUNTER_ENUM(list, t) wraps a list into a typedef'd enum named
 * mutex_prof_<t>_counter_ind_t whose trailing enumerator
 * (mutex_prof_num_<t>_counters) is the counter count.  Both helper
 * macros are #undef'd immediately after the two instantiations so they
 * don't leak to including files.
 */
53*8e33eff8Schristos #define OP(counter, type, human) mutex_counter_##counter,
54*8e33eff8Schristos 
55*8e33eff8Schristos #define COUNTER_ENUM(counter_list, t)					\
56*8e33eff8Schristos 		typedef enum {						\
57*8e33eff8Schristos 			counter_list					\
58*8e33eff8Schristos 			mutex_prof_num_##t##_counters			\
59*8e33eff8Schristos 		} mutex_prof_##t##_counter_ind_t;
60*8e33eff8Schristos 
61*8e33eff8Schristos COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
62*8e33eff8Schristos COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)
63*8e33eff8Schristos 
64*8e33eff8Schristos #undef COUNTER_ENUM
65*8e33eff8Schristos #undef OP
66*8e33eff8Schristos 
/*
 * Per-mutex profiling data.  Field order is deliberate: the
 * contention-path counters come first and the fast-path fields are
 * placed last so they sit near the lock itself (see the in-struct
 * comment about cacheline sharing) — do not reorder.
 */
67*8e33eff8Schristos typedef struct {
68*8e33eff8Schristos 	/*
69*8e33eff8Schristos 	 * Counters touched on the slow path, i.e. when there is lock
70*8e33eff8Schristos 	 * contention.  We update them once we have the lock.
71*8e33eff8Schristos 	 */
72*8e33eff8Schristos 	/* Total time (in nano seconds) spent waiting on this mutex. */
73*8e33eff8Schristos 	nstime_t		tot_wait_time;
74*8e33eff8Schristos 	/* Max time (in nano seconds) spent on a single lock operation. */
75*8e33eff8Schristos 	nstime_t		max_wait_time;
76*8e33eff8Schristos 	/* # of times have to wait for this mutex (after spinning). */
77*8e33eff8Schristos 	uint64_t		n_wait_times;
78*8e33eff8Schristos 	/* # of times acquired the mutex through local spinning. */
79*8e33eff8Schristos 	uint64_t		n_spin_acquired;
80*8e33eff8Schristos 	/* Max # of threads waiting for the mutex at the same time. */
81*8e33eff8Schristos 	uint32_t		max_n_thds;
82*8e33eff8Schristos 	/* Current # of threads waiting on the lock.  Atomic synced. */
83*8e33eff8Schristos 	atomic_u32_t		n_waiting_thds;
84*8e33eff8Schristos 
85*8e33eff8Schristos 	/*
86*8e33eff8Schristos 	 * Data touched on the fast path.  These are modified right after we
87*8e33eff8Schristos 	 * grab the lock, so it's placed closest to the end (i.e. right before
88*8e33eff8Schristos 	 * the lock) so that we have a higher chance of them being on the same
89*8e33eff8Schristos 	 * cacheline.
90*8e33eff8Schristos 	 */
91*8e33eff8Schristos 	/* # of times the mutex holder is different than the previous one. */
92*8e33eff8Schristos 	uint64_t		n_owner_switches;
93*8e33eff8Schristos 	/* Previous mutex holder, to facilitate n_owner_switches. */
94*8e33eff8Schristos 	tsdn_t			*prev_owner;
95*8e33eff8Schristos 	/* # of lock() operations in total. */
96*8e33eff8Schristos 	uint64_t		n_lock_ops;
97*8e33eff8Schristos } mutex_prof_data_t;
98*8e33eff8Schristos 
/*
 * Static initializer for mutex_prof_data_t: zeroes every counter, uses
 * the proper NSTIME/ATOMIC initializers for the non-scalar fields, and
 * names each field explicitly (in declaration order) so it stays in
 * sync with the struct above.
 */
99*8e33eff8Schristos #define MUTEX_PROF_DATA_INITIALIZER \
100*8e33eff8Schristos 	{ \
101*8e33eff8Schristos 		.tot_wait_time = NSTIME_INITIALIZER, \
102*8e33eff8Schristos 		.max_wait_time = NSTIME_INITIALIZER, \
103*8e33eff8Schristos 		.n_wait_times = 0, \
104*8e33eff8Schristos 		.n_spin_acquired = 0, \
105*8e33eff8Schristos 		.max_n_thds = 0, \
106*8e33eff8Schristos 		.n_waiting_thds = ATOMIC_INIT(0), \
107*8e33eff8Schristos 		.n_owner_switches = 0, \
108*8e33eff8Schristos 		.prev_owner = NULL, \
109*8e33eff8Schristos 		.n_lock_ops = 0, \
110*8e33eff8Schristos 	}
111*8e33eff8Schristos #endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
112