xref: /netbsd-src/external/bsd/jemalloc.old/include/jemalloc/internal/ticker.h (revision 8e33eff89e26cf71871ead62f0d5063e1313c33a)
1*8e33eff8Schristos #ifndef JEMALLOC_INTERNAL_TICKER_H
2*8e33eff8Schristos #define JEMALLOC_INTERNAL_TICKER_H
3*8e33eff8Schristos 
4*8e33eff8Schristos #include "jemalloc/internal/util.h"
5*8e33eff8Schristos 
6*8e33eff8Schristos /**
7*8e33eff8Schristos  * A ticker makes it easy to count-down events until some limit.  You
8*8e33eff8Schristos  * ticker_init the ticker to trigger every nticks events.  You then notify it
9*8e33eff8Schristos  * that an event has occurred with calls to ticker_tick (or that nticks events
10*8e33eff8Schristos  * have occurred with a call to ticker_ticks), which will return true (and reset
11*8e33eff8Schristos  * the counter) if the countdown hit zero.
12*8e33eff8Schristos  */
13*8e33eff8Schristos 
typedef struct {
	int32_t tick;	/* Events remaining before the ticker fires. */
	int32_t nticks;	/* Period: value tick is reset to after firing. */
} ticker_t;
18*8e33eff8Schristos 
19*8e33eff8Schristos static inline void
20*8e33eff8Schristos ticker_init(ticker_t *ticker, int32_t nticks) {
21*8e33eff8Schristos 	ticker->tick = nticks;
22*8e33eff8Schristos 	ticker->nticks = nticks;
23*8e33eff8Schristos }
24*8e33eff8Schristos 
25*8e33eff8Schristos static inline void
26*8e33eff8Schristos ticker_copy(ticker_t *ticker, const ticker_t *other) {
27*8e33eff8Schristos 	*ticker = *other;
28*8e33eff8Schristos }
29*8e33eff8Schristos 
30*8e33eff8Schristos static inline int32_t
31*8e33eff8Schristos ticker_read(const ticker_t *ticker) {
32*8e33eff8Schristos 	return ticker->tick;
33*8e33eff8Schristos }
34*8e33eff8Schristos 
35*8e33eff8Schristos /*
36*8e33eff8Schristos  * Not intended to be a public API.  Unfortunately, on x86, neither gcc nor
37*8e33eff8Schristos  * clang seems smart enough to turn
38*8e33eff8Schristos  *   ticker->tick -= nticks;
39*8e33eff8Schristos  *   if (unlikely(ticker->tick < 0)) {
40*8e33eff8Schristos  *     fixup ticker
41*8e33eff8Schristos  *     return true;
42*8e33eff8Schristos  *   }
43*8e33eff8Schristos  *   return false;
44*8e33eff8Schristos  * into
45*8e33eff8Schristos  *   subq %nticks_reg, (%ticker_reg)
46*8e33eff8Schristos  *   js fixup ticker
47*8e33eff8Schristos  *
48*8e33eff8Schristos  * unless we force "fixup ticker" out of line.  In that case, gcc gets it right,
49*8e33eff8Schristos  * but clang now does worse than before.  So, on x86 with gcc, we force it out
50*8e33eff8Schristos  * of line, but otherwise let the inlining occur.  Ordinarily this wouldn't be
51*8e33eff8Schristos  * worth the hassle, but this is on the fast path of both malloc and free (via
52*8e33eff8Schristos  * tcache_event).
53*8e33eff8Schristos  */
/*
 * Slow path: the countdown crossed below zero.  Rewind the counter to a
 * full period and report that the ticker fired.  Deliberately forced
 * out of line on x86 with gcc (see the codegen comment above) so the
 * ticker_ticks fast path compiles to a subtract-and-branch.
 */
#if defined(__GNUC__) && !defined(__clang__)				\
    && (defined(__x86_64__) || defined(__i386__))
JEMALLOC_NOINLINE
#endif
static bool
ticker_fixup(ticker_t *ticker) {
	ticker->tick = ticker->nticks;
	return true;
}
63*8e33eff8Schristos 
/*
 * Register nticks events at once.  Returns true -- and resets the
 * countdown via ticker_fixup -- exactly when the counter drops below
 * zero; returns false otherwise.  NOTE: the statement shape here is
 * intentional (see the codegen comment above ticker_fixup); do not
 * restructure without checking the generated code.
 */
static inline bool
ticker_ticks(ticker_t *ticker, int32_t nticks) {
	ticker->tick -= nticks;
	if (unlikely(ticker->tick < 0)) {
		return ticker_fixup(ticker);
	}
	return false;
}
72*8e33eff8Schristos 
73*8e33eff8Schristos static inline bool
74*8e33eff8Schristos ticker_tick(ticker_t *ticker) {
75*8e33eff8Schristos 	return ticker_ticks(ticker, 1);
76*8e33eff8Schristos }
77*8e33eff8Schristos 
78*8e33eff8Schristos #endif /* JEMALLOC_INTERNAL_TICKER_H */
79