#ifndef JEMALLOC_INTERNAL_TICKER_H
#define JEMALLOC_INTERNAL_TICKER_H

#include "jemalloc/internal/util.h"

/**
 * A ticker makes it easy to count down events until some limit.  You
 * ticker_init the ticker to trigger every nticks events.  You then notify it
 * that an event has occurred with calls to ticker_tick (or that nticks events
 * have occurred with a call to ticker_ticks), which will return true (and
 * reset the counter) if the countdown hit zero.
 */

typedef struct {
	int32_t tick;	/* Events remaining until the ticker fires. */
	int32_t nticks;	/* Period: tick is rewound to this value after firing. */
} ticker_t;
18 1.1 christos
19 1.1 christos static inline void
20 1.1 christos ticker_init(ticker_t *ticker, int32_t nticks) {
21 1.1 christos ticker->tick = nticks;
22 1.1 christos ticker->nticks = nticks;
23 1.1 christos }
24 1.1 christos
25 1.1 christos static inline void
26 1.1 christos ticker_copy(ticker_t *ticker, const ticker_t *other) {
27 1.1 christos *ticker = *other;
28 1.1 christos }
29 1.1 christos
30 1.1 christos static inline int32_t
31 1.1 christos ticker_read(const ticker_t *ticker) {
32 1.1 christos return ticker->tick;
33 1.1 christos }
34 1.1 christos
/*
 * Not intended to be a public API.  Unfortunately, on x86, neither gcc nor
 * clang seems smart enough to turn
 *   ticker->tick -= nticks;
 *   if (unlikely(ticker->tick < 0)) {
 *     fixup ticker
 *     return true;
 *   }
 *   return false;
 * into
 *   subq %nticks_reg, (%ticker_reg)
 *   js fixup ticker
 *
 * unless we force "fixup ticker" out of line.  In that case, gcc gets it
 * right, but clang now does worse than before.  So, on x86 with gcc, we force
 * it out of line, but otherwise let the inlining occur.  Ordinarily this
 * wouldn't be worth the hassle, but this is on the fast path of both malloc
 * and free (via tcache_event).
 */
/*
 * Slow path: rewind the countdown to its period and report that it fired.
 * Deliberately plain "static" (not inline), and forced out of line on x86
 * gcc via JEMALLOC_NOINLINE -- see the codegen rationale in the comment
 * above.
 */
#if defined(__GNUC__) && !defined(__clang__) \
    && (defined(__x86_64__) || defined(__i386__))
JEMALLOC_NOINLINE
#endif
static bool
ticker_fixup(ticker_t *ticker) {
	ticker->tick = ticker->nticks;
	return true;
}
63 1.1 christos
/*
 * Register nticks events at once; returns true (and rewinds the counter)
 * when the countdown crosses zero.  Kept in exactly this shape so the
 * compiler can emit a single sub + js on x86 -- see the comment above
 * ticker_fixup before restructuring.
 */
static inline bool
ticker_ticks(ticker_t *ticker, int32_t nticks) {
	ticker->tick -= nticks;
	if (unlikely(ticker->tick < 0)) {
		/* Out-of-line reset keeps the common path branch-free. */
		return ticker_fixup(ticker);
	}
	return false;
}
72 1.1 christos
/* Register a single event; equivalent to ticker_ticks(ticker, 1). */
static inline bool
ticker_tick(ticker_t *ticker) {
	return ticker_ticks(ticker, 1);
}
77 1.1 christos
#endif /* JEMALLOC_INTERNAL_TICKER_H */