#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H

static inline unsigned
arena_ind_get(const arena_t *arena) {
	return base_ind_get(arena->base);
}

/*
 * Track the number of bytes of internal (metadata) allocation attributed to
 * the arena.  Relaxed ordering suffices; these counters are statistics only.
 */
static inline void
arena_internal_add(arena_t *arena, size_t size) {
	atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}

static inline void
arena_internal_sub(arena_t *arena, size_t size) {
	atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}

static inline size_t
arena_internal_get(arena_t *arena) {
	return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
}

/*
 * Accumulate accumbytes toward the profile dump interval.  Returns true if
 * the interval has been reached and the caller should trigger an
 * interval-based dump.
 */
static inline bool
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
	cassert(config_prof);

	if (likely(prof_interval == 0 || !prof_active_get_unlocked())) {
		return false;
	}

	return prof_accum_add(tsdn, &arena->prof_accum, accumbytes);
}

#ifdef JEMALLOC_PERCPU_ARENA
/*
 * Migrate the calling thread to the arena that corresponds to the CPU it is
 * currently running on, reassociating its tcache as needed.
 */
static inline void
percpu_arena_update(tsd_t *tsd, unsigned cpu) {
	arena_t *oldarena = tsd_arena_get(tsd);
	assert(oldarena != NULL);
	unsigned oldind = arena_ind_get(oldarena);

	if (oldind != cpu) {
		unsigned newind = cpu;
		arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
		assert(newarena != NULL);

		/* Set new arena/tcache associations. */
		arena_migrate(tsd, oldind, newind);
		tcache_t *tcache = tcache_get(tsd);
		if (tcache != NULL) {
			tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
			    newarena);
		}
	}
}
#else
/* Per-CPU arena support is compiled out; this stub must never be called. */
static JEMALLOC_NORETURN inline void
percpu_arena_update(tsd_t *tsd, unsigned cpu) {
	/* Silence unused-parameter warnings in the unreachable stub. */
	(void)tsd;
	(void)cpu;
	abort();
}
#endif

#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */