#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
/* Data. */

/*
 * Define names for both uninitialized and initialized phases, so that
 * options and mallctl processing are straightforward.
 */
const char *percpu_arena_mode_names[] = {
	"percpu",
	"phycpu",
	"disabled",
	"percpu",
	"phycpu"
};
percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;

ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;

static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;

const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y)			\
	h,
	SMOOTHSTEP
#undef STEP
};
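
/*
 * Added note: h_steps[] holds SMOOTHSTEP_NSTEPS fixed-point samples of the
 * smoothstep curve, scaled by 2^SMOOTHSTEP_BFP.  The decay code weights
 * each entry of the per-epoch page backlog by the matching sample (see
 * arena_decay_backlog_npages_limit()), so unused dirty pages decay along
 * a smooth sigmoid rather than linearly or all at once.
 */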

static div_info_t arena_binind_div_info[NBINS];

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
    size_t npages_decay_max, bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
    bool is_background_thread, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin);

/******************************************************************************/

void
arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena_dss_prec_get(arena)];
	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
	*ndirty += extents_npages_get(&arena->extents_dirty);
	*nmuzzy += extents_npages_get(&arena->extents_muzzy);
}

void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    bin_stats_t *bstats, arena_stats_large_t *lstats) {
	cassert(config_stats);

	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
	    muzzy_decay_ms, nactive, ndirty, nmuzzy);

	size_t base_allocated, base_resident, base_mapped, metadata_thp;
	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
	    &base_mapped, &metadata_thp);

	arena_stats_lock(tsdn, &arena->stats);

	arena_stats_accum_zu(&astats->mapped, base_mapped
	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
	arena_stats_accum_zu(&astats->retained,
	    extents_npages_get(&arena->extents_retained) << LG_PAGE);

	arena_stats_accum_u64(&astats->decay_dirty.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.npurge));
	arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.nmadvise));
	arena_stats_accum_u64(&astats->decay_dirty.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.purged));

	arena_stats_accum_u64(&astats->decay_muzzy.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.npurge));
	arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.nmadvise));
	arena_stats_accum_u64(&astats->decay_muzzy.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.purged));

	arena_stats_accum_zu(&astats->base, base_allocated);
	arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
	arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
	arena_stats_accum_zu(&astats->resident, base_resident +
	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
	    extents_npages_get(&arena->extents_dirty) +
	    extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));

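	/*
	 * Added note: large stats are indexed relative to NBINS, i.e.
	 * lstats[i] covers size class NBINS + i, which is why byte counts
	 * below use sz_index2size(NBINS + i).  curlextents is reconstructed
	 * as nmalloc - ndalloc rather than tracked directly.
	 */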
	for (szind_t i = 0; i < NSIZES - NBINS; i++) {
		uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nmalloc);
		arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
		arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);

		uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].ndalloc);
		arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
		arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);

		uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nrequests);
		arena_stats_accum_u64(&lstats[i].nrequests,
		    nmalloc + nrequests);
		arena_stats_accum_u64(&astats->nrequests_large,
		    nmalloc + nrequests);

		assert(nmalloc >= ndalloc);
		assert(nmalloc - ndalloc <= SIZE_T_MAX);
		size_t curlextents = (size_t)(nmalloc - ndalloc);
		lstats[i].curlextents += curlextents;
		arena_stats_accum_zu(&astats->allocated_large,
		    curlextents * sz_index2size(NBINS + i));
	}

	arena_stats_unlock(tsdn, &arena->stats);

	/* tcache_bytes counts currently cached bytes. */
	atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
	cache_bin_array_descriptor_t *descriptor;
	ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
		szind_t i = 0;
		for (; i < NBINS; i++) {
			cache_bin_t *tbin = &descriptor->bins_small[i];
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
		for (; i < nhbins; i++) {
			cache_bin_t *tbin = &descriptor->bins_large[i];
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
	}
	malloc_mutex_prof_read(tsdn,
	    &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
	    &arena->tcache_ql_mtx);
	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);

#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind)				\
    malloc_mutex_lock(tsdn, &arena->mtx);				\
    malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind],		\
        &arena->mtx);							\
    malloc_mutex_unlock(tsdn, &arena->mtx);

	/* Gather per arena mutex profiling data. */
	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
	READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
	    arena_prof_mutex_extent_avail)
	READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
	    arena_prof_mutex_extents_dirty)
	READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
	    arena_prof_mutex_extents_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
	    arena_prof_mutex_extents_retained)
	READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
	    arena_prof_mutex_decay_dirty)
	READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
	    arena_prof_mutex_decay_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(base->mtx,
	    arena_prof_mutex_base)
#undef READ_ARENA_MUTEX_PROF_DATA

	nstime_copy(&astats->uptime, &arena->create_time);
	nstime_update(&astats->uptime);
	nstime_subtract(&astats->uptime, &arena->create_time);

	for (szind_t i = 0; i < NBINS; i++) {
		bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]);
	}
}

void
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
	    extent);
	if (arena_dirty_decay_ms_get(arena) == 0) {
		arena_decay_dirty(tsdn, arena, false, true);
	} else {
		arena_background_thread_inactivity_check(tsdn, arena, false);
	}
}

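/*
 * Added note: carve one region out of a nonfull slab.  bitmap_sfu() ("set
 * first unset") finds and sets the lowest clear bit in the slab's bitmap,
 * and the region address is derived from that index and the bin's region
 * size.
 */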
static void *
arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
	void *ret;
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	size_t regind;

	assert(extent_nfree_get(slab) > 0);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)extent_addr_get(slab) +
	    (uintptr_t)(bin_info->reg_size * regind));
	extent_nfree_dec(slab);
	return ret;
}

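/*
 * Added note: map a pointer back to its region index within a slab.
 * Dividing by a variable reg_size would be comparatively slow, so the
 * divisions are precomputed (div_init() fills arena_binind_div_info[] at
 * boot) and div_compute() replays each one as a multiply-and-shift.
 */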
#ifndef JEMALLOC_JET
static
#endif
size_t
arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
	size_t diff, regind;

	/* Freeing a pointer outside the slab can cause assertion failure. */
	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
	    (uintptr_t)bin_infos[binind].reg_size == 0);

	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));

	/* Avoid doing division with a variable divisor. */
	regind = div_compute(&arena_binind_div_info[binind], diff);

	assert(regind < bin_infos[binind].nregs);

	return regind;
}

static void
arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
	szind_t binind = extent_szind_get(slab);
	const bin_info_t *bin_info = &bin_infos[binind];
	size_t regind = arena_slab_regind(slab, binind, ptr);

	assert(extent_nfree_get(slab) < bin_info->nregs);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
	extent_nfree_inc(slab);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages) {
	atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages) {
	assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
	atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
}

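/*
 * Added note: the large-stats helpers below clamp usize up to
 * LARGE_MINCLASS because, with profiling, sampled small allocations may be
 * promoted to a large extent while keeping a small usize; such allocations
 * are accounted to the smallest large size class.  hindex rebases the size
 * class index so that lstats[0] corresponds to LARGE_MINCLASS.
 */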
static void
arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < LARGE_MINCLASS) {
		usize = LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= NBINS) ? index - NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].nmalloc, 1);
}

static void
arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < LARGE_MINCLASS) {
		usize = LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= NBINS) ? index - NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].ndalloc, 1);
}

static void
arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
    size_t usize) {
	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
	arena_large_malloc_stats_update(tsdn, arena, usize);
}

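/*
 * Added note: large allocation tries progressively more expensive sources,
 * first reusing a dirty extent, then a muzzy one, and only then mapping new
 * memory through extent_alloc_wrapper(); only that last path adds to
 * stats.mapped.
 */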
extent_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	szind_t szind = sz_size2index(usize);
	size_t mapped_add;
	bool commit = true;
	extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
	    szind, zero, &commit);
	if (extent == NULL) {
		extent = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
		    false, szind, zero, &commit);
	}
	size_t size = usize + sz_large_pad;
	if (extent == NULL) {
		extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
		    usize, sz_large_pad, alignment, false, szind, zero,
		    &commit);
		if (config_stats) {
			/*
			 * extent may be NULL on OOM, but in that case
			 * mapped_add isn't used below, so there's no need to
			 * conditionally set it to 0 here.
			 */
			mapped_add = size;
		}
	} else if (config_stats) {
		mapped_add = 0;
	}

	if (extent != NULL) {
		if (config_stats) {
			arena_stats_lock(tsdn, &arena->stats);
			arena_large_malloc_stats_update(tsdn, arena, usize);
			if (mapped_add != 0) {
				arena_stats_add_zu(tsdn, &arena->stats,
				    &arena->stats.mapped, mapped_add);
			}
			arena_stats_unlock(tsdn, &arena->stats);
		}
		arena_nactive_add(arena, size >> LG_PAGE);
	}

	return extent;
}

void
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_dalloc_stats_update(tsdn, arena,
		    extent_usize_get(extent));
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
}

void
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = oldusize - usize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);
}

void
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = usize - oldusize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);
}

static ssize_t
arena_decay_ms_read(arena_decay_t *decay) {
	return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
}

static void
arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
	atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
}

static void
arena_decay_deadline_init(arena_decay_t *decay) {
	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&decay->deadline, &decay->epoch);
	nstime_add(&decay->deadline, &decay->interval);
	if (arena_decay_ms_read(decay) > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
		    nstime_ns(&decay->interval)));
		nstime_add(&decay->deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
	return (nstime_compare(&decay->deadline, time) <= 0);
}

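/*
 * Added note, an illustrative reading of the computation below: an epoch's
 * worth of pages recorded in backlog[i] contributes roughly
 *
 *	backlog[i] * h_steps[i] / 2^SMOOTHSTEP_BFP
 *
 * pages to the unpurged limit.  Recently dirtied pages (i near
 * SMOOTHSTEP_NSTEPS-1, weight near 1.0) are retained almost entirely,
 * while pages dirtied nearly SMOOTHSTEP_NSTEPS epochs ago (i near 0,
 * weight near 0) become eligible for purging.
 */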
static size_t
arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += decay->backlog[i] * h_steps[i];
	}
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

	return npages_limit_backlog;
}

static void
arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
	size_t npages_delta = (current_npages > decay->nunpurged) ?
	    current_npages - decay->nunpurged : 0;
	decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;

	if (config_debug) {
		if (current_npages > decay->ceil_npages) {
			decay->ceil_npages = current_npages;
		}
		size_t npages_limit = arena_decay_backlog_npages_limit(decay);
		assert(decay->ceil_npages >= npages_limit);
		if (decay->ceil_npages > npages_limit) {
			decay->ceil_npages = npages_limit;
		}
	}
}

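/*
 * Added note: the backlog is a sliding window over the last
 * SMOOTHSTEP_NSTEPS epochs.  Advancing by nadvance epochs shifts entries
 * toward index 0 (discarding the oldest), zeroes the newly exposed slots,
 * and records the current epoch's delta in the final slot via the helper
 * above.
 */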
static void
arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
    size_t current_npages) {
	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		size_t nadvance_z = (size_t)nadvance_u64;

		assert((uint64_t)nadvance_z == nadvance_u64);

		memmove(decay->backlog, &decay->backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
		}
	}

	arena_decay_backlog_update_last(decay, current_npages);
}

static void
arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, size_t current_npages, size_t npages_limit,
    bool is_background_thread) {
	if (current_npages > npages_limit) {
		arena_decay_to_limit(tsdn, arena, decay, extents, false,
		    npages_limit, current_npages - npages_limit,
		    is_background_thread);
	}
}

static void
arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
    size_t current_npages) {
	assert(arena_decay_deadline_reached(decay, time));

	nstime_t delta;
	nstime_copy(&delta, time);
	nstime_subtract(&delta, &decay->epoch);

	uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
	assert(nadvance_u64 > 0);

	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &decay->interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&decay->epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(decay);

	/* Update the backlog. */
	arena_decay_backlog_update(decay, nadvance_u64, current_npages);
}

static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, const nstime_t *time, bool is_background_thread) {
	size_t current_npages = extents_npages_get(extents);
	arena_decay_epoch_advance_helper(decay, time, current_npages);

	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
	/* We may unlock decay->mtx when try_purge(). Finish logging first. */
	decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
	    current_npages;

	if (!background_thread_enabled() || is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    current_npages, npages_limit, is_background_thread);
	}
}

static void
arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
	arena_decay_ms_write(decay, decay_ms);
	if (decay_ms > 0) {
		nstime_init(&decay->interval, (uint64_t)decay_ms *
		    KQU(1000000));
		nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&decay->epoch, 0);
	nstime_update(&decay->epoch);
	decay->jitter_state = (uint64_t)(uintptr_t)decay;
	arena_decay_deadline_init(decay);
	decay->nunpurged = 0;
	memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
    arena_stats_decay_t *stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
			assert(((char *)decay)[i] == 0);
		}
		decay->ceil_npages = 0;
	}
	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	decay->purging = false;
	arena_decay_reinit(decay, decay_ms);
	/* Memory is zeroed, so there is no need to clear stats. */
	if (config_stats) {
		decay->stats = stats;
	}
	return false;
}

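/*
 * Added note: decay_ms encodes three modes.  -1 disables purging entirely,
 * 0 purges unused dirty pages eagerly, and a positive value decays them
 * gradually over roughly that many milliseconds, split into
 * SMOOTHSTEP_NSTEPS epochs as set up above.
 */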
static bool
arena_decay_ms_valid(ssize_t decay_ms) {
	if (decay_ms < -1) {
		return false;
	}
	if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
	    KQU(1000)) {
		return true;
	}
	return false;
}

static bool
arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread) {
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	/* Purge all or nothing if the option is disabled. */
	ssize_t decay_ms = arena_decay_ms_read(decay);
	if (decay_ms <= 0) {
		if (decay_ms == 0) {
			arena_decay_to_limit(tsdn, arena, decay, extents, false,
			    0, extents_npages_get(extents),
			    is_background_thread);
		}
		return false;
	}

	nstime_t time;
	nstime_init(&time, 0);
	nstime_update(&time);
	if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
	    > 0)) {
		/*
		 * Time went backwards.  Move the epoch back in time and
		 * generate a new deadline, with the expectation that time
		 * typically flows forward for long enough periods of time that
		 * epochs complete.  Unfortunately, this strategy is susceptible
		 * to clock jitter triggering premature epoch advances, but
		 * clock jitter estimation and compensation isn't feasible here
		 * because calls into this code are event-driven.
		 */
		nstime_copy(&decay->epoch, &time);
		arena_decay_deadline_init(decay);
#ifndef __NetBSD__
	} else {
		/* Verify that time does not go backwards. */
		assert(nstime_compare(&decay->epoch, &time) <= 0);
#endif
	}

	/*
	 * If the deadline has been reached, advance to the current epoch and
	 * purge to the new limit if necessary.  Note that dirty pages created
	 * during the current epoch are not subject to purge until a future
	 * epoch, so purging only happens during epoch advances or when
	 * triggered by a background thread (scheduled event).
	 */
	bool advance_epoch = arena_decay_deadline_reached(decay, &time);
	if (advance_epoch) {
		arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
		    is_background_thread);
	} else if (is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    extents_npages_get(extents),
		    arena_decay_backlog_npages_limit(decay),
		    is_background_thread);
	}

	return advance_epoch;
}

static ssize_t
arena_decay_ms_get(arena_decay_t *decay) {
	return arena_decay_ms_read(decay);
}

ssize_t
arena_dirty_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_dirty);
}

ssize_t
arena_muzzy_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_muzzy);
}

static bool
arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_ms changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_reinit(decay, decay_ms);
	arena_maybe_decay(tsdn, arena, decay, extents, false);
	malloc_mutex_unlock(tsdn, &decay->mtx);

	return false;
}

bool
arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, decay_ms);
}

bool
arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, decay_ms);
}

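/*
 * Added note: purging is split into two phases.  arena_stash_decayed()
 * evicts candidate extents onto a private list (bounded by
 * npages_decay_max), and arena_decay_stashed() then purges and/or unmaps
 * everything stashed.  This keeps the extents container consistent while
 * the potentially slow purge calls run without holding its lock.
 */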
703*8e33eff8Schristos */ 704*8e33eff8Schristos arena_decay_reinit(decay, decay_ms); 705*8e33eff8Schristos arena_maybe_decay(tsdn, arena, decay, extents, false); 706*8e33eff8Schristos malloc_mutex_unlock(tsdn, &decay->mtx); 707*8e33eff8Schristos 708*8e33eff8Schristos return false; 709*8e33eff8Schristos } 710*8e33eff8Schristos 711*8e33eff8Schristos bool 712*8e33eff8Schristos arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, 713*8e33eff8Schristos ssize_t decay_ms) { 714*8e33eff8Schristos return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty, 715*8e33eff8Schristos &arena->extents_dirty, decay_ms); 716*8e33eff8Schristos } 717*8e33eff8Schristos 718*8e33eff8Schristos bool 719*8e33eff8Schristos arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, 720*8e33eff8Schristos ssize_t decay_ms) { 721*8e33eff8Schristos return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy, 722*8e33eff8Schristos &arena->extents_muzzy, decay_ms); 723*8e33eff8Schristos } 724*8e33eff8Schristos 725*8e33eff8Schristos static size_t 726*8e33eff8Schristos arena_stash_decayed(tsdn_t *tsdn, arena_t *arena, 727*8e33eff8Schristos extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit, 728*8e33eff8Schristos size_t npages_decay_max, extent_list_t *decay_extents) { 729*8e33eff8Schristos witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 730*8e33eff8Schristos WITNESS_RANK_CORE, 0); 731*8e33eff8Schristos 732*8e33eff8Schristos /* Stash extents according to npages_limit. */ 733*8e33eff8Schristos size_t nstashed = 0; 734*8e33eff8Schristos extent_t *extent; 735*8e33eff8Schristos while (nstashed < npages_decay_max && 736*8e33eff8Schristos (extent = extents_evict(tsdn, arena, r_extent_hooks, extents, 737*8e33eff8Schristos npages_limit)) != NULL) { 738*8e33eff8Schristos extent_list_append(decay_extents, extent); 739*8e33eff8Schristos nstashed += extent_size_get(extent) >> LG_PAGE; 740*8e33eff8Schristos } 741*8e33eff8Schristos return nstashed; 742*8e33eff8Schristos } 743*8e33eff8Schristos 744*8e33eff8Schristos static size_t 745*8e33eff8Schristos arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, 746*8e33eff8Schristos extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents, 747*8e33eff8Schristos bool all, extent_list_t *decay_extents, bool is_background_thread) { 748*8e33eff8Schristos UNUSED size_t nmadvise, nunmapped; 749*8e33eff8Schristos size_t npurged; 750*8e33eff8Schristos 751*8e33eff8Schristos if (config_stats) { 752*8e33eff8Schristos nmadvise = 0; 753*8e33eff8Schristos nunmapped = 0; 754*8e33eff8Schristos } 755*8e33eff8Schristos npurged = 0; 756*8e33eff8Schristos 757*8e33eff8Schristos ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); 758*8e33eff8Schristos for (extent_t *extent = extent_list_first(decay_extents); extent != 759*8e33eff8Schristos NULL; extent = extent_list_first(decay_extents)) { 760*8e33eff8Schristos if (config_stats) { 761*8e33eff8Schristos nmadvise++; 762*8e33eff8Schristos } 763*8e33eff8Schristos size_t npages = extent_size_get(extent) >> LG_PAGE; 764*8e33eff8Schristos npurged += npages; 765*8e33eff8Schristos extent_list_remove(decay_extents, extent); 766*8e33eff8Schristos switch (extents_state_get(extents)) { 767*8e33eff8Schristos case extent_state_active: 768*8e33eff8Schristos not_reached(); 769*8e33eff8Schristos case extent_state_dirty: 770*8e33eff8Schristos if (!all && muzzy_decay_ms != 0 && 771*8e33eff8Schristos !extent_purge_lazy_wrapper(tsdn, arena, 772*8e33eff8Schristos r_extent_hooks, extent, 0, 773*8e33eff8Schristos extent_size_get(extent))) { 
static size_t
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
    bool all, extent_list_t *decay_extents, bool is_background_thread) {
	UNUSED size_t nmadvise, nunmapped;
	size_t npurged;

	if (config_stats) {
		nmadvise = 0;
		nunmapped = 0;
	}
	npurged = 0;

	ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	for (extent_t *extent = extent_list_first(decay_extents); extent !=
	    NULL; extent = extent_list_first(decay_extents)) {
		if (config_stats) {
			nmadvise++;
		}
		size_t npages = extent_size_get(extent) >> LG_PAGE;
		npurged += npages;
		extent_list_remove(decay_extents, extent);
		switch (extents_state_get(extents)) {
		case extent_state_active:
			not_reached();
		case extent_state_dirty:
			if (!all && muzzy_decay_ms != 0 &&
			    !extent_purge_lazy_wrapper(tsdn, arena,
			    r_extent_hooks, extent, 0,
			    extent_size_get(extent))) {
				extents_dalloc(tsdn, arena, r_extent_hooks,
				    &arena->extents_muzzy, extent);
				arena_background_thread_inactivity_check(tsdn,
				    arena, is_background_thread);
				break;
			}
			/* Fall through. */
		case extent_state_muzzy:
			extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
			    extent);
			if (config_stats) {
				nunmapped += npages;
			}
			break;
		case extent_state_retained:
		default:
			not_reached();
		}
	}

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
		    1);
		arena_stats_add_u64(tsdn, &arena->stats,
		    &decay->stats->nmadvise, nmadvise);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
		    npurged);
		arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
		    nunmapped << LG_PAGE);
		arena_stats_unlock(tsdn, &arena->stats);
	}

	return npurged;
}

/*
 * npages_limit: Decay at most npages_decay_max pages without violating the
 * invariant: (extents_npages_get(extents) >= npages_limit).  We need an upper
 * bound on number of pages in order to prevent unbounded growth (namely in
 * stashed), otherwise unbounded new pages could be added to extents during the
 * current decay run, so that the purging thread never finishes.
 */
static void
arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
    bool is_background_thread) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 1);
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	if (decay->purging) {
		return;
	}
	decay->purging = true;
	malloc_mutex_unlock(tsdn, &decay->mtx);

	extent_hooks_t *extent_hooks = extent_hooks_get(arena);

	extent_list_t decay_extents;
	extent_list_init(&decay_extents);

	size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
	    npages_limit, npages_decay_max, &decay_extents);
	if (npurge != 0) {
		UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
		    &extent_hooks, decay, extents, all, &decay_extents,
		    is_background_thread);
		assert(npurged == npurge);
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	decay->purging = false;
}

816*8e33eff8Schristos */ 817*8e33eff8Schristos static void 818*8e33eff8Schristos arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, 819*8e33eff8Schristos extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max, 820*8e33eff8Schristos bool is_background_thread) { 821*8e33eff8Schristos witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 822*8e33eff8Schristos WITNESS_RANK_CORE, 1); 823*8e33eff8Schristos malloc_mutex_assert_owner(tsdn, &decay->mtx); 824*8e33eff8Schristos 825*8e33eff8Schristos if (decay->purging) { 826*8e33eff8Schristos return; 827*8e33eff8Schristos } 828*8e33eff8Schristos decay->purging = true; 829*8e33eff8Schristos malloc_mutex_unlock(tsdn, &decay->mtx); 830*8e33eff8Schristos 831*8e33eff8Schristos extent_hooks_t *extent_hooks = extent_hooks_get(arena); 832*8e33eff8Schristos 833*8e33eff8Schristos extent_list_t decay_extents; 834*8e33eff8Schristos extent_list_init(&decay_extents); 835*8e33eff8Schristos 836*8e33eff8Schristos size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents, 837*8e33eff8Schristos npages_limit, npages_decay_max, &decay_extents); 838*8e33eff8Schristos if (npurge != 0) { 839*8e33eff8Schristos UNUSED size_t npurged = arena_decay_stashed(tsdn, arena, 840*8e33eff8Schristos &extent_hooks, decay, extents, all, &decay_extents, 841*8e33eff8Schristos is_background_thread); 842*8e33eff8Schristos assert(npurged == npurge); 843*8e33eff8Schristos } 844*8e33eff8Schristos 845*8e33eff8Schristos malloc_mutex_lock(tsdn, &decay->mtx); 846*8e33eff8Schristos decay->purging = false; 847*8e33eff8Schristos } 848*8e33eff8Schristos 849*8e33eff8Schristos static bool 850*8e33eff8Schristos arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, 851*8e33eff8Schristos extents_t *extents, bool is_background_thread, bool all) { 852*8e33eff8Schristos if (all) { 853*8e33eff8Schristos malloc_mutex_lock(tsdn, &decay->mtx); 854*8e33eff8Schristos arena_decay_to_limit(tsdn, arena, decay, extents, all, 0, 855*8e33eff8Schristos extents_npages_get(extents), is_background_thread); 856*8e33eff8Schristos malloc_mutex_unlock(tsdn, &decay->mtx); 857*8e33eff8Schristos 858*8e33eff8Schristos return false; 859*8e33eff8Schristos } 860*8e33eff8Schristos 861*8e33eff8Schristos if (malloc_mutex_trylock(tsdn, &decay->mtx)) { 862*8e33eff8Schristos /* No need to wait if another thread is in progress. */ 863*8e33eff8Schristos return true; 864*8e33eff8Schristos } 865*8e33eff8Schristos 866*8e33eff8Schristos bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents, 867*8e33eff8Schristos is_background_thread); 868*8e33eff8Schristos UNUSED size_t npages_new; 869*8e33eff8Schristos if (epoch_advanced) { 870*8e33eff8Schristos /* Backlog is updated on epoch advance. 
void
arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
		return;
	}
	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}

static void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
}

static void
arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) > 0);
	extent_heap_insert(&bin->slabs_nonfull, slab);
}

static void
arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
	extent_heap_remove(&bin->slabs_nonfull, slab);
}

static extent_t *
arena_bin_slabs_nonfull_tryget(bin_t *bin) {
	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
	if (slab == NULL) {
		return NULL;
	}
	if (config_stats) {
		bin->stats.reslabs++;
	}
	return slab;
}

static void
arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) == 0);
	/*
	 * Tracking extents is required by arena_reset, which is not allowed
	 * for auto arenas.  Bypass this step to avoid touching the extent
	 * linkage (often results in cache misses) for auto arenas.
	 */
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_append(&bin->slabs_full, slab);
}

static void
arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_remove(&bin->slabs_full, slab);
}

void
arena_reset(tsd_t *tsd, arena_t *arena) {
	/*
	 * Locking in this function is unintuitive.  The caller guarantees that
	 * no concurrent operations are happening in this arena, but there are
	 * still reasons that some locking is necessary:
	 *
	 * - Some of the functions in the transitive closure of calls assume
	 *   appropriate locks are held, and in some cases these locks are
	 *   temporarily dropped to avoid lock order reversal or deadlock due to
	 *   reentry.
	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
	 *   strictly speaking this is a "concurrent operation", disallowing
	 *   stats refreshes would impose an inconvenient burden.
	 */

	/* Large allocations. */
	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);

	for (extent_t *extent = extent_list_first(&arena->large); extent !=
	    NULL; extent = extent_list_first(&arena->large)) {
		void *ptr = extent_base_get(extent);
		size_t usize;

		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
		alloc_ctx_t alloc_ctx;
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
		assert(alloc_ctx.szind != NSIZES);

		if (config_stats || (config_prof && opt_prof)) {
			usize = sz_index2size(alloc_ctx.szind);
			assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		}
		/* Remove large allocation from prof sample set. */
		if (config_prof && opt_prof) {
			prof_free(tsd, ptr, usize, &alloc_ctx);
		}
		large_dalloc(tsd_tsdn(tsd), extent);
		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);

	/* Bins. */
	for (unsigned i = 0; i < NBINS; i++) {
		extent_t *slab;
		bin_t *bin = &arena->bins[i];
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		if (bin->slabcur != NULL) {
			slab = bin->slabcur;
			bin->slabcur = NULL;
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
		    NULL) {
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
		    slab = extent_list_first(&bin->slabs_full)) {
			arena_bin_slabs_full_remove(arena, bin, slab);
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		if (config_stats) {
			bin->stats.curregs = 0;
			bin->stats.curslabs = 0;
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
	}

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
}

static void
arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
	/*
	 * Iterate over the retained extents and destroy them.  This gives the
	 * extent allocator underlying the extent hooks an opportunity to unmap
	 * all retained memory without having to keep its own metadata
	 * structures.  In practice, virtual memory for dss-allocated extents
	 * is leaked here, so best practice is to avoid dss for arenas to be
	 * destroyed, or provide custom extent hooks that track retained
	 * dss-based extents for later reuse.
	 */
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	extent_t *extent;
	while ((extent = extents_evict(tsdn, arena, &extent_hooks,
	    &arena->extents_retained, 0)) != NULL) {
		extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
	}
}

void
arena_destroy(tsd_t *tsd, arena_t *arena) {
	assert(base_ind_get(arena->base) >= narenas_auto);
	assert(arena_nthreads_get(arena, false) == 0);
	assert(arena_nthreads_get(arena, true) == 0);

	/*
	 * No allocations have occurred since arena_reset() was called.
	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
	 * extents, so only retained extents may remain.
	 */
	assert(extents_npages_get(&arena->extents_dirty) == 0);
	assert(extents_npages_get(&arena->extents_muzzy) == 0);

	/* Deallocate retained memory. */
	arena_destroy_retained(tsd_tsdn(tsd), arena);

	/*
	 * Remove the arena pointer from the arenas array.  We rely on the fact
	 * that there is no way for the application to get a dirty read from
	 * the arenas array unless there is an inherent race in the application
	 * involving access of an arena being concurrently destroyed.  The
	 * application must synchronize knowledge of the arena's validity, so
	 * as long as we use an atomic write to update the arenas array, the
	 * application will get a clean read any time after it synchronizes
	 * knowledge that the arena is no longer valid.
	 */
	arena_set(base_ind_get(arena->base), NULL);

	/*
	 * Destroy the base allocator, which manages all metadata ever mapped
	 * by this arena.
	 */
	base_delete(tsd_tsdn(tsd), arena->base);
}

static extent_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
    szind_t szind) {
	extent_t *slab;
	bool zero, commit;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	zero = false;
	commit = true;
	slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
	    bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);

	if (config_stats && slab != NULL) {
		arena_stats_mapped_add(tsdn, &arena->stats,
		    bin_info->slab_size);
	}

	return slab;
}

static extent_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    const bin_info_t *bin_info) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	szind_t szind = sz_size2index(bin_info->reg_size);
	bool zero = false;
	bool commit = true;
	extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
	    binind, &zero, &commit);
	if (slab == NULL) {
		slab = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
		    true, binind, &zero, &commit);
	}
	if (slab == NULL) {
		slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
		    bin_info, szind);
		if (slab == NULL) {
			return NULL;
		}
	}
	assert(extent_slab_get(slab));

	/* Initialize slab internals. */
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	extent_nfree_set(slab, bin_info->nregs);
	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);

	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);

	return slab;
}

static extent_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    const bin_info_t *bin_info) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	szind_t szind = sz_size2index(bin_info->reg_size);
	bool zero = false;
	bool commit = true;
	extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
	    binind, &zero, &commit);
	if (slab == NULL) {
		slab = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
		    true, binind, &zero, &commit);
	}
	if (slab == NULL) {
		slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
		    bin_info, szind);
		if (slab == NULL) {
			return NULL;
		}
	}
	assert(extent_slab_get(slab));

	/* Initialize slab internals. */
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	extent_nfree_set(slab, bin_info->nregs);
	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);

	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);

	return slab;
}
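/*
 * bin->lock must be held on entry.  The lock is dropped around the call to
 * arena_slab_alloc(), since slab allocation can recurse into the extent
 * layer and take core locks; callers must therefore tolerate concurrent
 * updates to bin->slabcur (see the re-check in arena_bin_malloc_hard()).
 */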
static extent_t *
arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind) {
	extent_t *slab;
	const bin_info_t *bin_info;

	/* Look for a usable slab. */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL) {
		return slab;
	}
	/* No existing slabs have any space available. */

	bin_info = &bin_infos[binind];

	/* Allocate a new slab. */
	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
	/********************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (slab != NULL) {
		if (config_stats) {
			bin->stats.nslabs++;
			bin->stats.curslabs++;
		}
		return slab;
	}

	/*
	 * arena_slab_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL) {
		return slab;
	}

	return NULL;
}

/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind) {
	const bin_info_t *bin_info;
	extent_t *slab;

	bin_info = &bin_infos[binind];
	if (!arena_is_auto(arena) && bin->slabcur != NULL) {
		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	}
	slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
	if (bin->slabcur != NULL) {
		/*
		 * Another thread updated slabcur while this one ran without
		 * the bin lock in arena_bin_nonfull_slab_get().
		 */
		if (extent_nfree_get(bin->slabcur) > 0) {
			void *ret = arena_slab_reg_alloc(bin->slabcur,
			    bin_info);
			if (slab != NULL) {
				/*
				 * arena_slab_alloc() may have allocated slab,
				 * or it may have been pulled from
				 * slabs_nonfull.  Therefore it is unsafe to
				 * make any assumptions about how slab has
				 * previously been used, and
				 * arena_bin_lower_slab() must be called, as if
				 * a region were just deallocated from the
				 * slab.
				 */
				if (extent_nfree_get(slab) == bin_info->nregs) {
					arena_dalloc_bin_slab(tsdn, arena, slab,
					    bin);
				} else {
					arena_bin_lower_slab(tsdn, arena, slab,
					    bin);
				}
			}
			return ret;
		}

		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	}

	if (slab == NULL) {
		return NULL;
	}
	bin->slabcur = slab;

	assert(extent_nfree_get(bin->slabcur) > 0);

	return arena_slab_reg_alloc(slab, bin_info);
}
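/*
 * The fill count below is ncached_max reduced by the per-bin fill divisor,
 * so a cache bin that is flushed more often than it is emptied gets refilled
 * with fewer regions at a time.  For example (illustrative numbers only):
 * with ncached_max == 200 and lg_fill_div == 1, one fill request populates
 * 200 >> 1 == 100 regions.
 */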
void
arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
	unsigned i, nfill;
	bin_t *bin;

	assert(tbin->ncached == 0);

	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
		prof_idump(tsdn);
	}
	bin = &arena->bins[binind];
	malloc_mutex_lock(tsdn, &bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tcache->lg_fill_div[binind]); i < nfill; i++) {
		extent_t *slab;
		void *ptr;
		if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
		    0) {
			ptr = arena_slab_reg_alloc(slab, &bin_infos[binind]);
		} else {
			ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
		}
		if (ptr == NULL) {
			/*
			 * OOM.  tbin->avail isn't yet filled down to its first
			 * element, so the successful allocations (if any) must
			 * be moved just before tbin->avail before bailing out.
			 */
			if (i > 0) {
				memmove(tbin->avail - i, tbin->avail - nfill,
				    i * sizeof(void *));
			}
			break;
		}
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ptr, &bin_infos[binind], true);
		}
		/* Insert such that low regions get used first. */
		*(tbin->avail - nfill + i) = ptr;
	}
	if (config_stats) {
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.curregs += i;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	tbin->ncached = i;
	arena_decay_tick(tsdn, arena);
}
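/*
 * JEMALLOC_ALLOC_JUNK and JEMALLOC_FREE_JUNK are distinct byte patterns
 * (0xa5 and 0x5a respectively in stock jemalloc), so that memory observed in
 * a debugger can be identified as freshly allocated junk versus freed junk
 * when the junk fill options are enabled.
 */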
void
arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
	if (!zero) {
		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
	}
}

static void
arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}
arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
    arena_dalloc_junk_small_impl;

static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
	void *ret;
	bin_t *bin;
	size_t usize;
	extent_t *slab;

	assert(binind < NBINS);
	bin = &arena->bins[binind];
	usize = sz_index2size(binind);

	malloc_mutex_lock(tsdn, &bin->lock);
	if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
		ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
	} else {
		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
	}

	if (ret == NULL) {
		malloc_mutex_unlock(tsdn, &bin->lock);
		return NULL;
	}

	if (config_stats) {
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
		bin->stats.curregs++;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
		prof_idump(tsdn);
	}

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &bin_infos[binind], false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &bin_infos[binind],
			    true);
		}
		memset(ret, 0, usize);
	}

	arena_decay_tick(tsdn, arena);
	return ret;
}

void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero) {
	assert(!tsdn_null(tsdn) || arena != NULL);

	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	}
	if (unlikely(arena == NULL)) {
		return NULL;
	}

	if (likely(size <= SMALL_MAXCLASS)) {
		return arena_malloc_small(tsdn, arena, ind, zero);
	}
	return large_malloc(tsdn, arena, sz_index2size(ind), zero);
}
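/*
 * Alignment dispatch: small requests whose alignment is already implied by
 * slab layout (sub-page alignment, or a page-multiple size with exactly page
 * alignment) take the ordinary small path.  Everything else is served as a
 * large allocation; any alignment up to CACHELINE is trivially satisfied by
 * page-aligned large extents, and only stricter alignments need
 * large_palloc().
 */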
void *
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache) {
	void *ret;

	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
	    && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special slab placement. */
		ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	} else {
		if (likely(alignment <= CACHELINE)) {
			ret = large_malloc(tsdn, arena, usize, zero);
		} else {
			ret = large_palloc(tsdn, arena, usize, alignment, zero);
		}
	}
	return ret;
}

void
arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
	assert(usize <= SMALL_MAXCLASS);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
	arena_t *arena = extent_arena_get(extent);

	szind_t szind = sz_size2index(usize);
	extent_szind_set(extent, szind);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    szind, false);

	prof_accum_cancel(tsdn, &arena->prof_accum, usize);

	assert(isalloc(tsdn, ptr) == usize);
}

static size_t
arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
	cassert(config_prof);
	assert(ptr != NULL);

	extent_szind_set(extent, NBINS);
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    NBINS, false);

	assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);

	return LARGE_MINCLASS;
}

void
arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    bool slow_path) {
	cassert(config_prof);
	assert(opt_prof);

	extent_t *extent = iealloc(tsdn, ptr);
	size_t usize = arena_prof_demote(tsdn, extent, ptr);
	if (usize <= tcache_maxclass) {
		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
		    sz_size2index(usize), slow_path);
	} else {
		large_dalloc(tsdn, extent);
	}
}
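/*
 * A slab is tracked in exactly one place at a time: as bin->slabcur, in the
 * slabs_nonfull heap, or in the slabs_full list (except that single-region
 * slabs skip slabs_nonfull entirely, going straight from empty to full).
 * The helpers below move slabs between these states.
 */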
static void
arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
	/* Dissociate slab from bin. */
	if (slab == bin->slabcur) {
		bin->slabcur = NULL;
	} else {
		szind_t binind = extent_szind_get(slab);
		const bin_info_t *bin_info = &bin_infos[binind];

		/*
		 * The following block's conditional is necessary because if
		 * the slab only contains one region, then it never gets
		 * inserted into the non-full slabs heap.
		 */
		if (bin_info->nregs == 1) {
			arena_bin_slabs_full_remove(arena, bin, slab);
		} else {
			arena_bin_slabs_nonfull_remove(bin, slab);
		}
	}
}

static void
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin) {
	assert(slab != bin->slabcur);

	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	arena_slab_dalloc(tsdn, arena, slab);
	/****************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats) {
		bin->stats.curslabs--;
	}
}

static void
arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin) {
	assert(extent_nfree_get(slab) > 0);

	/*
	 * Make sure that if bin->slabcur is non-NULL, it refers to the
	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
	 * than proactively keeping it pointing at the oldest/lowest non-full
	 * slab.
	 */
	if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
		/* Switch slabcur. */
		if (extent_nfree_get(bin->slabcur) > 0) {
			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
		} else {
			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		}
		bin->slabcur = slab;
		if (config_stats) {
			bin->stats.reslabs++;
		}
	} else {
		arena_bin_slabs_nonfull_insert(bin, slab);
	}
}
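/*
 * Region deallocation drives two slab state transitions: a slab whose
 * regions are now all free is dissociated from the bin and returned to the
 * extent layer, and a previously full slab (nfree going 0 -> 1) moves from
 * slabs_full back into the non-full set via arena_bin_lower_slab().
 */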
static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    void *ptr, bool junked) {
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	szind_t binind = extent_szind_get(slab);
	bin_t *bin = &arena->bins[binind];
	const bin_info_t *bin_info = &bin_infos[binind];

	if (!junked && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, bin_info);
	}

	arena_slab_reg_dalloc(slab, slab_data, ptr);
	unsigned nfree = extent_nfree_get(slab);
	if (nfree == bin_info->nregs) {
		arena_dissociate_bin_slab(arena, slab, bin);
		arena_dalloc_bin_slab(tsdn, arena, slab, bin);
	} else if (nfree == 1 && slab != bin->slabcur) {
		arena_bin_slabs_full_remove(arena, bin, slab);
		arena_bin_lower_slab(tsdn, arena, slab, bin);
	}

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}
}

void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    void *ptr) {
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
}

static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
	szind_t binind = extent_szind_get(extent);
	bin_t *bin = &arena->bins[binind];

	malloc_mutex_lock(tsdn, &bin->lock);
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
	malloc_mutex_unlock(tsdn, &bin->lock);
}

void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
	extent_t *extent = iealloc(tsdn, ptr);
	arena_t *arena = extent_arena_get(extent);

	arena_dalloc_bin(tsdn, arena, extent, ptr);
	arena_decay_tick(tsdn, arena);
}
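/*
 * Note the inverted return convention: false means the allocation was
 * resized in place, true means it must be moved.  A caller sketch
 * (illustrative only; this mirrors how arena_ralloc() below uses it):
 *
 *	if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) {
 *		return ptr;	// resized in place
 *	}
 *	// fall back to allocate-copy-free
 */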
bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero) {
	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= LARGE_MAXCLASS);

	if (unlikely(size > LARGE_MAXCLASS)) {
		return true;
	}

	extent_t *extent = iealloc(tsdn, ptr);
	size_t usize_min = sz_s2u(size);
	size_t usize_max = sz_s2u(size + extra);
	if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
		/*
		 * Avoid moving the allocation if the size class can be left
		 * the same.
		 */
		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
		    oldsize);
		if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
		    sz_size2index(oldsize)) && (size > oldsize || usize_max <
		    oldsize)) {
			return true;
		}

		arena_decay_tick(tsdn, extent_arena_get(extent));
		return false;
	} else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
		return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
		    zero);
	}

	return true;
}

static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	if (alignment == 0) {
		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	}
	usize = sz_sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
		return NULL;
	}
	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
}
void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache) {
	size_t usize = sz_s2u(size);
	if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
		return NULL;
	}

	if (likely(usize <= SMALL_MAXCLASS)) {
		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) {
			return ptr;
		}
	}

	if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
		return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize,
		    alignment, zero, tcache);
	}

	/*
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and
	 * copying.
	 */
	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero, tcache);
	if (ret == NULL) {
		return NULL;
	}

	/*
	 * Junk/zero-filling were already done by
	 * ipalloc()/arena_malloc().
	 */

	size_t copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
	return ret;
}

dss_prec_t
arena_dss_prec_get(arena_t *arena) {
	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
}

bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}

ssize_t
arena_dirty_decay_ms_default_get(void) {
	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

ssize_t
arena_muzzy_decay_ms_default_get(void) {
	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}
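/*
 * In the limit check below, sz_psz2ind(limit + 1) - 1 rounds limit down to
 * the largest page-size class that does not exceed it.  For example
 * (illustrative; exact class spacing depends on configuration), a requested
 * 10 KiB limit on a 4 KiB-page system maps down to the 8 KiB class.
 */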
bool
arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
    size_t *new_limit) {
	assert(opt_retain);

	pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
	if (new_limit != NULL) {
		size_t limit = *new_limit;
		/* Grow no more than the new limit. */
		if ((new_ind = sz_psz2ind(limit + 1) - 1) >
		    EXTENT_GROW_MAX_PIND) {
			return true;
		}
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
	if (old_limit != NULL) {
		*old_limit = sz_pind2sz(arena->retain_grow_limit);
	}
	if (new_limit != NULL) {
		arena->retain_grow_limit = new_ind;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);

	return false;
}

unsigned
arena_nthreads_get(arena_t *arena, bool internal) {
	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
}

void
arena_nthreads_inc(arena_t *arena, bool internal) {
	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

void
arena_nthreads_dec(arena_t *arena, bool internal) {
	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

size_t
arena_extent_sn_next(arena_t *arena) {
	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
}
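/*
 * Arena 0 is special: it is created during bootstrap from the global base
 * allocator (b0get()), before tsd is fully usable.  Every later arena gets a
 * base of its own, which is what lets arena_destroy() unmap all metadata the
 * arena ever allocated simply by deleting that base.
 */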
arena_t *
arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;
	base_t *base;
	unsigned i;

	if (ind == 0) {
		base = b0get();
	} else {
		base = base_new(tsdn, ind, extent_hooks);
		if (base == NULL) {
			return NULL;
		}
	}

	arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
	if (arena == NULL) {
		goto label_error;
	}

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	arena->last_thd = NULL;

	if (config_stats) {
		if (arena_stats_init(tsdn, &arena->stats)) {
			goto label_error;
		}

		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
			goto label_error;
		}
	}

	if (config_prof) {
		if (prof_accum_init(tsdn, &arena->prof_accum)) {
			goto label_error;
		}
	}

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena
		 * reduces the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability.  For debug builds, instead use a
		 * deterministic seed.
		 */
		atomic_store_zu(&arena->offset_state, config_debug ? ind :
		    (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
	}

	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);

	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
	    ATOMIC_RELAXED);

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);

	extent_list_init(&arena->large);
	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/*
	 * Delay coalescing for dirty extents despite the disruptive effect on
	 * memory layout for best-fit extent allocation, since cached extents
	 * are likely to be reused soon after deallocation, and the cost of
	 * merging/splitting extents is non-trivial.
	 */
	if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
	    true)) {
		goto label_error;
	}
	/*
	 * Coalesce muzzy extents immediately, because operations on them are
	 * in the critical path much less often than for dirty extents.
	 */
	if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
	    false)) {
		goto label_error;
	}
	/*
	 * Coalesce retained extents immediately, in part because they will
	 * never be evicted (and therefore there's no opportunity for delayed
	 * coalescing), but also because operations on retained extents are not
	 * in the critical path.
	 */
1855*8e33eff8Schristos */ 1856*8e33eff8Schristos if (extents_init(tsdn, &arena->extents_retained, extent_state_retained, 1857*8e33eff8Schristos false)) { 1858*8e33eff8Schristos goto label_error; 1859*8e33eff8Schristos } 1860*8e33eff8Schristos 1861*8e33eff8Schristos if (arena_decay_init(&arena->decay_dirty, 1862*8e33eff8Schristos arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) { 1863*8e33eff8Schristos goto label_error; 1864*8e33eff8Schristos } 1865*8e33eff8Schristos if (arena_decay_init(&arena->decay_muzzy, 1866*8e33eff8Schristos arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) { 1867*8e33eff8Schristos goto label_error; 1868*8e33eff8Schristos } 1869*8e33eff8Schristos 1870*8e33eff8Schristos arena->extent_grow_next = sz_psz2ind(HUGEPAGE); 1871*8e33eff8Schristos arena->retain_grow_limit = EXTENT_GROW_MAX_PIND; 1872*8e33eff8Schristos if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow", 1873*8e33eff8Schristos WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { 1874*8e33eff8Schristos goto label_error; 1875*8e33eff8Schristos } 1876*8e33eff8Schristos 1877*8e33eff8Schristos extent_avail_new(&arena->extent_avail); 1878*8e33eff8Schristos if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail", 1879*8e33eff8Schristos WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) { 1880*8e33eff8Schristos goto label_error; 1881*8e33eff8Schristos } 1882*8e33eff8Schristos 1883*8e33eff8Schristos /* Initialize bins. */ 1884*8e33eff8Schristos for (i = 0; i < NBINS; i++) { 1885*8e33eff8Schristos bool err = bin_init(&arena->bins[i]); 1886*8e33eff8Schristos if (err) { 1887*8e33eff8Schristos goto label_error; 1888*8e33eff8Schristos } 1889*8e33eff8Schristos } 1890*8e33eff8Schristos 1891*8e33eff8Schristos arena->base = base; 1892*8e33eff8Schristos /* Set arena before creating background threads. */ 1893*8e33eff8Schristos arena_set(ind, arena); 1894*8e33eff8Schristos 1895*8e33eff8Schristos nstime_init(&arena->create_time, 0); 1896*8e33eff8Schristos nstime_update(&arena->create_time); 1897*8e33eff8Schristos 1898*8e33eff8Schristos /* We don't support reentrancy for arena 0 bootstrapping. */ 1899*8e33eff8Schristos if (ind != 0) { 1900*8e33eff8Schristos /* 1901*8e33eff8Schristos * If we're here, then arena 0 already exists, so bootstrapping 1902*8e33eff8Schristos * is done enough that we should have tsd. 
void
arena_boot(void) {
	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
#define REGIND_bin_yes(index, reg_size)					\
	div_init(&arena_binind_div_info[(index)], (reg_size));
#define REGIND_bin_no(index, reg_size)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs,		\
    lg_delta_lookup)							\
	REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta << lg_delta))
	SIZE_CLASSES
#undef REGIND_bin_yes
#undef REGIND_bin_no
#undef SC
}

void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
	if (config_stats) {
		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
	extents_prefork(tsdn, &arena->extents_dirty);
	extents_prefork(tsdn, &arena->extents_muzzy);
	extents_prefork(tsdn, &arena->extents_retained);
}

void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
}

void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
	base_prefork(tsdn, arena->base);
}

void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->large_mtx);
}

void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
	for (unsigned i = 0; i < NBINS; i++) {
		bin_prefork(tsdn, &arena->bins[i]);
	}
}
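/*
 * The postfork handlers release (parent) or reinitialize (child) the same
 * locks that the arena_prefork[0-7]() phases acquired, in roughly reverse
 * acquisition order, so that fork() cannot observe any arena lock in an
 * inconsistent state.
 */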
void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	for (i = 0; i < NBINS; i++) {
		bin_postfork_parent(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
	base_postfork_parent(tsdn, arena->base);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
	extents_postfork_parent(tsdn, &arena->extents_dirty);
	extents_postfork_parent(tsdn, &arena->extents_muzzy);
	extents_postfork_parent(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, false);
	}
	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, true);
	}
	if (config_stats) {
		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
		if (tcache != NULL && tcache->arena == arena) {
			ql_elm_new(tcache, link);
			ql_tail_insert(&arena->tcache_ql, tcache, link);
			cache_bin_array_descriptor_init(
			    &tcache->cache_bin_array_descriptor,
			    tcache->bins_small, tcache->bins_large);
			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
			    &tcache->cache_bin_array_descriptor, link);
		}
	}

	for (i = 0; i < NBINS; i++) {
		bin_postfork_child(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
	base_postfork_child(tsdn, arena->base);
	malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
	extents_postfork_child(tsdn, &arena->extents_dirty);
	extents_postfork_child(tsdn, &arena->extents_muzzy);
	extents_postfork_child(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
	}
}