#define JEMALLOC_LARGE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/

void *
large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
	assert(usize == sz_s2u(usize));

	return large_palloc(tsdn, arena, usize, CACHELINE, zero);
}

void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero) {
	size_t ausize;
	extent_t *extent;
	bool is_zeroed;
	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

	assert(!tsdn_null(tsdn) || arena != NULL);

	ausize = sz_sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) {
		return NULL;
	}

	if (config_fill && unlikely(opt_zero)) {
		zero = true;
	}
	/*
	 * Copy zero into is_zeroed and pass the copy when allocating the
	 * extent, so that it is possible to make correct junk/zero fill
	 * decisions below, even if is_zeroed ends up true when zero is false.
	 */
	is_zeroed = zero;
	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	}
	if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
	    arena, usize, alignment, &is_zeroed)) == NULL) {
		return NULL;
	}

	/* See comments in arena_bin_slabs_full_insert(). */
	if (!arena_is_auto(arena)) {
		/* Insert extent into large. */
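		/*
		 * Tracking large allocations is required by arena_reset(),
		 * which is not allowed for auto arenas; skipping the list
		 * update for auto arenas also avoids touching the extent
		 * linkage (and taking large_mtx) on the common path.
		 */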
		malloc_mutex_lock(tsdn, &arena->large_mtx);
		extent_list_append(&arena->large, extent);
		malloc_mutex_unlock(tsdn, &arena->large_mtx);
	}
	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
		prof_idump(tsdn);
	}

	if (zero) {
		assert(is_zeroed);
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
		    extent_usize_get(extent));
	}

	arena_decay_tick(tsdn, arena);
	return extent_addr_get(extent);
}

static void
large_dalloc_junk_impl(void *ptr, size_t size) {
	memset(ptr, JEMALLOC_FREE_JUNK, size);
}
large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl;

static void
large_dalloc_maybe_junk_impl(void *ptr, size_t size) {
	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the extent isn't about to be
		 * unmapped.
		 */
		if (opt_retain || (have_dss && extent_in_dss(ptr))) {
			large_dalloc_junk(ptr, size);
		}
	}
}
large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
    large_dalloc_maybe_junk_impl;

static bool
large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t diff = extent_size_get(extent) - (usize + sz_large_pad);

	assert(oldusize > usize);

	if (extent_hooks->split == NULL) {
		return true;
	}

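	/*
	 * Shrinking in place splits the trailing space off as a separate
	 * extent and returns it to the arena's dirty extents, where decay can
	 * reuse or purge it; the allocation keeps its original base address.
	 */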
	/* Split excess pages. */
	if (diff != 0) {
		extent_t *trail = extent_split_wrapper(tsdn, arena,
		    &extent_hooks, extent, usize + sz_large_pad,
		    sz_size2index(usize), false, diff, NSIZES, false);
		if (trail == NULL) {
			return true;
		}

		if (config_fill && unlikely(opt_junk_free)) {
			large_dalloc_maybe_junk(extent_addr_get(trail),
			    extent_size_get(trail));
		}

		arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
	}

	arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);

	return false;
}

static bool
large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
    bool zero) {
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t trailsize = usize - oldusize;

	if (extent_hooks->merge == NULL) {
		return true;
	}

	if (config_fill && unlikely(opt_zero)) {
		zero = true;
	}
	/*
	 * Copy zero into is_zeroed_trail and pass the copy when allocating the
	 * extent, so that it is possible to make correct junk/zero fill
	 * decisions below, even if is_zeroed_trail ends up true when zero is
	 * false.
	 */
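	/*
	 * The trail space is looked up in order of increasing cost: recycled
	 * dirty extents first, then muzzy extents, and finally
	 * extent_alloc_wrapper(), which may create a new mapping.
	 */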
	bool is_zeroed_trail = zero;
	bool commit = true;
	extent_t *trail;
	bool new_mapping;
	if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, extent_past_get(extent), trailsize, 0,
	    CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL
	    || (trail = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
	    CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL) {
		if (config_stats) {
			new_mapping = false;
		}
	} else {
		if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
		    extent_past_get(extent), trailsize, 0, CACHELINE, false,
		    NSIZES, &is_zeroed_trail, &commit)) == NULL) {
			return true;
		}
		if (config_stats) {
			new_mapping = true;
		}
	}

	if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
		extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
		return true;
	}
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	szind_t szind = sz_size2index(usize);
	extent_szind_set(extent, szind);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_addr_get(extent), szind, false);

	if (config_stats && new_mapping) {
		arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
	}

	if (zero) {
		if (config_cache_oblivious) {
			/*
			 * Zero the trailing bytes of the original allocation's
			 * last page, since they are in an indeterminate state.
			 * There will always be trailing bytes, because ptr's
			 * offset from the beginning of the extent is a multiple
			 * of CACHELINE in [0 .. PAGE).
			 */
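			/*
			 * Hypothetical example: with PAGE == 4096 and the
			 * allocation starting 1024 bytes into its first page,
			 * oldusize == 16384 leaves zbase 1024 bytes past a
			 * page boundary, so nzero == 3072 bytes are zeroed,
			 * up to zpast, the next page boundary.
			 */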
			void *zbase = (void *)
			    ((uintptr_t)extent_addr_get(extent) + oldusize);
			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
			    PAGE));
			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
			assert(nzero > 0);
			memset(zbase, 0, nzero);
		}
		assert(is_zeroed_trail);
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
		    JEMALLOC_ALLOC_JUNK, usize - oldusize);
	}

	arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);

	return false;
}

bool
large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t usize_max, bool zero) {
	size_t oldusize = extent_usize_get(extent);

	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(oldusize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS);

	if (usize_max > oldusize) {
		/* Attempt to expand the allocation in-place. */
		if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && usize_min > oldusize &&
		    !large_ralloc_no_move_expand(tsdn, extent, usize_min,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
	}

	/*
	 * Avoid moving the allocation if the existing extent size accommodates
	 * the new size.
	 */
	if (oldusize >= usize_min && oldusize <= usize_max) {
		arena_decay_tick(tsdn, extent_arena_get(extent));
		return false;
	}

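	/*
	 * Only shrinking remains; if it fails, report failure (true, per the
	 * jemalloc convention that no-move reallocation returns false on
	 * success) so that the caller falls back to allocate-copy-free.
	 */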
	/* Attempt to shrink the allocation in-place. */
	if (oldusize > usize_max) {
		if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
	}
	return true;
}

static void *
large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero) {
	if (alignment <= CACHELINE) {
		return large_malloc(tsdn, arena, usize, zero);
	}
	return large_palloc(tsdn, arena, usize, alignment, zero);
}

void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	size_t oldusize = extent_usize_get(extent);

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(oldusize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS);

	/* Try to avoid moving the allocation. */
	if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
		return extent_addr_get(extent);
	}

	/*
	 * usize and old size are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero);
	if (ret == NULL) {
		return NULL;
	}

	size_t copysize = (usize < oldusize) ? usize : oldusize;
	memcpy(ret, extent_addr_get(extent), copysize);
	isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
	return ret;
}

/*
 * junked_locked indicates whether the extent's data have been junk-filled, and
 * whether the arena's large_mtx is currently held.
 */
static void
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    bool junked_locked) {
	if (!junked_locked) {
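		/*
		 * Ordinary deallocation: the contents have not been junk
		 * filled yet and large_mtx is not held, so both pieces of
		 * bookkeeping happen here.
		 */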
		/* See comments in arena_bin_slabs_full_insert(). */
		if (!arena_is_auto(arena)) {
			malloc_mutex_lock(tsdn, &arena->large_mtx);
			extent_list_remove(&arena->large, extent);
			malloc_mutex_unlock(tsdn, &arena->large_mtx);
		}
		large_dalloc_maybe_junk(extent_addr_get(extent),
		    extent_usize_get(extent));
	} else {
		malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
		if (!arena_is_auto(arena)) {
			extent_list_remove(&arena->large, extent);
		}
	}
	arena_extent_dalloc_large_prep(tsdn, arena, extent);
}

static void
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
}

void
large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
}

void
large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
}

void
large_dalloc(tsdn_t *tsdn, extent_t *extent) {
	arena_t *arena = extent_arena_get(extent);
	large_dalloc_prep_impl(tsdn, arena, extent, false);
	large_dalloc_finish_impl(tsdn, arena, extent);
	arena_decay_tick(tsdn, arena);
}

size_t
large_salloc(tsdn_t *tsdn, const extent_t *extent) {
	return extent_usize_get(extent);
}

prof_tctx_t *
large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
	return extent_prof_tctx_get(extent);
}

void
large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
	extent_prof_tctx_set(extent, tctx);
}

void
large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
	large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
}
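/*
 * Explanatory note (not an upstream comment): the (uintptr_t)1U value stored
 * by large_prof_tctx_reset() above is the sentinel jemalloc uses to mark an
 * allocation as not sampled by the heap profiler; profiling code compares
 * against this value rather than dereferencing it.
 */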