/*
 * Verify that an arena's retained memory stays within the bound implied
 * by its extent growth sequence while worker threads allocate from it
 * in lockstep rounds.
 */

#include "test/jemalloc_test.h"

#include "jemalloc/internal/spin.h"

static unsigned arena_ind;
static size_t sz;
static size_t esz;
#define NEPOCHS 8
#define PER_THD_NALLOCS 1
static atomic_u_t epoch;
static atomic_u_t nfinished;

/* Create an arena, optionally with custom extent hooks. */
static unsigned
do_arena_create(extent_hooks_t *h) {
	unsigned arena_ind;
	size_t sz = sizeof(unsigned);
	assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
	    (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
	    "Unexpected mallctl() failure");
	return arena_ind;
}

static void
do_arena_destroy(unsigned arena_ind) {
	size_t mib[3];
	size_t miblen;

	miblen = sizeof(mib) / sizeof(size_t);
	assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}

/* Advance the stats epoch so that subsequent stats reads are current. */
static void
do_refresh(void) {
	uint64_t epoch = 1;
	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
	    sizeof(epoch)), 0, "Unexpected mallctl() failure");
}

static size_t
do_get_size_impl(const char *cmd, unsigned arena_ind) {
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t z = sizeof(size_t);

	assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
	mib[2] = arena_ind;
	size_t size;
	assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
	    0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd);

	return size;
}

static size_t
do_get_active(unsigned arena_ind) {
	return do_get_size_impl("stats.arenas.0.pactive", arena_ind) * PAGE;
}

static size_t
do_get_mapped(unsigned arena_ind) {
	return do_get_size_impl("stats.arenas.0.mapped", arena_ind);
}
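
/*
 * Worker threads synchronize with the main thread through two atomics:
 * the main thread publishes each allocation round by storing the round
 * number to "epoch" (release), and each worker acknowledges completion
 * by incrementing "nfinished" (release).  The matching acquire loads
 * ensure that the workers' allocations happen-before the main thread's
 * stats checks.
 */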
static void *
thd_start(void *arg) {
	for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) {
		/* Busy-wait for next epoch. */
		unsigned cur_epoch;
		spin_t spinner = SPIN_INITIALIZER;
		while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) !=
		    next_epoch) {
			spin_adaptive(&spinner);
		}
		assert_u_eq(cur_epoch, next_epoch, "Unexpected epoch");

		/*
		 * Allocate.  The main thread will reset the arena, so there's
		 * no need to deallocate.
		 */
		for (unsigned i = 0; i < PER_THD_NALLOCS; i++) {
			void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) |
			    MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p,
			    "Unexpected mallocx() failure");
		}

		/* Let the main thread know we've finished this iteration. */
		atomic_fetch_add_u(&nfinished, 1, ATOMIC_RELEASE);
	}

	return NULL;
}

TEST_BEGIN(test_retained) {
	test_skip_if(!config_stats);

	arena_ind = do_arena_create(NULL);
	sz = nallocx(HUGEPAGE, 0);
	esz = sz + sz_large_pad;

	atomic_store_u(&epoch, 0, ATOMIC_RELAXED);

	unsigned nthreads = ncpus * 2;
	VARIABLE_ARRAY(thd_t, threads, nthreads);
	for (unsigned i = 0; i < nthreads; i++) {
		thd_create(&threads[i], thd_start, NULL);
	}

	for (unsigned e = 1; e < NEPOCHS; e++) {
		atomic_store_u(&nfinished, 0, ATOMIC_RELEASE);
		atomic_store_u(&epoch, e, ATOMIC_RELEASE);

		/* Wait for threads to finish allocating. */
		spin_t spinner = SPIN_INITIALIZER;
		while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) {
			spin_adaptive(&spinner);
		}

		/*
		 * Assert that retained is no more than the sum of size
		 * classes that should have been used to satisfy the worker
		 * threads' requests, discounting per-growth fragmentation.
		 */
		do_refresh();

		size_t allocated = esz * nthreads * PER_THD_NALLOCS;
		size_t active = do_get_active(arena_ind);
		assert_zu_le(allocated, active, "Unexpected active memory");
		size_t mapped = do_get_mapped(arena_ind);
		assert_zu_le(active, mapped, "Unexpected mapped memory");
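
		/*
		 * The arena grows its mapping through a geometrically
		 * increasing sequence of page size classes;
		 * extent_grow_next is the index of the next class it would
		 * use.  Summing the usable portion of each class consumed
		 * so far therefore bounds the memory the arena can have
		 * retained on behalf of the allocations above.
		 */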
		arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false);
		size_t usable = 0;
		size_t fragmented = 0;
		for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
		    arena->extent_grow_next; pind++) {
			size_t psz = sz_pind2sz(pind);
			size_t psz_fragmented = psz % esz;
			size_t psz_usable = psz - psz_fragmented;
			/*
			 * Only consider size classes that wouldn't be
			 * skipped.
			 */
			if (psz_usable > 0) {
				assert_zu_lt(usable, allocated,
				    "Excessive retained memory "
				    "(%#zx[+%#zx] > %#zx)", usable, psz_usable,
				    allocated);
				fragmented += psz_fragmented;
				usable += psz_usable;
			}
		}

		/*
		 * Clean up arena.  Destroying and recreating the arena is
		 * simpler than specifying extent hooks that deallocate
		 * (rather than retain) during reset.
		 */
		do_arena_destroy(arena_ind);
		assert_u_eq(do_arena_create(NULL), arena_ind,
		    "Unexpected arena index");
	}

	for (unsigned i = 0; i < nthreads; i++) {
		thd_join(threads[i], NULL);
	}

	do_arena_destroy(arena_ind);
}
TEST_END

int
main(void) {
	return test(
	    test_retained);
}
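
/*
 * Note: this test depends on the jemalloc test harness
 * (test/jemalloc_test.h) for test(), TEST_BEGIN/TEST_END, thd_create(),
 * and VARIABLE_ARRAY; it is normally built and run through that harness
 * (e.g. "make check_unit") rather than compiled standalone.
 */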