#include "test/jemalloc_test.h"

static unsigned
get_nsizes_impl(const char *cmd) {
	unsigned ret;
	size_t z;

	z = sizeof(unsigned);
	assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
	    "Unexpected mallctl(\"%s\", ...) failure", cmd);

	return ret;
}

static unsigned
get_nlarge(void) {
	return get_nsizes_impl("arenas.nlextents");
}

static size_t
get_size_impl(const char *cmd, size_t ind) {
	size_t ret;
	size_t z;
	size_t mib[4];
	size_t miblen = 4;

	z = sizeof(size_t);
	assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
	mib[2] = ind;
	z = sizeof(size_t);
	assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);

	return ret;
}

static size_t
get_large_size(size_t ind) {
	return get_size_impl("arenas.lextent.0.size", ind);
}

/*
 * On systems which can't merge extents, tests that call this function generate
 * a lot of dirty memory very quickly.  Purging between cycles mitigates
 * potential OOM on e.g. 32-bit Windows.
 */
static void
purge(void) {
	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl error");
}

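/*
 * mallocx() requests that exceed the largest supported size class, or that
 * cannot be represented internally (> PTRDIFF_MAX), must fail by returning
 * NULL rather than wrapping around.
 */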
TEST_BEGIN(test_overflow) {
	size_t largemax;

	largemax = get_large_size(get_nlarge()-1);

	assert_ptr_null(mallocx(largemax+1, 0),
	    "Expected OOM for mallocx(size=%#zx, 0)", largemax+1);

	assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
	    "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);

	assert_ptr_null(mallocx(SIZE_T_MAX, 0),
	    "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX);

	assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
	    "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))",
	    ZU(PTRDIFF_MAX)+1);
}
TEST_END

TEST_BEGIN(test_oom) {
	size_t largemax;
	bool oom;
	void *ptrs[3];
	unsigned i;

	/*
	 * It should be impossible to allocate three objects that each consume
	 * nearly half the virtual address space.
	 */
	largemax = get_large_size(get_nlarge()-1);
	oom = false;
	for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
		ptrs[i] = mallocx(largemax, 0);
		if (ptrs[i] == NULL) {
			oom = true;
		}
	}
	assert_true(oom,
	    "Expected OOM during series of calls to mallocx(size=%zu, 0)",
	    largemax);
	for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
		if (ptrs[i] != NULL) {
			dallocx(ptrs[i], 0);
		}
	}
	purge();

#if LG_SIZEOF_PTR == 3
	assert_ptr_null(mallocx(0x8000000000000000ULL,
	    MALLOCX_ALIGN(0x8000000000000000ULL)),
	    "Expected OOM for mallocx()");
	assert_ptr_null(mallocx(0x8000000000000000ULL,
	    MALLOCX_ALIGN(0x80000000)),
	    "Expected OOM for mallocx()");
#else
	assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)),
	    "Expected OOM for mallocx()");
#endif
}
TEST_END

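/*
 * Walk size classes from 1 byte up to MAXSZ and check that nallocx(),
 * mallocx(), and sallocx() agree on the usable size, both with and without
 * MALLOCX_ZERO.
 */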
TEST_BEGIN(test_basic) {
#define MAXSZ (((size_t)1) << 23)
	size_t sz;

	for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
		size_t nsz, rsz;
		void *p;
		nsz = nallocx(sz, 0);
		assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
		p = mallocx(sz, 0);
		assert_ptr_not_null(p,
		    "Unexpected mallocx(size=%zx, flags=0) error", sz);
		rsz = sallocx(p, 0);
		assert_zu_ge(rsz, sz, "Real size smaller than expected");
		assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
		dallocx(p, 0);

		p = mallocx(sz, 0);
		assert_ptr_not_null(p,
		    "Unexpected mallocx(size=%zx, flags=0) error", sz);
		dallocx(p, 0);

		nsz = nallocx(sz, MALLOCX_ZERO);
		assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
		p = mallocx(sz, MALLOCX_ZERO);
		assert_ptr_not_null(p,
		    "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error",
		    nsz);
		rsz = sallocx(p, 0);
		assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
		dallocx(p, 0);
		purge();
	}
#undef MAXSZ
}
TEST_END

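/*
 * For each power-of-two alignment up to MAXALIGN, allocate a range of sizes
 * and verify that the result honors MALLOCX_ALIGN() and that nallocx() and
 * sallocx() agree on the usable size.
 */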
TEST_BEGIN(test_alignment_and_size) {
	const char *percpu_arena;
	size_t sz = sizeof(percpu_arena);

	if (mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) ||
	    strcmp(percpu_arena, "disabled") != 0) {
		test_skip("test_alignment_and_size skipped: "
		    "not working with percpu arena.");
	}
#define MAXALIGN (((size_t)1) << 23)
#define NITER 4
	size_t nsz, rsz, alignment, total;
	unsigned i;
	void *ps[NITER];

	for (i = 0; i < NITER; i++) {
		ps[i] = NULL;
	}

	for (alignment = 8;
	    alignment <= MAXALIGN;
	    alignment <<= 1) {
		total = 0;
		for (sz = 1;
		    sz < 3 * alignment && sz < (1U << 31);
		    sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
			for (i = 0; i < NITER; i++) {
				nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
				    MALLOCX_ZERO);
				assert_zu_ne(nsz, 0,
				    "nallocx() error for alignment=%zu, "
				    "size=%zu (%#zx)", alignment, sz, sz);
				ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
				    MALLOCX_ZERO);
				assert_ptr_not_null(ps[i],
				    "mallocx() error for alignment=%zu, "
				    "size=%zu (%#zx)", alignment, sz, sz);
				rsz = sallocx(ps[i], 0);
				assert_zu_ge(rsz, sz,
				    "Real size smaller than expected for "
				    "alignment=%zu, size=%zu", alignment, sz);
				assert_zu_eq(nsz, rsz,
				    "nallocx()/sallocx() size mismatch for "
				    "alignment=%zu, size=%zu", alignment, sz);
				assert_ptr_null(
				    (void *)((uintptr_t)ps[i] & (alignment-1)),
				    "%p inadequately aligned for"
				    " alignment=%zu, size=%zu", ps[i],
				    alignment, sz);
				total += rsz;
				if (total >= (MAXALIGN << 1)) {
					break;
				}
			}
			for (i = 0; i < NITER; i++) {
				if (ps[i] != NULL) {
					dallocx(ps[i], 0);
					ps[i] = NULL;
				}
			}
		}
		purge();
	}
#undef MAXALIGN
#undef NITER
}
TEST_END

int
main(void) {
	return test(
	    test_overflow,
	    test_oom,
	    test_basic,
	    test_alignment_and_size);
}