#include "test/jemalloc_test.h"

static unsigned
get_nsizes_impl(const char *cmd) {
    unsigned ret;
    size_t z;

    z = sizeof(unsigned);
    expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
        "Unexpected mallctl(\"%s\", ...) failure", cmd);

    return ret;
}

static unsigned
get_nlarge(void) {
    return get_nsizes_impl("arenas.nlextents");
}

static size_t
get_size_impl(const char *cmd, size_t ind) {
    size_t ret;
    size_t z;
    size_t mib[4];
    size_t miblen = 4;

    z = sizeof(size_t);
    expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
        0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
    mib[2] = ind;
    z = sizeof(size_t);
    expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
        0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);

    return ret;
}

static size_t
get_large_size(size_t ind) {
    return get_size_impl("arenas.lextent.0.size", ind);
}

/*
 * On systems which can't merge extents, tests that call this function generate
 * a lot of dirty memory very quickly. Purging between cycles mitigates
 * potential OOM on e.g. 32-bit Windows.
 */
static void
purge(void) {
    expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
        "Unexpected mallctl error");
}

/*
 * GCC's "-Walloc-size-larger-than" warning detects when one of the memory
 * allocation functions is called with a size larger than the maximum size it
 * supports. Here we want to explicitly test that the allocation functions do
 * indeed fail properly when this is the case, which triggers the warning.
 * Therefore we disable the warning for these tests.
 */
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN

TEST_BEGIN(test_overflow) {
    size_t largemax;

    largemax = get_large_size(get_nlarge()-1);

    expect_ptr_null(mallocx(largemax+1, 0),
        "Expected OOM for mallocx(size=%#zx, 0)", largemax+1);

    expect_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
        "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);

    expect_ptr_null(mallocx(SIZE_T_MAX, 0),
        "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX);

    expect_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
        "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))",
        ZU(PTRDIFF_MAX)+1);
}
TEST_END

static void *
remote_alloc(void *arg) {
    unsigned arena;
    size_t sz = sizeof(unsigned);
    expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
        "Unexpected mallctl() failure");
    size_t large_sz;
    sz = sizeof(size_t);
    expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz,
        NULL, 0), 0, "Unexpected mallctl failure");

    void *ptr = mallocx(large_sz, MALLOCX_ARENA(arena)
        | MALLOCX_TCACHE_NONE);
    void **ret = (void **)arg;
    *ret = ptr;

    return NULL;
}
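
/*
 * Allocate a large object in a newly created arena from a helper thread, then
 * free it from the main thread: this exercises the cross-thread ("remote")
 * free path and the subsequent tcache flush.
 */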
TEST_BEGIN(test_remote_free) {
    thd_t thd;
    void *ret;
    thd_create(&thd, remote_alloc, (void *)&ret);
    thd_join(thd, NULL);
    expect_ptr_not_null(ret, "Unexpected mallocx failure");

    /* Avoid TCACHE_NONE to explicitly test tcache_flush(). */
    dallocx(ret, 0);
    mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
}
TEST_END

TEST_BEGIN(test_oom) {
    size_t largemax;
    bool oom;
    void *ptrs[3];
    unsigned i;

    /*
     * It should be impossible to allocate three objects that each consume
     * nearly half the virtual address space.
     */
    largemax = get_large_size(get_nlarge()-1);
    oom = false;
    for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
        ptrs[i] = mallocx(largemax, MALLOCX_ARENA(0));
        if (ptrs[i] == NULL) {
            oom = true;
        }
    }
    expect_true(oom,
        "Expected OOM during series of calls to mallocx(size=%zu, 0)",
        largemax);
    for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
        if (ptrs[i] != NULL) {
            dallocx(ptrs[i], 0);
        }
    }
    purge();

#if LG_SIZEOF_PTR == 3
    expect_ptr_null(mallocx(0x8000000000000000ULL,
        MALLOCX_ALIGN(0x8000000000000000ULL)),
        "Expected OOM for mallocx()");
    expect_ptr_null(mallocx(0x8000000000000000ULL,
        MALLOCX_ALIGN(0x80000000)),
        "Expected OOM for mallocx()");
#else
    expect_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)),
        "Expected OOM for mallocx()");
#endif
}
TEST_END

/* Re-enable the "-Walloc-size-larger-than=" warning */
JEMALLOC_DIAGNOSTIC_POP

TEST_BEGIN(test_basic) {
#define MAXSZ (((size_t)1) << 23)
    size_t sz;

    for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
        size_t nsz, rsz;
        void *p;
        nsz = nallocx(sz, 0);
        expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
        p = mallocx(sz, 0);
        expect_ptr_not_null(p,
            "Unexpected mallocx(size=%zx, flags=0) error", sz);
        rsz = sallocx(p, 0);
        expect_zu_ge(rsz, sz, "Real size smaller than expected");
        expect_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
        dallocx(p, 0);

        p = mallocx(sz, 0);
        expect_ptr_not_null(p,
            "Unexpected mallocx(size=%zx, flags=0) error", sz);
        dallocx(p, 0);

        nsz = nallocx(sz, MALLOCX_ZERO);
        expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
        p = mallocx(sz, MALLOCX_ZERO);
        expect_ptr_not_null(p,
            "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error",
            nsz);
        rsz = sallocx(p, 0);
        expect_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
        dallocx(p, 0);
        purge();
    }
#undef MAXSZ
}
TEST_END
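
/*
 * For each alignment from 8 bytes up to MAXALIGN, allocate several objects
 * over a range of sizes and check that nallocx() and sallocx() agree and that
 * every returned pointer satisfies the requested alignment.
 */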
TEST_BEGIN(test_alignment_and_size) {
    const char *percpu_arena;
    size_t sz = sizeof(percpu_arena);

    if (mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) ||
        strcmp(percpu_arena, "disabled") != 0) {
        test_skip("test_alignment_and_size skipped: "
            "not working with percpu arena.");
    }
#define MAXALIGN (((size_t)1) << 23)
#define NITER 4
    size_t nsz, rsz, alignment, total;
    unsigned i;
    void *ps[NITER];

    for (i = 0; i < NITER; i++) {
        ps[i] = NULL;
    }

    for (alignment = 8;
        alignment <= MAXALIGN;
        alignment <<= 1) {
        total = 0;
        for (sz = 1;
            sz < 3 * alignment && sz < (1U << 31);
            sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
            for (i = 0; i < NITER; i++) {
                nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
                    MALLOCX_ZERO | MALLOCX_ARENA(0));
                expect_zu_ne(nsz, 0,
                    "nallocx() error for alignment=%zu, "
                    "size=%zu (%#zx)", alignment, sz, sz);
                ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
                    MALLOCX_ZERO | MALLOCX_ARENA(0));
                expect_ptr_not_null(ps[i],
                    "mallocx() error for alignment=%zu, "
                    "size=%zu (%#zx)", alignment, sz, sz);
                rsz = sallocx(ps[i], 0);
                expect_zu_ge(rsz, sz,
                    "Real size smaller than expected for "
                    "alignment=%zu, size=%zu", alignment, sz);
                expect_zu_eq(nsz, rsz,
                    "nallocx()/sallocx() size mismatch for "
                    "alignment=%zu, size=%zu", alignment, sz);
                expect_ptr_null(
                    (void *)((uintptr_t)ps[i] & (alignment-1)),
                    "%p inadequately aligned for"
                    " alignment=%zu, size=%zu", ps[i],
                    alignment, sz);
                total += rsz;
                if (total >= (MAXALIGN << 1)) {
                    break;
                }
            }
            for (i = 0; i < NITER; i++) {
                if (ps[i] != NULL) {
                    dallocx(ps[i], 0);
                    ps[i] = NULL;
                }
            }
        }
        purge();
    }
#undef MAXALIGN
#undef NITER
}
TEST_END

int
main(void) {
    return test(
        test_overflow,
        test_oom,
        test_remote_free,
        test_basic,
        test_alignment_and_size);
}