1 #include "test/jemalloc_test.h" 2 3 #define TEST_UTIL_EINVAL(node, a, b, c, d, why_inval) do { \ 4 assert_d_eq(mallctl("experimental.utilization." node, \ 5 a, b, c, d), EINVAL, "Should fail when " why_inval); \ 6 assert_zu_eq(out_sz, out_sz_ref, \ 7 "Output size touched when given invalid arguments"); \ 8 assert_d_eq(memcmp(out, out_ref, out_sz_ref), 0, \ 9 "Output content touched when given invalid arguments"); \ 10 } while (0) 11 12 #define TEST_UTIL_QUERY_EINVAL(a, b, c, d, why_inval) \ 13 TEST_UTIL_EINVAL("query", a, b, c, d, why_inval) 14 #define TEST_UTIL_BATCH_EINVAL(a, b, c, d, why_inval) \ 15 TEST_UTIL_EINVAL("batch_query", a, b, c, d, why_inval) 16 17 #define TEST_UTIL_VALID(node) do { \ 18 assert_d_eq(mallctl("experimental.utilization." node, \ 19 out, &out_sz, in, in_sz), 0, \ 20 "Should return 0 on correct arguments"); \ 21 expect_zu_eq(out_sz, out_sz_ref, "incorrect output size"); \ 22 expect_d_ne(memcmp(out, out_ref, out_sz_ref), 0, \ 23 "Output content should be changed"); \ 24 } while (0) 25 26 #define TEST_UTIL_BATCH_VALID TEST_UTIL_VALID("batch_query") 27 28 #define TEST_MAX_SIZE (1 << 20) 29 30 TEST_BEGIN(test_query) { 31 size_t sz; 32 /* 33 * Select some sizes that can span both small and large sizes, and are 34 * numerically unrelated to any size boundaries. 35 */ 36 for (sz = 7; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS; 37 sz += (sz <= SC_SMALL_MAXCLASS ? 1009 : 99989)) { 38 void *p = mallocx(sz, 0); 39 void **in = &p; 40 size_t in_sz = sizeof(const void *); 41 size_t out_sz = sizeof(void *) + sizeof(size_t) * 5; 42 void *out = mallocx(out_sz, 0); 43 void *out_ref = mallocx(out_sz, 0); 44 size_t out_sz_ref = out_sz; 45 46 assert_ptr_not_null(p, 47 "test pointer allocation failed"); 48 assert_ptr_not_null(out, 49 "test output allocation failed"); 50 assert_ptr_not_null(out_ref, 51 "test reference output allocation failed"); 52 53 #define SLABCUR_READ(out) (*(void **)out) 54 #define COUNTS(out) ((size_t *)((void **)out + 1)) 55 #define NFREE_READ(out) COUNTS(out)[0] 56 #define NREGS_READ(out) COUNTS(out)[1] 57 #define SIZE_READ(out) COUNTS(out)[2] 58 #define BIN_NFREE_READ(out) COUNTS(out)[3] 59 #define BIN_NREGS_READ(out) COUNTS(out)[4] 60 61 SLABCUR_READ(out) = NULL; 62 NFREE_READ(out) = NREGS_READ(out) = SIZE_READ(out) = -1; 63 BIN_NFREE_READ(out) = BIN_NREGS_READ(out) = -1; 64 memcpy(out_ref, out, out_sz); 65 66 /* Test invalid argument(s) errors */ 67 TEST_UTIL_QUERY_EINVAL(NULL, &out_sz, in, in_sz, 68 "old is NULL"); 69 TEST_UTIL_QUERY_EINVAL(out, NULL, in, in_sz, 70 "oldlenp is NULL"); 71 TEST_UTIL_QUERY_EINVAL(out, &out_sz, NULL, in_sz, 72 "newp is NULL"); 73 TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, 0, 74 "newlen is zero"); 75 in_sz -= 1; 76 TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, 77 "invalid newlen"); 78 in_sz += 1; 79 out_sz_ref = out_sz -= 2 * sizeof(size_t); 80 TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, 81 "invalid *oldlenp"); 82 out_sz_ref = out_sz += 2 * sizeof(size_t); 83 84 /* Examine output for valid call */ 85 TEST_UTIL_VALID("query"); 86 expect_zu_le(sz, SIZE_READ(out), 87 "Extent size should be at least allocation size"); 88 expect_zu_eq(SIZE_READ(out) & (PAGE - 1), 0, 89 "Extent size should be a multiple of page size"); 90 91 /* 92 * We don't do much bin checking if prof is on, since profiling 93 * can produce extents that are for small size classes but not 94 * slabs, which interferes with things like region counts. 95 */ 96 if (!opt_prof && sz <= SC_SMALL_MAXCLASS) { 97 expect_zu_le(NFREE_READ(out), NREGS_READ(out), 98 "Extent free count exceeded region count"); 99 expect_zu_le(NREGS_READ(out), SIZE_READ(out), 100 "Extent region count exceeded size"); 101 expect_zu_ne(NREGS_READ(out), 0, 102 "Extent region count must be positive"); 103 expect_true(NFREE_READ(out) == 0 || (SLABCUR_READ(out) 104 != NULL && SLABCUR_READ(out) <= p), 105 "Allocation should follow first fit principle"); 106 107 if (config_stats) { 108 expect_zu_le(BIN_NFREE_READ(out), 109 BIN_NREGS_READ(out), 110 "Bin free count exceeded region count"); 111 expect_zu_ne(BIN_NREGS_READ(out), 0, 112 "Bin region count must be positive"); 113 expect_zu_le(NFREE_READ(out), 114 BIN_NFREE_READ(out), 115 "Extent free count exceeded bin free count"); 116 expect_zu_le(NREGS_READ(out), 117 BIN_NREGS_READ(out), 118 "Extent region count exceeded " 119 "bin region count"); 120 expect_zu_eq(BIN_NREGS_READ(out) 121 % NREGS_READ(out), 0, 122 "Bin region count isn't a multiple of " 123 "extent region count"); 124 expect_zu_le( 125 BIN_NFREE_READ(out) - NFREE_READ(out), 126 BIN_NREGS_READ(out) - NREGS_READ(out), 127 "Free count in other extents in the bin " 128 "exceeded region count in other extents " 129 "in the bin"); 130 expect_zu_le(NREGS_READ(out) - NFREE_READ(out), 131 BIN_NREGS_READ(out) - BIN_NFREE_READ(out), 132 "Extent utilized count exceeded " 133 "bin utilized count"); 134 } 135 } else if (sz > SC_SMALL_MAXCLASS) { 136 expect_zu_eq(NFREE_READ(out), 0, 137 "Extent free count should be zero"); 138 expect_zu_eq(NREGS_READ(out), 1, 139 "Extent region count should be one"); 140 expect_ptr_null(SLABCUR_READ(out), 141 "Current slab must be null for large size classes"); 142 if (config_stats) { 143 expect_zu_eq(BIN_NFREE_READ(out), 0, 144 "Bin free count must be zero for " 145 "large sizes"); 146 expect_zu_eq(BIN_NREGS_READ(out), 0, 147 "Bin region count must be zero for " 148 "large sizes"); 149 } 150 } 151 152 #undef BIN_NREGS_READ 153 #undef BIN_NFREE_READ 154 #undef SIZE_READ 155 #undef NREGS_READ 156 #undef NFREE_READ 157 #undef COUNTS 158 #undef SLABCUR_READ 159 160 free(out_ref); 161 free(out); 162 free(p); 163 } 164 } 165 TEST_END 166 167 TEST_BEGIN(test_batch) { 168 size_t sz; 169 /* 170 * Select some sizes that can span both small and large sizes, and are 171 * numerically unrelated to any size boundaries. 172 */ 173 for (sz = 17; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS; 174 sz += (sz <= SC_SMALL_MAXCLASS ? 1019 : 99991)) { 175 void *p = mallocx(sz, 0); 176 void *q = mallocx(sz, 0); 177 void *in[] = {p, q}; 178 size_t in_sz = sizeof(const void *) * 2; 179 size_t out[] = {-1, -1, -1, -1, -1, -1}; 180 size_t out_sz = sizeof(size_t) * 6; 181 size_t out_ref[] = {-1, -1, -1, -1, -1, -1}; 182 size_t out_sz_ref = out_sz; 183 184 assert_ptr_not_null(p, "test pointer allocation failed"); 185 assert_ptr_not_null(q, "test pointer allocation failed"); 186 187 /* Test invalid argument(s) errors */ 188 TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz, 189 "old is NULL"); 190 TEST_UTIL_BATCH_EINVAL(out, NULL, in, in_sz, 191 "oldlenp is NULL"); 192 TEST_UTIL_BATCH_EINVAL(out, &out_sz, NULL, in_sz, 193 "newp is NULL"); 194 TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, 0, 195 "newlen is zero"); 196 in_sz -= 1; 197 TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, 198 "newlen is not an exact multiple"); 199 in_sz += 1; 200 out_sz_ref = out_sz -= 2 * sizeof(size_t); 201 TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, 202 "*oldlenp is not an exact multiple"); 203 out_sz_ref = out_sz += 2 * sizeof(size_t); 204 in_sz -= sizeof(const void *); 205 TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, 206 "*oldlenp and newlen do not match"); 207 in_sz += sizeof(const void *); 208 209 /* Examine output for valid calls */ 210 #define TEST_EQUAL_REF(i, message) \ 211 assert_d_eq(memcmp(out + (i) * 3, out_ref + (i) * 3, 3), 0, message) 212 213 #define NFREE_READ(out, i) out[(i) * 3] 214 #define NREGS_READ(out, i) out[(i) * 3 + 1] 215 #define SIZE_READ(out, i) out[(i) * 3 + 2] 216 217 out_sz_ref = out_sz /= 2; 218 in_sz /= 2; 219 TEST_UTIL_BATCH_VALID; 220 expect_zu_le(sz, SIZE_READ(out, 0), 221 "Extent size should be at least allocation size"); 222 expect_zu_eq(SIZE_READ(out, 0) & (PAGE - 1), 0, 223 "Extent size should be a multiple of page size"); 224 /* 225 * See the corresponding comment in test_query; profiling breaks 226 * our slab count expectations. 227 */ 228 if (sz <= SC_SMALL_MAXCLASS && !opt_prof) { 229 expect_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0), 230 "Extent free count exceeded region count"); 231 expect_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0), 232 "Extent region count exceeded size"); 233 expect_zu_ne(NREGS_READ(out, 0), 0, 234 "Extent region count must be positive"); 235 } else if (sz > SC_SMALL_MAXCLASS) { 236 expect_zu_eq(NFREE_READ(out, 0), 0, 237 "Extent free count should be zero"); 238 expect_zu_eq(NREGS_READ(out, 0), 1, 239 "Extent region count should be one"); 240 } 241 TEST_EQUAL_REF(1, 242 "Should not overwrite content beyond what's needed"); 243 in_sz *= 2; 244 out_sz_ref = out_sz *= 2; 245 246 memcpy(out_ref, out, 3 * sizeof(size_t)); 247 TEST_UTIL_BATCH_VALID; 248 TEST_EQUAL_REF(0, "Statistics should be stable across calls"); 249 if (sz <= SC_SMALL_MAXCLASS) { 250 expect_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1), 251 "Extent free count exceeded region count"); 252 } else { 253 expect_zu_eq(NFREE_READ(out, 0), 0, 254 "Extent free count should be zero"); 255 } 256 expect_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1), 257 "Extent region count should be same for same region size"); 258 expect_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1), 259 "Extent size should be same for same region size"); 260 261 #undef SIZE_READ 262 #undef NREGS_READ 263 #undef NFREE_READ 264 265 #undef TEST_EQUAL_REF 266 267 free(q); 268 free(p); 269 } 270 } 271 TEST_END 272 273 int 274 main(void) { 275 assert_zu_lt(SC_SMALL_MAXCLASS + 100000, TEST_MAX_SIZE, 276 "Test case cannot cover large classes"); 277 return test(test_query, test_batch); 278 } 279