/*
 * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net>
 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return. Poul-Henning Kamp
 */

#include <sys/param.h>	/* PAGE_SHIFT ALIGN */
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <stdint.h>
#include <unistd.h>

#include "archdep.h"
#include "resolve.h"

#if defined(__mips64__)
#define MALLOC_PAGESHIFT	(14U)
#else
#define MALLOC_PAGESHIFT	(PAGE_SHIFT)
#endif

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNK_MASK	15
#define MALLOC_INITIAL_REGIONS	512
#define MALLOC_DEFAULT_CACHE	64
#define MALLOC_CHUNK_LISTS	4

/*
 * When the P option is active, we move allocations between half a page
 * and a whole page towards the end, subject to alignment constraints.
 * This is the extra headroom we allow. Set to zero to be the most
 * strict.
 */
#define MALLOC_LEEWAY		0

#define PAGEROUND(x)	(((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)

/*
 * What to use for Junk. This is the byte value we use to fill with
 * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
 * and SOME_FREEJUNK right before free.
 */
#define SOME_JUNK	0xd0	/* as in "Duh" :-) */
#define SOME_FREEJUNK	0xdf

#define MMAP(sz)	_dl_mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

#define MMAP_ERROR(p)	(_dl_mmap_error(p) ? MAP_FAILED : (p))

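/*
 * For orientation (illustrative figures, assuming a 4 KB page, i.e.
 * PAGE_SHIFT == 12): MALLOC_PAGESIZE is 4096 and MALLOC_MAXSHIFT is 11,
 * so chunk sizes run from MALLOC_MINSIZE (16) up to MALLOC_MAXCHUNK
 * (2048).  Larger requests are served as whole pages; smaller ones
 * share a page of equal-sized chunks.
 */
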
struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
};

LIST_HEAD(chunk_head, chunk_info);

struct dir_info {
	u_int32_t canary1;
	int active;			/* status of malloc */
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_free;		/* number of free slots */
					/* lists of free chunk info structs */
	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
					/* lists of chunks with free slots */
	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
	size_t free_regions_size;	/* free pages cached */
					/* free pages cache */
	struct region_info free_regions[MALLOC_MAXCACHE];
					/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
	size_t rbytesused;		/* random bytes used */
	char *func;			/* current function */
	u_char rbytes[256];		/* random bytes */
	u_short chunk_start;
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
			~MALLOC_PAGEMASK)

/*
 * This structure describes a page worth of chunks.
 *
 * How many bits per u_short in the bitmap
 */
#define MALLOC_BITS	(NBBY * sizeof(u_short))
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;			/* pointer to the page */
	u_int32_t canary;
	u_short size;			/* size of this page's chunks */
	u_short shift;			/* how far to shift for this size */
	u_short free;			/* how many free chunks */
	u_short total;			/* how many chunks */
					/* which chunks are free */
	u_short bits[1];
};

struct malloc_readonly {
	struct dir_info *g_pool;	/* Main bookkeeping information */
	int	malloc_freenow;		/* Free quickly - disable chunk rnd */
	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
	int	malloc_junk;		/* junk fill? */
	int	malloc_move;		/* move allocations to end of page? */
	size_t	malloc_guard;		/* use guard pages after allocations? */
	u_int	malloc_cache;		/* free pages we cache */
	u_int32_t malloc_canary;	/* Matched against ones in g_pool */
};

/* This object is mapped PROT_READ after initialisation to prevent tampering */
static union {
	struct malloc_readonly mopts;
	u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
#define mopts	malloc_readonly.mopts
#define g_pool	mopts.g_pool

static u_char getrbyte(struct dir_info *d);

/*
 * low bits of r->p determine size: 0 means >= page size and r->size holding
 * the real size, otherwise r->size is a shift count, or 1 for malloc(0)
 */
#define REALSIZE(sz, r)						\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz) - 1))))

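/*
 * Worked example of the encoding above (illustrative values): low bits
 * of 0 mean a page-sized or larger region whose byte size sits in
 * r->size; low bits of 1 mean a malloc(0) page, decoded as size 0; low
 * bits of 5 mean a chunk page holding chunks of 1 << (5 - 1) = 16 bytes,
 * with r->size pointing at the page's chunk_info.
 */
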
static inline size_t
hash(void *p)
{
	size_t sum;
	uintptr_t u;

	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u;
	sum = (sum << 7) - sum + (u >> 16);
#ifdef __LP64__
	sum = (sum << 7) - sum + (u >> 32);
	sum = (sum << 7) - sum + (u >> 48);
#endif
	return sum;
}

static void
wrterror(char *msg)
{
	char *q = " error: ";
	struct iovec iov[4];

	iov[0].iov_base = g_pool->func;
	iov[0].iov_len = _dl_strlen(g_pool->func);
	iov[1].iov_base = q;
	iov[1].iov_len = _dl_strlen(q);
	iov[2].iov_base = msg;
	iov[2].iov_len = _dl_strlen(msg);
	iov[3].iov_base = "\n";
	iov[3].iov_len = 1;
	_dl_write(STDERR_FILENO, iov[0].iov_base, iov[0].iov_len);
	_dl_write(STDERR_FILENO, iov[1].iov_base, iov[1].iov_len);
	_dl_write(STDERR_FILENO, iov[2].iov_base, iov[2].iov_len);
	_dl_write(STDERR_FILENO, iov[3].iov_base, iov[3].iov_len);
	_dl_exit(7);
}

static void
rbytes_init(struct dir_info *d)
{
	_dl_arc4randombuf(d->rbytes, sizeof(d->rbytes));
	/* add 1 to account for using d->rbytes[0] */
	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
}

static inline u_char
getrbyte(struct dir_info *d)
{
	u_char x;

	if (d->rbytesused >= sizeof(d->rbytes))
		rbytes_init(d);
	x = d->rbytes[d->rbytesused++];
	return x;
}

/*
 * Cache maintenance. We keep at most malloc_cache pages cached.
 * If the cache is becoming full, unmap pages in the cache for real,
 * and then add the region to the cache.
 * As opposed to the regular region data structure, the sizes in the
 * cache are in MALLOC_PAGESIZE units.
 */

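/*
 * Illustrative walk through unmap() below, assuming malloc_cache is 64
 * pages: if the cache already holds 60 pages and an 8 page region is
 * freed, 4 pages worth of cached regions are unmapped for real, starting
 * at a random slot, and the freed region is then parked in a free slot.
 */
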
static void
unmap(struct dir_info *d, void *p, size_t sz)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	size_t rsz, tounmap;
	struct region_info *r;
	u_int i, offset;

	if (sz != PAGEROUND(sz))
		wrterror("munmap round");

	if (psz > mopts.malloc_cache) {
		if (_dl_munmap(p, sz))
			wrterror("munmap");
		return;
	}
	tounmap = 0;
	rsz = mopts.malloc_cache - d->free_regions_size;
	if (psz > rsz)
		tounmap = psz - rsz;
	offset = getrbyte(d);
	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (_dl_munmap(r->p, rsz))
				wrterror("munmap");
			r->p = NULL;
			if (tounmap > r->size)
				tounmap -= r->size;
			else
				tounmap = 0;
			d->free_regions_size -= r->size;
			r->size = 0;
		}
	}
	if (tounmap > 0)
		wrterror("malloc cache underflow");
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p == NULL) {
			if (mopts.malloc_junk && !mopts.malloc_freeunmap) {
				size_t amt = mopts.malloc_junk == 1 ?
				    MALLOC_MAXCHUNK : sz;
				_dl_memset(p, SOME_FREEJUNK, amt);
			}
			if (mopts.malloc_freeunmap)
				_dl_mprotect(p, sz, PROT_NONE);
			r->p = p;
			r->size = psz;
			d->free_regions_size += psz;
			break;
		}
	}
	if (i == mopts.malloc_cache)
		wrterror("malloc free slot lost");
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror("malloc cache overflow");
}

static void *
map(struct dir_info *d, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	struct region_info *r, *big = NULL;
	u_int i, offset;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt");
	if (sz != PAGEROUND(sz)) {
		wrterror("map round");
		return MAP_FAILED;
	}
	if (psz > d->free_regions_size) {
		p = MMAP(sz);
		p = MMAP_ERROR(p);
		/* zero fill not needed */
		return p;
	}
	offset = getrbyte(d);
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			if (r->size == psz) {
				p = r->p;
				if (mopts.malloc_freeunmap)
					_dl_mprotect(p, sz,
					    PROT_READ | PROT_WRITE);
				r->p = NULL;
				r->size = 0;
				d->free_regions_size -= psz;
				if (zero_fill)
					_dl_memset(p, 0, sz);
				else if (mopts.malloc_junk == 2 &&
				    mopts.malloc_freeunmap)
					_dl_memset(p, SOME_FREEJUNK, sz);
				return p;
			} else if (r->size > psz)
				big = r;
		}
	}
	if (big != NULL) {
		r = big;
		p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
		if (mopts.malloc_freeunmap)
			_dl_mprotect(p, sz, PROT_READ | PROT_WRITE);
		r->size -= psz;
		d->free_regions_size -= psz;
		if (zero_fill)
			_dl_memset(p, 0, sz);
		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
			_dl_memset(p, SOME_FREEJUNK, sz);
		return p;
	}
	p = MMAP(sz);
	p = MMAP_ERROR(p);
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror("malloc cache");
	/* zero fill not needed */
	return p;
}

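/*
 * Note on the fills above (descriptive): memory fresh from MMAP is zero
 * filled by the kernel, so zero_fill needs no extra work on that path.
 * Pages recycled from the cache were junked in unmap() unless
 * malloc_freeunmap hid them behind PROT_NONE instead, which is why the
 * malloc_junk == 2 case re-junks them only when malloc_freeunmap is set.
 */
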
/*
 * Initialize a dir_info, which should have been cleared by caller
 */
static void
omalloc_init(struct dir_info **dp)
{
	char *p;
	int i, j;
	size_t d_avail, regioninfo_size, tmp;
	struct dir_info *d;

	/*
	 * Default options
	 */
	mopts.malloc_junk = 1;
	mopts.malloc_move = 1;
	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
	mopts.malloc_guard = MALLOC_PAGESIZE;

	do {
		_dl_arc4randombuf(&mopts.malloc_canary,
		    sizeof(mopts.malloc_canary));
	} while (mopts.malloc_canary == 0);

	/*
	 * Allocate dir_info with a guard page on either side. Also
	 * randomise the offset inside the page at which the dir_info
	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT).
	 */
	p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2));
	p = MMAP_ERROR(p);
	if (p == MAP_FAILED)
		wrterror("malloc init mmap failed");
	_dl_mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
	_dl_mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
	    MALLOC_PAGESIZE, PROT_NONE);
	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;

	_dl_arc4randombuf(&tmp, sizeof(tmp));
	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
	    ((tmp % d_avail) << MALLOC_MINSHIFT)); /* not uniform */

	rbytes_init(d);
	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
	regioninfo_size = d->regions_total * sizeof(struct region_info);
	d->r = MMAP(regioninfo_size);
	d->r = MMAP_ERROR(d->r);
	if (d->r == MAP_FAILED)
		wrterror("malloc init mmap failed");
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		LIST_INIT(&d->chunk_info_list[i]);
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
			LIST_INIT(&d->chunk_dir[i][j]);
	}
	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
	d->canary2 = ~d->canary1;

	*dp = d;

	/*
	 * Options have been set and will never be reset.
	 * Prevent further tampering with them.
	 */
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		_dl_mprotect(&malloc_readonly, sizeof(malloc_readonly),
		    PROT_READ);
}

static int
omalloc_grow(struct dir_info *d)
{
	size_t newtotal;
	size_t newsize;
	size_t mask;
	size_t i;
	struct region_info *p;

	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
		return 1;

	newtotal = d->regions_total * 2;
	newsize = newtotal * sizeof(struct region_info);
	mask = newtotal - 1;

	p = MMAP(newsize);
	p = MMAP_ERROR(p);
	if (p == MAP_FAILED)
		return 1;

	for (i = 0; i < d->regions_total; i++) {
		void *q = d->r[i].p;
		if (q != NULL) {
			size_t index = hash(q) & mask;
			while (p[index].p != NULL) {
				index = (index - 1) & mask;
			}
			p[index] = d->r[i];
		}
	}
	/* avoid having pages that contain meta info end up in the cache */
	if (_dl_munmap(d->r, d->regions_total * sizeof(struct region_info)))
		wrterror("munmap");
	d->regions_free = d->regions_free + d->regions_total;
	d->regions_total = newtotal;
	d->r = p;
	return 0;
}

static struct chunk_info *
alloc_chunk_info(struct dir_info *d, int bits)
{
	struct chunk_info *p;
	size_t size, count;

	if (bits == 0)
		count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
	else
		count = MALLOC_PAGESIZE >> bits;

	size = howmany(count, MALLOC_BITS);
	size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
	size = ALIGN(size);

	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
		char *q;
		int i;

		q = MMAP(MALLOC_PAGESIZE);
		q = MMAP_ERROR(q);
		if (q == MAP_FAILED)
			return NULL;
		count = MALLOC_PAGESIZE / size;
		for (i = 0; i < count; i++, q += size)
			LIST_INSERT_HEAD(&d->chunk_info_list[bits],
			    (struct chunk_info *)q, entries);
	}
	p = LIST_FIRST(&d->chunk_info_list[bits]);
	LIST_REMOVE(p, entries);
	_dl_memset(p, 0, size);
	p->canary = d->canary1;
	return p;
}

/*
 * The hashtable uses the assumption that p is never NULL. This holds since
 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
 */

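/*
 * Sketch of the table discipline (descriptive): entries are placed by
 * hashing the page address and probing linearly downwards from
 * hash(p) & mask.  insert() doubles the table via omalloc_grow() once
 * free slots drop below a quarter of the total (regions_free * 4 <
 * regions_total), keeping probe runs short.
 */
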
static int
insert(struct dir_info *d, void *p, size_t sz)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
	}
	d->r[index].p = p;
	d->r[index].size = sz;
	d->regions_free--;
	return 0;
}

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt");
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
	}
	return (q == p && r != NULL) ? &d->r[index] : NULL;
}

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror("regions_total not 2^x");
	d->regions_free++;

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			break;
		}
	}
}

/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
{
	struct chunk_info *bp;
	void *pp;
	int i, k;

	/* Allocate a new bucket */
	pp = map(d, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;

	bp = alloc_chunk_info(d, bits);
	if (bp == NULL) {
		unmap(d, pp, MALLOC_PAGESIZE);
		return NULL;
	}

	/* memory protect the page allocated in the malloc(0) case */
	if (bits == 0) {
		bp->size = 0;
		bp->shift = 1;
		i = MALLOC_MINSIZE - 1;
		while (i >>= 1)
			bp->shift++;
		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
		bp->page = pp;

		k = _dl_mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
		if (k < 0) {
			unmap(d, pp, MALLOC_PAGESIZE);
			LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
			return NULL;
		}
	} else {
		bp->size = 1U << bits;
		bp->shift = bits;
		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
		bp->page = pp;
	}

	/* set all valid bits in the bitmap */
	k = bp->total;
	i = 0;

	/* Do a bunch at a time */
	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
		bp->bits[i / MALLOC_BITS] = (u_short)~0U;

	for (; i < k; i++)
		bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);

	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);

	bits++;
	if ((uintptr_t)pp & bits)
		wrterror("pp & bits");

	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp);
	return bp;
}

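/*
 * The insert() call above stores the page address with (bits + 1) in its
 * low bits: 1 marks a malloc(0) page, and a low value of k marks a page
 * of 1 << (k - 1) byte chunks, exactly what REALSIZE() decodes.  r->size
 * doubles as the pointer to the page's chunk_info.
 */
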
/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t size)
{
	int i, j, listnum;
	size_t k;
	u_short u, *lp;
	struct chunk_info *bp;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt");
	/* Don't bother with anything less than this */
	/* unless we have a malloc(0) request */
	if (size != 0 && size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;

	/* Find the right bucket */
	if (size == 0)
		j = 0;
	else {
		j = MALLOC_MINSHIFT;
		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
		while (i >>= 1)
			j++;
	}

	listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
	/* If it's empty, make a page more of that size chunks */
	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
		bp = omalloc_make_chunks(d, j, listnum);
		if (bp == NULL)
			return NULL;
	}

	if (bp->canary != d->canary1)
		wrterror("chunk info corrupted");

	i = d->chunk_start;
	if (bp->free > 1)
		i += getrbyte(d);
	if (i >= bp->total)
		i &= bp->total - 1;
	for (;;) {
		for (;;) {
			lp = &bp->bits[i / MALLOC_BITS];
			if (!*lp) {
				i += MALLOC_BITS;
				i &= ~(MALLOC_BITS - 1);
				if (i >= bp->total)
					i = 0;
			} else
				break;
		}
		k = i % MALLOC_BITS;
		u = 1 << k;
		if (*lp & u)
			break;
		if (++i >= bp->total)
			i = 0;
	}
	d->chunk_start += i + 1;
	*lp ^= u;

	/* If there are no more free, remove from free-list */
	if (!--bp->free)
		LIST_REMOVE(bp, entries);

	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;
	k <<= bp->shift;

	if (mopts.malloc_junk == 2 && bp->size > 0)
		_dl_memset((char *)bp->page + k, SOME_JUNK, bp->size);
	return ((char *)bp->page + k);
}

static uint32_t
find_chunknum(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_info *info;
	uint32_t chunknum;

	info = (struct chunk_info *)r->size;
	if (info->canary != d->canary1)
		wrterror("chunk info corrupted");

	/* Find the chunk number on the page */
	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;

	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1)) {
		wrterror("modified chunk-pointer");
		return -1;
	}
	if (info->bits[chunknum / MALLOC_BITS] &
	    (1U << (chunknum % MALLOC_BITS))) {
		wrterror("chunk is already free");
		return -1;
	}
	return chunknum;
}

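/*
 * find_chunknum() doubles as the free-path sanity check: a pointer that
 * is not aligned to the page's chunk size was never handed out by
 * malloc_bytes(), and a still-set bitmap bit means the chunk is already
 * free; both cases abort through wrterror().
 */
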
/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_head *mp;
	struct chunk_info *info;
	uint32_t chunknum;
	int listnum;

	info = (struct chunk_info *)r->size;
	if ((chunknum = find_chunknum(d, r, ptr)) == -1)
		return;

	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
	info->free++;

	if (info->free == 1) {
		/* Page became non-full */
		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
		if (info->size != 0)
			mp = &d->chunk_dir[info->shift][listnum];
		else
			mp = &d->chunk_dir[0][listnum];

		LIST_INSERT_HEAD(mp, info, entries);
		return;
	}

	if (info->free != info->total)
		return;

	LIST_REMOVE(info, entries);

	if (info->size == 0 && !mopts.malloc_freeunmap)
		_dl_mprotect(info->page, MALLOC_PAGESIZE,
		    PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE);

	delete(d, r);
	if (info->size != 0)
		mp = &d->chunk_info_list[info->shift];
	else
		mp = &d->chunk_info_list[0];
	LIST_INSERT_HEAD(mp, info, entries);
}

static void *
omalloc(size_t sz, int zero_fill)
{
	void *p;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
			return NULL;
		}
		sz += mopts.malloc_guard;
		psz = PAGEROUND(sz);
		p = map(g_pool, psz, zero_fill);
		if (p == MAP_FAILED) {
			return NULL;
		}
		if (insert(g_pool, p, sz)) {
			unmap(g_pool, p, psz);
			return NULL;
		}
		if (mopts.malloc_guard) {
			if (_dl_mprotect((char *)p + psz - mopts.malloc_guard,
			    mopts.malloc_guard, PROT_NONE))
				wrterror("mprotect");
		}

		if (mopts.malloc_move &&
		    sz - mopts.malloc_guard < MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			/* fill whole allocation */
			if (mopts.malloc_junk == 2)
				_dl_memset(p, SOME_JUNK,
				    psz - mopts.malloc_guard);
			/* shift towards the end */
			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE - 1));
			/*
			 * fill with zeros if needed; the junk fill above
			 * overwrote the kernel's zero fill
			 */
			if (zero_fill && mopts.malloc_junk == 2)
				_dl_memset(p, 0, sz - mopts.malloc_guard);
		} else {
			if (mopts.malloc_junk == 2) {
				if (zero_fill)
					_dl_memset((char *)p + sz -
					    mopts.malloc_guard, SOME_JUNK,
					    psz - sz);
				else
					_dl_memset(p, SOME_JUNK,
					    psz - mopts.malloc_guard);
			}
		}
	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(g_pool, sz);
		if (zero_fill && p != NULL && sz > 0)
			_dl_memset(p, 0, sz);
	}

	return p;
}

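/*
 * Worked example for the malloc_move branch above (illustrative numbers,
 * assuming a 4096 byte page and a malloc_guard of 0): a 3000 byte request
 * gets one page, and the returned pointer is moved to
 * p + ((4096 - 3000) & ~15) = p + 1088, ending 8 bytes short of the page
 * boundary, as close to the end as MALLOC_MINSIZE alignment allows.
 */
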
/*
 * Common function for handling recursion. Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
static void
malloc_recurse(void)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror("recursive call");
	}
	g_pool->active--;
}

void *
_dl_malloc(size_t size)
{
	void *r = NULL;

	_dl_thread_kern_stop();
	if (g_pool == NULL)
		omalloc_init(&g_pool);
	g_pool->func = "malloc():";
	if (g_pool->active++) {
		malloc_recurse();
		goto ret;
	}
	r = omalloc(size, 0);
	g_pool->active--;
ret:
	_dl_thread_kern_go();
	return r;
}

static void
ofree(void *p)
{
	struct region_info *r;
	size_t sz;

	r = find(g_pool, p);
	if (r == NULL)
		wrterror("bogus pointer (double free?)");
	REALSIZE(sz, r);
	if (sz > MALLOC_MAXCHUNK) {
		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			if (r->p != p)
				wrterror("bogus pointer");
		} else {
#if notyetbecause_of_realloc
			/* shifted towards the end */
			if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
			    MALLOC_MINSIZE - sz - mopts.malloc_guard) &
			    ~(MALLOC_MINSIZE - 1))) {
			}
#endif
			p = r->p;
		}
		if (mopts.malloc_guard) {
			if (sz < mopts.malloc_guard)
				wrterror("guard size");
			if (!mopts.malloc_freeunmap) {
				if (_dl_mprotect((char *)p + PAGEROUND(sz) -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror("mprotect");
			}
		}
		unmap(g_pool, p, PAGEROUND(sz));
		delete(g_pool, r);
	} else {
		void *tmp;
		int i;

		if (mopts.malloc_junk && sz > 0)
			_dl_memset(p, SOME_FREEJUNK, sz);
		if (!mopts.malloc_freenow) {
			if (find_chunknum(g_pool, r, p) == -1)
				return;
			i = getrbyte(g_pool) & MALLOC_DELAYED_CHUNK_MASK;
			tmp = p;
			p = g_pool->delayed_chunks[i];
			if (tmp == p)
				wrterror("double free");
			g_pool->delayed_chunks[i] = tmp;
		}
		if (p != NULL) {
			r = find(g_pool, p);
			if (r == NULL)
				wrterror("bogus pointer (double free?)");
			free_bytes(g_pool, r, p);
		}
	}
}

void
_dl_free(void *ptr)
{
	/* This is legal. */
	if (ptr == NULL)
		return;

	_dl_thread_kern_stop();
	if (g_pool == NULL)
		wrterror("free() called before allocation");
	g_pool->func = "free():";
	if (g_pool->active++) {
		malloc_recurse();
		goto ret;
	}
	ofree(ptr);
	g_pool->active--;
ret:
	_dl_thread_kern_go();
}

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))

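/*
 * Illustrative arithmetic: with a 64 bit size_t, MUL_NO_OVERFLOW is
 * 1UL << 32, and two factors each below 2^32 cannot overflow 64 bits.
 * Only when one factor is at least 2^32 does _dl_calloc() below fall
 * back to the division test SIZE_MAX / nmemb < size.
 */
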
void *
_dl_calloc(size_t nmemb, size_t size)
{
	void *r = NULL;

	_dl_thread_kern_stop();
	if (g_pool == NULL)
		omalloc_init(&g_pool);
	g_pool->func = "calloc():";
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		goto ret;
	}

	if (g_pool->active++) {
		malloc_recurse();
		goto ret;
	}

	size *= nmemb;
	r = omalloc(size, 1);
	g_pool->active--;
ret:
	_dl_thread_kern_go();
	return r;
}

static void *
orealloc(void *p, size_t newsz)
{
	struct region_info *r;
	void *q;
	size_t oldsz;

	q = omalloc(newsz, 0);
	if (p == NULL || q == NULL)
		return q;
	r = find(g_pool, p);
	if (r == NULL)
		wrterror("bogus pointer (double free?)");
	REALSIZE(oldsz, r);
	if (oldsz > MALLOC_MAXCHUNK) {
		if (oldsz < mopts.malloc_guard)
			wrterror("guard size");
		oldsz -= mopts.malloc_guard;
	}
	_dl_bcopy(p, q, oldsz < newsz ? oldsz : newsz);
	/*
	 * use ofree(), not _dl_free(): active is already set by our
	 * caller, so _dl_free() would trip the recursion guard
	 */
	ofree(p);
	return q;
}

void *
_dl_realloc(void *ptr, size_t size)
{
	void *r = NULL;

	_dl_thread_kern_stop();
	if (g_pool == NULL)
		omalloc_init(&g_pool);
	g_pool->func = "realloc():";
	if (g_pool->active++) {
		malloc_recurse();
		goto ret;
	}
	r = orealloc(ptr, size);
	g_pool->active--;
ret:
	_dl_thread_kern_go();
	return r;
}

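/*
 * Design note (descriptive): orealloc() never resizes in place; it
 * always allocates a new region, copies min(oldsz, newsz) bytes and
 * frees the old one.  That trades speed for simplicity, which suits
 * ld.so, and it makes _dl_realloc(NULL, size) equivalent to
 * _dl_malloc(size).
 */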