1 /* $OpenBSD: malloc.c,v 1.98 2008/08/25 17:56:17 otto Exp $ */ 2 /* 3 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> 4 * 5 * Permission to use, copy, modify, and distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 18 /* 19 * Parts of this code, mainly the sub page sized chunk management code is 20 * derived from the malloc implementation with the following license: 21 */ 22 /* 23 * ---------------------------------------------------------------------------- 24 * "THE BEER-WARE LICENSE" (Revision 42): 25 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you 26 * can do whatever you want with this stuff. If we meet some day, and you think 27 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp 28 * ---------------------------------------------------------------------------- 29 */ 30 31 /* #define MALLOC_STATS */ 32 33 #include <sys/types.h> 34 #include <sys/param.h> 35 #include <sys/mman.h> 36 #include <sys/uio.h> 37 #include <errno.h> 38 #include <stdint.h> 39 #include <stdlib.h> 40 #include <string.h> 41 #include <stdio.h> 42 #include <unistd.h> 43 44 #ifdef MALLOC_STATS 45 #include <fcntl.h> 46 #endif 47 48 #include "thread_private.h" 49 50 #define MALLOC_MINSHIFT 4 51 #define MALLOC_MAXSHIFT 16 52 53 #if defined(__sparc__) && !defined(__sparcv9__) 54 #define MALLOC_PAGESHIFT (13U) 55 #else 56 #define MALLOC_PAGESHIFT (PGSHIFT) 57 #endif 58 59 #define MALLOC_PAGESIZE (1UL << MALLOC_PAGESHIFT) 60 #define MALLOC_MINSIZE (1UL << MALLOC_MINSHIFT) 61 #define MALLOC_PAGEMASK (MALLOC_PAGESIZE - 1) 62 #define MASK_POINTER(p) ((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK)) 63 64 #define MALLOC_MAXCHUNK (1 << (MALLOC_PAGESHIFT-1)) 65 #define MALLOC_MAXCACHE 256 66 #define MALLOC_DELAYED_CHUNKS 16 /* should be power of 2 */ 67 68 #define PAGEROUND(x) (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK) 69 70 /* 71 * What to use for Junk. This is the byte value we use to fill with 72 * when the 'J' option is enabled. Use SOME_JUNK right after alloc, 73 * and SOME_FREEJUNK right before free. 74 */ 75 #define SOME_JUNK 0xd0 /* as in "Duh" :-) */ 76 #define SOME_FREEJUNK 0xdf 77 78 #define MMAP(sz) mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \ 79 MAP_ANON | MAP_PRIVATE, -1, (off_t) 0) 80 81 struct region_info { 82 void *p; /* page; low bits used to mark chunks */ 83 uintptr_t size; /* size for pages, or chunk_info pointer */ 84 }; 85 86 struct dir_info { 87 u_int32_t canary1; 88 struct region_info *r; /* region slots */ 89 size_t regions_total; /* number of region slots */ 90 size_t regions_bits; /* log2 of total */ 91 size_t regions_free; /* number of free slots */ 92 /* list of free chunk info structs */ 93 struct chunk_info *chunk_info_list; 94 /* lists of chunks with free slots */ 95 struct chunk_info *chunk_dir[MALLOC_MAXSHIFT]; 96 size_t free_regions_size; /* free pages cached */ 97 /* free pages cache */ 98 struct region_info free_regions[MALLOC_MAXCACHE]; 99 /* delayed free chunk slots */ 100 void *delayed_chunks[MALLOC_DELAYED_CHUNKS]; 101 #ifdef MALLOC_STATS 102 size_t inserts; 103 size_t insert_collisions; 104 size_t finds; 105 size_t find_collisions; 106 size_t deletes; 107 size_t delete_moves; 108 #define STATS_INC(x) ((x)++) 109 #define STATS_ZERO(x) ((x) = 0) 110 #else 111 #define STATS_INC(x) /* nothing */ 112 #define STATS_ZERO(x) /* nothing */ 113 #endif /* MALLOC_STATS */ 114 u_int32_t canary2; 115 }; 116 117 118 /* 119 * This structure describes a page worth of chunks. 120 * 121 * How many bits per u_long in the bitmap 122 */ 123 #define MALLOC_BITS (NBBY * sizeof(u_long)) 124 struct chunk_info { 125 struct chunk_info *next; /* next on the free list */ 126 void *page; /* pointer to the page */ 127 u_int32_t canary; 128 u_short size; /* size of this page's chunks */ 129 u_short shift; /* how far to shift for this size */ 130 u_short free; /* how many free chunks */ 131 u_short total; /* how many chunk */ 132 /* which chunks are free */ 133 u_long bits[(MALLOC_PAGESIZE / MALLOC_MINSIZE) / MALLOC_BITS]; 134 }; 135 136 static struct dir_info g_pool; 137 static char *malloc_func; /* current function */ 138 char *malloc_options; /* compile-time options */ 139 140 static int malloc_abort = 1; /* abort() on error */ 141 static int malloc_active; /* status of malloc */ 142 static int malloc_freeprot; /* mprotect free pages PROT_NONE? */ 143 static int malloc_hint; /* call madvice on free pages? */ 144 static int malloc_junk; /* junk fill? */ 145 static int malloc_move; /* move allocations to end of page? */ 146 static int malloc_realloc; /* always realloc? */ 147 static int malloc_silent; /* avoid outputting warnings? */ 148 static int malloc_xmalloc; /* xmalloc behaviour? */ 149 static int malloc_zero; /* zero fill? */ 150 static size_t malloc_guard; /* use guard pages after allocations? */ 151 152 static u_int malloc_cache = 64; /* free pages we cache */ 153 static size_t malloc_guarded; /* bytes used for guards */ 154 static size_t malloc_used; /* bytes allocated */ 155 156 #ifdef MALLOC_STATS 157 static int malloc_stats; /* dump statistics at end */ 158 #endif 159 160 static size_t rbytesused; /* random bytes used */ 161 static u_char rbytes[4096]; /* random bytes */ 162 static u_char getrbyte(void); 163 164 extern char *__progname; 165 166 /* low bits of r->p determine size: 0 means >= page size and p->size holding 167 * real size, otherwise r->size is a shift count, or 1 for malloc(0) 168 */ 169 #define REALSIZE(sz, r) \ 170 (sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK, \ 171 (sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1)))) 172 173 static inline size_t 174 hash(void *p) 175 { 176 size_t sum; 177 union { 178 uintptr_t p; 179 unsigned short a[sizeof(void *) / sizeof(short)]; 180 } u; 181 u.p = (uintptr_t)p >> MALLOC_PAGESHIFT; 182 sum = u.a[0]; 183 sum = (sum << 7) - sum + u.a[1]; 184 #ifdef __LP64__ 185 sum = (sum << 7) - sum + u.a[2]; 186 sum = (sum << 7) - sum + u.a[3]; 187 #endif 188 return sum; 189 } 190 191 #ifdef MALLOC_STATS 192 static void 193 dump_chunk(int fd, struct chunk_info *p, int fromfreelist) 194 { 195 char buf[64]; 196 197 while (p) { 198 snprintf(buf, sizeof(buf), "chunk %d %d/%d %p\n", p->size, 199 p->free, p->total, p->page); 200 write(fd, buf, strlen(buf)); 201 if (!fromfreelist) 202 break; 203 p = p->next; 204 if (p != NULL) { 205 snprintf(buf, sizeof(buf), " "); 206 write(fd, buf, strlen(buf)); 207 } 208 } 209 } 210 211 static void 212 dump_free_chunk_info(int fd, struct dir_info *d) 213 { 214 char buf[64]; 215 int i; 216 217 snprintf(buf, sizeof(buf), "Free chunk structs:\n"); 218 write(fd, buf, strlen(buf)); 219 for (i = 0; i < MALLOC_MAXSHIFT; i++) { 220 struct chunk_info *p = d->chunk_dir[i]; 221 if (p != NULL) { 222 snprintf(buf, sizeof(buf), "%2d) ", i); 223 write(fd, buf, strlen(buf)); 224 dump_chunk(fd, p, 1); 225 } 226 } 227 228 } 229 230 static void 231 dump_free_page_info(int fd, struct dir_info *d) 232 { 233 char buf[64]; 234 int i; 235 236 snprintf(buf, sizeof(buf), "Free pages cached: %zu\n", 237 d->free_regions_size); 238 write(fd, buf, strlen(buf)); 239 for (i = 0; i < malloc_cache; i++) { 240 if (d->free_regions[i].p != NULL) { 241 snprintf(buf, sizeof(buf), "%2d) ", i); 242 write(fd, buf, strlen(buf)); 243 snprintf(buf, sizeof(buf), "free at %p: %zu\n", 244 d->free_regions[i].p, d->free_regions[i].size); 245 write(fd, buf, strlen(buf)); 246 } 247 } 248 } 249 250 static void 251 malloc_dump1(int fd, struct dir_info *d) 252 { 253 char buf[64]; 254 size_t i, realsize; 255 256 snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d); 257 write(fd, buf, strlen(buf)); 258 snprintf(buf, sizeof(buf), "Regions slots %zu\n", d->regions_total); 259 write(fd, buf, strlen(buf)); 260 snprintf(buf, sizeof(buf), "Finds %zu/%zu %f\n", d->finds, 261 d->find_collisions, 262 1.0 + (double)d->find_collisions / d->finds); 263 write(fd, buf, strlen(buf)); 264 snprintf(buf, sizeof(buf), "Inserts %zu/%zu %f\n", d->inserts, 265 d->insert_collisions, 266 1.0 + (double)d->insert_collisions / d->inserts); 267 write(fd, buf, strlen(buf)); 268 snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes, 269 d->delete_moves); 270 write(fd, buf, strlen(buf)); 271 snprintf(buf, sizeof(buf), "Regions slots free %zu\n", d->regions_free); 272 write(fd, buf, strlen(buf)); 273 for (i = 0; i < d->regions_total; i++) { 274 if (d->r[i].p != NULL) { 275 size_t h = hash(d->r[i].p) & 276 (d->regions_total - 1); 277 snprintf(buf, sizeof(buf), "%4zx) #%zx %zd ", 278 i, h, h - i); 279 write(fd, buf, strlen(buf)); 280 REALSIZE(realsize, &d->r[i]); 281 if (realsize > MALLOC_MAXCHUNK) { 282 snprintf(buf, sizeof(buf), 283 "%p: %zu\n", d->r[i].p, realsize); 284 write(fd, buf, strlen(buf)); 285 } else 286 dump_chunk(fd, 287 (struct chunk_info *)d->r[i].size, 0); 288 } 289 } 290 dump_free_chunk_info(fd, d); 291 dump_free_page_info(fd, d); 292 snprintf(buf, sizeof(buf), "In use %zu\n", malloc_used); 293 write(fd, buf, strlen(buf)); 294 snprintf(buf, sizeof(buf), "Guarded %zu\n", malloc_guarded); 295 write(fd, buf, strlen(buf)); 296 } 297 298 299 void 300 malloc_dump(int fd) 301 { 302 malloc_dump1(fd, &g_pool); 303 } 304 305 static void 306 malloc_exit(void) 307 { 308 char *q = "malloc() warning: Couldn't dump stats\n"; 309 int save_errno = errno, fd; 310 311 fd = open("malloc.out", O_RDWR|O_APPEND); 312 if (fd != -1) { 313 malloc_dump(fd); 314 close(fd); 315 } else 316 write(STDERR_FILENO, q, strlen(q)); 317 errno = save_errno; 318 } 319 #endif /* MALLOC_STATS */ 320 321 322 323 static void 324 wrterror(char *p) 325 { 326 char *q = " error: "; 327 struct iovec iov[5]; 328 329 iov[0].iov_base = __progname; 330 iov[0].iov_len = strlen(__progname); 331 iov[1].iov_base = malloc_func; 332 iov[1].iov_len = strlen(malloc_func); 333 iov[2].iov_base = q; 334 iov[2].iov_len = strlen(q); 335 iov[3].iov_base = p; 336 iov[3].iov_len = strlen(p); 337 iov[4].iov_base = "\n"; 338 iov[4].iov_len = 1; 339 writev(STDERR_FILENO, iov, 5); 340 341 #ifdef MALLOC_STATS 342 if (malloc_stats) 343 malloc_dump(STDERR_FILENO); 344 #endif /* MALLOC_STATS */ 345 //malloc_active--; 346 if (malloc_abort) 347 abort(); 348 } 349 350 static void 351 wrtwarning(char *p) 352 { 353 char *q = " warning: "; 354 struct iovec iov[5]; 355 356 if (malloc_abort) 357 wrterror(p); 358 else if (malloc_silent) 359 return; 360 361 iov[0].iov_base = __progname; 362 iov[0].iov_len = strlen(__progname); 363 iov[1].iov_base = malloc_func; 364 iov[1].iov_len = strlen(malloc_func); 365 iov[2].iov_base = q; 366 iov[2].iov_len = strlen(q); 367 iov[3].iov_base = p; 368 iov[3].iov_len = strlen(p); 369 iov[4].iov_base = "\n"; 370 iov[4].iov_len = 1; 371 372 writev(STDERR_FILENO, iov, 5); 373 } 374 375 /* 376 * Cache maintenance. We keep at most malloc_cache pages cached. 377 * If the cache is becoming full, unmap pages in the cache for real, 378 * and then add the region to the cache 379 * Opposed to the regular region data structure, the sizes in the 380 * cache are in MALLOC_PAGESIZE units. 381 */ 382 static void 383 unmap(struct dir_info *d, void *p, size_t sz) 384 { 385 size_t psz = sz >> MALLOC_PAGESHIFT; 386 size_t rsz, tounmap; 387 struct region_info *r; 388 u_int i, offset; 389 390 if (sz != PAGEROUND(sz)) { 391 wrterror("munmap round"); 392 return; 393 } 394 395 if (psz > malloc_cache) { 396 if (munmap(p, sz)) 397 wrterror("munmap"); 398 malloc_used -= sz; 399 return; 400 } 401 tounmap = 0; 402 rsz = malloc_cache - d->free_regions_size; 403 if (psz > rsz) 404 tounmap = psz - rsz; 405 d->free_regions_size -= tounmap; 406 offset = getrbyte(); 407 for (i = 0; tounmap > 0 && i < malloc_cache; i++) { 408 r = &d->free_regions[(i + offset) & (malloc_cache - 1)]; 409 if (r->p != NULL) { 410 if (r->size <= tounmap) { 411 rsz = r->size << MALLOC_PAGESHIFT; 412 if (munmap(r->p, rsz)) 413 wrterror("munmap"); 414 tounmap -= r->size; 415 r->p = NULL; 416 r->size = 0; 417 malloc_used -= rsz; 418 } else { 419 rsz = tounmap << MALLOC_PAGESHIFT; 420 if (munmap((char *)r->p + ((r->size - tounmap) 421 << MALLOC_PAGESHIFT), rsz)) 422 wrterror("munmap"); 423 r->size -= tounmap ; 424 tounmap = 0; 425 malloc_used -= rsz; 426 } 427 } 428 } 429 if (tounmap > 0) 430 wrtwarning("malloc cache underflow"); 431 for (i = 0; i < malloc_cache; i++) { 432 r = &d->free_regions[i]; 433 if (r->p == NULL) { 434 if (malloc_hint) 435 madvise(p, sz, MADV_FREE); 436 if (malloc_freeprot) 437 mprotect(p, sz, PROT_NONE); 438 r->p = p; 439 r->size = psz; 440 d->free_regions_size += psz; 441 break; 442 } 443 } 444 if (i == malloc_cache) 445 wrtwarning("malloc free slot lost"); 446 if (d->free_regions_size > malloc_cache) 447 wrtwarning("malloc cache overflow"); 448 } 449 450 static void * 451 map(struct dir_info *d, size_t sz, int zero_fill) 452 { 453 size_t psz = sz >> MALLOC_PAGESHIFT; 454 struct region_info *r, *big = NULL; 455 u_int i, offset; 456 void *p; 457 458 if (sz != PAGEROUND(sz)) { 459 wrterror("map round"); 460 return NULL; 461 } 462 if (psz > d->free_regions_size) { 463 p = MMAP(sz); 464 if (p != MAP_FAILED) 465 malloc_used += sz; 466 /* zero fill not needed */ 467 return p; 468 } 469 offset = getrbyte(); 470 for (i = 0; i < malloc_cache; i++) { 471 r = &d->free_regions[(i + offset) & (malloc_cache - 1)]; 472 if (r->p != NULL) { 473 if (r->size == psz) { 474 p = r->p; 475 if (malloc_freeprot) 476 mprotect(p, sz, PROT_READ | PROT_WRITE); 477 if (malloc_hint) 478 madvise(p, sz, MADV_NORMAL); 479 r->p = NULL; 480 r->size = 0; 481 d->free_regions_size -= psz; 482 if (zero_fill) 483 memset(p, 0, sz); 484 return p; 485 } else if (r->size > psz) 486 big = r; 487 } 488 } 489 if (big != NULL) { 490 r = big; 491 p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT); 492 if (malloc_freeprot) 493 mprotect(p, sz, PROT_READ | PROT_WRITE); 494 if (malloc_hint) 495 madvise(p, sz, MADV_NORMAL); 496 r->size -= psz; 497 d->free_regions_size -= psz; 498 if (zero_fill) 499 memset(p, 0, sz); 500 return p; 501 } 502 p = MMAP(sz); 503 if (p != MAP_FAILED) 504 malloc_used += sz; 505 if (d->free_regions_size > malloc_cache) 506 wrtwarning("malloc cache"); 507 /* zero fill not needed */ 508 return p; 509 } 510 511 static void 512 rbytes_init(void) 513 { 514 arc4random_buf(rbytes, sizeof(rbytes)); 515 rbytesused = 0; 516 } 517 518 static u_char 519 getrbyte(void) 520 { 521 if (rbytesused >= sizeof(rbytes)) 522 rbytes_init(); 523 return rbytes[rbytesused++]; 524 } 525 526 /* 527 * Initialize a dir_info, which should have been cleared by caller 528 */ 529 static int 530 omalloc_init(struct dir_info *d) 531 { 532 char *p, b[64]; 533 int i, j, save_errno = errno; 534 size_t regioninfo_size; 535 536 rbytes_init(); 537 538 for (i = 0; i < 3; i++) { 539 switch (i) { 540 case 0: 541 j = readlink("/etc/malloc.conf", b, sizeof b - 1); 542 if (j <= 0) 543 continue; 544 b[j] = '\0'; 545 p = b; 546 break; 547 case 1: 548 if (issetugid() == 0) 549 p = getenv("MALLOC_OPTIONS"); 550 else 551 continue; 552 break; 553 case 2: 554 p = malloc_options; 555 break; 556 default: 557 p = NULL; 558 } 559 560 for (; p != NULL && *p != '\0'; p++) { 561 switch (*p) { 562 case '>': 563 malloc_cache <<= 1; 564 if (malloc_cache > MALLOC_MAXCACHE) 565 malloc_cache = MALLOC_MAXCACHE; 566 break; 567 case '<': 568 malloc_cache >>= 1; 569 break; 570 case 'a': 571 malloc_abort = 0; 572 break; 573 case 'A': 574 malloc_abort = 1; 575 break; 576 #ifdef MALLOC_STATS 577 case 'd': 578 malloc_stats = 0; 579 break; 580 case 'D': 581 malloc_stats = 1; 582 break; 583 #endif /* MALLOC_STATS */ 584 case 'f': 585 malloc_freeprot = 0; 586 break; 587 case 'F': 588 malloc_freeprot = 1; 589 break; 590 case 'g': 591 malloc_guard = 0; 592 break; 593 case 'G': 594 malloc_guard = MALLOC_PAGESIZE; 595 break; 596 case 'h': 597 malloc_hint = 0; 598 break; 599 case 'H': 600 malloc_hint = 1; 601 break; 602 case 'j': 603 malloc_junk = 0; 604 break; 605 case 'J': 606 malloc_junk = 1; 607 break; 608 case 'n': 609 malloc_silent = 0; 610 break; 611 case 'N': 612 malloc_silent = 1; 613 break; 614 case 'p': 615 malloc_move = 0; 616 break; 617 case 'P': 618 malloc_move = 1; 619 break; 620 case 'r': 621 malloc_realloc = 0; 622 break; 623 case 'R': 624 malloc_realloc = 1; 625 break; 626 case 'x': 627 malloc_xmalloc = 0; 628 break; 629 case 'X': 630 malloc_xmalloc = 1; 631 break; 632 case 'z': 633 malloc_zero = 0; 634 break; 635 case 'Z': 636 malloc_zero = 1; 637 break; 638 default: 639 j = malloc_abort; 640 malloc_abort = 0; 641 wrtwarning("unknown char in MALLOC_OPTIONS"); 642 malloc_abort = j; 643 break; 644 } 645 } 646 } 647 648 /* 649 * We want junk in the entire allocation, and zero only in the part 650 * the user asked for. 651 */ 652 if (malloc_zero) 653 malloc_junk = 1; 654 655 #ifdef MALLOC_STATS 656 if (malloc_stats && (atexit(malloc_exit) == -1)) 657 wrtwarning("atexit(2) failed." 658 " Will not be able to dump malloc stats on exit"); 659 #endif /* MALLOC_STATS */ 660 661 errno = save_errno; 662 663 d->regions_bits = 9; 664 d->regions_free = d->regions_total = 1 << d->regions_bits; 665 regioninfo_size = d->regions_total * sizeof(struct region_info); 666 d->r = MMAP(regioninfo_size); 667 if (d->r == MAP_FAILED) { 668 wrterror("malloc init mmap failed"); 669 d->regions_total = 0; 670 return 1; 671 } 672 malloc_used += regioninfo_size; 673 memset(d->r, 0, regioninfo_size); 674 d->canary1 = arc4random(); 675 d->canary2 = ~d->canary1; 676 return 0; 677 } 678 679 static int 680 omalloc_grow(struct dir_info *d) 681 { 682 size_t newbits; 683 size_t newtotal; 684 size_t newsize; 685 size_t mask; 686 size_t i; 687 struct region_info *p; 688 689 if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2 ) 690 return 1; 691 692 newbits = d->regions_bits + 1; 693 newtotal = d->regions_total * 2; 694 newsize = newtotal * sizeof(struct region_info); 695 mask = newtotal - 1; 696 697 p = MMAP(newsize); 698 if (p == MAP_FAILED) 699 return 1; 700 701 malloc_used += newsize; 702 memset(p, 0, newsize); 703 STATS_ZERO(d->inserts); 704 STATS_ZERO(d->insert_collisions); 705 for (i = 0; i < d->regions_total; i++) { 706 void *q = d->r[i].p; 707 if (q != NULL) { 708 size_t index = hash(q) & mask; 709 STATS_INC(d->inserts); 710 while (p[index].p != NULL) { 711 index = (index - 1) & mask; 712 STATS_INC(d->insert_collisions); 713 } 714 p[index] = d->r[i]; 715 } 716 } 717 /* avoid pages containing meta info to end up in cache */ 718 if (munmap(d->r, d->regions_total * sizeof(struct region_info))) 719 wrterror("munmap"); 720 else 721 malloc_used -= d->regions_total * sizeof(struct region_info); 722 d->regions_free = d->regions_free + d->regions_total; 723 d->regions_total = newtotal; 724 d->regions_bits = newbits; 725 d->r = p; 726 return 0; 727 } 728 729 static struct chunk_info * 730 alloc_chunk_info(struct dir_info *d) 731 { 732 struct chunk_info *p; 733 int i; 734 735 if (d->chunk_info_list == NULL) { 736 p = MMAP(MALLOC_PAGESIZE); 737 if (p == MAP_FAILED) 738 return NULL; 739 malloc_used += MALLOC_PAGESIZE; 740 for (i = 0; i < MALLOC_PAGESIZE / sizeof(*p); i++) { 741 p[i].next = d->chunk_info_list; 742 d->chunk_info_list = &p[i]; 743 } 744 } 745 p = d->chunk_info_list; 746 d->chunk_info_list = p->next; 747 memset(p, 0, sizeof *p); 748 p->canary = d->canary1; 749 return p; 750 } 751 752 753 static void 754 put_chunk_info(struct dir_info *d, struct chunk_info *p) 755 { 756 p->next = d->chunk_info_list; 757 d->chunk_info_list = p; 758 } 759 760 static int 761 insert(struct dir_info *d, void *p, size_t sz) 762 { 763 size_t index; 764 size_t mask; 765 void *q; 766 767 if (d->regions_free * 4 < d->regions_total) { 768 if (omalloc_grow(d)) 769 return 1; 770 } 771 mask = d->regions_total - 1; 772 index = hash(p) & mask; 773 q = d->r[index].p; 774 STATS_INC(d->inserts); 775 while (q != NULL) { 776 index = (index - 1) & mask; 777 q = d->r[index].p; 778 STATS_INC(d->insert_collisions); 779 } 780 d->r[index].p = p; 781 d->r[index].size = sz; 782 d->regions_free--; 783 return 0; 784 } 785 786 static struct region_info * 787 find(struct dir_info *d, void *p) 788 { 789 size_t index; 790 size_t mask = d->regions_total - 1; 791 void *q, *r; 792 793 if (d->canary1 != ~d->canary2) 794 wrterror("internal struct corrupt"); 795 p = MASK_POINTER(p); 796 index = hash(p) & mask; 797 r = d->r[index].p; 798 q = MASK_POINTER(r); 799 STATS_INC(d->finds); 800 while (q != p && r != NULL) { 801 index = (index - 1) & mask; 802 r = d->r[index].p; 803 q = MASK_POINTER(r); 804 STATS_INC(d->find_collisions); 805 } 806 return q == p ? &d->r[index] : NULL; 807 } 808 809 static void 810 delete(struct dir_info *d, struct region_info *ri) 811 { 812 /* algorithm R, Knuth Vol III section 6.4 */ 813 size_t mask = d->regions_total - 1; 814 size_t i, j, r; 815 816 if (d->regions_total & (d->regions_total - 1)) 817 wrterror("regions_total not 2^x"); 818 d->regions_free++; 819 STATS_INC(g_pool.deletes); 820 821 i = ri - d->r; 822 for (;;) { 823 d->r[i].p = NULL; 824 d->r[i].size = 0; 825 j = i; 826 for (;;) { 827 i = (i - 1) & mask; 828 if (d->r[i].p == NULL) 829 return; 830 r = hash(d->r[i].p) & mask; 831 if ((i <= r && r < j) || (r < j && j < i) || 832 (j < i && i <= r)) 833 continue; 834 d->r[j] = d->r[i]; 835 STATS_INC(g_pool.delete_moves); 836 break; 837 } 838 839 } 840 } 841 842 /* 843 * Allocate a page of chunks 844 */ 845 static struct chunk_info * 846 omalloc_make_chunks(struct dir_info *d, int bits) 847 { 848 struct chunk_info *bp; 849 void *pp; 850 long i, k; 851 852 /* Allocate a new bucket */ 853 pp = map(d, MALLOC_PAGESIZE, 0); 854 if (pp == MAP_FAILED) 855 return NULL; 856 857 bp = alloc_chunk_info(d); 858 if (bp == NULL) { 859 unmap(d, pp, MALLOC_PAGESIZE); 860 return NULL; 861 } 862 863 /* memory protect the page allocated in the malloc(0) case */ 864 if (bits == 0) { 865 bp->size = 0; 866 bp->shift = 1; 867 i = MALLOC_MINSIZE - 1; 868 while (i >>= 1) 869 bp->shift++; 870 bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift; 871 bp->page = pp; 872 873 k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE); 874 if (k < 0) { 875 unmap(d, pp, MALLOC_PAGESIZE); 876 put_chunk_info(d, bp); 877 return NULL; 878 } 879 } else { 880 bp->size = (1UL << bits); 881 bp->shift = bits; 882 bp->total = bp->free = MALLOC_PAGESIZE >> bits; 883 bp->page = pp; 884 } 885 886 /* set all valid bits in the bitmap */ 887 k = bp->total; 888 i = 0; 889 890 /* Do a bunch at a time */ 891 for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS) 892 bp->bits[i / MALLOC_BITS] = ~0UL; 893 894 for (; i < k; i++) 895 bp->bits[i / MALLOC_BITS] |= 1UL << (i % MALLOC_BITS); 896 897 bp->next = d->chunk_dir[bits]; 898 d->chunk_dir[bits] = bp; 899 900 bits++; 901 if ((uintptr_t)pp & bits) 902 wrterror("pp & bits"); 903 904 insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp); 905 return bp; 906 } 907 908 909 /* 910 * Allocate a chunk 911 */ 912 static void * 913 malloc_bytes(struct dir_info *d, size_t size) 914 { 915 int i, j; 916 size_t k; 917 u_long u, *lp; 918 struct chunk_info *bp; 919 920 /* Don't bother with anything less than this */ 921 /* unless we have a malloc(0) requests */ 922 if (size != 0 && size < MALLOC_MINSIZE) 923 size = MALLOC_MINSIZE; 924 925 /* Find the right bucket */ 926 if (size == 0) 927 j = 0; 928 else { 929 j = MALLOC_MINSHIFT; 930 i = (size - 1) >> (MALLOC_MINSHIFT - 1); 931 while (i >>= 1) 932 j++; 933 } 934 935 /* If it's empty, make a page more of that size chunks */ 936 bp = d->chunk_dir[j]; 937 if (bp == NULL && (bp = omalloc_make_chunks(d, j)) == NULL) 938 return NULL; 939 940 if (bp->canary != d->canary1) 941 wrterror("chunk info corrupted"); 942 /* Find first word of bitmap which isn't empty */ 943 for (lp = bp->bits; !*lp; lp++) 944 /* EMPTY */; 945 946 /* Find that bit, and tweak it */ 947 u = 1; 948 k = 0; 949 while (!(*lp & u)) { 950 u += u; 951 k++; 952 } 953 954 /* advance a random # of positions */ 955 i = (getrbyte() & (MALLOC_DELAYED_CHUNKS - 1)) % bp->free; 956 while (i > 0) { 957 u += u; 958 k++; 959 if (k >= MALLOC_BITS) { 960 lp++; 961 u = 1; 962 k = 0; 963 } 964 if (lp - bp->bits > (bp->total - 1) / MALLOC_BITS) { 965 wrterror("chunk overflow"); 966 errno = EFAULT; 967 return (NULL); 968 } 969 if (*lp & u) 970 i--; 971 } 972 973 *lp ^= u; 974 975 /* If there are no more free, remove from free-list */ 976 if (!--bp->free) { 977 d->chunk_dir[j] = bp->next; 978 bp->next = NULL; 979 } 980 /* Adjust to the real offset of that chunk */ 981 k += (lp - bp->bits) * MALLOC_BITS; 982 k <<= bp->shift; 983 984 if (malloc_junk && bp->size > 0) 985 memset((char *)bp->page + k, SOME_JUNK, bp->size); 986 return ((char *)bp->page + k); 987 } 988 989 990 /* 991 * Free a chunk, and possibly the page it's on, if the page becomes empty. 992 */ 993 static void 994 free_bytes(struct dir_info *d, struct region_info *r, void *ptr) 995 { 996 struct chunk_info *info, **mp; 997 long i; 998 999 info = (struct chunk_info *)r->size; 1000 if (info->canary != d->canary1) 1001 wrterror("chunk info corrupted"); 1002 1003 /* Find the chunk number on the page */ 1004 i = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift; 1005 1006 if ((uintptr_t)ptr & ((1UL << (info->shift)) - 1)) { 1007 wrtwarning("modified chunk-pointer"); 1008 return; 1009 } 1010 if (info->bits[i / MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) { 1011 wrtwarning("chunk is already free"); 1012 return; 1013 } 1014 1015 info->bits[i / MALLOC_BITS] |= 1UL << (i % MALLOC_BITS); 1016 info->free++; 1017 1018 if (info->size != 0) 1019 mp = d->chunk_dir + info->shift; 1020 else 1021 mp = d->chunk_dir; 1022 1023 if (info->free == 1) { 1024 /* Page became non-full */ 1025 1026 /* Insert in address order */ 1027 while (*mp != NULL && (*mp)->next != NULL && 1028 (*mp)->next->page < info->page) 1029 mp = &(*mp)->next; 1030 info->next = *mp; 1031 *mp = info; 1032 return; 1033 } 1034 if (info->free != info->total) 1035 return; 1036 1037 /* Find & remove this page in the queue */ 1038 while (*mp != info) { 1039 mp = &((*mp)->next); 1040 if (!*mp) { 1041 wrterror("not on queue"); 1042 errno = EFAULT; 1043 return; 1044 } 1045 } 1046 *mp = info->next; 1047 1048 if (info->size == 0 && !malloc_freeprot) 1049 mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE); 1050 unmap(d, info->page, MALLOC_PAGESIZE); 1051 1052 delete(d, r); 1053 put_chunk_info(d, info); 1054 } 1055 1056 1057 1058 static void * 1059 omalloc(size_t sz, int zero_fill) 1060 { 1061 void *p; 1062 size_t psz; 1063 1064 if (sz > MALLOC_MAXCHUNK) { 1065 if (sz >= SIZE_MAX - malloc_guard - MALLOC_PAGESIZE) { 1066 errno = ENOMEM; 1067 return NULL; 1068 } 1069 sz += malloc_guard; 1070 psz = PAGEROUND(sz); 1071 p = map(&g_pool, psz, zero_fill); 1072 if (p == MAP_FAILED) { 1073 errno = ENOMEM; 1074 return NULL; 1075 } 1076 if (insert(&g_pool, p, sz)) { 1077 unmap(&g_pool, p, psz); 1078 errno = ENOMEM; 1079 return NULL; 1080 } 1081 if (malloc_guard) { 1082 if (mprotect((char *)p + psz - malloc_guard, 1083 malloc_guard, PROT_NONE)) 1084 wrterror("mprotect"); 1085 malloc_guarded += malloc_guard; 1086 } 1087 1088 if (malloc_move && 1089 sz - malloc_guard < MALLOC_PAGESIZE - MALLOC_MINSIZE) { 1090 /* fill whole allocation */ 1091 if (malloc_junk) 1092 memset(p, SOME_JUNK, psz - malloc_guard); 1093 /* shift towards the end */ 1094 p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_MINSIZE - 1095 (sz - malloc_guard)) & ~(MALLOC_MINSIZE-1)); 1096 /* fill zeros if needed and overwritten above */ 1097 if (zero_fill && malloc_junk) 1098 memset(p, 0, sz - malloc_guard); 1099 } else { 1100 if (malloc_junk) { 1101 if (zero_fill) 1102 memset(p + sz - malloc_guard, 1103 SOME_JUNK, psz - sz); 1104 else 1105 memset(p, 1106 SOME_JUNK, psz - malloc_guard); 1107 } 1108 } 1109 1110 } else { 1111 /* takes care of SOME_JUNK */ 1112 p = malloc_bytes(&g_pool, sz); 1113 if (zero_fill && p != NULL && sz > 0) 1114 memset(p, 0, sz); 1115 } 1116 1117 return p; 1118 } 1119 1120 /* 1121 * Common function for handling recursion. Only 1122 * print the error message once, to avoid making the problem 1123 * potentially worse. 1124 */ 1125 static void 1126 malloc_recurse(void) 1127 { 1128 static int noprint; 1129 1130 if (noprint == 0) { 1131 noprint = 1; 1132 wrtwarning("recursive call"); 1133 } 1134 malloc_active--; 1135 _MALLOC_UNLOCK(); 1136 errno = EDEADLK; 1137 } 1138 1139 void * 1140 malloc(size_t size) 1141 { 1142 void *r; 1143 1144 _MALLOC_LOCK(); 1145 malloc_func = " in malloc():"; 1146 if (!g_pool.regions_total) { 1147 if (omalloc_init(&g_pool)) { 1148 _MALLOC_UNLOCK(); 1149 if (malloc_xmalloc) 1150 wrterror("out of memory"); 1151 errno = ENOMEM; 1152 return NULL; 1153 } 1154 } 1155 if (malloc_active++) { 1156 malloc_recurse(); 1157 return NULL; 1158 } 1159 r = omalloc(size, malloc_zero); 1160 malloc_active--; 1161 _MALLOC_UNLOCK(); 1162 if (r == NULL && malloc_xmalloc) { 1163 wrterror("out of memory"); 1164 errno = ENOMEM; 1165 } 1166 return r; 1167 } 1168 1169 static void 1170 ofree(void *p) 1171 { 1172 struct region_info *r; 1173 size_t sz; 1174 1175 r = find(&g_pool, p); 1176 if (r == NULL) { 1177 wrtwarning("bogus pointer (double free?)"); 1178 return; 1179 } 1180 REALSIZE(sz, r); 1181 if (sz > MALLOC_MAXCHUNK) { 1182 if (sz - malloc_guard >= MALLOC_PAGESIZE - MALLOC_MINSIZE) { 1183 if (r->p != p) 1184 wrtwarning("bogus pointer"); 1185 } else { 1186 #if notyetbecause_of_realloc 1187 /* shifted towards the end */ 1188 if (p != ((char *)r->p) + ((MALLOC_PAGESIZE - 1189 MALLOC_MINSIZE - sz - malloc_guard) & 1190 ~(MALLOC_MINSIZE-1))) { 1191 } 1192 #endif 1193 p = r->p; 1194 } 1195 if (malloc_guard) { 1196 if (sz < malloc_guard) 1197 wrtwarning("guard size"); 1198 if (!malloc_freeprot) { 1199 if (mprotect((char *)p + PAGEROUND(sz) - 1200 malloc_guard, malloc_guard, 1201 PROT_READ | PROT_WRITE)) 1202 wrterror("mprotect"); 1203 } 1204 malloc_guarded -= malloc_guard; 1205 } 1206 if (malloc_junk) 1207 memset(p, SOME_FREEJUNK, PAGEROUND(sz) - malloc_guard); 1208 unmap(&g_pool, p, PAGEROUND(sz)); 1209 delete(&g_pool, r); 1210 } else { 1211 void *tmp; 1212 int i; 1213 1214 if (malloc_junk && sz > 0) 1215 memset(p, SOME_FREEJUNK, sz); 1216 i = getrbyte() & (MALLOC_DELAYED_CHUNKS - 1); 1217 tmp = p; 1218 p = g_pool.delayed_chunks[i]; 1219 g_pool.delayed_chunks[i] = tmp; 1220 if (p != NULL) { 1221 r = find(&g_pool, p); 1222 if (r == NULL) { 1223 wrtwarning("bogus pointer (double free?)"); 1224 return; 1225 } 1226 free_bytes(&g_pool, r, p); 1227 } 1228 } 1229 } 1230 1231 void 1232 free(void *ptr) 1233 { 1234 /* This is legal. */ 1235 if (ptr == NULL) 1236 return; 1237 1238 _MALLOC_LOCK(); 1239 malloc_func = " in free():"; 1240 if (malloc_active++) { 1241 malloc_recurse(); 1242 return; 1243 } 1244 ofree(ptr); 1245 malloc_active--; 1246 _MALLOC_UNLOCK(); 1247 } 1248 1249 1250 static void * 1251 orealloc(void *p, size_t newsz) 1252 { 1253 struct region_info *r; 1254 size_t oldsz, goldsz, gnewsz; 1255 void *q; 1256 1257 if (p == NULL) 1258 return omalloc(newsz, 0); 1259 1260 r = find(&g_pool, p); 1261 if (r == NULL) { 1262 wrtwarning("bogus pointer (double free?)"); 1263 return NULL; 1264 } 1265 if (newsz >= SIZE_MAX - malloc_guard - MALLOC_PAGESIZE) { 1266 errno = ENOMEM; 1267 return NULL; 1268 } 1269 1270 REALSIZE(oldsz, r); 1271 goldsz = oldsz; 1272 if (oldsz > MALLOC_MAXCHUNK) { 1273 if (oldsz < malloc_guard) 1274 wrtwarning("guard size"); 1275 oldsz -= malloc_guard; 1276 } 1277 1278 gnewsz = newsz; 1279 if (gnewsz > MALLOC_MAXCHUNK) 1280 gnewsz += malloc_guard; 1281 1282 if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p && 1283 !malloc_realloc) { 1284 size_t roldsz = PAGEROUND(goldsz); 1285 size_t rnewsz = PAGEROUND(gnewsz); 1286 1287 if (rnewsz < roldsz) { 1288 if (malloc_guard) { 1289 if (mprotect((char *)p + roldsz - malloc_guard, 1290 malloc_guard, PROT_READ | PROT_WRITE)) 1291 wrterror("mprotect"); 1292 if (mprotect((char *)p + rnewsz - malloc_guard, 1293 malloc_guard, PROT_NONE)) 1294 wrterror("mprotect"); 1295 } 1296 unmap(&g_pool, (char *)p + rnewsz, roldsz - rnewsz); 1297 r->size = gnewsz; 1298 return p; 1299 } else if (rnewsz == roldsz) { 1300 if (newsz > oldsz && malloc_junk) 1301 memset((char *)p + newsz, SOME_JUNK, 1302 rnewsz - malloc_guard - newsz); 1303 r->size = gnewsz; 1304 return p; 1305 } 1306 } 1307 if (newsz <= oldsz && newsz > oldsz / 2 && !malloc_realloc) { 1308 if (malloc_junk && newsz > 0) 1309 memset((char *)p + newsz, SOME_JUNK, oldsz - newsz); 1310 return p; 1311 } else if (newsz != oldsz || malloc_realloc) { 1312 q = omalloc(newsz, 0); 1313 if (q == NULL) 1314 return NULL; 1315 if (newsz != 0 && oldsz != 0) 1316 memcpy(q, p, oldsz < newsz ? oldsz : newsz); 1317 ofree(p); 1318 return q; 1319 } else 1320 return p; 1321 } 1322 1323 void * 1324 realloc(void *ptr, size_t size) 1325 { 1326 void *r; 1327 1328 _MALLOC_LOCK(); 1329 malloc_func = " in realloc():"; 1330 if (!g_pool.regions_total) { 1331 if (omalloc_init(&g_pool)) { 1332 _MALLOC_UNLOCK(); 1333 if (malloc_xmalloc) 1334 wrterror("out of memory"); 1335 errno = ENOMEM; 1336 return NULL; 1337 } 1338 } 1339 if (malloc_active++) { 1340 malloc_recurse(); 1341 return NULL; 1342 } 1343 1344 r = orealloc(ptr, size); 1345 1346 malloc_active--; 1347 _MALLOC_UNLOCK(); 1348 if (r == NULL && malloc_xmalloc) { 1349 wrterror("out of memory"); 1350 errno = ENOMEM; 1351 } 1352 return r; 1353 } 1354 1355 1356 #define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4)) 1357 1358 void * 1359 calloc(size_t nmemb, size_t size) 1360 { 1361 void *r; 1362 1363 _MALLOC_LOCK(); 1364 malloc_func = " in calloc():"; 1365 if (!g_pool.regions_total) { 1366 if (omalloc_init(&g_pool)) { 1367 _MALLOC_UNLOCK(); 1368 if (malloc_xmalloc) 1369 wrterror("out of memory"); 1370 errno = ENOMEM; 1371 return NULL; 1372 } 1373 } 1374 if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && 1375 nmemb > 0 && SIZE_MAX / nmemb < size) { 1376 _MALLOC_UNLOCK(); 1377 if (malloc_xmalloc) 1378 wrterror("out of memory"); 1379 errno = ENOMEM; 1380 return NULL; 1381 } 1382 1383 if (malloc_active++) { 1384 malloc_recurse(); 1385 return NULL; 1386 } 1387 1388 size *= nmemb; 1389 r = omalloc(size, 1); 1390 1391 malloc_active--; 1392 _MALLOC_UNLOCK(); 1393 if (r == NULL && malloc_xmalloc) { 1394 wrterror("out of memory"); 1395 errno = ENOMEM; 1396 } 1397 return r; 1398 } 1399