/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char rcsid[] = "$OpenBSD: malloc.c,v 1.67 2004/04/12 09:25:11 tdeval Exp $";
#endif /* LIBC_SCCS and not lint */

/*
 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are
 * related to internal conditions and consistency in malloc.c. This has
 * a noticeable runtime performance hit, and generally will not do you
 * any good unless you fiddle with the internals of malloc or want
 * to catch random pointer corruption as early as possible.
 */
#ifndef MALLOC_EXTRA_SANITY
#undef MALLOC_EXTRA_SANITY
#endif

/*
 * Defining MALLOC_STATS will enable you to call malloc_dump() and set
 * the [dD] options in the MALLOC_OPTIONS environment variable.
 * It has no run-time performance hit, but does pull in stdio...
 */
#ifndef MALLOC_STATS
#undef MALLOC_STATS
#endif

/*
 * What to use for Junk. This is the byte value we use to fill with
 * when the 'J' option is enabled.
 */
#define SOME_JUNK	0xd0		/* as in "Duh" :-) */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <limits.h>
#include <errno.h>

#include "thread_private.h"

/*
 * The basic parameters you can tweak.
 *
 * malloc_pageshift	pagesize = 1 << malloc_pageshift
 *			It's probably best if this is the native
 *			page size, but it shouldn't have to be.
 *
 * malloc_minsize	minimum size of an allocation in bytes.
 *			If this is too small it's too much work
 *			to manage them. This is also the smallest
 *			unit of alignment used for the storage
 *			returned by malloc/realloc.
 *
 */

#if defined(__OpenBSD__) && defined(__sparc__)
# define malloc_pageshift	13U
#endif /* __OpenBSD__ */

/*
 * No user serviceable parts behind this point.
 *
 * This structure describes a page worth of chunks.
 */

struct pginfo {
    struct pginfo *next;	/* next on the free list */
    void *page;			/* Pointer to the page */
    u_short size;		/* size of this page's chunks */
    u_short shift;		/* How far to shift for this size of chunk */
    u_short free;		/* How many free chunks */
    u_short total;		/* How many chunks */
    u_long bits[1];		/* Which chunks are free */
};

/*
 * This structure describes a number of free pages.
 */

struct pgfree {
    struct pgfree *next;	/* next run of free pages */
    struct pgfree *prev;	/* prev run of free pages */
    void *page;			/* pointer to free pages */
    void *end;			/* pointer to end of free pages */
    u_long size;		/* number of bytes free */
};

/*
 * How many bits per u_long in the bitmap.
 * Change only if not 8 bits/byte
 */
#define MALLOC_BITS	(8*sizeof(u_long))
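/*
 * For example, with 8-bit bytes this is 32 bits per bitmap word on a
 * 32-bit u_long and 64 on LP64 systems.  A 4 KB page split into
 * minimum-size (16 byte) chunks therefore needs 4096/16 = 256 bits,
 * i.e. 8 or 4 bits[] words tacked onto the end of struct pginfo.
 */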
/*
 * Magic values to put in the page_directory
 */
#define MALLOC_NOT_MINE	((struct pginfo*) 0)
#define MALLOC_FREE	((struct pginfo*) 1)
#define MALLOC_FIRST	((struct pginfo*) 2)
#define MALLOC_FOLLOW	((struct pginfo*) 3)
#define MALLOC_MAGIC	((struct pginfo*) 4)

#ifndef malloc_pageshift
#define malloc_pageshift	(PGSHIFT)
#endif

#ifndef malloc_minsize
#define malloc_minsize		16U
#endif

#ifndef malloc_pageshift
#error "malloc_pageshift undefined"
#endif

#if !defined(malloc_pagesize)
#define malloc_pagesize		(1UL<<malloc_pageshift)
#endif

#if ((1UL<<malloc_pageshift) != malloc_pagesize)
#error "(1UL<<malloc_pageshift) != malloc_pagesize"
#endif

#ifndef malloc_maxsize
#define malloc_maxsize		((malloc_pagesize)>>1)
#endif

/* A mask for the offset inside a page. */
#define malloc_pagemask		((malloc_pagesize)-1)

#define pageround(foo)	(((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
#define ptr2index(foo)	(((u_long)(foo) >> malloc_pageshift)-malloc_origo)

/* fd of /dev/zero */
#ifdef USE_DEV_ZERO
static int fdzero;
#define MMAP_FD	fdzero
#define INIT_MMAP() \
    { if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \
        wrterror("open of /dev/zero\n"); }
#else
#define MMAP_FD (-1)
#define INIT_MMAP()
#endif

/* Set when initialization has been done */
static unsigned int malloc_started;

/* Number of free pages we cache */
static unsigned int malloc_cache = 16;

/* The offset from pagenumber to index into the page directory */
static u_long malloc_origo;

/* The last index in the page directory we care about */
static u_long last_index;

/* Pointer to page directory. Allocated "as if with" malloc */
static struct pginfo **page_dir;

/* How many slots in the page directory */
static size_t malloc_ninfo;

/* Free pages line up here */
static struct pgfree free_list;

/* Abort(), user doesn't handle problems. */
static int malloc_abort = 2;

/* Are we trying to die ? */
static int suicide;

#ifdef MALLOC_STATS
/* dump statistics */
static int malloc_stats;
#endif

/* avoid outputting warnings? */
static int malloc_silent;

/* always realloc ? */
static int malloc_realloc;

/* mprotect free pages PROT_NONE? */
static int malloc_freeprot;

/* use guard pages after allocations? */
static int malloc_guard = 0;

#if defined(__FreeBSD__) || (defined(__OpenBSD__) && defined(MADV_FREE))
/* pass the kernel a hint on free pages ? */
static int malloc_hint;
#endif

/* xmalloc behaviour ? */
static int malloc_xmalloc;

/* zero fill ? */
static int malloc_zero;

/* junk fill ? */
static int malloc_junk;

#ifdef __FreeBSD__
/* utrace ? */
static int malloc_utrace;

struct ut { void *p; size_t s; void *r; };

void utrace(struct ut *, int);

#define UTRACE(a, b, c) \
    if (malloc_utrace) \
        {struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);}
#else /* !__FreeBSD__ */
#define UTRACE(a,b,c)
#endif

/* Status of malloc. */
static int malloc_active;

/* my last break. */
static void *malloc_brk;

/* one location cache for free-list holders */
static struct pgfree *px;

/* compile-time options */
char *malloc_options;

/* Name of the current public function */
static char *malloc_func;

/* Macro for mmap */
#define MMAP(size) \
    mmap((void *)0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
        MMAP_FD, (off_t)0)
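/*
 * MMAP(size) obtains zero-filled, private anonymous memory straight from
 * the kernel; with MAP_ANON no file descriptor is needed, so MMAP_FD is
 * simply -1.  Systems built with USE_DEV_ZERO get the same effect by
 * mapping the /dev/zero descriptor opened in INIT_MMAP() above.
 */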
/*
 * Necessary function declarations
 */
static int extend_pgdir(u_long index);
static void *imalloc(size_t size);
static void ifree(void *ptr);
static void *irealloc(void *ptr, size_t size);
static void *malloc_bytes(size_t size);

#ifdef MALLOC_STATS
void
malloc_dump(FILE *fd)
{
    struct pginfo **pd;
    struct pgfree *pf;
    int j;

    pd = page_dir;

    /* print out all the pages */
    for (j = 0; j <= last_index; j++) {
        fprintf(fd, "%08lx %5d ", (j+malloc_origo) << malloc_pageshift, j);
        if (pd[j] == MALLOC_NOT_MINE) {
            for (j++; j <= last_index && pd[j] == MALLOC_NOT_MINE; j++)
                ;
            j--;
            fprintf(fd, ".. %5d not mine\n", j);
        } else if (pd[j] == MALLOC_FREE) {
            for (j++; j <= last_index && pd[j] == MALLOC_FREE; j++)
                ;
            j--;
            fprintf(fd, ".. %5d free\n", j);
        } else if (pd[j] == MALLOC_FIRST) {
            for (j++; j <= last_index && pd[j] == MALLOC_FOLLOW; j++)
                ;
            j--;
            fprintf(fd, ".. %5d in use\n", j);
        } else if (pd[j] < MALLOC_MAGIC) {
            fprintf(fd, "(%p)\n", pd[j]);
        } else {
            fprintf(fd, "%p %d (of %d) x %d @ %p --> %p\n",
                pd[j], pd[j]->free, pd[j]->total,
                pd[j]->size, pd[j]->page, pd[j]->next);
        }
    }

    for (pf = free_list.next; pf; pf = pf->next) {
        fprintf(fd, "Free: @%p [%p...%p[ %ld ->%p <-%p\n",
            pf, pf->page, pf->end, pf->size, pf->prev, pf->next);
        if (pf == pf->next) {
            fprintf(fd, "Free_list loops\n");
            break;
        }
    }

    /* print out various info */
    fprintf(fd, "Minsize\t%d\n", malloc_minsize);
    fprintf(fd, "Maxsize\t%d\n", malloc_maxsize);
    fprintf(fd, "Pagesize\t%lu\n", (u_long)malloc_pagesize);
    fprintf(fd, "Pageshift\t%d\n", malloc_pageshift);
    fprintf(fd, "FirstPage\t%ld\n", malloc_origo);
    fprintf(fd, "LastPage\t%ld %lx\n", last_index+malloc_pageshift,
        (last_index + malloc_pageshift) << malloc_pageshift);
    fprintf(fd, "Break\t%ld\n", (u_long)sbrk(0) >> malloc_pageshift);
}
#endif /* MALLOC_STATS */

extern char *__progname;

static void
wrterror(char *p)
{
    char *q = " error: ";
    struct iovec iov[4];

    iov[0].iov_base = __progname;
    iov[0].iov_len = strlen(__progname);
    iov[1].iov_base = malloc_func;
    iov[1].iov_len = strlen(malloc_func);
    iov[2].iov_base = q;
    iov[2].iov_len = strlen(q);
    iov[3].iov_base = p;
    iov[3].iov_len = strlen(p);
    writev(STDERR_FILENO, iov, 4);

    suicide = 1;
#ifdef MALLOC_STATS
    if (malloc_stats)
        malloc_dump(stderr);
#endif /* MALLOC_STATS */
    malloc_active--;
    abort();
}

static void
wrtwarning(char *p)
{
    char *q = " warning: ";
    struct iovec iov[4];

    if (malloc_abort)
        wrterror(p);
    else if (malloc_silent)
        return;

    iov[0].iov_base = __progname;
    iov[0].iov_len = strlen(__progname);
    iov[1].iov_base = malloc_func;
    iov[1].iov_len = strlen(malloc_func);
    iov[2].iov_base = q;
    iov[2].iov_len = strlen(q);
    iov[3].iov_base = p;
    iov[3].iov_len = strlen(p);
    writev(STDERR_FILENO, iov, 4);
}
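/*
 * Both routines emit a single line of the form
 *     progname in malloc(): error: allocation failed
 * assembled into an iovec and written with writev(2), so the diagnostic
 * path never goes through stdio (which could itself call malloc).
 */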
#ifdef MALLOC_STATS
static void
malloc_exit(void)
{
    FILE *fd = fopen("malloc.out", "a");
    char *q = "malloc() warning: Couldn't dump stats\n";
    if (fd != NULL) {
        malloc_dump(fd);
        fclose(fd);
    } else
        write(STDERR_FILENO, q, strlen(q));
}
#endif /* MALLOC_STATS */

/*
 * Allocate a number of pages from the OS
 */
static void *
map_pages(size_t pages)
{
    caddr_t result, tail;

    result = (caddr_t)pageround((u_long)sbrk(0));
    pages <<= malloc_pageshift;
    if (pages > SIZE_T_MAX - (size_t)result) {
#ifdef MALLOC_EXTRA_SANITY
        wrtwarning("(ES): overflow in map_pages fails\n");
#endif /* MALLOC_EXTRA_SANITY */
        errno = ENOMEM;
        return (NULL);
    }
    tail = result + pages + malloc_guard;

    if (brk(tail) == (char *)-1) {
#ifdef MALLOC_EXTRA_SANITY
        wrtwarning("(ES): map_pages fails\n");
#endif /* MALLOC_EXTRA_SANITY */
        return (NULL);
    }
    if (malloc_guard)
        mprotect(result + pages, malloc_pagesize, PROT_NONE);

    last_index = ptr2index(tail) - 1;
    malloc_brk = tail;

    if ((last_index+1) >= malloc_ninfo && !extend_pgdir(last_index))
        return (NULL);

    return (result);
}

/*
 * Extend page directory
 */
static int
extend_pgdir(u_long index)
{
    struct pginfo **new, **old;
    size_t i, oldlen;

    /* Make it this many pages */
    i = index * sizeof *page_dir;
    i /= malloc_pagesize;
    i += 2;

    /* remember the old mapping size */
    oldlen = malloc_ninfo * sizeof *page_dir;

    /*
     * NOTE: we allocate new pages and copy the directory rather than tempt
     * fate by trying to "grow" the region.. There is nothing to prevent
     * us from accidentally re-mapping space that's been allocated by our
     * caller via dlopen() or other mmap().
     *
     * The copy problem is not too bad, as there is 4K of page index per
     * 4MB of malloc arena.
     *
     * We can totally avoid the copy if we open a file descriptor to associate
     * the anon mappings with. Then, when we remap the pages at the new
     * address, the old pages will be "magically" remapped.. But this means
     * keeping open a "secret" file descriptor.....
     */

    /* Get new pages */
    new = (struct pginfo**) MMAP(i * malloc_pagesize);
    if (new == MAP_FAILED)
        return (0);

    /* Copy the old stuff */
    memcpy(new, page_dir, malloc_ninfo * sizeof *page_dir);

    /* register the new size */
    malloc_ninfo = i * malloc_pagesize / sizeof *page_dir;

    /* swap the pointers */
    old = page_dir;
    page_dir = new;

    /* Now free the old stuff */
    munmap(old, oldlen);
    return (1);
}
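/*
 * The directory itself stays small: with 4 KB pages and 8-byte pginfo
 * pointers one directory page indexes 512 arena pages (2 MB); with
 * 4-byte pointers it covers 1024 pages (4 MB), which is where the
 * "4K of page index per 4MB of malloc arena" figure above comes from.
 */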
/*
 * Initialize the world
 */
static void
malloc_init(void)
{
    char *p, b[64];
    int i, j;
    int save_errno = errno;

    _MALLOC_LOCK_INIT();

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

    for (i = 0; i < 3; i++) {
        if (i == 0) {
            j = readlink("/etc/malloc.conf", b, sizeof b - 1);
            if (j <= 0)
                continue;
            b[j] = '\0';
            p = b;
        } else if (i == 1) {
            if (issetugid() == 0)
                p = getenv("MALLOC_OPTIONS");
            else
                continue;
        } else if (i == 2) {
            p = malloc_options;
        }
        for (; p != NULL && *p != '\0'; p++) {
            switch (*p) {
            case '>': malloc_cache <<= 1; break;
            case '<': malloc_cache >>= 1; break;
            case 'a': malloc_abort = 0; break;
            case 'A': malloc_abort = 1; break;
#ifdef MALLOC_STATS
            case 'd': malloc_stats = 0; break;
            case 'D': malloc_stats = 1; break;
#endif /* MALLOC_STATS */
            case 'f': malloc_freeprot = 0; break;
            case 'F': malloc_freeprot = 1; break;
            case 'g': malloc_guard = 0; break;
            case 'G': malloc_guard = malloc_pagesize; break;
#if defined(__FreeBSD__) || (defined(__OpenBSD__) && defined(MADV_FREE))
            case 'h': malloc_hint = 0; break;
            case 'H': malloc_hint = 1; break;
#endif /* __FreeBSD__ */
            case 'r': malloc_realloc = 0; break;
            case 'R': malloc_realloc = 1; break;
            case 'j': malloc_junk = 0; break;
            case 'J': malloc_junk = 1; break;
            case 'n': malloc_silent = 0; break;
            case 'N': malloc_silent = 1; break;
#ifdef __FreeBSD__
            case 'u': malloc_utrace = 0; break;
            case 'U': malloc_utrace = 1; break;
#endif /* __FreeBSD__ */
            case 'x': malloc_xmalloc = 0; break;
            case 'X': malloc_xmalloc = 1; break;
            case 'z': malloc_zero = 0; break;
            case 'Z': malloc_zero = 1; break;
            default:
                j = malloc_abort;
                malloc_abort = 0;
                wrtwarning("unknown char in MALLOC_OPTIONS\n");
                malloc_abort = j;
                break;
            }
        }
    }
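    /*
     * Options are read from three places, later ones overriding earlier
     * ones: the target of the /etc/malloc.conf symlink, the MALLOC_OPTIONS
     * environment variable (ignored for set-id programs), and the
     * malloc_options global a program may set itself.  For example,
     * "ln -s AJ /etc/malloc.conf" or MALLOC_OPTIONS=AJ turns warnings
     * into aborts and enables junk filling.
     */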
    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
        malloc_junk = 1;

#ifdef MALLOC_STATS
    if (malloc_stats && (atexit(malloc_exit) == -1))
        wrtwarning("atexit(2) failed. Will not be able to dump malloc stats on exit\n");
#endif /* MALLOC_STATS */

    /* Allocate one page for the page directory */
    page_dir = (struct pginfo **) MMAP(malloc_pagesize);

    if (page_dir == MAP_FAILED)
        wrterror("mmap(2) failed, check limits\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory;
     */
    malloc_origo = ((u_long)pageround((u_long)sbrk(0))) >> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Been here, done that */
    malloc_started++;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */

    if (!malloc_cache)
        malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithly (kaleb@x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = (struct pgfree *) imalloc (sizeof *px);
    errno = save_errno;
}

/*
 * Allocate a number of complete pages
 */
static void *
malloc_pages(size_t size)
{
    void *p, *delay_free = NULL;
    int i;
    struct pgfree *pf;
    u_long index;

    size = pageround(size) + malloc_guard;

    p = NULL;
    /* Look for free pages before asking for more */
    for (pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
        if (pf->size & malloc_pagemask)
            wrterror("(ES): junk length entry on free_list\n");
        if (!pf->size)
            wrterror("(ES): zero length entry on free_list\n");
        if (pf->page == pf->end)
            wrterror("(ES): zero entry on free_list\n");
        if (pf->page > pf->end)
            wrterror("(ES): sick entry on free_list\n");
        if ((void*)pf->page >= (void*)sbrk(0))
            wrterror("(ES): entry on free_list past brk\n");
        if (page_dir[ptr2index(pf->page)] != MALLOC_FREE)
            wrterror("(ES): non-free first page on free-list\n");
        if (page_dir[ptr2index(pf->end)-1] != MALLOC_FREE)
            wrterror("(ES): non-free last page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

        if (pf->size < size)
            continue;

        if (pf->size == size) {
            p = pf->page;
            if (pf->next != NULL)
                pf->next->prev = pf->prev;
            pf->prev->next = pf->next;
            delay_free = pf;
            break;
        }

        p = pf->page;
        pf->page = (char *)pf->page + size;
        pf->size -= size;
        break;
    }

    size -= malloc_guard;

#ifdef MALLOC_EXTRA_SANITY
    if (p != NULL && page_dir[ptr2index(p)] != MALLOC_FREE)
        wrterror("(ES): allocated non-free page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

    if ((malloc_guard || malloc_freeprot) && p != NULL)
        mprotect(p, size, PROT_READ|PROT_WRITE);

    size >>= malloc_pageshift;

    /* Map new pages */
    if (p == NULL)
        p = map_pages(size);

    if (p != NULL) {

        index = ptr2index(p);
        page_dir[index] = MALLOC_FIRST;
        for (i = 1; i < size; i++)
            page_dir[index+i] = MALLOC_FOLLOW;

        if (malloc_junk)
            memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    if (delay_free) {
        if (px == NULL)
            px = delay_free;
        else
            ifree(delay_free);
    }

    return (p);
}
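/*
 * Page runs are recorded purely through the directory: a 3-page
 * allocation, say, gets MALLOC_FIRST in its own slot and MALLOC_FOLLOW
 * in the next two, which is how ifree() and irealloc() later recover
 * both the start and the length of the run without any per-allocation
 * header.
 */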
/*
 * Allocate a page of fragments
 */

static __inline__ int
malloc_make_chunks(int bits)
{
    struct pginfo *bp;
    void *pp;
    int i, k, l;

    /* Allocate a new bucket */
    pp = malloc_pages((size_t)malloc_pagesize);
    if (pp == NULL)
        return (0);

    /* Find length of admin structure */
    l = sizeof *bp - sizeof(u_long);
    l += sizeof(u_long) *
        (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

    /* Don't waste more than two chunks on this */
    /*
     * If we are to allocate a memory protected page for the malloc(0)
     * case (when bits=0), it must be from a different page than the
     * pginfo page.
     * --> Treat it like the big chunk alloc, get a second data page.
     */
    if (bits != 0 && (1UL<<(bits)) <= l+l) {
        bp = (struct pginfo *)pp;
    } else {
        bp = (struct pginfo *)imalloc(l);
        if (bp == NULL) {
            ifree(pp);
            return (0);
        }
    }

    /* memory protect the page allocated in the malloc(0) case */
    if (bits == 0) {

        bp->size = 0;
        bp->shift = 1;
        i = malloc_minsize-1;
        while (i >>= 1)
            bp->shift++;
        bp->total = bp->free = malloc_pagesize >> bp->shift;
        bp->page = pp;

        k = mprotect(pp, malloc_pagesize, PROT_NONE);
        if (k < 0) {
            ifree(pp);
            ifree(bp);
            return (0);
        }
    } else {
        bp->size = (1UL<<bits);
        bp->shift = bits;
        bp->total = bp->free = malloc_pagesize >> bits;
        bp->page = pp;
    }

    /* set all valid bits in the bitmap */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for (; k-i >= MALLOC_BITS; i += MALLOC_BITS)
        bp->bits[i / MALLOC_BITS] = ~0UL;

    for (; i < k; i++)
        bp->bits[i/MALLOC_BITS] |= 1UL<<(i%MALLOC_BITS);

    if (bp == bp->page) {
        /* Mark the ones we stole for ourselves */
        for (i = 0; l > 0; i++) {
            bp->bits[i/MALLOC_BITS] &= ~(1UL<<(i%MALLOC_BITS));
            bp->free--;
            bp->total--;
            l -= (1 << bits);
        }
    }

    /* MALLOC_LOCK */

    page_dir[ptr2index(pp)] = bp;

    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return (1);
}

/*
 * Allocate a fragment
 */
static void *
malloc_bytes(size_t size)
{
    int i, j;
    u_long u;
    struct pginfo *bp;
    int k;
    u_long *lp;

    /* Don't bother with anything less than this */
    /* unless we have a malloc(0) request */
    if (size != 0 && size < malloc_minsize)
        size = malloc_minsize;

    /* Find the right bucket */
    if (size == 0)
        j = 0;
    else {
        j = 1;
        i = size-1;
        while (i >>= 1)
            j++;
    }
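    /*
     * j is now the bucket index, i.e. the request rounded up to a power
     * of two: a 100-byte request gives j == 7 (128-byte chunks), anything
     * from 1 byte up to the default 16-byte malloc_minsize gives j == 4,
     * and malloc(0) gets the special, mprotected bucket 0 set up above.
     */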
    /* If it's empty, make a page more of that size chunks */
    if (page_dir[j] == NULL && !malloc_make_chunks(j))
        return (NULL);

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty */
    for (lp = bp->bits; !*lp; lp++)
        ;

    /* Find that bit, and tweak it */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
        u += u;
        k++;
    }

    if (malloc_guard) {
        /* Walk to a random position. */
        i = arc4random() % bp->free;
        while (i > 0) {
            u += u;
            k++;
            if (k >= MALLOC_BITS) {
                lp++;
                u = 1;
                k = 0;
            }
#ifdef MALLOC_EXTRA_SANITY
            if (lp - bp->bits > (bp->total - 1) / MALLOC_BITS)
                wrterror("chunk overflow\n");
#endif /* MALLOC_EXTRA_SANITY */
            if (*lp & u)
                i--;
        }
    }
    *lp ^= u;

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
        page_dir[j] = bp->next;
        bp->next = NULL;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    if (malloc_junk && bp->size != 0)
        memset((char *)bp->page + k, SOME_JUNK, bp->size);

    return ((u_char *)bp->page + k);
}

/*
 * Allocate a piece of memory
 */
static void *
imalloc(size_t size)
{
    void *result;

    if (!malloc_started)
        malloc_init();

    if (suicide)
        abort();

    if ((size + malloc_pagesize) < size) {	/* Check for overflow */
        result = NULL;
        errno = ENOMEM;
    }
    else if (size <= malloc_maxsize)
        result = malloc_bytes(size);
    else
        result = malloc_pages(size);

    if (malloc_abort == 1 && result == NULL)
        wrterror("allocation failed\n");

    if (malloc_zero && result != NULL)
        memset(result, 0, size);

    return (result);
}
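/*
 * Requests up to malloc_maxsize (half a page, so 2048 bytes with 4 KB
 * pages) are served as chunks out of a shared page; anything larger
 * gets whole pages of its own.  The (size + malloc_pagesize) < size
 * test rejects sizes so large that rounding them up to a page boundary
 * would wrap around.
 */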
/*
 * Change the size of an allocation.
 */
static void *
irealloc(void *ptr, size_t size)
{
    void *p;
    u_long osize, index;
    struct pginfo **mp;
    int i;

    if (suicide)
        abort();

    if (!malloc_started) {
        wrtwarning("malloc() has never been called\n");
        return (NULL);
    }

    index = ptr2index(ptr);

    if (index < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense\n");
        return (NULL);
    }

    if (index > last_index) {
        wrtwarning("junk pointer, too high to make sense\n");
        return (NULL);
    }

    mp = &page_dir[index];

    if (*mp == MALLOC_FIRST) {			/* Page allocation */

        /* Check the pointer */
        if ((u_long)ptr & malloc_pagemask) {
            wrtwarning("modified (page-) pointer\n");
            return (NULL);
        }

        /* Find the size in bytes */
        for (osize = malloc_pagesize; *(++mp) == MALLOC_FOLLOW;)
            osize += malloc_pagesize;

        if (!malloc_realloc &&			/* Unless we have to, */
            size <= osize &&			/* .. or are too small, */
            size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
            if (malloc_junk)
                memset((char *)ptr + size, SOME_JUNK, osize-size);
            return (ptr);			/* ..don't do anything else. */
        }

    } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */

        /* Check the pointer for sane values */
        if ((u_long)ptr & ((1UL<<((*mp)->shift))-1)) {
            wrtwarning("modified (chunk-) pointer\n");
            return (NULL);
        }

        /* Find the chunk index in the page */
        i = ((u_long)ptr & malloc_pagemask) >> (*mp)->shift;

        /* Verify that it isn't a free chunk already */
        if ((*mp)->bits[i/MALLOC_BITS] & (1UL<<(i%MALLOC_BITS))) {
            wrtwarning("chunk is already free\n");
            return (NULL);
        }

        osize = (*mp)->size;

        if (!malloc_realloc &&			/* Unless we have to, */
            size <= osize &&			/* ..or are too small, */
            (size > osize/2 ||			/* ..or could use a smaller size, */
            osize == malloc_minsize)) {		/* ..(if there is one) */
            if (malloc_junk)
                memset((char *)ptr + size, SOME_JUNK, osize-size);
            return (ptr);			/* ..don't do anything else. */
        }

    } else {
        wrtwarning("pointer to wrong page\n");
        return (NULL);
    }

    p = imalloc(size);

    if (p != NULL) {
        /* copy the lesser of the two sizes, and free the old one */
        /* Don't move from/to 0 sized region !!! */
        if (osize != 0 && size != 0) {
            if (osize < size)
                memcpy(p, ptr, osize);
            else
                memcpy(p, ptr, size);
        }
        ifree(ptr);
    }
    return (p);
}
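/*
 * In other words, a shrinking realloc stays in place whenever the old
 * storage is still a good fit: an allocation living in a 128-byte chunk
 * that is resized to 70 bytes is merely junk-filled at the tail, while
 * resizing it to 50 bytes moves it into a 64-byte chunk.  The 'R'
 * option (malloc_realloc) forces the move in every case.
 */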
/*
 * Free a sequence of pages
 */

static __inline__ void
free_pages(void *ptr, u_long index, struct pginfo *info)
{
    u_long i, l;
    struct pgfree *pf, *pt=NULL;
    void *tail;

    if (info == MALLOC_FREE) {
        wrtwarning("page is already free\n");
        return;
    }

    if (info != MALLOC_FIRST) {
        wrtwarning("pointer to wrong page\n");
        return;
    }

    if ((u_long)ptr & malloc_pagemask) {
        wrtwarning("modified (page-) pointer\n");
        return;
    }

    /* Count how many pages and mark them free at the same time */
    page_dir[index] = MALLOC_FREE;
    for (i = 1; page_dir[index+i] == MALLOC_FOLLOW; i++)
        page_dir[index + i] = MALLOC_FREE;

    l = i << malloc_pageshift;

    if (malloc_junk)
        memset(ptr, SOME_JUNK, l);

#if defined(__FreeBSD__) || (defined(__OpenBSD__) && defined(MADV_FREE))
    if (malloc_hint)
        madvise(ptr, l, MADV_FREE);
#endif

    if (malloc_guard) {
        page_dir[index + i] = MALLOC_FREE;
        l += malloc_guard;
    }
    tail = (char *)ptr+l;

    if (malloc_freeprot)
        mprotect(ptr, tail - ptr, PROT_NONE);

    /* add to free-list */
    if (px == NULL)
        px = imalloc(sizeof *px);	/* This cannot fail... */
    px->page = ptr;
    px->end = tail;
    px->size = l;

    if (free_list.next == NULL) {

        /* Nothing on free list, put this at head */
        px->next = free_list.next;
        px->prev = &free_list;
        free_list.next = px;
        pf = px;
        px = NULL;

    } else {

        /* Find the right spot, leave pf pointing to the modified entry. */

        for (pf = free_list.next; pf->end < ptr && pf->next != NULL;
            pf = pf->next)
            ;	/* Race ahead here */

        if (pf->page > tail) {
            /* Insert before entry */
            px->next = pf;
            px->prev = pf->prev;
            pf->prev = px;
            px->prev->next = px;
            pf = px;
            px = NULL;
        } else if (pf->end == ptr) {
            /* Append to the previous entry */
            pf->end = (char *)pf->end + l;
            pf->size += l;
            if (pf->next != NULL && pf->end == pf->next->page) {
                /* And collapse the next too. */
                pt = pf->next;
                pf->end = pt->end;
                pf->size += pt->size;
                pf->next = pt->next;
                if (pf->next != NULL)
                    pf->next->prev = pf;
            }
        } else if (pf->page == tail) {
            /* Prepend to entry */
            pf->size += l;
            pf->page = ptr;
        } else if (pf->next == NULL) {
            /* Append at tail of chain */
            px->next = NULL;
            px->prev = pf;
            pf->next = px;
            pf = px;
            px = NULL;
        } else {
            wrterror("freelist is destroyed\n");
        }
    }

    /* Return something to OS ? */
    if (pf->next == NULL &&		/* If we're the last one, */
        pf->size > malloc_cache &&	/* ..and the cache is full, */
        pf->end == malloc_brk &&	/* ..and none behind us, */
        malloc_brk == sbrk(0)) {	/* ..and it's OK to do... */

        /*
         * Keep the cache intact. Notice that the '>' above guarantees that
         * the pf will always have at least one page afterwards.
         */
        pf->end = (char *)pf->page + malloc_cache;
        pf->size = malloc_cache;

        brk(pf->end);
        malloc_brk = pf->end;

        index = ptr2index(pf->end);

        for (i = index; i <= last_index;)
            page_dir[i++] = MALLOC_NOT_MINE;

        last_index = index - 1;

        /* XXX: We could realloc/shrink the pagedir here I guess. */
    }
    if (pt != NULL)
        ifree(pt);
}

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */

/* ARGSUSED */
static __inline__ void
free_bytes(void *ptr, int index, struct pginfo *info)
{
    int i;
    struct pginfo **mp;
    void *vp;

    /* Find the chunk number on the page */
    i = ((u_long)ptr & malloc_pagemask) >> info->shift;

    if ((u_long)ptr & ((1UL<<(info->shift))-1)) {
        wrtwarning("modified (chunk-) pointer\n");
        return;
    }

    if (info->bits[i/MALLOC_BITS] & (1UL<<(i%MALLOC_BITS))) {
        wrtwarning("chunk is already free\n");
        return;
    }

    if (malloc_junk && info->size != 0)
        memset(ptr, SOME_JUNK, info->size);

    info->bits[i/MALLOC_BITS] |= 1UL<<(i%MALLOC_BITS);
    info->free++;

    if (info->size != 0)
        mp = page_dir + info->shift;
    else
        mp = page_dir;

    if (info->free == 1) {

        /* Page became non-full */

        /* Insert in address order */
        while (*mp && (*mp)->next && (*mp)->next->page < info->page)
            mp = &(*mp)->next;
        info->next = *mp;
        *mp = info;
        return;
    }

    if (info->free != info->total)
        return;

    /* Find & remove this page in the queue */
    while (*mp != info) {
        mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
        if (!*mp)
            wrterror("(ES): Not on queue\n");
#endif /* MALLOC_EXTRA_SANITY */
    }
    *mp = info->next;

    /* Free the page & the info structure if need be */
    page_dir[ptr2index(info->page)] = MALLOC_FIRST;

    /* If the page was mprotected, unprotect it before releasing it */
    if (info->size == 0) {
        mprotect(info->page, malloc_pagesize, PROT_READ|PROT_WRITE);
        /* Do we have to care if mprotect succeeds here ? */
    }

    vp = info->page;		/* Order is important ! */
    if (vp != (void*)info)
        ifree(info);
    ifree(vp);
}

static void
ifree(void *ptr)
{
    struct pginfo *info;
    u_long index;

    /* This is legal */
    if (ptr == NULL)
        return;

    if (!malloc_started) {
        wrtwarning("malloc() has never been called\n");
        return;
    }

    /* If we're already sinking, don't make matters any worse. */
    if (suicide)
        return;

    index = ptr2index(ptr);

    if (index < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense\n");
        return;
    }

    if (index > last_index) {
        wrtwarning("junk pointer, too high to make sense\n");
        return;
    }

    info = page_dir[index];

    if (info < MALLOC_MAGIC)
        free_pages(ptr, index, info);
    else
        free_bytes(ptr, index, info);
    return;
}
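/*
 * The dispatch above works because the magic markers are tiny fake
 * pointers (0 through 4) while a real struct pginfo lives at a proper
 * address: any page_dir entry below MALLOC_MAGIC describes whole pages
 * (or isn't ours at all), anything at or above it is the chunk header
 * for that page.
 */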
/*
 * Common function for handling recursion. Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
static void
malloc_recurse(void)
{
    static int noprint;

    if (noprint == 0)
        wrtwarning("recursive call\n");
    noprint = 1;
    malloc_active--;
    _MALLOC_UNLOCK();
    errno = EDEADLK;
}

/*
 * These are the public exported interface routines.
 */
void *
malloc(size_t size)
{
    void *r;

    _MALLOC_LOCK();
    malloc_func = " in malloc():";
    if (malloc_active++) {
        malloc_recurse();
        return (NULL);
    }
    r = imalloc(size);
    UTRACE(0, size, r);
    malloc_active--;
    _MALLOC_UNLOCK();
    if (malloc_xmalloc && r == NULL)
        wrterror("out of memory\n");
    return (r);
}

void
free(void *ptr)
{
    _MALLOC_LOCK();
    malloc_func = " in free():";
    if (malloc_active++) {
        malloc_recurse();
        return;
    }
    ifree(ptr);
    UTRACE(ptr, 0, 0);
    malloc_active--;
    _MALLOC_UNLOCK();
    return;
}

void *
realloc(void *ptr, size_t size)
{
    void *r;

    _MALLOC_LOCK();
    malloc_func = " in realloc():";
    if (malloc_active++) {
        malloc_recurse();
        return (NULL);
    }
    if (ptr == NULL) {
        r = imalloc(size);
    } else {
        r = irealloc(ptr, size);
    }
    UTRACE(ptr, size, r);
    malloc_active--;
    _MALLOC_UNLOCK();
    if (malloc_xmalloc && r == NULL)
        wrterror("out of memory\n");
    return (r);
}
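/*
 * With the 'X' option (malloc_xmalloc) these wrappers never return NULL:
 * a failed malloc() or realloc() calls wrterror() and aborts instead, so
 * callers that do not check the return value fail loudly rather than
 * dereferencing NULL later.
 */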