/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char rcsid[] = "$OpenBSD: malloc.c,v 1.39 2000/03/01 03:09:08 deraadt Exp $";
#endif /* LIBC_SCCS and not lint */

/*
 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are
 * related to internal conditions and consistency in malloc.c. This has
 * a noticeable runtime performance hit, and generally will not do you
 * any good unless you fiddle with the internals of malloc or want
 * to catch random pointer corruption as early as possible.
 */
#ifndef MALLOC_EXTRA_SANITY
#undef MALLOC_EXTRA_SANITY
#endif

/*
 * Defining MALLOC_STATS will enable you to call malloc_dump() and set
 * the [dD] options in the MALLOC_OPTIONS environment variable.
 * It has no run-time performance hit, but does pull in stdio...
 */
#ifndef MALLOC_STATS
#undef MALLOC_STATS
#endif

/*
 * What to use for Junk. This is the byte value we use to fill with
 * when the 'J' option is enabled.
 */
#define SOME_JUNK	0xd0	/* as in "Duh" :-) */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>

/*
 * The basic parameters you can tweak.
 *
 * malloc_pageshift	pagesize = 1 << malloc_pageshift
 *			It's probably best if this is the native
 *			page size, but it shouldn't have to be.
 *
 * malloc_minsize	minimum size of an allocation in bytes.
 *			If this is too small it's too much work
 *			to manage them. This is also the smallest
 *			unit of alignment used for the storage
 *			returned by malloc/realloc.
 *
 */

#if defined(__i386__) && defined(__FreeBSD__)
# define malloc_pageshift	12U
# define malloc_minsize		16U
#endif /* __i386__ && __FreeBSD__ */

#if defined(__sparc__) && !defined(__OpenBSD__)
# define malloc_pageshift	12U
# define malloc_minsize		16U
# define MAP_ANON		(0)
# define USE_DEV_ZERO
# define MADV_FREE		MADV_DONTNEED
#endif /* __sparc__ */
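
/*
 * Worked example of the parameters above (illustrative): with
 * malloc_pageshift 12U the page size is 1U << 12 = 4096 bytes, and with
 * malloc_minsize 16U any request smaller than 16 bytes is rounded up to
 * a 16-byte chunk, which is also the alignment of everything
 * malloc/realloc hand out.
 */
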
/* Insert your combination here... */
#if defined(__FOOCPU__) && defined(__BAROS__)
# define malloc_pageshift	12U
# define malloc_minsize		16U
#endif /* __FOOCPU__ && __BAROS__ */

#if defined(__OpenBSD__) && !defined(__sparc__)
# define malloc_pageshift	(PGSHIFT)
# define malloc_minsize		16U
#endif /* __OpenBSD__ */

#ifdef _THREAD_SAFE
# include "thread_private.h"
# if 0
  /* kernel threads */
# include <pthread.h>
static pthread_mutex_t malloc_lock;
# define THREAD_LOCK()		pthread_mutex_lock(&malloc_lock)
# define THREAD_UNLOCK()	pthread_mutex_unlock(&malloc_lock)
# define THREAD_LOCK_INIT()	pthread_mutex_init(&malloc_lock, 0);
# else
  /* user threads */
# include "spinlock.h"
static spinlock_t malloc_lock = _SPINLOCK_INITIALIZER;
# define THREAD_LOCK()		if (__isthreaded) _SPINLOCK(&malloc_lock)
# define THREAD_UNLOCK()	if (__isthreaded) _SPINUNLOCK(&malloc_lock)
# define THREAD_LOCK_INIT()
/*
 * Malloc can't use the wrapped write() if it fails very early, so
 * we use the unwrapped syscall _thread_sys_write()
 */
# define write _thread_sys_write
ssize_t write __P((int, const void *, size_t));
# undef malloc
# undef realloc
# undef free
# endif
#else
  /* no threads */
# define THREAD_LOCK()
# define THREAD_UNLOCK()
# define THREAD_LOCK_INIT()
#endif

/*
 * No user serviceable parts behind this point.
 *
 * This structure describes a page worth of chunks.
 */

struct pginfo {
    struct pginfo *next;    /* next on the free list */
    void          *page;    /* Pointer to the page */
    u_short        size;    /* size of this page's chunks */
    u_short        shift;   /* How far to shift for this size chunks */
    u_short        free;    /* How many free chunks */
    u_short        total;   /* How many chunks */
    u_long         bits[1]; /* Which chunks are free */
};

/*
 * This structure describes a number of free pages.
 */

struct pgfree {
    struct pgfree *next;    /* next run of free pages */
    struct pgfree *prev;    /* prev run of free pages */
    void          *page;    /* pointer to free pages */
    void          *end;     /* pointer to end of free pages */
    u_long         size;    /* number of bytes free */
};

/*
 * How many bits per u_long in the bitmap.
 * Change only if not 8 bits/byte
 */
#define MALLOC_BITS	(8*sizeof(u_long))

/*
 * Magic values to put in the page_directory
 */
#define MALLOC_NOT_MINE	((struct pginfo*) 0)
#define MALLOC_FREE	((struct pginfo*) 1)
#define MALLOC_FIRST	((struct pginfo*) 2)
#define MALLOC_FOLLOW	((struct pginfo*) 3)
#define MALLOC_MAGIC	((struct pginfo*) 4)

#ifndef malloc_pageshift
#define malloc_pageshift	12U
#endif

#ifndef malloc_minsize
#define malloc_minsize		16U
#endif

#ifndef malloc_pageshift
#error "malloc_pageshift undefined"
#endif

#if !defined(malloc_pagesize)
#define malloc_pagesize		(1UL<<malloc_pageshift)
#endif

#if ((1UL<<malloc_pageshift) != malloc_pagesize)
#error "(1UL<<malloc_pageshift) != malloc_pagesize"
#endif

#ifndef malloc_maxsize
#define malloc_maxsize		((malloc_pagesize)>>1)
#endif
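
/*
 * Worked example of the chunk bookkeeping above (illustrative): a
 * 4096-byte page split into 16-byte chunks holds 4096/16 = 256 chunks,
 * so its pginfo bitmap needs 256/MALLOC_BITS u_longs (8 with a 32-bit
 * u_long, 4 with a 64-bit one); bits[1] is only the first word, the
 * rest of the bitmap is allocated right behind the struct.  The magic
 * values 0..4 are far below any real pointer, which is why tests such
 * as "pd[j] < MALLOC_MAGIC" can tell the special page-directory states
 * apart from genuine pginfo pointers.
 */
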
/* A mask for the offset inside a page. */
#define malloc_pagemask		((malloc_pagesize)-1)

#define pageround(foo)	(((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
#define ptr2index(foo)	(((u_long)(foo) >> malloc_pageshift)-malloc_origo)

/* fd of /dev/zero */
#ifdef USE_DEV_ZERO
static int fdzero;
#define MMAP_FD	fdzero
#define INIT_MMAP() \
    { if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \
        wrterror("open of /dev/zero"); }
#else
#define MMAP_FD (-1)
#define INIT_MMAP()
#endif

/* Set when initialization has been done */
static unsigned malloc_started;

/* Number of free pages we cache */
static unsigned malloc_cache = 16;

/* The offset from pagenumber to index into the page directory */
static u_long malloc_origo;

/* The last index in the page directory we care about */
static u_long last_index;

/* Pointer to page directory. Allocated "as if with" malloc */
static struct pginfo **page_dir;

/* How many slots in the page directory */
static size_t malloc_ninfo;

/* Free pages line up here */
static struct pgfree free_list;

/* Abort(), user doesn't handle problems. */
static int malloc_abort;

/* Are we trying to die ? */
static int suicide;

#ifdef MALLOC_STATS
/* dump statistics */
static int malloc_stats;
#endif

/* avoid outputting warnings? */
static int malloc_silent;

/* always realloc ? */
static int malloc_realloc;

#ifdef __FreeBSD__
/* pass the kernel a hint on free pages ? */
static int malloc_hint;
#endif

/* xmalloc behaviour ? */
static int malloc_xmalloc;

/* zero fill ? */
static int malloc_zero;

/* junk fill ? */
static int malloc_junk;

#ifdef __FreeBSD__
/* utrace ? */
static int malloc_utrace;

struct ut { void *p; size_t s; void *r; };

void utrace __P((struct ut *, int));

#define UTRACE(a, b, c) \
    if (malloc_utrace) \
        {struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);}
#else /* !__FreeBSD__ */
#define UTRACE(a,b,c)
#endif

/* my last break. */
static void *malloc_brk;

/* one location cache for free-list holders */
static struct pgfree *px;

/* compile-time options */
char *malloc_options;

/* Name of the current public function */
static char *malloc_func;

/* Macro for mmap */
#define MMAP(size) \
    mmap((void *)0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
        MMAP_FD, (off_t)0);

/*
 * Necessary function declarations
 */
static int extend_pgdir(u_long index);
static void *imalloc(size_t size);
static void ifree(void *ptr);
static void *irealloc(void *ptr, size_t size);
static void *malloc_bytes(size_t size);
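
/*
 * Worked example of the two macros above (illustrative, assuming
 * 4096-byte pages): pageround(0x1234) rounds up to the next page
 * boundary, 0x2000, while pageround(0x2000) stays at 0x2000.
 * ptr2index() turns an address into a slot in page_dir by dropping the
 * low 12 offset bits and subtracting malloc_origo, which malloc_init()
 * biases so that the lowest malloc_pageshift slots of page_dir stay
 * free to serve as the chunk-bucket heads.
 */
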
#ifdef MALLOC_STATS
void
malloc_dump(fd)
    FILE *fd;
{
    struct pginfo **pd;
    struct pgfree *pf;
    int j;

    pd = page_dir;

    /* print out all the pages */
    for(j=0;j<=last_index;j++) {
        fprintf(fd, "%08lx %5d ", (j+malloc_origo) << malloc_pageshift, j);
        if (pd[j] == MALLOC_NOT_MINE) {
            for(j++;j<=last_index && pd[j] == MALLOC_NOT_MINE;j++)
                ;
            j--;
            fprintf(fd, ".. %5d not mine\n", j);
        } else if (pd[j] == MALLOC_FREE) {
            for(j++;j<=last_index && pd[j] == MALLOC_FREE;j++)
                ;
            j--;
            fprintf(fd, ".. %5d free\n", j);
        } else if (pd[j] == MALLOC_FIRST) {
            for(j++;j<=last_index && pd[j] == MALLOC_FOLLOW;j++)
                ;
            j--;
            fprintf(fd, ".. %5d in use\n", j);
        } else if (pd[j] < MALLOC_MAGIC) {
            fprintf(fd, "(%p)\n", pd[j]);
        } else {
            fprintf(fd, "%p %d (of %d) x %d @ %p --> %p\n",
                pd[j], pd[j]->free, pd[j]->total,
                pd[j]->size, pd[j]->page, pd[j]->next);
        }
    }

    for(pf=free_list.next; pf; pf=pf->next) {
        fprintf(fd, "Free: @%p [%p...%p[ %ld ->%p <-%p\n",
                pf, pf->page, pf->end, pf->size, pf->prev, pf->next);
        if (pf == pf->next) {
            fprintf(fd, "Free_list loops.\n");
            break;
        }
    }

    /* print out various info */
    fprintf(fd, "Minsize\t%d\n", malloc_minsize);
    fprintf(fd, "Maxsize\t%d\n", malloc_maxsize);
    fprintf(fd, "Pagesize\t%lu\n", (u_long)malloc_pagesize);
    fprintf(fd, "Pageshift\t%d\n", malloc_pageshift);
    fprintf(fd, "FirstPage\t%ld\n", malloc_origo);
    fprintf(fd, "LastPage\t%ld %lx\n", last_index+malloc_pageshift,
        (last_index + malloc_pageshift) << malloc_pageshift);
    fprintf(fd, "Break\t%ld\n", (u_long)sbrk(0) >> malloc_pageshift);
}
#endif /* MALLOC_STATS */

extern char *__progname;

static void
wrterror(p)
    char *p;
{
    char *q = " error: ";
    struct iovec iov[4];

    iov[0].iov_base = __progname;
    iov[0].iov_len = strlen(__progname);
    iov[1].iov_base = malloc_func;
    iov[1].iov_len = strlen(malloc_func);
    iov[2].iov_base = q;
    iov[2].iov_len = strlen(q);
    iov[3].iov_base = p;
    iov[3].iov_len = strlen(p);
    writev(STDERR_FILENO, iov, 4);

    suicide = 1;
#ifdef MALLOC_STATS
    if (malloc_stats)
        malloc_dump(stderr);
#endif /* MALLOC_STATS */
    abort();
}

static void
wrtwarning(p)
    char *p;
{
    char *q = " warning: ";
    struct iovec iov[4];

    if (malloc_abort)
        wrterror(p);
    else if (malloc_silent)
        return;

    iov[0].iov_base = __progname;
    iov[0].iov_len = strlen(__progname);
    iov[1].iov_base = malloc_func;
    iov[1].iov_len = strlen(malloc_func);
    iov[2].iov_base = q;
    iov[2].iov_len = strlen(q);
    iov[3].iov_base = p;
    iov[3].iov_len = strlen(p);
    writev(STDERR_FILENO, iov, 4);
}

#ifdef MALLOC_STATS
static void
malloc_exit()
{
    FILE *fd = fopen("malloc.out", "a");
    char *q = "malloc() warning: Couldn't dump stats.\n";
    if (fd) {
        malloc_dump(fd);
        fclose(fd);
    } else
        write(2, q, strlen(q));
}
#endif /* MALLOC_STATS */


/*
 * Allocate a number of pages from the OS
 */
static void *
map_pages(pages)
    int pages;
{
    caddr_t result, tail;

    result = (caddr_t)pageround((u_long)sbrk(0));
    tail = result + (pages << malloc_pageshift);

    if (brk(tail)) {
#ifdef MALLOC_EXTRA_SANITY
        wrterror("(ES): map_pages fails\n");
#endif /* MALLOC_EXTRA_SANITY */
        return 0;
    }

    last_index = ptr2index(tail) - 1;
    malloc_brk = tail;

    if ((last_index+1) >= malloc_ninfo && !extend_pgdir(last_index))
        return 0;

    return result;
}

/*
 * Extend page directory
 */
static int
extend_pgdir(index)
    u_long index;
{
    struct pginfo **new, **old;
    size_t i, oldlen;

    /* Make it this many pages */
    i = index * sizeof *page_dir;
    i /= malloc_pagesize;
    i += 2;

    /* remember the old mapping size */
    oldlen = malloc_ninfo * sizeof *page_dir;

    /*
     * NOTE: we allocate new pages and copy the directory rather than tempt
     * fate by trying to "grow" the region.. There is nothing to prevent
     * us from accidentally re-mapping space that's been allocated by our caller
     * via dlopen() or other mmap().
     *
     * The copy problem is not too bad, as there is 4K of page index per
     * 4MB of malloc arena.
     *
     * We can totally avoid the copy if we open a file descriptor to associate
     * the anon mappings with. Then, when we remap the pages at the new
     * address, the old pages will be "magically" remapped.. But this means
     * keeping open a "secret" file descriptor.....
     */

    /* Get new pages */
    new = (struct pginfo**) MMAP(i * malloc_pagesize);
    if (new == (struct pginfo **)-1)
        return 0;

    /* Copy the old stuff */
    memcpy(new, page_dir,
        malloc_ninfo * sizeof *page_dir);

    /* register the new size */
    malloc_ninfo = i * malloc_pagesize / sizeof *page_dir;

    /* swap the pointers */
    old = page_dir;
    page_dir = new;

    /* Now free the old stuff */
    munmap(old, oldlen);
    return 1;
}
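
/*
 * Worked example of the "4K of page index per 4MB of malloc arena"
 * remark above (illustrative, assuming 4096-byte pages and 4-byte
 * pointers): one directory page holds 4096/4 = 1024 pginfo pointers,
 * and each pointer describes one 4096-byte arena page, so copying one
 * directory page covers 1024 * 4096 bytes = 4MB of arena.  The "+ 2"
 * in the size calculation rounds the division up and leaves roughly a
 * spare directory page of headroom, so the directory is not regrown on
 * every single page of arena growth.
 */
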
/*
 * Initialize the world
 */
static void
malloc_init ()
{
    char *p, b[64];
    int i, j;
    int save_errno = errno;

    THREAD_LOCK_INIT();

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

    for (i = 0; i < 3; i++) {
        if (i == 0) {
            j = readlink("/etc/malloc.conf", b, sizeof b - 1);
            if (j <= 0)
                continue;
            b[j] = '\0';
            p = b;
        } else if (i == 1) {
            if (issetugid() == 0)
                p = getenv("MALLOC_OPTIONS");
            else
                continue;
        } else if (i == 2) {
            p = malloc_options;
        }
        for (; p && *p; p++) {
            switch (*p) {
                case '>': malloc_cache   <<= 1; break;
                case '<': malloc_cache   >>= 1; break;
                case 'a': malloc_abort   = 0; break;
                case 'A': malloc_abort   = 1; break;
#ifdef MALLOC_STATS
                case 'd': malloc_stats   = 0; break;
                case 'D': malloc_stats   = 1; break;
#endif /* MALLOC_STATS */
#ifdef __FreeBSD__
                case 'h': malloc_hint    = 0; break;
                case 'H': malloc_hint    = 1; break;
#endif /* __FreeBSD__ */
                case 'r': malloc_realloc = 0; break;
                case 'R': malloc_realloc = 1; break;
                case 'j': malloc_junk    = 0; break;
                case 'J': malloc_junk    = 1; break;
                case 'n': malloc_silent  = 0; break;
                case 'N': malloc_silent  = 1; break;
#ifdef __FreeBSD__
                case 'u': malloc_utrace  = 0; break;
                case 'U': malloc_utrace  = 1; break;
#endif /* __FreeBSD__ */
                case 'x': malloc_xmalloc = 0; break;
                case 'X': malloc_xmalloc = 1; break;
                case 'z': malloc_zero    = 0; break;
                case 'Z': malloc_zero    = 1; break;
                default:
                    j = malloc_abort;
                    malloc_abort = 0;
                    wrtwarning("unknown char in MALLOC_OPTIONS\n");
                    malloc_abort = j;
                    break;
            }
        }
    }

    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
        malloc_junk=1;

#ifdef MALLOC_STATS
    if (malloc_stats)
        atexit(malloc_exit);
#endif /* MALLOC_STATS */

    /* Allocate one page for the page directory */
    page_dir = (struct pginfo **) MMAP(malloc_pagesize);

    if (page_dir == (struct pginfo **) -1)
        wrterror("mmap(2) failed, check limits.\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory;
     */
    malloc_origo = ((u_long)pageround((u_long)sbrk(0))) >> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Been here, done that */
    malloc_started++;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */

    if (!malloc_cache)
        malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithley (kaleb@x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = (struct pgfree *) imalloc (sizeof *px);
    errno = save_errno;
}
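
/*
 * Example of selecting options (illustrative): the same flag characters
 * are accepted from the three sources scanned above, in this order --
 *
 *	ln -s 'AJ' /etc/malloc.conf	-- system-wide, via readlink()
 *	MALLOC_OPTIONS=AJ ./prog	-- per process, via the environment
 *					   (ignored for set-id programs)
 *	extern char *malloc_options;
 *	malloc_options = "AJ";		-- per program, set before the
 *					   first allocation
 *
 * 'A' aborts on warnings and 'J' junk-fills new allocations with
 * SOME_JUNK.  Each '>' doubles the number of cached free pages
 * (default 16), each '<' halves it.
 */
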
/*
 * Allocate a number of complete pages
 */
static void *
malloc_pages(size)
    size_t size;
{
    void *p, *delay_free = 0;
    int i;
    struct pgfree *pf;
    u_long index;

    size = pageround(size);

    p = 0;
    /* Look for free pages before asking for more */
    for(pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
        if (pf->size & malloc_pagemask)
            wrterror("(ES): junk length entry on free_list\n");
        if (!pf->size)
            wrterror("(ES): zero length entry on free_list\n");
        if (pf->page == pf->end)
            wrterror("(ES): zero entry on free_list\n");
        if (pf->page > pf->end)
            wrterror("(ES): sick entry on free_list\n");
        if ((void*)pf->page >= (void*)sbrk(0))
            wrterror("(ES): entry on free_list past brk\n");
        if (page_dir[ptr2index(pf->page)] != MALLOC_FREE)
            wrterror("(ES): non-free first page on free-list\n");
        if (page_dir[ptr2index(pf->end)-1] != MALLOC_FREE)
            wrterror("(ES): non-free last page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

        if (pf->size < size)
            continue;

        if (pf->size == size) {
            p = pf->page;
            if (pf->next)
                pf->next->prev = pf->prev;
            pf->prev->next = pf->next;
            delay_free = pf;
            break;
        }

        p = pf->page;
        pf->page = (char *)pf->page + size;
        pf->size -= size;
        break;
    }

#ifdef MALLOC_EXTRA_SANITY
    if (p && page_dir[ptr2index(p)] != MALLOC_FREE)
        wrterror("(ES): allocated non-free page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

    size >>= malloc_pageshift;

    /* Map new pages */
    if (!p)
        p = map_pages(size);

    if (p) {

        index = ptr2index(p);
        page_dir[index] = MALLOC_FIRST;
        for (i=1;i<size;i++)
            page_dir[index+i] = MALLOC_FOLLOW;

        if (malloc_junk)
            memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    if (delay_free) {
        if (!px)
            px = delay_free;
        else
            ifree(delay_free);
    }

    return p;
}
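
/*
 * Worked example of the free-list scan above (illustrative): with runs
 * of 2 and 5 free pages on free_list, a 3-page request skips the 2-page
 * run, then takes the first 3 pages of the 5-page run and shrinks that
 * entry to 2 pages.  Only an exact fit unlinks the whole pgfree entry,
 * and that holder is handed back via the px cache or ifree() at the
 * end.
 */
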
/*
 * Allocate a page of fragments
 */

static __inline__ int
malloc_make_chunks(bits)
    int bits;
{
    struct pginfo *bp;
    void *pp;
    int i, k, l;

    /* Allocate a new bucket */
    pp = malloc_pages((size_t)malloc_pagesize);
    if (!pp)
        return 0;

    /* Find length of admin structure */
    l = sizeof *bp - sizeof(u_long);
    l += sizeof(u_long) *
        (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

    /* Don't waste more than two chunks on this */
    if ((1UL<<(bits)) <= l+l) {
        bp = (struct pginfo *)pp;
    } else {
        bp = (struct pginfo *)imalloc(l);
        if (!bp) {
            ifree(pp);
            return 0;
        }
    }

    bp->size = (1UL<<bits);
    bp->shift = bits;
    bp->total = bp->free = malloc_pagesize >> bits;
    bp->page = pp;

    /* set all valid bits in the bitmap */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
        bp->bits[i / MALLOC_BITS] = ~0UL;

    for(; i < k; i++)
        bp->bits[i/MALLOC_BITS] |= 1UL<<(i%MALLOC_BITS);

    if (bp == bp->page) {
        /* Mark the ones we stole for ourselves */
        for(i=0;l > 0;i++) {
            bp->bits[i/MALLOC_BITS] &= ~(1UL<<(i%MALLOC_BITS));
            bp->free--;
            bp->total--;
            l -= (1 << bits);
        }
    }

    /* MALLOC_LOCK */

    page_dir[ptr2index(pp)] = bp;

    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return 1;
}

/*
 * Allocate a fragment
 */
static void *
malloc_bytes(size)
    size_t size;
{
    int i,j;
    u_long u;
    struct pginfo *bp;
    int k;
    u_long *lp;

    /* Don't bother with anything less than this */
    if (size < malloc_minsize)
        size = malloc_minsize;

    /* Find the right bucket */
    j = 1;
    i = size-1;
    while (i >>= 1)
        j++;

    /* If it's empty, make a page more of that size chunks */
    if (!page_dir[j] && !malloc_make_chunks(j))
        return 0;

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty */
    for (lp = bp->bits; !*lp; lp++)
        ;

    /* Find that bit, and tweak it */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
        u += u;
        k++;
    }
    *lp ^= u;

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
        page_dir[j] = bp->next;
        bp->next = 0;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    if (malloc_junk)
        memset((char *)bp->page + k, SOME_JUNK, bp->size);

    return (u_char *)bp->page + k;
}
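
/*
 * Worked example of the bucket math above (illustrative, assuming
 * 4096-byte pages): a 100-byte request computes j = 7, since 1<<7 = 128
 * is the smallest power of two that can hold it, so it is served from a
 * page of 128-byte chunks holding 4096/128 = 32 chunks.  Requests below
 * malloc_minsize land in the 16-byte bucket (j = 4).  For the 16-byte
 * bucket the chunk size is no more than twice the admin structure, so
 * the pginfo lives at the start of its own page and the stealing loop
 * in malloc_make_chunks() marks the chunks it occupies as used; for
 * larger chunks the pginfo is imalloc()ed separately.
 */
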
/*
 * Allocate a piece of memory
 */
static void *
imalloc(size)
    size_t size;
{
    void *result;

    if (!malloc_started)
        malloc_init();

    if (suicide)
        abort();

    if ((size + malloc_pagesize) < size)        /* Check for overflow */
        result = 0;
    else if (size <= malloc_maxsize)
        result = malloc_bytes(size);
    else
        result = malloc_pages(size);

    if (malloc_abort && !result)
        wrterror("allocation failed.\n");

    if (malloc_zero && result)
        memset(result, 0, size);

    return result;
}

/*
 * Change the size of an allocation.
 */
static void *
irealloc(ptr, size)
    void *ptr;
    size_t size;
{
    void *p;
    u_long osize, index;
    struct pginfo **mp;
    int i;

    if (suicide)
        abort();

    if (!malloc_started) {
        wrtwarning("malloc() has never been called.\n");
        return 0;
    }

    index = ptr2index(ptr);

    if (index < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense.\n");
        return 0;
    }

    if (index > last_index) {
        wrtwarning("junk pointer, too high to make sense.\n");
        return 0;
    }

    mp = &page_dir[index];

    if (*mp == MALLOC_FIRST) {                  /* Page allocation */

        /* Check the pointer */
        if ((u_long)ptr & malloc_pagemask) {
            wrtwarning("modified (page-) pointer.\n");
            return 0;
        }

        /* Find the size in bytes */
        for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
            osize += malloc_pagesize;

        if (!malloc_realloc &&                  /* unless we have to, */
            size <= osize &&                    /* .. or are too small, */
            size > (osize - malloc_pagesize)) { /* .. or can free a page, */
            return ptr;                         /* don't do anything. */
        }

    } else if (*mp >= MALLOC_MAGIC) {           /* Chunk allocation */

        /* Check the pointer for sane values */
        if (((u_long)ptr & ((*mp)->size-1))) {
            wrtwarning("modified (chunk-) pointer.\n");
            return 0;
        }

        /* Find the chunk index in the page */
        i = ((u_long)ptr & malloc_pagemask) >> (*mp)->shift;

        /* Verify that it isn't a free chunk already */
        if ((*mp)->bits[i/MALLOC_BITS] & (1UL<<(i%MALLOC_BITS))) {
            wrtwarning("chunk is already free.\n");
            return 0;
        }

        osize = (*mp)->size;

        if (!malloc_realloc &&                  /* Unless we have to, */
            size < osize &&                     /* ..or are too small, */
            (size > osize/2 ||                  /* ..or could use a smaller size, */
             osize == malloc_minsize)) {        /* ..(if there is one) */
            return ptr;                         /* ..Don't do anything */
        }

    } else {
        wrtwarning("pointer to wrong page.\n");
        return 0;
    }

    p = imalloc(size);

    if (p) {
        /* copy the lesser of the two sizes, and free the old one */
        if (osize < size)
            memcpy(p, ptr, osize);
        else
            memcpy(p, ptr, size);
        ifree(ptr);
    }
    return p;
}
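
/*
 * Worked example of the in-place policy above (illustrative, 4096-byte
 * pages): a 6000-byte allocation occupies two pages (osize = 8192), so
 * a realloc() to any size in (4096, 8192] keeps the same pointer, while
 * shrinking to 4096 or less moves the data so a page can be freed.  For
 * a 128-byte chunk, shrinking to 100 bytes stays put (still more than
 * half the chunk), but shrinking to 64 bytes or less copies into a
 * smaller bucket.  The 'R' option (malloc_realloc) disables this and
 * always reallocates.
 */
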
/*
 * Free a sequence of pages
 */

static __inline__ void
free_pages(ptr, index, info)
    void *ptr;
    int index;
    struct pginfo *info;
{
    int i;
    struct pgfree *pf, *pt=0;
    u_long l;
    void *tail;

    if (info == MALLOC_FREE) {
        wrtwarning("page is already free.\n");
        return;
    }

    if (info != MALLOC_FIRST) {
        wrtwarning("pointer to wrong page.\n");
        return;
    }

    if ((u_long)ptr & malloc_pagemask) {
        wrtwarning("modified (page-) pointer.\n");
        return;
    }

    /* Count how many pages and mark them free at the same time */
    page_dir[index] = MALLOC_FREE;
    for (i = 1; page_dir[index+i] == MALLOC_FOLLOW; i++)
        page_dir[index + i] = MALLOC_FREE;

    l = i << malloc_pageshift;

    if (malloc_junk)
        memset(ptr, SOME_JUNK, l);

#ifdef __FreeBSD__
    if (malloc_hint)
        madvise(ptr, l, MADV_FREE);
#endif

    tail = (char *)ptr+l;

    /* add to free-list */
    if (!px)
        px = imalloc(sizeof *px);       /* This cannot fail... */
    px->page = ptr;
    px->end = tail;
    px->size = l;
    if (!free_list.next) {

        /* Nothing on free list, put this at head */
        px->next = free_list.next;
        px->prev = &free_list;
        free_list.next = px;
        pf = px;
        px = 0;

    } else {

        /* Find the right spot, leave pf pointing to the modified entry. */
        tail = (char *)ptr+l;

        for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next)
            ; /* Race ahead here */

        if (pf->page > tail) {
            /* Insert before entry */
            px->next = pf;
            px->prev = pf->prev;
            pf->prev = px;
            px->prev->next = px;
            pf = px;
            px = 0;
        } else if (pf->end == ptr ) {
            /* Append to the previous entry */
            pf->end = (char *)pf->end + l;
            pf->size += l;
            if (pf->next && pf->end == pf->next->page ) {
                /* And collapse the next too. */
                pt = pf->next;
                pf->end = pt->end;
                pf->size += pt->size;
                pf->next = pt->next;
                if (pf->next)
                    pf->next->prev = pf;
            }
        } else if (pf->page == tail) {
            /* Prepend to entry */
            pf->size += l;
            pf->page = ptr;
        } else if (!pf->next) {
            /* Append at tail of chain */
            px->next = 0;
            px->prev = pf;
            pf->next = px;
            pf = px;
            px = 0;
        } else {
            wrterror("freelist is destroyed.\n");
        }
    }

    /* Return something to OS ? */
    if (!pf->next &&                            /* If we're the last one, */
        pf->size > malloc_cache &&              /* ..and the cache is full, */
        pf->end == malloc_brk &&                /* ..and none behind us, */
        malloc_brk == sbrk(0)) {                /* ..and it's OK to do... */

        /*
         * Keep the cache intact. Notice that the '>' above guarantees that
         * the pf will always have at least one page afterwards.
         */
        pf->end = (char *)pf->page + malloc_cache;
        pf->size = malloc_cache;

        brk(pf->end);
        malloc_brk = pf->end;

        index = ptr2index(pf->end);
        last_index = index - 1;

        for(i=index;i <= last_index;)
            page_dir[i++] = MALLOC_NOT_MINE;

        /* XXX: We could realloc/shrink the pagedir here I guess. */
    }
    if (pt)
        ifree(pt);
}
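
/*
 * Worked example of the cache check above (illustrative, 4096-byte
 * pages): with the default of 16 cached pages, malloc_cache is
 * 16 << 12 = 65536 bytes after malloc_init().  If the final run on the
 * free list ends exactly at the current break and has grown beyond
 * 64KB, it is trimmed back to 64KB and the excess is returned to the
 * kernel with brk(); runs in the middle of the arena are never given
 * back, only coalesced with their neighbours.
 */
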
/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */

/* ARGSUSED */
static __inline__ void
free_bytes(ptr, index, info)
    void *ptr;
    int index;
    struct pginfo *info;
{
    int i;
    struct pginfo **mp;
    void *vp;

    /* Find the chunk number on the page */
    i = ((u_long)ptr & malloc_pagemask) >> info->shift;

    if (((u_long)ptr & (info->size-1))) {
        wrtwarning("modified (chunk-) pointer.\n");
        return;
    }

    if (info->bits[i/MALLOC_BITS] & (1UL<<(i%MALLOC_BITS))) {
        wrtwarning("chunk is already free.\n");
        return;
    }

    if (malloc_junk)
        memset(ptr, SOME_JUNK, info->size);

    info->bits[i/MALLOC_BITS] |= 1UL<<(i%MALLOC_BITS);
    info->free++;

    mp = page_dir + info->shift;

    if (info->free == 1) {

        /* Page became non-full */

        mp = page_dir + info->shift;
        /* Insert in address order */
        while (*mp && (*mp)->next && (*mp)->next->page < info->page)
            mp = &(*mp)->next;
        info->next = *mp;
        *mp = info;
        return;
    }

    if (info->free != info->total)
        return;

    /* Find & remove this page in the queue */
    while (*mp != info) {
        mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
        if (!*mp)
            wrterror("(ES): Not on queue\n");
#endif /* MALLOC_EXTRA_SANITY */
    }
    *mp = info->next;

    /* Free the page & the info structure if need be */
    page_dir[ptr2index(info->page)] = MALLOC_FIRST;
    vp = info->page;            /* Order is important ! */
    if(vp != (void*)info)
        ifree(info);
    ifree(vp);
}
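
/*
 * Chunk-page lifecycle, tied to the code above (descriptive): a page of
 * chunks sits on the bucket list headed at page_dir[shift] while it has
 * free chunks; malloc_bytes() unlinks it when the last chunk is handed
 * out, the first free_bytes() on a full page re-inserts it in address
 * order, and once every chunk is free again the page is turned back
 * into a MALLOC_FIRST page and released through ifree(), together with
 * its pginfo if that was allocated separately.
 */
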
static void
ifree(ptr)
    void *ptr;
{
    struct pginfo *info;
    int index;

    /* This is legal */
    if (!ptr)
        return;

    if (!malloc_started) {
        wrtwarning("malloc() has never been called.\n");
        return;
    }

    /* If we're already sinking, don't make matters any worse. */
    if (suicide)
        return;

    index = ptr2index(ptr);

    if (index < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense.\n");
        return;
    }

    if (index > last_index) {
        wrtwarning("junk pointer, too high to make sense.\n");
        return;
    }

    info = page_dir[index];

    if (info < MALLOC_MAGIC)
        free_pages(ptr, index, info);
    else
        free_bytes(ptr, index, info);
    return;
}

/*
 * These are the public exported interface routines.
 */

static int malloc_active;

void *
malloc(size_t size)
{
    register void *r;

    malloc_func = " in malloc():";
    THREAD_LOCK();
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        return (0);
    }
    r = imalloc(size);
    UTRACE(0, size, r);
    malloc_active--;
    THREAD_UNLOCK();
    if (malloc_xmalloc && !r)
        wrterror("out of memory.\n");
    return (r);
}

void
free(void *ptr)
{
    malloc_func = " in free():";
    THREAD_LOCK();
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        return;
    }
    ifree(ptr);
    UTRACE(ptr, 0, 0);
    malloc_active--;
    THREAD_UNLOCK();
    return;
}

void *
realloc(void *ptr, size_t size)
{
    register void *r;

    malloc_func = " in realloc():";
    THREAD_LOCK();
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        return (0);
    }
    if (!ptr) {
        r = imalloc(size);
    } else {
        r = irealloc(ptr, size);
    }
    UTRACE(ptr, size, r);
    malloc_active--;
    THREAD_UNLOCK();
    if (malloc_xmalloc && !r)
        wrterror("out of memory.\n");
    return (r);
}