/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char rcsid[] = "$OpenBSD: malloc.c,v 1.33 1998/11/20 11:18:50 d Exp $";
#endif /* LIBC_SCCS and not lint */

/*
 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are
 * related to internal conditions and consistency in malloc.c. This has
 * a noticeable runtime performance hit, and generally will not do you
 * any good unless you fiddle with the internals of malloc or want
 * to catch random pointer corruption as early as possible.
 */
#ifndef MALLOC_EXTRA_SANITY
#undef MALLOC_EXTRA_SANITY
#endif

/*
 * Defining MALLOC_STATS will enable you to call malloc_dump() and set
 * the [dD] options in the MALLOC_OPTIONS environment variable.
 * It has no run-time performance hit, but does pull in stdio...
 */
#ifndef MALLOC_STATS
#undef MALLOC_STATS
#endif

/*
 * What to use for Junk.  This is the byte value we use to fill with
 * when the 'J' option is enabled.
 */
#define SOME_JUNK	0xd0		/* as in "Duh" :-) */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/mman.h>

/*
 * The basic parameters you can tweak.
 *
 * malloc_pageshift	pagesize = 1 << malloc_pageshift
 *			It's probably best if this is the native
 *			page size, but it shouldn't have to be.
 *
 * malloc_minsize	minimum size of an allocation in bytes.
 *			If this is too small it's too much work
 *			to manage them.  This is also the smallest
 *			unit of alignment used for the storage
 *			returned by malloc/realloc.
 *
 */

#if defined(__i386__) && defined(__FreeBSD__)
#   define malloc_pageshift	12U
#   define malloc_minsize	16U
#endif /* __i386__ && __FreeBSD__ */

#if defined(__sparc__) && !defined(__OpenBSD__)
#   define malloc_pageshift	12U
#   define malloc_minsize	16U
#   define MAP_ANON		(0)
#   define USE_DEV_ZERO
#   define MADV_FREE		MADV_DONTNEED
#endif /* __sparc__ */

/* Insert your combination here... */
#if defined(__FOOCPU__) && defined(__BAROS__)
#   define malloc_pageshift	12U
#   define malloc_minsize	16U
#endif /* __FOOCPU__ && __BAROS__ */

#if defined(__OpenBSD__) && !defined(__sparc__)
#   define malloc_pageshift	(PGSHIFT)
#   define malloc_minsize	16U
#endif /* __OpenBSD__ */

#ifdef _THREAD_SAFE
#   include "thread_private.h"
#   if 0
    /* kernel threads */
#   include <pthread.h>
static pthread_mutex_t malloc_lock;
#   define THREAD_LOCK()	pthread_mutex_lock(&malloc_lock)
#   define THREAD_UNLOCK()	pthread_mutex_unlock(&malloc_lock)
#   define THREAD_LOCK_INIT()	pthread_mutex_init(&malloc_lock, 0);
#   else
    /* user threads */
#   include "spinlock.h"
static spinlock_t malloc_lock = _SPINLOCK_INITIALIZER;
#   define THREAD_LOCK()	if (__isthreaded) _SPINLOCK(&malloc_lock)
#   define THREAD_UNLOCK()	if (__isthreaded) _SPINUNLOCK(&malloc_lock)
#   define THREAD_LOCK_INIT()
#   endif
#else
    /* no threads */
#   define THREAD_LOCK()
#   define THREAD_UNLOCK()
#   define THREAD_LOCK_INIT()
#endif

/*
 * No user serviceable parts behind this point.
 *
 * This structure describes a page worth of chunks.
 */

struct pginfo {
    struct pginfo	*next;	/* next on the free list */
    void		*page;	/* Pointer to the page */
    u_short		size;	/* size of this page's chunks */
    u_short		shift;	/* How far to shift for this size chunks */
    u_short		free;	/* How many free chunks */
    u_short		total;	/* How many chunks */
    u_long		bits[1]; /* Which chunks are free */
};

/*
 * This structure describes a number of free pages.
 */

struct pgfree {
    struct pgfree	*next;	/* next run of free pages */
    struct pgfree	*prev;	/* prev run of free pages */
    void		*page;	/* pointer to free pages */
    void		*end;	/* pointer to end of free pages */
    u_long		size;	/* number of bytes free */
};

/*
 * How many bits per u_long in the bitmap.
 * Change only if not 8 bits/byte
 */
#define MALLOC_BITS	(8*sizeof(u_long))

/*
 * Magic values to put in the page_directory
 */
#define MALLOC_NOT_MINE	((struct pginfo*) 0)
#define MALLOC_FREE	((struct pginfo*) 1)
#define MALLOC_FIRST	((struct pginfo*) 2)
#define MALLOC_FOLLOW	((struct pginfo*) 3)
#define MALLOC_MAGIC	((struct pginfo*) 4)

#ifndef malloc_pageshift
#define malloc_pageshift	12U
#endif

#ifndef malloc_minsize
#define malloc_minsize		16U
#endif

#ifndef malloc_pageshift
#error	"malloc_pageshift undefined"
#endif

#if !defined(malloc_pagesize)
#define malloc_pagesize		(1UL<<malloc_pageshift)
#endif

#if ((1UL<<malloc_pageshift) != malloc_pagesize)
#error	"(1UL<<malloc_pageshift) != malloc_pagesize"
#endif

#ifndef malloc_maxsize
#define malloc_maxsize		((malloc_pagesize)>>1)
#endif

/* A mask for the offset inside a page. */
#define malloc_pagemask	((malloc_pagesize)-1)

#define pageround(foo)	(((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
#define ptr2index(foo)	(((u_long)(foo) >> malloc_pageshift)-malloc_origo)
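
/*
 * Worked example (assuming the default malloc_pageshift of 12, i.e. 4 KB
 * pages): malloc_pagemask is 0xfff, pageround(0x1234) rounds up to 0x2000,
 * and ptr2index(p) is ((u_long)p >> 12) - malloc_origo, the slot in
 * page_dir describing the page that contains p.  A three-page allocation
 * starting at index k is recorded as page_dir[k] == MALLOC_FIRST followed
 * by MALLOC_FOLLOW in page_dir[k+1] and page_dir[k+2].
 */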

/* fd of /dev/zero */
#ifdef USE_DEV_ZERO
static int fdzero;
#define MMAP_FD	fdzero
#define INIT_MMAP() \
	{ if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \
	    wrterror("open of /dev/zero"); }
#else
#define MMAP_FD (-1)
#define INIT_MMAP()
#endif

/* Set when initialization has been done */
static unsigned malloc_started;

/* Number of free pages we cache */
static unsigned malloc_cache = 16;

/* The offset from pagenumber to index into the page directory */
static u_long malloc_origo;

/* The last index in the page directory we care about */
static u_long last_index;

/* Pointer to page directory. Allocated "as if with" malloc */
static struct pginfo **page_dir;

/* How many slots in the page directory */
static size_t malloc_ninfo;

/* Free pages line up here */
static struct pgfree free_list;

/* Abort(), user doesn't handle problems. */
static int malloc_abort;

/* Are we trying to die ? */
static int suicide;

#ifdef MALLOC_STATS
/* dump statistics */
static int malloc_stats;
#endif

/* avoid outputting warnings? */
static int malloc_silent;

/* always realloc ? */
static int malloc_realloc;

#ifdef __FreeBSD__
/* pass the kernel a hint on free pages ? */
static int malloc_hint;
#endif

/* xmalloc behaviour ? */
static int malloc_xmalloc;

/* zero fill ? */
static int malloc_zero;

/* junk fill ? */
static int malloc_junk;

#ifdef __FreeBSD__
/* utrace ? */
static int malloc_utrace;

struct ut { void *p; size_t s; void *r; };

void utrace __P((struct ut *, int));

#define UTRACE(a, b, c) \
	if (malloc_utrace) \
	    {struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);}
#else /* !__FreeBSD__ */
#define UTRACE(a,b,c)
#endif

/* my last break. */
static void *malloc_brk;

/* one location cache for free-list holders */
static struct pgfree *px;

/* compile-time options */
char *malloc_options;

/* Name of the current public function */
static char *malloc_func;

/* Macro for mmap */
#define MMAP(size) \
	mmap((void *)0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
	    MMAP_FD, (off_t)0)
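
/*
 * Note: mmap(2) returns MAP_FAILED ((void *)-1) on failure, which is why
 * the callers below compare the result of MMAP() against -1 rather than
 * against a null pointer.
 */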

/*
 * Necessary function declarations
 */
static int extend_pgdir(u_long index);
static void *imalloc(size_t size);
static void ifree(void *ptr);
static void *irealloc(void *ptr, size_t size);
static void *malloc_bytes(size_t size);

#ifdef MALLOC_STATS
void
malloc_dump(fd)
    FILE *fd;
{
    struct pginfo **pd;
    struct pgfree *pf;
    int j;

    pd = page_dir;

    /* print out all the pages */
    for(j=0;j<=last_index;j++) {
        fprintf(fd, "%08lx %5d ", (j+malloc_origo) << malloc_pageshift, j);
        if (pd[j] == MALLOC_NOT_MINE) {
            for(j++;j<=last_index && pd[j] == MALLOC_NOT_MINE;j++)
                ;
            j--;
            fprintf(fd, ".. %5d not mine\n", j);
        } else if (pd[j] == MALLOC_FREE) {
            for(j++;j<=last_index && pd[j] == MALLOC_FREE;j++)
                ;
            j--;
            fprintf(fd, ".. %5d free\n", j);
        } else if (pd[j] == MALLOC_FIRST) {
            for(j++;j<=last_index && pd[j] == MALLOC_FOLLOW;j++)
                ;
            j--;
            fprintf(fd, ".. %5d in use\n", j);
        } else if (pd[j] < MALLOC_MAGIC) {
            fprintf(fd, "(%p)\n", pd[j]);
        } else {
            fprintf(fd, "%p %d (of %d) x %d @ %p --> %p\n",
                pd[j], pd[j]->free, pd[j]->total,
                pd[j]->size, pd[j]->page, pd[j]->next);
        }
    }

    for(pf=free_list.next; pf; pf=pf->next) {
        fprintf(fd, "Free: @%p [%p...%p[ %ld ->%p <-%p\n",
                pf, pf->page, pf->end, pf->size, pf->prev, pf->next);
        if (pf == pf->next) {
            fprintf(fd, "Free_list loops.\n");
            break;
        }
    }

    /* print out various info */
    fprintf(fd, "Minsize\t%d\n", malloc_minsize);
    fprintf(fd, "Maxsize\t%d\n", malloc_maxsize);
    fprintf(fd, "Pagesize\t%lu\n", (u_long)malloc_pagesize);
    fprintf(fd, "Pageshift\t%d\n", malloc_pageshift);
    fprintf(fd, "FirstPage\t%ld\n", malloc_origo);
    fprintf(fd, "LastPage\t%ld %lx\n", last_index+malloc_pageshift,
        (last_index + malloc_pageshift) << malloc_pageshift);
    fprintf(fd, "Break\t%ld\n", (u_long)sbrk(0) >> malloc_pageshift);
}
#endif /* MALLOC_STATS */

extern char *__progname;

static void
wrterror(p)
    char *p;
{
    char *q = " error: ";
    write(2, __progname, strlen(__progname));
    write(2, malloc_func, strlen(malloc_func));
    write(2, q, strlen(q));
    write(2, p, strlen(p));
    suicide = 1;
#ifdef MALLOC_STATS
    if (malloc_stats)
        malloc_dump(stderr);
#endif /* MALLOC_STATS */
    abort();
}

static void
wrtwarning(p)
    char *p;
{
    char *q = " warning: ";
    if (malloc_abort)
        wrterror(p);
    else if (malloc_silent)
        return;
    write(2, __progname, strlen(__progname));
    write(2, malloc_func, strlen(malloc_func));
    write(2, q, strlen(q));
    write(2, p, strlen(p));
}

#ifdef MALLOC_STATS
static void
malloc_exit()
{
    FILE *fd = fopen("malloc.out", "a");
    char *q = "malloc() warning: Couldn't dump stats.\n";
    if (fd) {
        malloc_dump(fd);
        fclose(fd);
    } else
        write(2, q, strlen(q));
}
#endif /* MALLOC_STATS */


/*
 * Allocate a number of pages from the OS
 */
static void *
map_pages(pages)
    int pages;
{
    caddr_t result, tail;

    result = (caddr_t)pageround((u_long)sbrk(0));
    tail = result + (pages << malloc_pageshift);

    if (brk(tail)) {
#ifdef MALLOC_EXTRA_SANITY
        wrterror("(ES): map_pages fails\n");
#endif /* MALLOC_EXTRA_SANITY */
        return 0;
    }

    last_index = ptr2index(tail) - 1;
    malloc_brk = tail;

    if ((last_index+1) >= malloc_ninfo && !extend_pgdir(last_index))
        return 0;

    return result;
}

/*
 * Extend page directory
 */
static int
extend_pgdir(index)
    u_long index;
{
    struct pginfo **new, **old;
    size_t i, oldlen;

    /* Make it this many pages */
    i = index * sizeof *page_dir;
    i /= malloc_pagesize;
    i += 2;

    /* remember the old mapping size */
    oldlen = malloc_ninfo * sizeof *page_dir;

    /*
     * NOTE: we allocate new pages and copy the directory rather than tempt
     * fate by trying to "grow" the region.. There is nothing to prevent
     * us from accidentally re-mapping space that's been allocated by our
     * caller via dlopen() or other mmap().
     *
     * The copy problem is not too bad, as there is 4K of page index per
     * 4MB of malloc arena.
     *
     * We can totally avoid the copy if we open a file descriptor to associate
     * the anon mappings with.  Then, when we remap the pages at the new
     * address, the old pages will be "magically" remapped..  But this means
     * keeping open a "secret" file descriptor.....
     */

    /* Get new pages */
    new = (struct pginfo**) MMAP(i * malloc_pagesize);
    if (new == (struct pginfo **)-1)
        return 0;

    /* Copy the old stuff */
    memcpy(new, page_dir,
        malloc_ninfo * sizeof *page_dir);

    /* register the new size */
    malloc_ninfo = i * malloc_pagesize / sizeof *page_dir;

    /* swap the pointers */
    old = page_dir;
    page_dir = new;

    /* Now free the old stuff */
    munmap(old, oldlen);
    return 1;
}

/*
 * Initialize the world
 */
static void
malloc_init ()
{
    char *p, b[64];
    int i, j;
    int save_errno = errno;

    THREAD_LOCK_INIT();

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

    for (i = 0; i < 3; i++) {
        if (i == 0) {
            j = readlink("/etc/malloc.conf", b, sizeof b - 1);
            if (j <= 0)
                continue;
            b[j] = '\0';
            p = b;
        } else if (i == 1) {
            if (issetugid() == 0)
                p = getenv("MALLOC_OPTIONS");
            else
                continue;
        } else if (i == 2) {
            p = malloc_options;
        }
        for (; p && *p; p++) {
            switch (*p) {
                case '>': malloc_cache   <<= 1; break;
                case '<': malloc_cache   >>= 1; break;
                case 'a': malloc_abort   = 0; break;
                case 'A': malloc_abort   = 1; break;
#ifdef MALLOC_STATS
                case 'd': malloc_stats   = 0; break;
                case 'D': malloc_stats   = 1; break;
#endif /* MALLOC_STATS */
#ifdef __FreeBSD__
                case 'h': malloc_hint    = 0; break;
                case 'H': malloc_hint    = 1; break;
#endif /* __FreeBSD__ */
                case 'r': malloc_realloc = 0; break;
                case 'R': malloc_realloc = 1; break;
                case 'j': malloc_junk    = 0; break;
                case 'J': malloc_junk    = 1; break;
                case 'n': malloc_silent  = 0; break;
                case 'N': malloc_silent  = 1; break;
#ifdef __FreeBSD__
                case 'u': malloc_utrace  = 0; break;
                case 'U': malloc_utrace  = 1; break;
#endif /* __FreeBSD__ */
                case 'x': malloc_xmalloc = 0; break;
                case 'X': malloc_xmalloc = 1; break;
                case 'z': malloc_zero    = 0; break;
                case 'Z': malloc_zero    = 1; break;
                default:
                    j = malloc_abort;
                    malloc_abort = 0;
                    wrtwarning("unknown char in MALLOC_OPTIONS\n");
                    malloc_abort = j;
                    break;
            }
        }
    }
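
    /*
     * Example: starting a program with MALLOC_OPTIONS=AJ in the environment
     * (or pointing the /etc/malloc.conf symlink at the string "AJ") turns on
     * abort-on-error ('A') and junk filling ('J'); the corresponding
     * lower-case letters turn an option back off, and '<' / '>' halve or
     * double the free-page cache.
     */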

    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
        malloc_junk=1;

#ifdef MALLOC_STATS
    if (malloc_stats)
        atexit(malloc_exit);
#endif /* MALLOC_STATS */

    /* Allocate one page for the page directory */
    page_dir = (struct pginfo **) MMAP(malloc_pagesize);

    if (page_dir == (struct pginfo **) -1)
        wrterror("mmap(2) failed, check limits.\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory;
     */
    malloc_origo = ((u_long)pageround((u_long)sbrk(0))) >> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Been here, done that */
    malloc_started++;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */

    if (!malloc_cache)
        malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithly (kaleb@x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = (struct pgfree *) imalloc (sizeof *px);
    errno = save_errno;
}

/*
 * Allocate a number of complete pages
 */
static void *
malloc_pages(size)
    size_t size;
{
    void *p, *delay_free = 0;
    int i;
    struct pgfree *pf;
    u_long index;

    size = pageround(size);

    p = 0;
    /* Look for free pages before asking for more */
    for(pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
        if (pf->size & malloc_pagemask)
            wrterror("(ES): junk length entry on free_list\n");
        if (!pf->size)
            wrterror("(ES): zero length entry on free_list\n");
        if (pf->page == pf->end)
            wrterror("(ES): zero entry on free_list\n");
        if (pf->page > pf->end)
            wrterror("(ES): sick entry on free_list\n");
        if ((void*)pf->page >= (void*)sbrk(0))
            wrterror("(ES): entry on free_list past brk\n");
        if (page_dir[ptr2index(pf->page)] != MALLOC_FREE)
            wrterror("(ES): non-free first page on free-list\n");
        if (page_dir[ptr2index(pf->end)-1] != MALLOC_FREE)
            wrterror("(ES): non-free last page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

        if (pf->size < size)
            continue;

        if (pf->size == size) {
            p = pf->page;
            if (pf->next)
                pf->next->prev = pf->prev;
            pf->prev->next = pf->next;
            delay_free = pf;
            break;
        }

        p = pf->page;
        pf->page = (char *)pf->page + size;
        pf->size -= size;
        break;
    }

#ifdef MALLOC_EXTRA_SANITY
    if (p && page_dir[ptr2index(p)] != MALLOC_FREE)
        wrterror("(ES): allocated non-free page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

    size >>= malloc_pageshift;

    /* Map new pages */
    if (!p)
        p = map_pages(size);

    if (p) {

        index = ptr2index(p);
        page_dir[index] = MALLOC_FIRST;
        for (i=1;i<size;i++)
            page_dir[index+i] = MALLOC_FOLLOW;

        if (malloc_junk)
            memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    if (delay_free) {
        if (!px)
            px = delay_free;
        else
            ifree(delay_free);
    }

    return p;
}

/*
 * Allocate a page of fragments
 */

static __inline__ int
malloc_make_chunks(bits)
    int bits;
{
    struct pginfo *bp;
    void *pp;
    int i, k, l;

    /* Allocate a new bucket */
    pp = malloc_pages((size_t)malloc_pagesize);
    if (!pp)
        return 0;

    /* Find length of admin structure */
    l = sizeof *bp - sizeof(u_long);
    l += sizeof(u_long) *
        (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);
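
    /*
     * For example, on a machine with 4 KB pages and 32-bit u_longs, a page
     * of 16-byte chunks (bits == 4) holds 256 chunks, so the bitmap needs
     * 8 u_longs and l works out to roughly 48 bytes; since a chunk is no
     * larger than 2*l, the pginfo below is kept at the start of the page
     * itself, at the cost of a few chunks.  For 1024-byte chunks the admin
     * structure is much smaller than half a chunk, so it is allocated
     * separately with imalloc() instead.
     */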

    /* Don't waste more than two chunks on this */
    if ((1UL<<(bits)) <= l+l) {
        bp = (struct pginfo *)pp;
    } else {
        bp = (struct pginfo *)imalloc(l);
        if (!bp) {
            ifree(pp);
            return 0;
        }
    }

    bp->size = (1UL<<bits);
    bp->shift = bits;
    bp->total = bp->free = malloc_pagesize >> bits;
    bp->page = pp;

    /* set all valid bits in the bitmap */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
        bp->bits[i / MALLOC_BITS] = ~0UL;

    for(; i < k; i++)
        bp->bits[i/MALLOC_BITS] |= 1UL<<(i%MALLOC_BITS);

    if (bp == bp->page) {
        /* Mark the ones we stole for ourselves */
        for(i=0;l > 0;i++) {
            bp->bits[i/MALLOC_BITS] &= ~(1UL<<(i%MALLOC_BITS));
            bp->free--;
            bp->total--;
            l -= (1 << bits);
        }
    }

    /* MALLOC_LOCK */

    page_dir[ptr2index(pp)] = bp;

    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return 1;
}

/*
 * Allocate a fragment
 */
static void *
malloc_bytes(size)
    size_t size;
{
    int i,j;
    u_long u;
    struct pginfo *bp;
    int k;
    u_long *lp;

    /* Don't bother with anything less than this */
    if (size < malloc_minsize)
        size = malloc_minsize;

    /* Find the right bucket */
    j = 1;
    i = size-1;
    while (i >>= 1)
        j++;

    /* If it's empty, make a page more of that size chunks */
    if (!page_dir[j] && !malloc_make_chunks(j))
        return 0;

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty */
    for (lp = bp->bits; !*lp; lp++)
        ;

    /* Find that bit, and tweak it */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
        u += u;
        k++;
    }
    *lp ^= u;

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
        page_dir[j] = bp->next;
        bp->next = 0;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    if (malloc_junk)
        memset((char *)bp->page + k, SOME_JUNK, bp->size);

    return (u_char *)bp->page + k;
}

/*
 * Allocate a piece of memory
 */
static void *
imalloc(size)
    size_t size;
{
    void *result;

    if (!malloc_started)
        malloc_init();

    if (suicide)
        abort();

    if ((size + malloc_pagesize) < size)	/* Check for overflow */
        result = 0;
    else if (size <= malloc_maxsize)
        result = malloc_bytes(size);
    else
        result = malloc_pages(size);

    if (malloc_abort && !result)
        wrterror("allocation failed.\n");

    if (malloc_zero && result)
        memset(result, 0, size);

    return result;
}
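
/*
 * For illustration, with the default 4 KB page (malloc_maxsize == 2048): a
 * request for 100 bytes goes to malloc_bytes(), which picks bucket j == 7
 * and returns a 128-byte chunk, while a request for 3000 bytes goes to
 * malloc_pages() and gets a whole 4096-byte page.
 */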

/*
 * Change the size of an allocation.
 */
static void *
irealloc(ptr, size)
    void *ptr;
    size_t size;
{
    void *p;
    u_long osize, index;
    struct pginfo **mp;
    int i;

    if (suicide)
        abort();

    if (!malloc_started) {
        wrtwarning("malloc() has never been called.\n");
        return 0;
    }

    index = ptr2index(ptr);

    if (index < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense.\n");
        return 0;
    }

    if (index > last_index) {
        wrtwarning("junk pointer, too high to make sense.\n");
        return 0;
    }

    mp = &page_dir[index];

    if (*mp == MALLOC_FIRST) {			/* Page allocation */

        /* Check the pointer */
        if ((u_long)ptr & malloc_pagemask) {
            wrtwarning("modified (page-) pointer.\n");
            return 0;
        }

        /* Find the size in bytes */
        for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
            osize += malloc_pagesize;

        if (!malloc_realloc &&			/* unless we have to, */
            size <= osize &&			/* .. or are too small, */
            size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
            return ptr;				/* don't do anything. */
        }

    } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */

        /* Check the pointer for sane values */
        if (((u_long)ptr & ((*mp)->size-1))) {
            wrtwarning("modified (chunk-) pointer.\n");
            return 0;
        }

        /* Find the chunk index in the page */
        i = ((u_long)ptr & malloc_pagemask) >> (*mp)->shift;

        /* Verify that it isn't a free chunk already */
        if ((*mp)->bits[i/MALLOC_BITS] & (1UL<<(i%MALLOC_BITS))) {
            wrtwarning("chunk is already free.\n");
            return 0;
        }

        osize = (*mp)->size;

        if (!malloc_realloc &&		/* Unless we have to, */
            size < osize &&		/* ..or are too small, */
            (size > osize/2 ||		/* ..or could use a smaller size, */
            osize == malloc_minsize)) {	/* ..(if there is one) */
            return ptr;			/* ..Don't do anything */
        }

    } else {
        wrtwarning("pointer to wrong page.\n");
        return 0;
    }

    p = imalloc(size);

    if (p) {
        /* copy the lesser of the two sizes, and free the old one */
        if (osize < size)
            memcpy(p, ptr, osize);
        else
            memcpy(p, ptr, size);
        ifree(ptr);
    }
    return p;
}

/*
 * Free a sequence of pages
 */

static __inline__ void
free_pages(ptr, index, info)
    void *ptr;
    int index;
    struct pginfo *info;
{
    int i;
    struct pgfree *pf, *pt=0;
    u_long l;
    void *tail;

    if (info == MALLOC_FREE) {
        wrtwarning("page is already free.\n");
        return;
    }

    if (info != MALLOC_FIRST) {
        wrtwarning("pointer to wrong page.\n");
        return;
    }

    if ((u_long)ptr & malloc_pagemask) {
        wrtwarning("modified (page-) pointer.\n");
        return;
    }

    /* Count how many pages and mark them free at the same time */
    page_dir[index] = MALLOC_FREE;
    for (i = 1; page_dir[index+i] == MALLOC_FOLLOW; i++)
        page_dir[index + i] = MALLOC_FREE;

    l = i << malloc_pageshift;

    if (malloc_junk)
        memset(ptr, SOME_JUNK, l);

#ifdef __FreeBSD__
    if (malloc_hint)
        madvise(ptr, l, MADV_FREE);
#endif

    tail = (char *)ptr+l;

    /* add to free-list */
    if (!px)
        px = imalloc(sizeof *px);	/* This cannot fail... */
    px->page = ptr;
    px->end = tail;
    px->size = l;
    if (!free_list.next) {

        /* Nothing on free list, put this at head */
        px->next = free_list.next;
        px->prev = &free_list;
        free_list.next = px;
        pf = px;
        px = 0;

    } else {

        /* Find the right spot, leave pf pointing to the modified entry. */
        tail = (char *)ptr+l;

        for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next)
            ; /* Race ahead here */

        if (pf->page > tail) {
            /* Insert before entry */
            px->next = pf;
            px->prev = pf->prev;
            pf->prev = px;
            px->prev->next = px;
            pf = px;
            px = 0;
        } else if (pf->end == ptr ) {
            /* Append to the previous entry */
            pf->end = (char *)pf->end + l;
            pf->size += l;
            if (pf->next && pf->end == pf->next->page ) {
                /* And collapse the next too. */
                pt = pf->next;
                pf->end = pt->end;
                pf->size += pt->size;
                pf->next = pt->next;
                if (pf->next)
                    pf->next->prev = pf;
            }
        } else if (pf->page == tail) {
            /* Prepend to entry */
            pf->size += l;
            pf->page = ptr;
        } else if (!pf->next) {
            /* Append at tail of chain */
            px->next = 0;
            px->prev = pf;
            pf->next = px;
            pf = px;
            px = 0;
        } else {
            wrterror("freelist is destroyed.\n");
        }
    }

    /* Return something to OS ? */
    if (!pf->next &&				/* If we're the last one, */
        pf->size > malloc_cache &&		/* ..and the cache is full, */
        pf->end == malloc_brk &&		/* ..and none behind us, */
        malloc_brk == sbrk(0)) {		/* ..and it's OK to do... */

        /*
         * Keep the cache intact.  Notice that the '>' above guarantees that
         * the pf will always have at least one page afterwards.
         */
        pf->end = (char *)pf->page + malloc_cache;
        pf->size = malloc_cache;

        brk(pf->end);
        malloc_brk = pf->end;

        index = ptr2index(pf->end);

        for(i=index;i <= last_index;)
            page_dir[i++] = MALLOC_NOT_MINE;

        last_index = index - 1;

        /* XXX: We could realloc/shrink the pagedir here I guess. */
    }
    if (pt)
        ifree(pt);
}
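
/*
 * The free list above is kept sorted by address and adjacent runs are
 * coalesced as pages are freed; memory is only handed back to the kernel
 * with brk() when the last run sits at the current break and has grown
 * beyond the configured cache size.
 */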

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */

/* ARGSUSED */
static __inline__ void
free_bytes(ptr, index, info)
    void *ptr;
    int index;
    struct pginfo *info;
{
    int i;
    struct pginfo **mp;
    void *vp;

    /* Find the chunk number on the page */
    i = ((u_long)ptr & malloc_pagemask) >> info->shift;

    if (((u_long)ptr & (info->size-1))) {
        wrtwarning("modified (chunk-) pointer.\n");
        return;
    }

    if (info->bits[i/MALLOC_BITS] & (1UL<<(i%MALLOC_BITS))) {
        wrtwarning("chunk is already free.\n");
        return;
    }

    if (malloc_junk)
        memset(ptr, SOME_JUNK, info->size);

    info->bits[i/MALLOC_BITS] |= 1UL<<(i%MALLOC_BITS);
    info->free++;

    mp = page_dir + info->shift;

    if (info->free == 1) {

        /* Page became non-full */

        mp = page_dir + info->shift;
        /* Insert in address order */
        while (*mp && (*mp)->next && (*mp)->next->page < info->page)
            mp = &(*mp)->next;
        info->next = *mp;
        *mp = info;
        return;
    }

    if (info->free != info->total)
        return;

    /* Find & remove this page in the queue */
    while (*mp != info) {
        mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
        if (!*mp)
            wrterror("(ES): Not on queue\n");
#endif /* MALLOC_EXTRA_SANITY */
    }
    *mp = info->next;

    /* Free the page & the info structure if need be */
    page_dir[ptr2index(info->page)] = MALLOC_FIRST;
    vp = info->page;		/* Order is important ! */
    if(vp != (void*)info)
        ifree(info);
    ifree(vp);
}
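
/*
 * Example of the bookkeeping above: freeing chunk number 37 on a page of
 * 128-byte chunks sets bit 37 % MALLOC_BITS of info->bits[37 / MALLOC_BITS]
 * and bumps info->free; once every chunk on the page is free again, the
 * page is marked MALLOC_FIRST and released through ifree()/free_pages().
 */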

static void
ifree(ptr)
    void *ptr;
{
    struct pginfo *info;
    int index;

    /* This is legal */
    if (!ptr)
        return;

    if (!malloc_started) {
        wrtwarning("malloc() has never been called.\n");
        return;
    }

    /* If we're already sinking, don't make matters any worse. */
    if (suicide)
        return;

    index = ptr2index(ptr);

    if (index < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense.\n");
        return;
    }

    if (index > last_index) {
        wrtwarning("junk pointer, too high to make sense.\n");
        return;
    }

    info = page_dir[index];

    if (info < MALLOC_MAGIC)
        free_pages(ptr, index, info);
    else
        free_bytes(ptr, index, info);
    return;
}

/*
 * These are the public exported interface routines.
 */

static int malloc_active;

void *
malloc(size_t size)
{
    register void *r;

    malloc_func = " in malloc():";
    THREAD_LOCK();
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        THREAD_UNLOCK();
        return (0);
    }
    r = imalloc(size);
    UTRACE(0, size, r);
    malloc_active--;
    THREAD_UNLOCK();
    if (malloc_xmalloc && !r)
        wrterror("out of memory.\n");
    return (r);
}

void
free(void *ptr)
{
    malloc_func = " in free():";
    THREAD_LOCK();
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        THREAD_UNLOCK();
        return;
    }
    ifree(ptr);
    UTRACE(ptr, 0, 0);
    malloc_active--;
    THREAD_UNLOCK();
    return;
}

void *
realloc(void *ptr, size_t size)
{
    register void *r;

    malloc_func = " in realloc():";
    THREAD_LOCK();
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        THREAD_UNLOCK();
        return (0);
    }
    if (!ptr) {
        r = imalloc(size);
    } else {
        r = irealloc(ptr, size);
    }
    UTRACE(ptr, size, r);
    malloc_active--;
    THREAD_UNLOCK();
    if (malloc_xmalloc && !r)
        wrterror("out of memory.\n");
    return (r);
}