1 /* $NetBSD: malloc.c,v 1.37 2001/05/03 15:35:12 christos Exp $ */ 2 3 /* 4 * ---------------------------------------------------------------------------- 5 * "THE BEER-WARE LICENSE" (Revision 42): 6 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you 7 * can do whatever you want with this stuff. If we meet some day, and you think 8 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp 9 * ---------------------------------------------------------------------------- 10 * 11 * From FreeBSD: malloc.c,v 1.43 1998/09/30 06:13:59 jb 12 * 13 */ 14 15 /* 16 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related 17 * to internal conditions and consistency in malloc.c. This has a 18 * noticeable runtime performance hit, and generally will not do you 19 * any good unless you fiddle with the internals of malloc or want 20 * to catch random pointer corruption as early as possible. 21 */ 22 #ifndef MALLOC_EXTRA_SANITY 23 #undef MALLOC_EXTRA_SANITY 24 #endif 25 26 /* 27 * What to use for Junk. This is the byte value we use to fill with 28 * when the 'J' option is enabled. 29 */ 30 #define SOME_JUNK 0xd0 /* as in "Duh" :-) */ 31 32 /* 33 * The basic parameters you can tweak. 34 * 35 * malloc_minsize minimum size of an allocation in bytes. 36 * If this is too small it's too much work 37 * to manage them. This is also the smallest 38 * unit of alignment used for the storage 39 * returned by malloc/realloc. 40 * 41 */ 42 43 #if defined(__FreeBSD__) 44 # if defined(__i386__) 45 # define malloc_minsize 16U 46 # endif 47 # if defined(__alpha__) 48 # define malloc_minsize 16U 49 # endif 50 # define HAS_UTRACE 51 # define UTRACE_LABEL 52 53 #include <sys/cdefs.h> 54 void utrace __P((struct ut *, int)); 55 56 /* 57 * Make malloc/free/realloc thread-safe in libc for use with 58 * kernel threads. 59 */ 60 # include "libc_private.h" 61 # include "spinlock.h" 62 static spinlock_t thread_lock = _SPINLOCK_INITIALIZER; 63 # define THREAD_LOCK() if (__isthreaded) _SPINLOCK(&thread_lock); 64 # define THREAD_UNLOCK() if (__isthreaded) _SPINUNLOCK(&thread_lock); 65 #endif /* __FreeBSD__ */ 66 67 #if defined(__NetBSD__) 68 # define malloc_minsize 16U 69 # define HAS_UTRACE 70 # define UTRACE_LABEL "malloc", 71 #include <sys/cdefs.h> 72 #include <sys/types.h> 73 int utrace __P((const char *, void *, size_t)); 74 #endif /* __NetBSD__ */ 75 76 #if defined(__sparc__) && defined(sun) 77 # define malloc_minsize 16U 78 # define MAP_ANON (0) 79 static int fdzero; 80 # define MMAP_FD fdzero 81 # define INIT_MMAP() \ 82 { if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \ 83 wrterror("open of /dev/zero"); } 84 #endif /* __sparc__ */ 85 86 /* Insert your combination here... */ 87 #if defined(__FOOCPU__) && defined(__BAROS__) 88 # define malloc_minsize 16U 89 #endif /* __FOOCPU__ && __BAROS__ */ 90 91 92 /* 93 * No user serviceable parts behind this point. 94 */ 95 #include "namespace.h" 96 #include <sys/types.h> 97 #include <sys/mman.h> 98 #include <errno.h> 99 #include <fcntl.h> 100 #include <stddef.h> 101 #include <stdio.h> 102 #include <stdlib.h> 103 #include <string.h> 104 #include <unistd.h> 105 106 /* 107 * This structure describes a page worth of chunks. 108 */ 109 110 struct pginfo { 111 struct pginfo *next; /* next on the free list */ 112 void *page; /* Pointer to the page */ 113 u_short size; /* size of this page's chunks */ 114 u_short shift; /* How far to shift for this size chunks */ 115 u_short free; /* How many free chunks */ 116 u_short total; /* How many chunk */ 117 u_int bits[1]; /* Which chunks are free */ 118 }; 119 120 /* 121 * This structure describes a number of free pages. 122 */ 123 124 struct pgfree { 125 struct pgfree *next; /* next run of free pages */ 126 struct pgfree *prev; /* prev run of free pages */ 127 void *page; /* pointer to free pages */ 128 void *end; /* pointer to end of free pages */ 129 size_t size; /* number of bytes free */ 130 }; 131 132 /* 133 * How many bits per u_int in the bitmap. 134 * Change only if not 8 bits/byte 135 */ 136 #define MALLOC_BITS (8*sizeof(u_int)) 137 138 /* 139 * Magic values to put in the page_directory 140 */ 141 #define MALLOC_NOT_MINE ((struct pginfo*) 0) 142 #define MALLOC_FREE ((struct pginfo*) 1) 143 #define MALLOC_FIRST ((struct pginfo*) 2) 144 #define MALLOC_FOLLOW ((struct pginfo*) 3) 145 #define MALLOC_MAGIC ((struct pginfo*) 4) 146 147 /* 148 * Page size related parameters, computed at run-time. 149 */ 150 static size_t malloc_pagesize; 151 static size_t malloc_pageshift; 152 static size_t malloc_pagemask; 153 154 #ifndef malloc_minsize 155 #define malloc_minsize 16U 156 #endif 157 158 #ifndef malloc_maxsize 159 #define malloc_maxsize ((malloc_pagesize)>>1) 160 #endif 161 162 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask))) 163 #define ptr2idx(foo) (((size_t)(u_long)(foo) >> malloc_pageshift)-malloc_origo) 164 165 #ifndef THREAD_LOCK 166 #define THREAD_LOCK() 167 #endif 168 169 #ifndef THREAD_UNLOCK 170 #define THREAD_UNLOCK() 171 #endif 172 173 #ifndef MMAP_FD 174 #define MMAP_FD (-1) 175 #endif 176 177 #ifndef INIT_MMAP 178 #define INIT_MMAP() 179 #endif 180 181 #ifndef MADV_FREE 182 #define MADV_FREE MADV_DONTNEED 183 #endif 184 185 /* Set when initialization has been done */ 186 static unsigned malloc_started; 187 188 /* Recusion flag for public interface. */ 189 static int malloc_active; 190 191 /* Number of free pages we cache */ 192 static unsigned malloc_cache = 16; 193 194 /* The offset from pagenumber to index into the page directory */ 195 static size_t malloc_origo; 196 197 /* The last index in the page directory we care about */ 198 static size_t last_idx; 199 200 /* Pointer to page directory. Allocated "as if with" malloc */ 201 static struct pginfo **page_dir; 202 203 /* How many slots in the page directory */ 204 static unsigned malloc_ninfo; 205 206 /* Free pages line up here */ 207 static struct pgfree free_list; 208 209 /* Abort(), user doesn't handle problems. */ 210 static int malloc_abort; 211 212 /* Are we trying to die ? */ 213 static int suicide; 214 215 /* always realloc ? */ 216 static int malloc_realloc; 217 218 /* pass the kernel a hint on free pages ? */ 219 static int malloc_hint = 0; 220 221 /* xmalloc behaviour ? */ 222 static int malloc_xmalloc; 223 224 /* sysv behaviour for malloc(0) ? */ 225 static int malloc_sysv; 226 227 /* zero fill ? */ 228 static int malloc_zero; 229 230 /* junk fill ? */ 231 static int malloc_junk; 232 233 #ifdef HAS_UTRACE 234 235 /* utrace ? */ 236 static int malloc_utrace; 237 238 struct ut { void *p; size_t s; void *r; }; 239 240 #define UTRACE(a, b, c) \ 241 if (malloc_utrace) { \ 242 struct ut u; \ 243 u.p=a; u.s = b; u.r=c; \ 244 utrace(UTRACE_LABEL (void *) &u, sizeof u); \ 245 } 246 #else /* !HAS_UTRACE */ 247 #define UTRACE(a,b,c) 248 #endif /* HAS_UTRACE */ 249 250 /* my last break. */ 251 static void *malloc_brk; 252 253 /* one location cache for free-list holders */ 254 static struct pgfree *px; 255 256 /* compile-time options */ 257 char *malloc_options; 258 259 /* Name of the current public function */ 260 static char *malloc_func; 261 262 /* Macro for mmap */ 263 #define MMAP(size) \ 264 mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \ 265 MMAP_FD, (off_t)0); 266 267 /* 268 * Necessary function declarations 269 */ 270 static int extend_pgdir(size_t idx); 271 static void *imalloc(size_t size); 272 static void ifree(void *ptr); 273 static void *irealloc(void *ptr, size_t size); 274 275 static void 276 wrterror(char *p) 277 { 278 const char *progname = getprogname(); 279 char *q = " error: "; 280 write(STDERR_FILENO, progname, strlen(progname)); 281 write(STDERR_FILENO, malloc_func, strlen(malloc_func)); 282 write(STDERR_FILENO, q, strlen(q)); 283 write(STDERR_FILENO, p, strlen(p)); 284 suicide = 1; 285 abort(); 286 } 287 288 static void 289 wrtwarning(char *p) 290 { 291 const char *progname = getprogname(); 292 char *q = " warning: "; 293 if (malloc_abort) 294 wrterror(p); 295 write(STDERR_FILENO, progname, strlen(progname)); 296 write(STDERR_FILENO, malloc_func, strlen(malloc_func)); 297 write(STDERR_FILENO, q, strlen(q)); 298 write(STDERR_FILENO, p, strlen(p)); 299 } 300 301 302 /* 303 * Allocate a number of pages from the OS 304 */ 305 static void * 306 map_pages(size_t pages) 307 { 308 caddr_t result, tail; 309 310 result = (caddr_t)pageround((size_t)(u_long)sbrk((intptr_t)0)); 311 tail = result + (pages << malloc_pageshift); 312 313 if (brk(tail)) { 314 #ifdef MALLOC_EXTRA_SANITY 315 wrterror("(ES): map_pages fails\n"); 316 #endif /* MALLOC_EXTRA_SANITY */ 317 return 0; 318 } 319 last_idx = ptr2idx(tail) - 1; 320 malloc_brk = tail; 321 322 if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) 323 return 0;; 324 325 return result; 326 } 327 328 /* 329 * Extend page directory 330 */ 331 static int 332 extend_pgdir(size_t idx) 333 { 334 struct pginfo **new, **old; 335 size_t newlen, oldlen; 336 337 /* check for overflow */ 338 if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1) 339 + (malloc_pagesize / sizeof *page_dir)) < idx) { 340 errno = ENOMEM; 341 return 0; 342 } 343 344 /* Make it this many pages */ 345 newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize; 346 347 /* remember the old mapping size */ 348 oldlen = malloc_ninfo * sizeof *page_dir; 349 350 /* 351 * NOTE: we allocate new pages and copy the directory rather than tempt 352 * fate by trying to "grow" the region.. There is nothing to prevent 353 * us from accidently re-mapping space that's been allocated by our caller 354 * via dlopen() or other mmap(). 355 * 356 * The copy problem is not too bad, as there is 4K of page index per 357 * 4MB of malloc arena. 358 * 359 * We can totally avoid the copy if we open a file descriptor to associate 360 * the anon mappings with. Then, when we remap the pages at the new 361 * address, the old pages will be "magically" remapped.. But this means 362 * keeping open a "secret" file descriptor..... 363 */ 364 365 /* Get new pages */ 366 new = (struct pginfo**) MMAP(newlen); 367 if (new == (struct pginfo **)-1) 368 return 0; 369 370 /* Copy the old stuff */ 371 memcpy(new, page_dir, oldlen); 372 373 /* register the new size */ 374 malloc_ninfo = newlen / sizeof *page_dir; 375 376 /* swap the pointers */ 377 old = page_dir; 378 page_dir = new; 379 380 /* Now free the old stuff */ 381 munmap(old, oldlen); 382 return 1; 383 } 384 385 /* 386 * Initialize the world 387 */ 388 static void 389 malloc_init (void) 390 { 391 char *p, b[64]; 392 int i, j; 393 int errnosave; 394 395 /* 396 * Compute page-size related variables. 397 */ 398 malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE); 399 malloc_pagemask = malloc_pagesize - 1; 400 for (malloc_pageshift = 0; 401 (1UL << malloc_pageshift) != malloc_pagesize; 402 malloc_pageshift++) 403 /* nothing */ ; 404 405 INIT_MMAP(); 406 407 #ifdef MALLOC_EXTRA_SANITY 408 malloc_junk = 1; 409 #endif /* MALLOC_EXTRA_SANITY */ 410 411 for (i = 0; i < 3; i++) { 412 if (i == 0) { 413 errnosave = errno; 414 j = readlink("/etc/malloc.conf", b, sizeof b - 1); 415 errno = errnosave; 416 if (j <= 0) 417 continue; 418 b[j] = '\0'; 419 p = b; 420 } else if (i == 1) { 421 p = getenv("MALLOC_OPTIONS"); 422 } else { 423 p = malloc_options; 424 } 425 for (; p && *p; p++) { 426 switch (*p) { 427 case '>': malloc_cache <<= 1; break; 428 case '<': malloc_cache >>= 1; break; 429 case 'a': malloc_abort = 0; break; 430 case 'A': malloc_abort = 1; break; 431 case 'h': malloc_hint = 0; break; 432 case 'H': malloc_hint = 1; break; 433 case 'r': malloc_realloc = 0; break; 434 case 'R': malloc_realloc = 1; break; 435 case 'j': malloc_junk = 0; break; 436 case 'J': malloc_junk = 1; break; 437 #ifdef HAS_UTRACE 438 case 'u': malloc_utrace = 0; break; 439 case 'U': malloc_utrace = 1; break; 440 #endif 441 case 'v': malloc_sysv = 0; break; 442 case 'V': malloc_sysv = 1; break; 443 case 'x': malloc_xmalloc = 0; break; 444 case 'X': malloc_xmalloc = 1; break; 445 case 'z': malloc_zero = 0; break; 446 case 'Z': malloc_zero = 1; break; 447 default: 448 j = malloc_abort; 449 malloc_abort = 0; 450 wrtwarning("unknown char in MALLOC_OPTIONS\n"); 451 malloc_abort = j; 452 break; 453 } 454 } 455 } 456 457 UTRACE(0, 0, 0); 458 459 /* 460 * We want junk in the entire allocation, and zero only in the part 461 * the user asked for. 462 */ 463 if (malloc_zero) 464 malloc_junk=1; 465 466 /* 467 * If we run with junk (or implicitly from above: zero), we want to 468 * force realloc() to get new storage, so we can DTRT with it. 469 */ 470 if (malloc_junk) 471 malloc_realloc=1; 472 473 /* Allocate one page for the page directory */ 474 page_dir = (struct pginfo **) MMAP(malloc_pagesize); 475 476 if (page_dir == (struct pginfo **) -1) 477 wrterror("mmap(2) failed, check limits.\n"); 478 479 /* 480 * We need a maximum of malloc_pageshift buckets, steal these from the 481 * front of the page_directory; 482 */ 483 malloc_origo = pageround((size_t)(u_long)sbrk((intptr_t)0)) 484 >> malloc_pageshift; 485 malloc_origo -= malloc_pageshift; 486 487 malloc_ninfo = malloc_pagesize / sizeof *page_dir; 488 489 /* Recalculate the cache size in bytes, and make sure it's nonzero */ 490 491 if (!malloc_cache) 492 malloc_cache++; 493 494 malloc_cache <<= malloc_pageshift; 495 496 /* 497 * This is a nice hack from Kaleb Keithly (kaleb@x.org). 498 * We can sbrk(2) further back when we keep this on a low address. 499 */ 500 px = (struct pgfree *) imalloc (sizeof *px); 501 502 /* Been here, done that */ 503 malloc_started++; 504 } 505 506 /* 507 * Allocate a number of complete pages 508 */ 509 static void * 510 malloc_pages(size_t size) 511 { 512 void *p, *delay_free = 0; 513 int i; 514 struct pgfree *pf; 515 size_t idx; 516 517 size = pageround(size); 518 519 p = 0; 520 521 /* Look for free pages before asking for more */ 522 for(pf = free_list.next; pf; pf = pf->next) { 523 524 #ifdef MALLOC_EXTRA_SANITY 525 if (pf->size & malloc_pagemask) 526 wrterror("(ES): junk length entry on free_list\n"); 527 if (!pf->size) 528 wrterror("(ES): zero length entry on free_list\n"); 529 if (pf->page == pf->end) 530 wrterror("(ES): zero entry on free_list\n"); 531 if (pf->page > pf->end) 532 wrterror("(ES): sick entry on free_list\n"); 533 if ((void*)pf->page >= (void*)sbrk(0)) 534 wrterror("(ES): entry on free_list past brk\n"); 535 if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE) 536 wrterror("(ES): non-free first page on free-list\n"); 537 if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE) 538 wrterror("(ES): non-free last page on free-list\n"); 539 #endif /* MALLOC_EXTRA_SANITY */ 540 541 if (pf->size < size) 542 continue; 543 544 if (pf->size == size) { 545 p = pf->page; 546 if (pf->next) 547 pf->next->prev = pf->prev; 548 pf->prev->next = pf->next; 549 delay_free = pf; 550 break; 551 } 552 553 p = pf->page; 554 pf->page = (char *)pf->page + size; 555 pf->size -= size; 556 break; 557 } 558 559 #ifdef MALLOC_EXTRA_SANITY 560 if (p && page_dir[ptr2idx(p)] != MALLOC_FREE) 561 wrterror("(ES): allocated non-free page on free-list\n"); 562 #endif /* MALLOC_EXTRA_SANITY */ 563 564 size >>= malloc_pageshift; 565 566 /* Map new pages */ 567 if (!p) 568 p = map_pages(size); 569 570 if (p) { 571 572 idx = ptr2idx(p); 573 page_dir[idx] = MALLOC_FIRST; 574 for (i=1;i<size;i++) 575 page_dir[idx+i] = MALLOC_FOLLOW; 576 577 if (malloc_junk) 578 memset(p, SOME_JUNK, size << malloc_pageshift); 579 } 580 581 if (delay_free) { 582 if (!px) 583 px = delay_free; 584 else 585 ifree(delay_free); 586 } 587 588 return p; 589 } 590 591 /* 592 * Allocate a page of fragments 593 */ 594 595 static __inline__ int 596 malloc_make_chunks(int bits) 597 { 598 struct pginfo *bp; 599 void *pp; 600 int i, k, l; 601 602 /* Allocate a new bucket */ 603 pp = malloc_pages(malloc_pagesize); 604 if (!pp) 605 return 0; 606 607 /* Find length of admin structure */ 608 l = (int)offsetof(struct pginfo, bits[0]); 609 l += sizeof bp->bits[0] * 610 (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS); 611 612 /* Don't waste more than two chunks on this */ 613 if ((1<<(bits)) <= l+l) { 614 bp = (struct pginfo *)pp; 615 } else { 616 bp = (struct pginfo *)imalloc((size_t)l); 617 if (!bp) { 618 ifree(pp); 619 return 0; 620 } 621 } 622 623 bp->size = (1<<bits); 624 bp->shift = bits; 625 bp->total = bp->free = malloc_pagesize >> bits; 626 bp->page = pp; 627 628 /* set all valid bits in the bitmap */ 629 k = bp->total; 630 i = 0; 631 632 /* Do a bunch at a time */ 633 for(;k-i >= MALLOC_BITS; i += MALLOC_BITS) 634 bp->bits[i / MALLOC_BITS] = ~0U; 635 636 for(; i < k; i++) 637 bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS); 638 639 if (bp == bp->page) { 640 /* Mark the ones we stole for ourselves */ 641 for(i=0;l > 0;i++) { 642 bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS)); 643 bp->free--; 644 bp->total--; 645 l -= (1 << bits); 646 } 647 } 648 649 /* MALLOC_LOCK */ 650 651 page_dir[ptr2idx(pp)] = bp; 652 653 bp->next = page_dir[bits]; 654 page_dir[bits] = bp; 655 656 /* MALLOC_UNLOCK */ 657 658 return 1; 659 } 660 661 /* 662 * Allocate a fragment 663 */ 664 static void * 665 malloc_bytes(size_t size) 666 { 667 size_t i; 668 int j; 669 u_int u; 670 struct pginfo *bp; 671 int k; 672 u_int *lp; 673 674 /* Don't bother with anything less than this */ 675 if (size < malloc_minsize) 676 size = malloc_minsize; 677 678 /* Find the right bucket */ 679 j = 1; 680 i = size-1; 681 while (i >>= 1) 682 j++; 683 684 /* If it's empty, make a page more of that size chunks */ 685 if (!page_dir[j] && !malloc_make_chunks(j)) 686 return 0; 687 688 bp = page_dir[j]; 689 690 /* Find first word of bitmap which isn't empty */ 691 for (lp = bp->bits; !*lp; lp++) 692 ; 693 694 /* Find that bit, and tweak it */ 695 u = 1; 696 k = 0; 697 while (!(*lp & u)) { 698 u += u; 699 k++; 700 } 701 *lp ^= u; 702 703 /* If there are no more free, remove from free-list */ 704 if (!--bp->free) { 705 page_dir[j] = bp->next; 706 bp->next = 0; 707 } 708 709 /* Adjust to the real offset of that chunk */ 710 k += (lp-bp->bits)*MALLOC_BITS; 711 k <<= bp->shift; 712 713 if (malloc_junk) 714 memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size); 715 716 return (u_char *)bp->page + k; 717 } 718 719 /* 720 * Allocate a piece of memory 721 */ 722 static void * 723 imalloc(size_t size) 724 { 725 void *result; 726 727 if (suicide) 728 abort(); 729 730 if ((size + malloc_pagesize) < size) /* Check for overflow */ 731 result = 0; 732 else if (size <= malloc_maxsize) 733 result = malloc_bytes(size); 734 else 735 result = malloc_pages(size); 736 737 if (malloc_abort && !result) 738 wrterror("allocation failed.\n"); 739 740 if (malloc_zero && result) 741 memset(result, 0, size); 742 743 return result; 744 } 745 746 /* 747 * Change the size of an allocation. 748 */ 749 static void * 750 irealloc(void *ptr, size_t size) 751 { 752 void *p; 753 size_t osize, idx; 754 struct pginfo **mp; 755 size_t i; 756 757 if (suicide) 758 abort(); 759 760 idx = ptr2idx(ptr); 761 762 if (idx < malloc_pageshift) { 763 wrtwarning("junk pointer, too low to make sense.\n"); 764 return 0; 765 } 766 767 if (idx > last_idx) { 768 wrtwarning("junk pointer, too high to make sense.\n"); 769 return 0; 770 } 771 772 mp = &page_dir[idx]; 773 774 if (*mp == MALLOC_FIRST) { /* Page allocation */ 775 776 /* Check the pointer */ 777 if ((size_t)(u_long)ptr & malloc_pagemask) { 778 wrtwarning("modified (page-) pointer.\n"); 779 return 0; 780 } 781 782 /* Find the size in bytes */ 783 for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;) 784 osize += malloc_pagesize; 785 786 if (!malloc_realloc && /* unless we have to, */ 787 size <= osize && /* .. or are too small, */ 788 size > (osize - malloc_pagesize)) { /* .. or can free a page, */ 789 return ptr; /* don't do anything. */ 790 } 791 792 } else if (*mp >= MALLOC_MAGIC) { /* Chunk allocation */ 793 794 /* Check the pointer for sane values */ 795 if (((size_t)(u_long)ptr & ((*mp)->size-1))) { 796 wrtwarning("modified (chunk-) pointer.\n"); 797 return 0; 798 } 799 800 /* Find the chunk index in the page */ 801 i = ((size_t)(u_long)ptr & malloc_pagemask) >> (*mp)->shift; 802 803 /* Verify that it isn't a free chunk already */ 804 if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) { 805 wrtwarning("chunk is already free.\n"); 806 return 0; 807 } 808 809 osize = (*mp)->size; 810 811 if (!malloc_realloc && /* Unless we have to, */ 812 size < osize && /* ..or are too small, */ 813 (size > osize/2 || /* ..or could use a smaller size, */ 814 osize == malloc_minsize)) { /* ..(if there is one) */ 815 return ptr; /* ..Don't do anything */ 816 } 817 818 } else { 819 wrtwarning("pointer to wrong page.\n"); 820 return 0; 821 } 822 823 p = imalloc(size); 824 825 if (p) { 826 /* copy the lesser of the two sizes, and free the old one */ 827 if (!size || !osize) 828 ; 829 else if (osize < size) 830 memcpy(p, ptr, osize); 831 else 832 memcpy(p, ptr, size); 833 ifree(ptr); 834 } 835 return p; 836 } 837 838 /* 839 * Free a sequence of pages 840 */ 841 842 static __inline__ void 843 free_pages(void *ptr, size_t idx, struct pginfo *info) 844 { 845 size_t i; 846 struct pgfree *pf, *pt=0; 847 size_t l; 848 void *tail; 849 850 if (info == MALLOC_FREE) { 851 wrtwarning("page is already free.\n"); 852 return; 853 } 854 855 if (info != MALLOC_FIRST) { 856 wrtwarning("pointer to wrong page.\n"); 857 return; 858 } 859 860 if ((size_t)(u_long)ptr & malloc_pagemask) { 861 wrtwarning("modified (page-) pointer.\n"); 862 return; 863 } 864 865 /* Count how many pages and mark them free at the same time */ 866 page_dir[idx] = MALLOC_FREE; 867 for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++) 868 page_dir[idx + i] = MALLOC_FREE; 869 870 l = i << malloc_pageshift; 871 872 if (malloc_junk) 873 memset(ptr, SOME_JUNK, l); 874 875 if (malloc_hint) 876 madvise(ptr, l, MADV_FREE); 877 878 tail = (char *)ptr+l; 879 880 /* add to free-list */ 881 if (!px) 882 px = imalloc(sizeof *pt); /* This cannot fail... */ 883 px->page = ptr; 884 px->end = tail; 885 px->size = l; 886 if (!free_list.next) { 887 888 /* Nothing on free list, put this at head */ 889 px->next = free_list.next; 890 px->prev = &free_list; 891 free_list.next = px; 892 pf = px; 893 px = 0; 894 895 } else { 896 897 /* Find the right spot, leave pf pointing to the modified entry. */ 898 tail = (char *)ptr+l; 899 900 for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next) 901 ; /* Race ahead here */ 902 903 if (pf->page > tail) { 904 /* Insert before entry */ 905 px->next = pf; 906 px->prev = pf->prev; 907 pf->prev = px; 908 px->prev->next = px; 909 pf = px; 910 px = 0; 911 } else if (pf->end == ptr ) { 912 /* Append to the previous entry */ 913 pf->end = (char *)pf->end + l; 914 pf->size += l; 915 if (pf->next && pf->end == pf->next->page ) { 916 /* And collapse the next too. */ 917 pt = pf->next; 918 pf->end = pt->end; 919 pf->size += pt->size; 920 pf->next = pt->next; 921 if (pf->next) 922 pf->next->prev = pf; 923 } 924 } else if (pf->page == tail) { 925 /* Prepend to entry */ 926 pf->size += l; 927 pf->page = ptr; 928 } else if (!pf->next) { 929 /* Append at tail of chain */ 930 px->next = 0; 931 px->prev = pf; 932 pf->next = px; 933 pf = px; 934 px = 0; 935 } else { 936 wrterror("freelist is destroyed.\n"); 937 } 938 } 939 940 /* Return something to OS ? */ 941 if (!pf->next && /* If we're the last one, */ 942 pf->size > malloc_cache && /* ..and the cache is full, */ 943 pf->end == malloc_brk && /* ..and none behind us, */ 944 malloc_brk == sbrk((intptr_t)0)) { /* ..and it's OK to do... */ 945 946 /* 947 * Keep the cache intact. Notice that the '>' above guarantees that 948 * the pf will always have at least one page afterwards. 949 */ 950 pf->end = (char *)pf->page + malloc_cache; 951 pf->size = malloc_cache; 952 953 brk(pf->end); 954 malloc_brk = pf->end; 955 956 idx = ptr2idx(pf->end); 957 last_idx = idx - 1; 958 959 for(i=idx;i <= last_idx;) 960 page_dir[i++] = MALLOC_NOT_MINE; 961 962 /* XXX: We could realloc/shrink the pagedir here I guess. */ 963 } 964 if (pt) 965 ifree(pt); 966 } 967 968 /* 969 * Free a chunk, and possibly the page it's on, if the page becomes empty. 970 */ 971 972 static __inline__ void 973 free_bytes(void *ptr, size_t idx, struct pginfo *info) 974 { 975 size_t i; 976 struct pginfo **mp; 977 void *vp; 978 979 /* Find the chunk number on the page */ 980 i = ((size_t)(u_long)ptr & malloc_pagemask) >> info->shift; 981 982 if (((size_t)(u_long)ptr & (info->size-1))) { 983 wrtwarning("modified (chunk-) pointer.\n"); 984 return; 985 } 986 987 if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) { 988 wrtwarning("chunk is already free.\n"); 989 return; 990 } 991 992 if (malloc_junk) 993 memset(ptr, SOME_JUNK, (size_t)info->size); 994 995 info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS); 996 info->free++; 997 998 mp = page_dir + info->shift; 999 1000 if (info->free == 1) { 1001 1002 /* Page became non-full */ 1003 1004 mp = page_dir + info->shift; 1005 /* Insert in address order */ 1006 while (*mp && (*mp)->next && (*mp)->next->page < info->page) 1007 mp = &(*mp)->next; 1008 info->next = *mp; 1009 *mp = info; 1010 return; 1011 } 1012 1013 if (info->free != info->total) 1014 return; 1015 1016 /* Find & remove this page in the queue */ 1017 while (*mp != info) { 1018 mp = &((*mp)->next); 1019 #ifdef MALLOC_EXTRA_SANITY 1020 if (!*mp) 1021 wrterror("(ES): Not on queue\n"); 1022 #endif /* MALLOC_EXTRA_SANITY */ 1023 } 1024 *mp = info->next; 1025 1026 /* Free the page & the info structure if need be */ 1027 page_dir[idx] = MALLOC_FIRST; 1028 vp = info->page; /* Order is important ! */ 1029 if(vp != (void*)info) 1030 ifree(info); 1031 ifree(vp); 1032 } 1033 1034 static void 1035 ifree(void *ptr) 1036 { 1037 struct pginfo *info; 1038 size_t idx; 1039 1040 /* This is legal */ 1041 if (!ptr) 1042 return; 1043 1044 if (!malloc_started) { 1045 wrtwarning("malloc() has never been called.\n"); 1046 return; 1047 } 1048 1049 /* If we're already sinking, don't make matters any worse. */ 1050 if (suicide) 1051 return; 1052 1053 idx = ptr2idx(ptr); 1054 1055 if (idx < malloc_pageshift) { 1056 wrtwarning("junk pointer, too low to make sense.\n"); 1057 return; 1058 } 1059 1060 if (idx > last_idx) { 1061 wrtwarning("junk pointer, too high to make sense.\n"); 1062 return; 1063 } 1064 1065 info = page_dir[idx]; 1066 1067 if (info < MALLOC_MAGIC) 1068 free_pages(ptr, idx, info); 1069 else 1070 free_bytes(ptr, idx, info); 1071 return; 1072 } 1073 1074 /* 1075 * These are the public exported interface routines. 1076 */ 1077 1078 1079 void * 1080 malloc(size_t size) 1081 { 1082 register void *r; 1083 1084 THREAD_LOCK(); 1085 malloc_func = " in malloc():"; 1086 if (malloc_active++) { 1087 wrtwarning("recursive call.\n"); 1088 malloc_active--; 1089 return (0); 1090 } 1091 if (!malloc_started) 1092 malloc_init(); 1093 if (malloc_sysv && !size) 1094 r = 0; 1095 else 1096 r = imalloc(size); 1097 UTRACE(0, size, r); 1098 malloc_active--; 1099 THREAD_UNLOCK(); 1100 if (r == NULL && (size != 0 || !malloc_sysv)) { 1101 if (malloc_xmalloc) 1102 wrterror("out of memory.\n"); 1103 errno = ENOMEM; 1104 } 1105 return (r); 1106 } 1107 1108 void 1109 free(void *ptr) 1110 { 1111 THREAD_LOCK(); 1112 malloc_func = " in free():"; 1113 if (malloc_active++) { 1114 wrtwarning("recursive call.\n"); 1115 malloc_active--; 1116 return; 1117 } else { 1118 ifree(ptr); 1119 UTRACE(ptr, 0, 0); 1120 } 1121 malloc_active--; 1122 THREAD_UNLOCK(); 1123 return; 1124 } 1125 1126 void * 1127 realloc(void *ptr, size_t size) 1128 { 1129 register void *r; 1130 1131 THREAD_LOCK(); 1132 malloc_func = " in realloc():"; 1133 if (malloc_active++) { 1134 wrtwarning("recursive call.\n"); 1135 malloc_active--; 1136 return (0); 1137 } 1138 if (ptr && !malloc_started) { 1139 wrtwarning("malloc() has never been called.\n"); 1140 ptr = 0; 1141 } 1142 if (!malloc_started) 1143 malloc_init(); 1144 if (malloc_sysv && !size) { 1145 ifree(ptr); 1146 r = 0; 1147 } else if (!ptr) { 1148 r = imalloc(size); 1149 } else { 1150 r = irealloc(ptr, size); 1151 } 1152 UTRACE(ptr, size, r); 1153 malloc_active--; 1154 THREAD_UNLOCK(); 1155 if (r == NULL && (size != 0 || !malloc_sysv)) { 1156 if (malloc_xmalloc) 1157 wrterror("out of memory.\n"); 1158 errno = ENOMEM; 1159 } 1160 return (r); 1161 } 1162