1 /* $NetBSD: malloc.c,v 1.48 2006/11/24 19:37:02 christos Exp $ */ 2 3 /* 4 * ---------------------------------------------------------------------------- 5 * "THE BEER-WARE LICENSE" (Revision 42): 6 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you 7 * can do whatever you want with this stuff. If we meet some day, and you think 8 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp 9 * ---------------------------------------------------------------------------- 10 * 11 * From FreeBSD: malloc.c,v 1.43 1998/09/30 06:13:59 jb 12 * 13 */ 14 15 /* 16 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related 17 * to internal conditions and consistency in malloc.c. This has a 18 * noticeable runtime performance hit, and generally will not do you 19 * any good unless you fiddle with the internals of malloc or want 20 * to catch random pointer corruption as early as possible. 21 */ 22 #ifndef MALLOC_EXTRA_SANITY 23 #undef MALLOC_EXTRA_SANITY 24 #endif 25 26 /* 27 * What to use for Junk. This is the byte value we use to fill with 28 * when the 'J' option is enabled. 29 */ 30 #define SOME_JUNK 0xd0 /* as in "Duh" :-) */ 31 32 /* 33 * The basic parameters you can tweak. 34 * 35 * malloc_minsize minimum size of an allocation in bytes. 36 * If this is too small it's too much work 37 * to manage them. This is also the smallest 38 * unit of alignment used for the storage 39 * returned by malloc/realloc. 40 * 41 */ 42 43 #if defined(__FreeBSD__) 44 # if defined(__i386__) 45 # define malloc_minsize 16U 46 # endif 47 # if defined(__alpha__) 48 # define malloc_minsize 16U 49 # endif 50 # define HAS_UTRACE 51 # define UTRACE_LABEL 52 53 #include <sys/cdefs.h> 54 void utrace(struct ut *, int); 55 56 /* 57 * Make malloc/free/realloc thread-safe in libc for use with 58 * kernel threads. 59 */ 60 # include "libc_private.h" 61 # include "spinlock.h" 62 static spinlock_t thread_lock = _SPINLOCK_INITIALIZER; 63 # define THREAD_LOCK() if (__isthreaded) _SPINLOCK(&thread_lock); 64 # define THREAD_UNLOCK() if (__isthreaded) _SPINUNLOCK(&thread_lock); 65 #endif /* __FreeBSD__ */ 66 67 #if defined(__NetBSD__) 68 # define malloc_minsize 16U 69 # define HAS_UTRACE 70 # define UTRACE_LABEL "malloc", 71 #include <sys/cdefs.h> 72 #if defined(LIBC_SCCS) && !defined(lint) 73 __RCSID("$NetBSD: malloc.c,v 1.48 2006/11/24 19:37:02 christos Exp $"); 74 #endif /* LIBC_SCCS and not lint */ 75 #include <sys/types.h> 76 int utrace(const char *, void *, size_t); 77 78 #include <reentrant.h> 79 extern int __isthreaded; 80 static mutex_t thread_lock = MUTEX_INITIALIZER; 81 #define THREAD_LOCK() if (__isthreaded) mutex_lock(&thread_lock); 82 #define THREAD_UNLOCK() if (__isthreaded) mutex_unlock(&thread_lock); 83 #endif /* __NetBSD__ */ 84 85 #if defined(__sparc__) && defined(sun) 86 # define malloc_minsize 16U 87 # define MAP_ANON (0) 88 static int fdzero; 89 # define MMAP_FD fdzero 90 # define INIT_MMAP() \ 91 { if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \ 92 wrterror("open of /dev/zero"); } 93 #endif /* __sparc__ */ 94 95 /* Insert your combination here... */ 96 #if defined(__FOOCPU__) && defined(__BAROS__) 97 # define malloc_minsize 16U 98 #endif /* __FOOCPU__ && __BAROS__ */ 99 100 101 /* 102 * No user serviceable parts behind this point. 103 */ 104 #include "namespace.h" 105 #include <sys/types.h> 106 #include <sys/mman.h> 107 #include <errno.h> 108 #include <fcntl.h> 109 #include <stddef.h> 110 #include <stdio.h> 111 #include <stdlib.h> 112 #include <string.h> 113 #include <unistd.h> 114 115 /* 116 * This structure describes a page worth of chunks. 117 */ 118 119 struct pginfo { 120 struct pginfo *next; /* next on the free list */ 121 void *page; /* Pointer to the page */ 122 u_short size; /* size of this page's chunks */ 123 u_short shift; /* How far to shift for this size chunks */ 124 u_short free; /* How many free chunks */ 125 u_short total; /* How many chunk */ 126 u_int bits[1]; /* Which chunks are free */ 127 }; 128 129 /* 130 * This structure describes a number of free pages. 131 */ 132 133 struct pgfree { 134 struct pgfree *next; /* next run of free pages */ 135 struct pgfree *prev; /* prev run of free pages */ 136 void *page; /* pointer to free pages */ 137 void *end; /* pointer to end of free pages */ 138 size_t size; /* number of bytes free */ 139 }; 140 141 /* 142 * How many bits per u_int in the bitmap. 143 * Change only if not 8 bits/byte 144 */ 145 #define MALLOC_BITS ((int)(8*sizeof(u_int))) 146 147 /* 148 * Magic values to put in the page_directory 149 */ 150 #define MALLOC_NOT_MINE ((struct pginfo*) 0) 151 #define MALLOC_FREE ((struct pginfo*) 1) 152 #define MALLOC_FIRST ((struct pginfo*) 2) 153 #define MALLOC_FOLLOW ((struct pginfo*) 3) 154 #define MALLOC_MAGIC ((struct pginfo*) 4) 155 156 /* 157 * Page size related parameters, computed at run-time. 158 */ 159 static size_t malloc_pagesize; 160 static size_t malloc_pageshift; 161 static size_t malloc_pagemask; 162 163 #ifndef malloc_minsize 164 #define malloc_minsize 16U 165 #endif 166 167 #ifndef malloc_maxsize 168 #define malloc_maxsize ((malloc_pagesize)>>1) 169 #endif 170 171 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask))) 172 #define ptr2idx(foo) \ 173 (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo) 174 175 #ifndef THREAD_LOCK 176 #define THREAD_LOCK() 177 #endif 178 179 #ifndef THREAD_UNLOCK 180 #define THREAD_UNLOCK() 181 #endif 182 183 #ifndef MMAP_FD 184 #define MMAP_FD (-1) 185 #endif 186 187 #ifndef INIT_MMAP 188 #define INIT_MMAP() 189 #endif 190 191 #ifndef MADV_FREE 192 #define MADV_FREE MADV_DONTNEED 193 #endif 194 195 /* Set when initialization has been done */ 196 static unsigned malloc_started; 197 198 /* Recusion flag for public interface. */ 199 static int malloc_active; 200 201 /* Number of free pages we cache */ 202 static unsigned malloc_cache = 16; 203 204 /* The offset from pagenumber to index into the page directory */ 205 static size_t malloc_origo; 206 207 /* The last index in the page directory we care about */ 208 static size_t last_idx; 209 210 /* Pointer to page directory. Allocated "as if with" malloc */ 211 static struct pginfo **page_dir; 212 213 /* How many slots in the page directory */ 214 static unsigned malloc_ninfo; 215 216 /* Free pages line up here */ 217 static struct pgfree free_list; 218 219 /* Abort(), user doesn't handle problems. */ 220 static int malloc_abort; 221 222 /* Are we trying to die ? */ 223 static int suicide; 224 225 /* always realloc ? */ 226 static int malloc_realloc; 227 228 /* pass the kernel a hint on free pages ? */ 229 static int malloc_hint = 0; 230 231 /* xmalloc behaviour ? */ 232 static int malloc_xmalloc; 233 234 /* sysv behaviour for malloc(0) ? */ 235 static int malloc_sysv; 236 237 /* zero fill ? */ 238 static int malloc_zero; 239 240 /* junk fill ? */ 241 static int malloc_junk; 242 243 #ifdef HAS_UTRACE 244 245 /* utrace ? */ 246 static int malloc_utrace; 247 248 struct ut { void *p; size_t s; void *r; }; 249 250 #define UTRACE(a, b, c) \ 251 if (malloc_utrace) { \ 252 struct ut u; \ 253 u.p=a; u.s = b; u.r=c; \ 254 utrace(UTRACE_LABEL (void *) &u, sizeof u); \ 255 } 256 #else /* !HAS_UTRACE */ 257 #define UTRACE(a,b,c) 258 #endif /* HAS_UTRACE */ 259 260 /* my last break. */ 261 static void *malloc_brk; 262 263 /* one location cache for free-list holders */ 264 static struct pgfree *px; 265 266 /* compile-time options */ 267 char *malloc_options; 268 269 /* Name of the current public function */ 270 static const char *malloc_func; 271 272 /* Macro for mmap */ 273 #define MMAP(size) \ 274 mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \ 275 MMAP_FD, (off_t)0); 276 277 /* 278 * Necessary function declarations 279 */ 280 static int extend_pgdir(size_t idx); 281 static void *imalloc(size_t size); 282 static void ifree(void *ptr); 283 static void *irealloc(void *ptr, size_t size); 284 285 static void 286 wrterror(const char *p) 287 { 288 const char *progname = getprogname(); 289 const char *q = " error: "; 290 write(STDERR_FILENO, progname, strlen(progname)); 291 write(STDERR_FILENO, malloc_func, strlen(malloc_func)); 292 write(STDERR_FILENO, q, strlen(q)); 293 write(STDERR_FILENO, p, strlen(p)); 294 suicide = 1; 295 abort(); 296 } 297 298 static void 299 wrtwarning(const char *p) 300 { 301 const char *progname = getprogname(); 302 const char *q = " warning: "; 303 if (malloc_abort) 304 wrterror(p); 305 write(STDERR_FILENO, progname, strlen(progname)); 306 write(STDERR_FILENO, malloc_func, strlen(malloc_func)); 307 write(STDERR_FILENO, q, strlen(q)); 308 write(STDERR_FILENO, p, strlen(p)); 309 } 310 311 /* 312 * Allocate a number of pages from the OS 313 */ 314 static void * 315 map_pages(size_t pages) 316 { 317 caddr_t result, rresult, tail; 318 intptr_t bytes = pages << malloc_pageshift; 319 320 if (bytes < 0 || (size_t)bytes < pages) { 321 errno = ENOMEM; 322 return NULL; 323 } 324 325 if ((result = sbrk(bytes)) == (void *)-1) 326 return NULL; 327 328 /* 329 * Round to a page, in case sbrk(2) did not do this for us 330 */ 331 rresult = (caddr_t)pageround((size_t)(uintptr_t)result); 332 if (result < rresult) { 333 /* make sure we have enough space to fit bytes */ 334 if (sbrk((intptr_t)(rresult - result)) == (void *) -1) { 335 /* we failed, put everything back */ 336 if (brk(result)) { 337 wrterror("brk(2) failed [internal error]\n"); 338 } 339 } 340 } 341 tail = rresult + (size_t)bytes; 342 343 last_idx = ptr2idx(tail) - 1; 344 malloc_brk = tail; 345 346 if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) { 347 malloc_brk = result; 348 last_idx = ptr2idx(malloc_brk) - 1; 349 /* Put back break point since we failed. */ 350 if (brk(malloc_brk)) 351 wrterror("brk(2) failed [internal error]\n"); 352 return 0; 353 } 354 355 return rresult; 356 } 357 358 /* 359 * Extend page directory 360 */ 361 static int 362 extend_pgdir(size_t idx) 363 { 364 struct pginfo **new, **old; 365 size_t newlen, oldlen; 366 367 /* check for overflow */ 368 if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1) 369 + (malloc_pagesize / sizeof *page_dir)) < idx) { 370 errno = ENOMEM; 371 return 0; 372 } 373 374 /* Make it this many pages */ 375 newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize; 376 377 /* remember the old mapping size */ 378 oldlen = malloc_ninfo * sizeof *page_dir; 379 380 /* 381 * NOTE: we allocate new pages and copy the directory rather than tempt 382 * fate by trying to "grow" the region.. There is nothing to prevent 383 * us from accidentally re-mapping space that's been allocated by our caller 384 * via dlopen() or other mmap(). 385 * 386 * The copy problem is not too bad, as there is 4K of page index per 387 * 4MB of malloc arena. 388 * 389 * We can totally avoid the copy if we open a file descriptor to associate 390 * the anon mappings with. Then, when we remap the pages at the new 391 * address, the old pages will be "magically" remapped.. But this means 392 * keeping open a "secret" file descriptor..... 393 */ 394 395 /* Get new pages */ 396 new = (struct pginfo**) MMAP(newlen); 397 if (new == MAP_FAILED) 398 return 0; 399 400 /* Copy the old stuff */ 401 memcpy(new, page_dir, oldlen); 402 403 /* register the new size */ 404 malloc_ninfo = newlen / sizeof *page_dir; 405 406 /* swap the pointers */ 407 old = page_dir; 408 page_dir = new; 409 410 /* Now free the old stuff */ 411 munmap(old, oldlen); 412 return 1; 413 } 414 415 /* 416 * Initialize the world 417 */ 418 static void 419 malloc_init (void) 420 { 421 char *p, b[64]; 422 int i, j; 423 int errnosave; 424 425 /* 426 * Compute page-size related variables. 427 */ 428 malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE); 429 malloc_pagemask = malloc_pagesize - 1; 430 for (malloc_pageshift = 0; 431 (1UL << malloc_pageshift) != malloc_pagesize; 432 malloc_pageshift++) 433 /* nothing */ ; 434 435 INIT_MMAP(); 436 437 #ifdef MALLOC_EXTRA_SANITY 438 malloc_junk = 1; 439 #endif /* MALLOC_EXTRA_SANITY */ 440 441 for (i = 0; i < 3; i++) { 442 if (i == 0) { 443 errnosave = errno; 444 j = readlink("/etc/malloc.conf", b, sizeof b - 1); 445 errno = errnosave; 446 if (j <= 0) 447 continue; 448 b[j] = '\0'; 449 p = b; 450 } else if (i == 1) { 451 p = getenv("MALLOC_OPTIONS"); 452 } else { 453 p = malloc_options; 454 } 455 for (; p != NULL && *p != '\0'; p++) { 456 switch (*p) { 457 case '>': malloc_cache <<= 1; break; 458 case '<': malloc_cache >>= 1; break; 459 case 'a': malloc_abort = 0; break; 460 case 'A': malloc_abort = 1; break; 461 case 'h': malloc_hint = 0; break; 462 case 'H': malloc_hint = 1; break; 463 case 'r': malloc_realloc = 0; break; 464 case 'R': malloc_realloc = 1; break; 465 case 'j': malloc_junk = 0; break; 466 case 'J': malloc_junk = 1; break; 467 #ifdef HAS_UTRACE 468 case 'u': malloc_utrace = 0; break; 469 case 'U': malloc_utrace = 1; break; 470 #endif 471 case 'v': malloc_sysv = 0; break; 472 case 'V': malloc_sysv = 1; break; 473 case 'x': malloc_xmalloc = 0; break; 474 case 'X': malloc_xmalloc = 1; break; 475 case 'z': malloc_zero = 0; break; 476 case 'Z': malloc_zero = 1; break; 477 default: 478 j = malloc_abort; 479 malloc_abort = 0; 480 wrtwarning("unknown char in MALLOC_OPTIONS.\n"); 481 malloc_abort = j; 482 break; 483 } 484 } 485 } 486 487 UTRACE(0, 0, 0); 488 489 /* 490 * We want junk in the entire allocation, and zero only in the part 491 * the user asked for. 492 */ 493 if (malloc_zero) 494 malloc_junk=1; 495 496 /* 497 * If we run with junk (or implicitly from above: zero), we want to 498 * force realloc() to get new storage, so we can DTRT with it. 499 */ 500 if (malloc_junk) 501 malloc_realloc=1; 502 503 /* Allocate one page for the page directory */ 504 page_dir = (struct pginfo **) MMAP(malloc_pagesize); 505 506 if (page_dir == MAP_FAILED) 507 wrterror("mmap(2) failed, check limits.\n"); 508 509 /* 510 * We need a maximum of malloc_pageshift buckets, steal these from the 511 * front of the page_directory; 512 */ 513 malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0)) 514 >> malloc_pageshift; 515 malloc_origo -= malloc_pageshift; 516 517 malloc_ninfo = malloc_pagesize / sizeof *page_dir; 518 519 /* Recalculate the cache size in bytes, and make sure it's nonzero */ 520 521 if (!malloc_cache) 522 malloc_cache++; 523 524 malloc_cache <<= malloc_pageshift; 525 526 /* 527 * This is a nice hack from Kaleb Keithly (kaleb@x.org). 528 * We can sbrk(2) further back when we keep this on a low address. 529 */ 530 px = (struct pgfree *) imalloc (sizeof *px); 531 532 /* Been here, done that */ 533 malloc_started++; 534 } 535 536 /* 537 * Allocate a number of complete pages 538 */ 539 static void * 540 malloc_pages(size_t size) 541 { 542 void *p, *delay_free = NULL; 543 size_t i; 544 struct pgfree *pf; 545 size_t idx; 546 547 idx = pageround(size); 548 if (idx < size) { 549 errno = ENOMEM; 550 return NULL; 551 } else 552 size = idx; 553 554 p = NULL; 555 556 /* Look for free pages before asking for more */ 557 for(pf = free_list.next; pf; pf = pf->next) { 558 559 #ifdef MALLOC_EXTRA_SANITY 560 if (pf->size & malloc_pagemask) 561 wrterror("(ES): junk length entry on free_list.\n"); 562 if (!pf->size) 563 wrterror("(ES): zero length entry on free_list.\n"); 564 if (pf->page == pf->end) 565 wrterror("(ES): zero entry on free_list.\n"); 566 if (pf->page > pf->end) 567 wrterror("(ES): sick entry on free_list.\n"); 568 if ((void*)pf->page >= (void*)sbrk(0)) 569 wrterror("(ES): entry on free_list past brk.\n"); 570 if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE) 571 wrterror("(ES): non-free first page on free-list.\n"); 572 if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE) 573 wrterror("(ES): non-free last page on free-list.\n"); 574 #endif /* MALLOC_EXTRA_SANITY */ 575 576 if (pf->size < size) 577 continue; 578 579 if (pf->size == size) { 580 p = pf->page; 581 if (pf->next != NULL) 582 pf->next->prev = pf->prev; 583 pf->prev->next = pf->next; 584 delay_free = pf; 585 break; 586 } 587 588 p = pf->page; 589 pf->page = (char *)pf->page + size; 590 pf->size -= size; 591 break; 592 } 593 594 #ifdef MALLOC_EXTRA_SANITY 595 if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE) 596 wrterror("(ES): allocated non-free page on free-list.\n"); 597 #endif /* MALLOC_EXTRA_SANITY */ 598 599 size >>= malloc_pageshift; 600 601 /* Map new pages */ 602 if (p == NULL) 603 p = map_pages(size); 604 605 if (p != NULL) { 606 607 idx = ptr2idx(p); 608 page_dir[idx] = MALLOC_FIRST; 609 for (i=1;i<size;i++) 610 page_dir[idx+i] = MALLOC_FOLLOW; 611 612 if (malloc_junk) 613 memset(p, SOME_JUNK, size << malloc_pageshift); 614 } 615 616 if (delay_free) { 617 if (px == NULL) 618 px = delay_free; 619 else 620 ifree(delay_free); 621 } 622 623 return p; 624 } 625 626 /* 627 * Allocate a page of fragments 628 */ 629 630 static inline int 631 malloc_make_chunks(int bits) 632 { 633 struct pginfo *bp; 634 void *pp; 635 int i, k, l; 636 637 /* Allocate a new bucket */ 638 pp = malloc_pages(malloc_pagesize); 639 if (pp == NULL) 640 return 0; 641 642 /* Find length of admin structure */ 643 l = (int)offsetof(struct pginfo, bits[0]); 644 l += sizeof bp->bits[0] * 645 (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS); 646 647 /* Don't waste more than two chunks on this */ 648 if ((1<<(bits)) <= l+l) { 649 bp = (struct pginfo *)pp; 650 } else { 651 bp = (struct pginfo *)imalloc((size_t)l); 652 if (bp == NULL) { 653 ifree(pp); 654 return 0; 655 } 656 } 657 658 bp->size = (1<<bits); 659 bp->shift = bits; 660 bp->total = bp->free = malloc_pagesize >> bits; 661 bp->page = pp; 662 663 /* set all valid bits in the bitmap */ 664 k = bp->total; 665 i = 0; 666 667 /* Do a bunch at a time */ 668 for(;k-i >= MALLOC_BITS; i += MALLOC_BITS) 669 bp->bits[i / MALLOC_BITS] = ~0U; 670 671 for(; i < k; i++) 672 bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS); 673 674 if (bp == bp->page) { 675 /* Mark the ones we stole for ourselves */ 676 for(i=0;l > 0;i++) { 677 bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS)); 678 bp->free--; 679 bp->total--; 680 l -= (1 << bits); 681 } 682 } 683 684 /* MALLOC_LOCK */ 685 686 page_dir[ptr2idx(pp)] = bp; 687 688 bp->next = page_dir[bits]; 689 page_dir[bits] = bp; 690 691 /* MALLOC_UNLOCK */ 692 693 return 1; 694 } 695 696 /* 697 * Allocate a fragment 698 */ 699 static void * 700 malloc_bytes(size_t size) 701 { 702 size_t i; 703 int j; 704 u_int u; 705 struct pginfo *bp; 706 int k; 707 u_int *lp; 708 709 /* Don't bother with anything less than this */ 710 if (size < malloc_minsize) 711 size = malloc_minsize; 712 713 /* Find the right bucket */ 714 j = 1; 715 i = size-1; 716 while (i >>= 1) 717 j++; 718 719 /* If it's empty, make a page more of that size chunks */ 720 if (page_dir[j] == NULL && !malloc_make_chunks(j)) 721 return NULL; 722 723 bp = page_dir[j]; 724 725 /* Find first word of bitmap which isn't empty */ 726 for (lp = bp->bits; !*lp; lp++) 727 ; 728 729 /* Find that bit, and tweak it */ 730 u = 1; 731 k = 0; 732 while (!(*lp & u)) { 733 u += u; 734 k++; 735 } 736 *lp ^= u; 737 738 /* If there are no more free, remove from free-list */ 739 if (!--bp->free) { 740 page_dir[j] = bp->next; 741 bp->next = NULL; 742 } 743 744 /* Adjust to the real offset of that chunk */ 745 k += (lp-bp->bits)*MALLOC_BITS; 746 k <<= bp->shift; 747 748 if (malloc_junk) 749 memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size); 750 751 return (u_char *)bp->page + k; 752 } 753 754 /* 755 * Allocate a piece of memory 756 */ 757 static void * 758 imalloc(size_t size) 759 { 760 void *result; 761 762 if (suicide) 763 abort(); 764 765 if ((size + malloc_pagesize) < size) /* Check for overflow */ 766 result = NULL; 767 else if (size <= malloc_maxsize) 768 result = malloc_bytes(size); 769 else 770 result = malloc_pages(size); 771 772 if (malloc_abort && result == NULL) 773 wrterror("allocation failed.\n"); 774 775 if (malloc_zero && result != NULL) 776 memset(result, 0, size); 777 778 return result; 779 } 780 781 /* 782 * Change the size of an allocation. 783 */ 784 static void * 785 irealloc(void *ptr, size_t size) 786 { 787 void *p; 788 size_t osize, idx; 789 struct pginfo **mp; 790 size_t i; 791 792 if (suicide) 793 abort(); 794 795 idx = ptr2idx(ptr); 796 797 if (idx < malloc_pageshift) { 798 wrtwarning("junk pointer, too low to make sense.\n"); 799 return 0; 800 } 801 802 if (idx > last_idx) { 803 wrtwarning("junk pointer, too high to make sense.\n"); 804 return 0; 805 } 806 807 mp = &page_dir[idx]; 808 809 if (*mp == MALLOC_FIRST) { /* Page allocation */ 810 811 /* Check the pointer */ 812 if ((size_t)(uintptr_t)ptr & malloc_pagemask) { 813 wrtwarning("modified (page-) pointer.\n"); 814 return NULL; 815 } 816 817 /* Find the size in bytes */ 818 for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;) 819 osize += malloc_pagesize; 820 821 if (!malloc_realloc && /* unless we have to, */ 822 size <= osize && /* .. or are too small, */ 823 size > (osize - malloc_pagesize)) { /* .. or can free a page, */ 824 return ptr; /* don't do anything. */ 825 } 826 827 } else if (*mp >= MALLOC_MAGIC) { /* Chunk allocation */ 828 829 /* Check the pointer for sane values */ 830 if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) { 831 wrtwarning("modified (chunk-) pointer.\n"); 832 return NULL; 833 } 834 835 /* Find the chunk index in the page */ 836 i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift; 837 838 /* Verify that it isn't a free chunk already */ 839 if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) { 840 wrtwarning("chunk is already free.\n"); 841 return NULL; 842 } 843 844 osize = (*mp)->size; 845 846 if (!malloc_realloc && /* Unless we have to, */ 847 size < osize && /* ..or are too small, */ 848 (size > osize/2 || /* ..or could use a smaller size, */ 849 osize == malloc_minsize)) { /* ..(if there is one) */ 850 return ptr; /* ..Don't do anything */ 851 } 852 853 } else { 854 wrtwarning("pointer to wrong page.\n"); 855 return NULL; 856 } 857 858 p = imalloc(size); 859 860 if (p != NULL) { 861 /* copy the lesser of the two sizes, and free the old one */ 862 if (!size || !osize) 863 ; 864 else if (osize < size) 865 memcpy(p, ptr, osize); 866 else 867 memcpy(p, ptr, size); 868 ifree(ptr); 869 } 870 return p; 871 } 872 873 /* 874 * Free a sequence of pages 875 */ 876 877 static inline void 878 free_pages(void *ptr, size_t idx, struct pginfo *info) 879 { 880 size_t i; 881 struct pgfree *pf, *pt=NULL; 882 size_t l; 883 void *tail; 884 885 if (info == MALLOC_FREE) { 886 wrtwarning("page is already free.\n"); 887 return; 888 } 889 890 if (info != MALLOC_FIRST) { 891 wrtwarning("pointer to wrong page.\n"); 892 return; 893 } 894 895 if ((size_t)(uintptr_t)ptr & malloc_pagemask) { 896 wrtwarning("modified (page-) pointer.\n"); 897 return; 898 } 899 900 /* Count how many pages and mark them free at the same time */ 901 page_dir[idx] = MALLOC_FREE; 902 for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++) 903 page_dir[idx + i] = MALLOC_FREE; 904 905 l = i << malloc_pageshift; 906 907 if (malloc_junk) 908 memset(ptr, SOME_JUNK, l); 909 910 if (malloc_hint) 911 madvise(ptr, l, MADV_FREE); 912 913 tail = (char *)ptr+l; 914 915 /* add to free-list */ 916 if (px == NULL) 917 px = imalloc(sizeof *pt); /* This cannot fail... */ 918 px->page = ptr; 919 px->end = tail; 920 px->size = l; 921 if (free_list.next == NULL) { 922 923 /* Nothing on free list, put this at head */ 924 px->next = free_list.next; 925 px->prev = &free_list; 926 free_list.next = px; 927 pf = px; 928 px = NULL; 929 930 } else { 931 932 /* Find the right spot, leave pf pointing to the modified entry. */ 933 tail = (char *)ptr+l; 934 935 for(pf = free_list.next; pf->end < ptr && pf->next != NULL; 936 pf = pf->next) 937 ; /* Race ahead here */ 938 939 if (pf->page > tail) { 940 /* Insert before entry */ 941 px->next = pf; 942 px->prev = pf->prev; 943 pf->prev = px; 944 px->prev->next = px; 945 pf = px; 946 px = NULL; 947 } else if (pf->end == ptr ) { 948 /* Append to the previous entry */ 949 pf->end = (char *)pf->end + l; 950 pf->size += l; 951 if (pf->next != NULL && pf->end == pf->next->page ) { 952 /* And collapse the next too. */ 953 pt = pf->next; 954 pf->end = pt->end; 955 pf->size += pt->size; 956 pf->next = pt->next; 957 if (pf->next != NULL) 958 pf->next->prev = pf; 959 } 960 } else if (pf->page == tail) { 961 /* Prepend to entry */ 962 pf->size += l; 963 pf->page = ptr; 964 } else if (pf->next == NULL) { 965 /* Append at tail of chain */ 966 px->next = NULL; 967 px->prev = pf; 968 pf->next = px; 969 pf = px; 970 px = NULL; 971 } else { 972 wrterror("freelist is destroyed.\n"); 973 } 974 } 975 976 /* Return something to OS ? */ 977 if (pf->next == NULL && /* If we're the last one, */ 978 pf->size > malloc_cache && /* ..and the cache is full, */ 979 pf->end == malloc_brk && /* ..and none behind us, */ 980 malloc_brk == sbrk((intptr_t)0)) { /* ..and it's OK to do... */ 981 982 /* 983 * Keep the cache intact. Notice that the '>' above guarantees that 984 * the pf will always have at least one page afterwards. 985 */ 986 pf->end = (char *)pf->page + malloc_cache; 987 pf->size = malloc_cache; 988 989 brk(pf->end); 990 malloc_brk = pf->end; 991 992 idx = ptr2idx(pf->end); 993 994 for(i=idx;i <= last_idx;) 995 page_dir[i++] = MALLOC_NOT_MINE; 996 997 last_idx = idx - 1; 998 999 /* XXX: We could realloc/shrink the pagedir here I guess. */ 1000 } 1001 if (pt != NULL) 1002 ifree(pt); 1003 } 1004 1005 /* 1006 * Free a chunk, and possibly the page it's on, if the page becomes empty. 1007 */ 1008 1009 static inline void 1010 free_bytes(void *ptr, size_t idx, struct pginfo *info) 1011 { 1012 size_t i; 1013 struct pginfo **mp; 1014 void *vp; 1015 1016 /* Find the chunk number on the page */ 1017 i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift; 1018 1019 if (((size_t)(uintptr_t)ptr & (info->size-1))) { 1020 wrtwarning("modified (chunk-) pointer.\n"); 1021 return; 1022 } 1023 1024 if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) { 1025 wrtwarning("chunk is already free.\n"); 1026 return; 1027 } 1028 1029 if (malloc_junk) 1030 memset(ptr, SOME_JUNK, (size_t)info->size); 1031 1032 info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS); 1033 info->free++; 1034 1035 mp = page_dir + info->shift; 1036 1037 if (info->free == 1) { 1038 1039 /* Page became non-full */ 1040 1041 mp = page_dir + info->shift; 1042 /* Insert in address order */ 1043 while (*mp && (*mp)->next && (*mp)->next->page < info->page) 1044 mp = &(*mp)->next; 1045 info->next = *mp; 1046 *mp = info; 1047 return; 1048 } 1049 1050 if (info->free != info->total) 1051 return; 1052 1053 /* Find & remove this page in the queue */ 1054 while (*mp != info) { 1055 mp = &((*mp)->next); 1056 #ifdef MALLOC_EXTRA_SANITY 1057 if (!*mp) 1058 wrterror("(ES): Not on queue.\n"); 1059 #endif /* MALLOC_EXTRA_SANITY */ 1060 } 1061 *mp = info->next; 1062 1063 /* Free the page & the info structure if need be */ 1064 page_dir[idx] = MALLOC_FIRST; 1065 vp = info->page; /* Order is important ! */ 1066 if(vp != (void*)info) 1067 ifree(info); 1068 ifree(vp); 1069 } 1070 1071 static void 1072 ifree(void *ptr) 1073 { 1074 struct pginfo *info; 1075 size_t idx; 1076 1077 /* This is legal */ 1078 if (ptr == NULL) 1079 return; 1080 1081 /* If we're already sinking, don't make matters any worse. */ 1082 if (suicide) 1083 return; 1084 1085 idx = ptr2idx(ptr); 1086 1087 if (idx < malloc_pageshift) { 1088 wrtwarning("junk pointer, too low to make sense.\n"); 1089 return; 1090 } 1091 1092 if (idx > last_idx) { 1093 wrtwarning("junk pointer, too high to make sense.\n"); 1094 return; 1095 } 1096 1097 info = page_dir[idx]; 1098 1099 if (info < MALLOC_MAGIC) 1100 free_pages(ptr, idx, info); 1101 else 1102 free_bytes(ptr, idx, info); 1103 return; 1104 } 1105 1106 /* 1107 * These are the public exported interface routines. 1108 */ 1109 1110 1111 void * 1112 malloc(size_t size) 1113 { 1114 void *r; 1115 1116 THREAD_LOCK(); 1117 malloc_func = " in malloc():"; 1118 if (malloc_active++) { 1119 wrtwarning("recursive call.\n"); 1120 malloc_active--; 1121 THREAD_UNLOCK(); 1122 return (NULL); 1123 } 1124 if (!malloc_started) 1125 malloc_init(); 1126 if (malloc_sysv && !size) 1127 r = NULL; 1128 else 1129 r = imalloc(size); 1130 UTRACE(0, size, r); 1131 malloc_active--; 1132 THREAD_UNLOCK(); 1133 if (r == NULL && (size != 0 || !malloc_sysv)) { 1134 if (malloc_xmalloc) 1135 wrterror("out of memory.\n"); 1136 errno = ENOMEM; 1137 } 1138 return (r); 1139 } 1140 1141 void 1142 free(void *ptr) 1143 { 1144 THREAD_LOCK(); 1145 malloc_func = " in free():"; 1146 if (malloc_active++) { 1147 wrtwarning("recursive call.\n"); 1148 malloc_active--; 1149 THREAD_UNLOCK(); 1150 return; 1151 } 1152 if (!malloc_started) 1153 malloc_init(); 1154 ifree(ptr); 1155 UTRACE(ptr, 0, 0); 1156 malloc_active--; 1157 THREAD_UNLOCK(); 1158 return; 1159 } 1160 1161 void * 1162 realloc(void *ptr, size_t size) 1163 { 1164 void *r; 1165 1166 THREAD_LOCK(); 1167 malloc_func = " in realloc():"; 1168 if (malloc_active++) { 1169 wrtwarning("recursive call.\n"); 1170 malloc_active--; 1171 THREAD_UNLOCK(); 1172 return (NULL); 1173 } 1174 if (!malloc_started) 1175 malloc_init(); 1176 if (malloc_sysv && !size) { 1177 ifree(ptr); 1178 r = NULL; 1179 } else if (!ptr) { 1180 r = imalloc(size); 1181 } else { 1182 r = irealloc(ptr, size); 1183 } 1184 UTRACE(ptr, size, r); 1185 malloc_active--; 1186 THREAD_UNLOCK(); 1187 if (r == NULL && (size != 0 || !malloc_sysv)) { 1188 if (malloc_xmalloc) 1189 wrterror("out of memory.\n"); 1190 errno = ENOMEM; 1191 } 1192 return (r); 1193 } 1194