/*	$NetBSD: malloc.c,v 1.58 2017/01/12 02:00:42 christos Exp $	*/

/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * From FreeBSD: malloc.c,v 1.91 2006/01/12 07:28:20 jasone
 *
 */

/*
 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
 * to internal conditions and consistency in malloc.c.  This has a
 * noticeable runtime performance hit, and generally will not do you
 * any good unless you fiddle with the internals of malloc or want
 * to catch random pointer corruption as early as possible.
 */
#ifndef MALLOC_EXTRA_SANITY
#undef MALLOC_EXTRA_SANITY
#endif

/*
 * What to use for Junk.  This is the byte value we use to fill with
 * when the 'J' option is enabled.
 */
#define SOME_JUNK	0xd0		/* as in "Duh" :-) */

/*
 * The basic parameters you can tweak.
 *
 * malloc_minsize	minimum size of an allocation in bytes.
 *			If this is too small it's too much work
 *			to manage them.  This is also the smallest
 *			unit of alignment used for the storage
 *			returned by malloc/realloc.
 *
 */

#include "namespace.h"
#if defined(__FreeBSD__)
# if defined(__i386__)
#  define malloc_minsize	16U
# endif
# if defined(__ia64__)
#  define malloc_pageshift	13U
#  define malloc_minsize	16U
# endif
# if defined(__alpha__)
#  define malloc_pageshift	13U
#  define malloc_minsize	16U
# endif
# if defined(__sparc64__)
#  define malloc_pageshift	13U
#  define malloc_minsize	16U
# endif
# if defined(__amd64__)
#  define malloc_pageshift	12U
#  define malloc_minsize	16U
# endif
# if defined(__arm__)
#  define malloc_pageshift	12U
#  define malloc_minsize	16U
# endif
# define HAS_UTRACE
# define UTRACE_LABEL

#include <sys/cdefs.h>
void utrace(struct ut *, int);

/*
 * Make malloc/free/realloc thread-safe in libc for use with
 * kernel threads.
 */
# include "libc_private.h"
# include "spinlock.h"
static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
# define _MALLOC_LOCK()		if (__isthreaded) _SPINLOCK(&thread_lock);
# define _MALLOC_UNLOCK()	if (__isthreaded) _SPINUNLOCK(&thread_lock);
#endif /* __FreeBSD__ */
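
/*
 * Editorial aside (illustrative only, not part of the allocator): the
 * _MALLOC_LOCK()/_MALLOC_UNLOCK() macros above expand to a bare `if'
 * statement, so using one as the body of another unbraced `if'/`else'
 * silently rebinds the `else'.  A minimal sketch of the hazard, and of
 * the conventional do { } while (0) wrapping that avoids it (same
 * __isthreaded/_SPINLOCK interface assumed):
 */
#if 0
	if (need_lock)
		_MALLOC_LOCK()		/* expands to: if (__isthreaded) ...; */
	else				/* binds to __isthreaded, not need_lock! */
		do_something_else();

#define _MALLOC_LOCK()						\
	do {							\
		if (__isthreaded)				\
			_SPINLOCK(&thread_lock);		\
	} while (0)
#endif
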
#include <sys/types.h>
#if defined(__NetBSD__)
# define malloc_minsize		16U
# ifdef _LIBC
#  define HAS_UTRACE
#  define UTRACE_LABEL "malloc",
int utrace(const char *, void *, size_t);
# endif
# include <sys/cdefs.h>
# include "extern.h"
# if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: malloc.c,v 1.58 2017/01/12 02:00:42 christos Exp $");
# endif /* LIBC_SCCS and not lint */
# include <reentrant.h>
# ifdef _REENTRANT
extern int __isthreaded;
static mutex_t thread_lock = MUTEX_INITIALIZER;
#  define _MALLOC_LOCK()	if (__isthreaded) mutex_lock(&thread_lock);
#  define _MALLOC_UNLOCK()	if (__isthreaded) mutex_unlock(&thread_lock);
# else
#  define _MALLOC_LOCK()
#  define _MALLOC_UNLOCK()
# endif
#endif /* __NetBSD__ */

#if defined(__sparc__) && defined(sun)
# define malloc_minsize		16U
# define MAP_ANON		(0)
static int fdzero;
# define MMAP_FD	fdzero
# define INIT_MMAP() \
	{ if ((fdzero = open(_PATH_DEVZERO, O_RDWR | O_CLOEXEC, 0000)) == -1) \
	    wrterror("open of /dev/zero"); }
#endif /* __sparc__ */

/* Insert your combination here... */
#if defined(__FOOCPU__) && defined(__BAROS__)
# define malloc_minsize		16U
#endif /* __FOOCPU__ && __BAROS__ */

#ifndef ZEROSIZEPTR
#define ZEROSIZEPTR	((void *)(uintptr_t)(1UL << (malloc_pageshift - 1)))
#endif

/*
 * No user serviceable parts behind this point.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/*
 * This structure describes a page worth of chunks.
 */

struct pginfo {
	struct pginfo	*next;	/* next on the free list */
	void		*page;	/* Pointer to the page */
	u_short		size;	/* size of this page's chunks */
	u_short		shift;	/* How far to shift for this size chunks */
	u_short		free;	/* How many free chunks */
	u_short		total;	/* How many chunks */
	u_int		bits[1]; /* Which chunks are free */
};

/*
 * This structure describes a number of free pages.
 */

struct pgfree {
	struct pgfree	*next;	/* next run of free pages */
	struct pgfree	*prev;	/* prev run of free pages */
	void		*page;	/* pointer to free pages */
	void		*end;	/* pointer to end of free pages */
	size_t		size;	/* number of bytes free */
};

/*
 * How many bits per u_int in the bitmap.
 * Change only if not 8 bits/byte
 */
#define	MALLOC_BITS	((int)(8*sizeof(u_int)))

/*
 * Magic values to put in the page_directory
 */
#define MALLOC_NOT_MINE	((struct pginfo*) 0)
#define MALLOC_FREE	((struct pginfo*) 1)
#define MALLOC_FIRST	((struct pginfo*) 2)
#define MALLOC_FOLLOW	((struct pginfo*) 3)
#define MALLOC_MAGIC	((struct pginfo*) 4)
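
/*
 * Worked example (editorial, not from the original source): on a
 * 4096-byte page carved into 16-byte chunks, one pginfo tracks
 * 4096/16 = 256 chunks, so its bitmap needs 256/MALLOC_BITS u_ints
 * (8 words with a 32-bit u_int).  A hedged sketch of the sizing rule
 * that malloc_make_chunks() applies below:
 */
#if 0
	/* hypothetical values: pagesize = 4096, chunk size = 1 << bits */
	size_t nchunks = pagesize >> bits;	/* 256 for bits = 4 */
	size_t nwords  = (nchunks + MALLOC_BITS - 1) / MALLOC_BITS; /* 8 */
	size_t adminsz = offsetof(struct pginfo, bits[0])
	    + nwords * sizeof(u_int);
#endif
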
/*
 * Page size related parameters, computed at run-time.
 */
static size_t malloc_pagesize;
static size_t malloc_pageshift;
static size_t malloc_pagemask;

#ifndef malloc_minsize
#define malloc_minsize			16U
#endif

#ifndef malloc_maxsize
#define malloc_maxsize			((malloc_pagesize)>>1)
#endif

#define pageround(foo)	(((foo) + (malloc_pagemask)) & (~(malloc_pagemask)))
#define ptr2idx(foo) \
    (((size_t)(uintptr_t)(foo) >> malloc_pageshift) - malloc_origo)

#ifndef _MALLOC_LOCK
#define _MALLOC_LOCK()
#endif

#ifndef _MALLOC_UNLOCK
#define _MALLOC_UNLOCK()
#endif

#ifndef MMAP_FD
#define MMAP_FD (-1)
#endif

#ifndef INIT_MMAP
#define INIT_MMAP()
#endif

#ifndef MADV_FREE
#define MADV_FREE MADV_DONTNEED
#endif

/* Number of free pages we cache */
static size_t malloc_cache = 16;

/* The offset from pagenumber to index into the page directory */
static size_t malloc_origo;

/* The last index in the page directory we care about */
static size_t last_idx;

/* Pointer to page directory. Allocated "as if with" malloc */
static struct pginfo **page_dir;

/* How many slots in the page directory */
static size_t malloc_ninfo;

/* Free pages line up here */
static struct pgfree free_list;

/* Abort(), user doesn't handle problems. */
static int malloc_abort;

/* Are we trying to die ? */
static int suicide;

/* always realloc ? */
static int malloc_realloc;

/* pass the kernel a hint on free pages ? */
#if defined(MADV_FREE)
static int malloc_hint = 0;
#endif

/* xmalloc behaviour ? */
static int malloc_xmalloc;

/* sysv behaviour for malloc(0) ? */
static int malloc_sysv;

/* zero fill ? */
static int malloc_zero;

/* junk fill ? */
static int malloc_junk;

#ifdef HAS_UTRACE

/* utrace ? */
static int malloc_utrace;

struct ut { void *p; size_t s; void *r; };

#define UTRACE(a, b, c) \
	if (malloc_utrace) {					\
		struct ut u;					\
		u.p = a; u.s = b; u.r = c;			\
		utrace(UTRACE_LABEL (void *) &u, sizeof u);	\
	}
#else /* !HAS_UTRACE */
#define UTRACE(a,b,c)
#endif /* HAS_UTRACE */
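
/*
 * Illustrative note (editorial): with the 'U' option set, every public
 * entry point funnels through pubrealloc() below and emits one struct
 * ut record per call, so a trace decodes roughly as:
 *
 *	malloc(s)	->	{ NULL, s, r }
 *	free(p)		->	{ p, 0, r }
 *	realloc(p, s)	->	{ p, s, r }
 *
 * A hedged sketch of emitting one such record by hand:
 */
#if 0
	struct ut u = { NULL, 123, some_result };	/* hypothetical */
	utrace(UTRACE_LABEL (void *)&u, sizeof u);
#endif
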
/* my last break. */
static void *malloc_brk;

/* one location cache for free-list holders */
static struct pgfree *px;

/* compile-time options */
const char *_malloc_options;

/* Name of the current public function */
static const char *malloc_func;

/* Macro for mmap */
#define MMAP(size) \
	mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
	    MMAP_FD, (off_t)0)

/*
 * Necessary function declarations
 */
static int extend_pgdir(size_t idx);
static void *imalloc(size_t size);
static void ifree(void *ptr);
static void *irealloc(void *ptr, size_t size);

static void
wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
{

	write(STDERR_FILENO, p1, strlen(p1));
	write(STDERR_FILENO, p2, strlen(p2));
	write(STDERR_FILENO, p3, strlen(p3));
	write(STDERR_FILENO, p4, strlen(p4));
}

void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
	    const char *p4) = wrtmessage;

static void
wrterror(const char *p)
{

	suicide = 1;
	_malloc_message(getprogname(), malloc_func, " error: ", p);
	abort();
}

static void
wrtwarning(const char *p)
{

	/*
	 * Sensitive processes, somewhat arbitrarily defined here as setuid,
	 * setgid, root and wheel cannot afford to have malloc mistakes.
	 */
	if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
		wrterror(p);
}

/*
 * Allocate a number of pages from the OS
 */
static void *
map_pages(size_t pages)
{
	caddr_t result, rresult, tail;
	intptr_t bytes = pages << malloc_pageshift;

	if (bytes < 0 || (size_t)bytes < pages) {
		errno = ENOMEM;
		return NULL;
	}

	if ((result = sbrk(bytes)) == (void *)-1)
		return NULL;

	/*
	 * Round to a page, in case sbrk(2) did not do this for us
	 */
	rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
	if (result < rresult) {
		/* make sure we have enough space to fit bytes */
		if (sbrk((intptr_t)(rresult - result)) == (void *)-1) {
			/* we failed, put everything back */
			if (brk(result)) {
				wrterror("brk(2) failed [internal error]\n");
			}
		}
	}
	tail = rresult + (size_t)bytes;

	last_idx = ptr2idx(tail) - 1;
	malloc_brk = tail;

	if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
		malloc_brk = result;
		last_idx = ptr2idx(malloc_brk) - 1;
		/* Put back break point since we failed. */
		if (brk(malloc_brk))
			wrterror("brk(2) failed [internal error]\n");
		return 0;
	}

	return rresult;
}
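
/*
 * Worked example (editorial): with 4096-byte pages, malloc_pagemask is
 * 0xfff and malloc_pageshift is 12, so pageround(0x12345) yields
 * 0x13000, and ptr2idx() maps an address to its page_dir slot by page
 * number, relative to malloc_origo.  A hedged sketch with hypothetical
 * numbers:
 */
#if 0
	size_t rounded = pageround((size_t)0x12345);	/* 0x13000 */
	size_t slot = ptr2idx((void *)0x13000);	/* 0x13 - malloc_origo */
#endif
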
/*
 * Extend page directory
 */
static int
extend_pgdir(size_t idx)
{
	struct pginfo **new, **old;
	size_t newlen, oldlen;

	/* check for overflow */
	if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
	    + (malloc_pagesize / sizeof *page_dir)) < idx) {
		errno = ENOMEM;
		return 0;
	}

	/* Make it this many pages */
	newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;

	/* remember the old mapping size */
	oldlen = malloc_ninfo * sizeof *page_dir;

	/*
	 * NOTE: we allocate new pages and copy the directory rather than tempt
	 * fate by trying to "grow" the region..  There is nothing to prevent
	 * us from accidentally re-mapping space that's been allocated by our
	 * caller via dlopen() or other mmap().
	 *
	 * The copy problem is not too bad, as there is 4K of page index per
	 * 4MB of malloc arena.
	 *
	 * We can totally avoid the copy if we open a file descriptor to
	 * associate the anon mappings with.  Then, when we remap the pages at
	 * the new address, the old pages will be "magically" remapped..  But
	 * this means keeping open a "secret" file descriptor.....
	 */

	/* Get new pages */
	new = MMAP(newlen);
	if (new == MAP_FAILED)
		return 0;

	/* Copy the old stuff */
	memcpy(new, page_dir, oldlen);

	/* register the new size */
	malloc_ninfo = newlen / sizeof *page_dir;

	/* swap the pointers */
	old = page_dir;
	page_dir = new;

	/* Now free the old stuff */
	munmap(old, oldlen);
	return 1;
}

/*
 * Initialize the world
 */
static void
malloc_init(void)
{
	const char *p;
	char b[64];
	size_t i;
	ssize_t j;
	int serrno = errno;

	/*
	 * Compute page-size related variables.
	 */
#ifdef _LIBC
	malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
#else
	malloc_pagesize = 4096;
#endif
	malloc_pagemask = malloc_pagesize - 1;
	for (malloc_pageshift = 0;
	     (1UL << malloc_pageshift) != malloc_pagesize;
	     malloc_pageshift++)
		/* nothing */ ;

	INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
	malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

	for (i = 0; i < 3; i++) {
		if (i == 0) {
			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
			if (j == -1)
				continue;
			b[j] = '\0';
			p = b;
#ifdef _LIBC
		} else if (i == 1 && issetugid() == 0) {
			p = getenv("MALLOC_OPTIONS");
#endif
		} else if (i == 1) {
			continue;
		} else {
			p = _malloc_options;
		}
		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case '>': malloc_cache <<= 1; break;
			case '<': malloc_cache >>= 1; break;
			case 'a': malloc_abort = 0; break;
			case 'A': malloc_abort = 1; break;
			case 'h': malloc_hint = 0; break;
			case 'H': malloc_hint = 1; break;
			case 'r': malloc_realloc = 0; break;
			case 'R': malloc_realloc = 1; break;
			case 'j': malloc_junk = 0; break;
			case 'J': malloc_junk = 1; break;
#ifdef HAS_UTRACE
			case 'u': malloc_utrace = 0; break;
			case 'U': malloc_utrace = 1; break;
#endif
			case 'v': malloc_sysv = 0; break;
			case 'V': malloc_sysv = 1; break;
			case 'x': malloc_xmalloc = 0; break;
			case 'X': malloc_xmalloc = 1; break;
			case 'z': malloc_zero = 0; break;
			case 'Z': malloc_zero = 1; break;
			default:
				_malloc_message(getprogname(), malloc_func,
				    " warning: ",
				    "unknown char in MALLOC_OPTIONS\n");
				break;
			}
		}
	}

	UTRACE(0, 0, 0);
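
	/*
	 * Usage note (editorial): the three passes above take options from,
	 * in order, the target of the /etc/malloc.conf symlink, the
	 * MALLOC_OPTIONS environment variable (skipped for set-id programs),
	 * and the compiled-in _malloc_options string.  A hedged, hypothetical
	 * way to request abort-on-failure plus junk fill:
	 */
#if 0
	/* equivalent to `ln -s AJ /etc/malloc.conf'; must run before the
	 * first allocation in the process */
	setenv("MALLOC_OPTIONS", "AJ", 1);
#endif
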
	/*
	 * We want junk in the entire allocation, and zero only in the part
	 * the user asked for.
	 */
	if (malloc_zero)
		malloc_junk = 1;

	/* Allocate one page for the page directory */
	page_dir = MMAP(malloc_pagesize);

	if (page_dir == MAP_FAILED)
		wrterror("mmap(2) failed, check limits.\n");

	/*
	 * We need a maximum of malloc_pageshift buckets, steal these from the
	 * front of the page_directory.
	 */
	malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
	    >> malloc_pageshift;
	malloc_origo -= malloc_pageshift;

	malloc_ninfo = malloc_pagesize / sizeof *page_dir;

	/* Recalculate the cache size in bytes, and make sure it's nonzero */

	if (!malloc_cache)
		malloc_cache++;

	malloc_cache <<= malloc_pageshift;

	/*
	 * This is a nice hack from Kaleb Keithly (kaleb@x.org).
	 * We can sbrk(2) further back when we keep this on a low address.
	 */
	px = imalloc(sizeof *px);

	errno = serrno;
}

/*
 * Allocate a number of complete pages
 */
static void *
malloc_pages(size_t size)
{
	void *p, *delay_free = NULL;
	size_t i;
	struct pgfree *pf;
	size_t idx;

	idx = pageround(size);
	if (idx < size) {
		errno = ENOMEM;
		return NULL;
	} else
		size = idx;

	p = NULL;

	/* Look for free pages before asking for more */
	for (pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
		if (pf->size & malloc_pagemask)
			wrterror("(ES): junk length entry on free_list.\n");
		if (!pf->size)
			wrterror("(ES): zero length entry on free_list.\n");
		if (pf->page == pf->end)
			wrterror("(ES): zero entry on free_list.\n");
		if (pf->page > pf->end)
			wrterror("(ES): sick entry on free_list.\n");
		if ((void *)pf->page >= (void *)sbrk(0))
			wrterror("(ES): entry on free_list past brk.\n");
		if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
			wrterror("(ES): non-free first page on free-list.\n");
		if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
			wrterror("(ES): non-free last page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

		if (pf->size < size)
			continue;

		if (pf->size == size) {
			p = pf->page;
			if (pf->next != NULL)
				pf->next->prev = pf->prev;
			pf->prev->next = pf->next;
			delay_free = pf;
			break;
		}

		p = pf->page;
		pf->page = (char *)pf->page + size;
		pf->size -= size;
		break;
	}

#ifdef MALLOC_EXTRA_SANITY
	if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
		wrterror("(ES): allocated non-free page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

	size >>= malloc_pageshift;

	/* Map new pages */
	if (p == NULL)
		p = map_pages(size);

	if (p != NULL) {

		idx = ptr2idx(p);
		page_dir[idx] = MALLOC_FIRST;
		for (i = 1; i < size; i++)
			page_dir[idx+i] = MALLOC_FOLLOW;

		if (malloc_junk)
			memset(p, SOME_JUNK, size << malloc_pageshift);
	}

	if (delay_free) {
		if (px == NULL)
			px = delay_free;
		else
			ifree(delay_free);
	}

	return p;
}

/*
 * Allocate a page of fragments
 */

static inline int
malloc_make_chunks(int bits)
{
	struct pginfo *bp;
	void *pp;
	int i, k;
	long l;

	/* Allocate a new bucket */
	pp = malloc_pages(malloc_pagesize);
	if (pp == NULL)
		return 0;

	/* Find length of admin structure */
	l = (long)offsetof(struct pginfo, bits[0]);
	l += (long)sizeof bp->bits[0] *
	    (((malloc_pagesize >> bits) + MALLOC_BITS - 1) / MALLOC_BITS);
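
	/*
	 * Worked example (editorial; the LP64 numbers are illustrative):
	 * for bits = 5 on a 4096-byte page there are 128 32-byte chunks,
	 * the bitmap needs 128/32 = 4 u_ints, and l is roughly
	 * 24 + 16 = 40 bytes; since 2*l exceeds one 32-byte chunk, the
	 * pginfo is allocated separately below.  For bits = 11 (two
	 * 2048-byte chunks) l fits twice into one chunk, so the pginfo
	 * is kept inside the page itself.
	 */
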
	/* Don't waste more than two chunks on this */
	if ((1 << (bits)) <= l + l) {
		bp = (struct pginfo *)pp;
	} else {
		bp = imalloc((size_t)l);
		if (bp == NULL) {
			ifree(pp);
			return 0;
		}
	}

	bp->size = (1 << bits);
	bp->shift = bits;
	bp->total = bp->free = (u_short)(malloc_pagesize >> bits);
	bp->page = pp;

	/* set all valid bits in the bitmap */
	k = bp->total;
	i = 0;

	/* Do a bunch at a time */
	for (; k - i >= MALLOC_BITS; i += MALLOC_BITS)
		bp->bits[i / MALLOC_BITS] = ~0U;

	for (; i < k; i++)
		bp->bits[i/MALLOC_BITS] |= 1 << (i%MALLOC_BITS);

	if (bp == bp->page) {
		/* Mark the ones we stole for ourselves */
		for (i = 0; l > 0; i++) {
			bp->bits[i / MALLOC_BITS] &= ~(1 << (i % MALLOC_BITS));
			bp->free--;
			bp->total--;
			l -= (long)(1 << bits);
		}
	}

	/* MALLOC_LOCK */

	page_dir[ptr2idx(pp)] = bp;

	bp->next = page_dir[bits];
	page_dir[bits] = bp;

	/* MALLOC_UNLOCK */

	return 1;
}

/*
 * Allocate a fragment
 */
static void *
malloc_bytes(size_t size)
{
	size_t i;
	int j;
	u_int u;
	struct pginfo *bp;
	size_t k;
	u_int *lp;

	/* Don't bother with anything less than this */
	if (size < malloc_minsize)
		size = malloc_minsize;

	/* Find the right bucket */
	j = 1;
	i = size - 1;
	while (i >>= 1)
		j++;

	/* If it's empty, make a page more of that size chunks */
	if (page_dir[j] == NULL && !malloc_make_chunks(j))
		return NULL;

	bp = page_dir[j];

	/* Find first word of bitmap which isn't empty */
	for (lp = bp->bits; !*lp; lp++)
		;

	/* Find that bit, and tweak it */
	u = 1;
	k = 0;
	while (!(*lp & u)) {
		u += u;
		k++;
	}
	*lp ^= u;

	/* If there are no more free, remove from free-list */
	if (!--bp->free) {
		page_dir[j] = bp->next;
		bp->next = NULL;
	}

	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;
	k <<= bp->shift;

	if (malloc_junk)
		memset((u_char *)bp->page + k, SOME_JUNK, (size_t)bp->size);

	return (u_char *)bp->page + k;
}

/*
 * Allocate a piece of memory
 */
static void *
imalloc(size_t size)
{
	void *result;

	if (suicide)
		abort();

	if ((size + malloc_pagesize) < size)	/* Check for overflow */
		result = NULL;
	else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
		result = NULL;
	else if (size <= malloc_maxsize)
		result = malloc_bytes(size);
	else
		result = malloc_pages(size);

	if (malloc_abort && result == NULL)
		wrterror("allocation failed.\n");

	if (malloc_zero && result != NULL)
		memset(result, 0, size);

	return result;
}
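
/*
 * Worked example (editorial): with 4 KB pages, malloc_maxsize is 2048,
 * so imalloc(100) goes to malloc_bytes(), whose shift loop computes
 * j = 7 (99 halves to 49, 24, 12, 6, 3, 1, 0), i.e. a 128-byte chunk;
 * imalloc(3000) exceeds malloc_maxsize and takes a whole page from
 * malloc_pages().  A hedged sketch of the bucket computation:
 */
#if 0
	size_t size = 100, i = size - 1;	/* hypothetical request */
	int j = 1;
	while (i >>= 1)				/* 49,24,12,6,3,1 -> j = 7 */
		j++;
	/* chunk size is 1 << j == 128 bytes */
#endif
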
/*
 * Change the size of an allocation.
 */
static void *
irealloc(void *ptr, size_t size)
{
	void *p;
	size_t osize, idx;
	struct pginfo **mp;
	size_t i;

	if (suicide)
		abort();

	idx = ptr2idx(ptr);

	if (idx < malloc_pageshift) {
		wrtwarning("junk pointer, too low to make sense.\n");
		return 0;
	}

	if (idx > last_idx) {
		wrtwarning("junk pointer, too high to make sense.\n");
		return 0;
	}

	mp = &page_dir[idx];

	if (*mp == MALLOC_FIRST) {		/* Page allocation */

		/* Check the pointer */
		if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
			wrtwarning("modified (page-) pointer.\n");
			return NULL;
		}

		/* Find the size in bytes */
		for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
			osize += malloc_pagesize;

		if (!malloc_realloc &&		/* unless we have to, */
		    size <= osize &&		/* .. or are too small, */
		    size > (osize - malloc_pagesize)) { /* .. or can free a page, */
			if (malloc_junk)
				memset((u_char *)ptr + size, SOME_JUNK,
				    osize - size);
			return ptr;		/* don't do anything. */
		}

	} else if (*mp >= MALLOC_MAGIC) {	/* Chunk allocation */

		/* Check the pointer for sane values */
		if (((size_t)(uintptr_t)ptr & ((*mp)->size - 1))) {
			wrtwarning("modified (chunk-) pointer.\n");
			return NULL;
		}

		/* Find the chunk index in the page */
		i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;

		/* Verify that it isn't a free chunk already */
		if ((*mp)->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
			wrtwarning("chunk is already free.\n");
			return NULL;
		}

		osize = (*mp)->size;

		if (!malloc_realloc &&		/* Unless we have to, */
		    size <= osize &&		/* ..or are too small, */
		    (size > osize / 2 ||	/* ..or could use a smaller size, */
		     osize == malloc_minsize)) { /* ..(if there is one) */
			if (malloc_junk)
				memset((u_char *)ptr + size, SOME_JUNK,
				    osize - size);
			return ptr;		/* ..Don't do anything */
		}

	} else {
		wrtwarning("pointer to wrong page.\n");
		return NULL;
	}

	p = imalloc(size);

	if (p != NULL) {
		/* copy the lesser of the two sizes, and free the old one */
		if (!size || !osize)
			;
		else if (osize < size)
			memcpy(p, ptr, osize);
		else
			memcpy(p, ptr, size);
		ifree(ptr);
	}
	return p;
}
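
/*
 * Worked example (editorial): realloc(p, 90) on a pointer living in a
 * 128-byte chunk (osize == 128) stays in place, because 90 <= 128 and
 * 90 > 64; only the tail is re-junked when 'J' is set.  Shrinking to
 * 50 would instead move the data into a 64-byte chunk:
 */
#if 0
	void *p = malloc(100);		/* lands in a 128-byte chunk */
	p = realloc(p, 90);		/* same chunk, no copy */
	p = realloc(p, 50);		/* copied into a 64-byte chunk */
#endif
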
/*
 * Free a sequence of pages
 */

static inline void
free_pages(void *ptr, size_t idx, struct pginfo *info)
{
	size_t i;
	struct pgfree *pf, *pt = NULL;
	size_t l;
	void *tail;

	if (info == MALLOC_FREE) {
		wrtwarning("page is already free.\n");
		return;
	}

	if (info != MALLOC_FIRST) {
		wrtwarning("pointer to wrong page.\n");
		return;
	}

	if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
		wrtwarning("modified (page-) pointer.\n");
		return;
	}

	/* Count how many pages and mark them free at the same time */
	page_dir[idx] = MALLOC_FREE;
	for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
		page_dir[idx + i] = MALLOC_FREE;

	l = i << malloc_pageshift;

	if (malloc_junk)
		memset(ptr, SOME_JUNK, l);

	if (malloc_hint)
		madvise(ptr, l, MADV_FREE);

	tail = (char *)ptr + l;

	/* add to free-list */
	if (px == NULL)
		px = imalloc(sizeof *px);	/* This cannot fail... */
	px->page = ptr;
	px->end = tail;
	px->size = l;
	if (free_list.next == NULL) {

		/* Nothing on free list, put this at head */
		px->next = free_list.next;
		px->prev = &free_list;
		free_list.next = px;
		pf = px;
		px = NULL;

	} else {

		/*
		 * Find the right spot, leave pf pointing to the modified
		 * entry.
		 */

		for (pf = free_list.next; pf->end < ptr && pf->next != NULL;
		    pf = pf->next)
			; /* Race ahead here */

		if (pf->page > tail) {
			/* Insert before entry */
			px->next = pf;
			px->prev = pf->prev;
			pf->prev = px;
			px->prev->next = px;
			pf = px;
			px = NULL;
		} else if (pf->end == ptr) {
			/* Append to the previous entry */
			pf->end = (char *)pf->end + l;
			pf->size += l;
			if (pf->next != NULL && pf->end == pf->next->page) {
				/* And collapse the next too. */
				pt = pf->next;
				pf->end = pt->end;
				pf->size += pt->size;
				pf->next = pt->next;
				if (pf->next != NULL)
					pf->next->prev = pf;
			}
		} else if (pf->page == tail) {
			/* Prepend to entry */
			pf->size += l;
			pf->page = ptr;
		} else if (pf->next == NULL) {
			/* Append at tail of chain */
			px->next = NULL;
			px->prev = pf;
			pf->next = px;
			pf = px;
			px = NULL;
		} else {
			wrterror("freelist is destroyed.\n");
		}
	}

	/* Return something to OS ? */
	if (pf->next == NULL &&			/* If we're the last one, */
	    pf->size > malloc_cache &&		/* ..and the cache is full, */
	    pf->end == malloc_brk &&		/* ..and none behind us, */
	    malloc_brk == sbrk((intptr_t)0)) {	/* ..and it's OK to do... */

		/*
		 * Keep the cache intact.  Notice that the '>' above guarantees
		 * that the pf will always have at least one page afterwards.
		 */
		pf->end = (char *)pf->page + malloc_cache;
		pf->size = malloc_cache;

		brk(pf->end);
		malloc_brk = pf->end;

		idx = ptr2idx(pf->end);

		for (i = idx; i <= last_idx;)
			page_dir[i++] = MALLOC_NOT_MINE;

		last_idx = idx - 1;

		/* XXX: We could realloc/shrink the pagedir here I guess. */
	}
	if (pt != NULL)
		ifree(pt);
}
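
/*
 * Worked example (editorial, hypothetical addresses): the free list is
 * kept sorted by address and coalesced.  With an existing run
 * [0x1000,0x2000) on the list, freeing the page at 0x2000 hits the
 * "append" case above (pf->end == ptr) and the run grows to
 * [0x1000,0x3000); if another run started at 0x3000 it would be
 * collapsed into the same entry as well.
 */
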
/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */

static inline void
free_bytes(void *ptr, size_t idx, struct pginfo *info)
{
	size_t i;
	struct pginfo **mp;
	void *vp;

	/* Find the chunk number on the page */
	i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;

	if (((size_t)(uintptr_t)ptr & (info->size - 1))) {
		wrtwarning("modified (chunk-) pointer.\n");
		return;
	}

	if (info->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
		wrtwarning("chunk is already free.\n");
		return;
	}

	if (malloc_junk)
		memset(ptr, SOME_JUNK, (size_t)info->size);

	info->bits[i/MALLOC_BITS] |= (u_int)(1UL << (i % MALLOC_BITS));
	info->free++;

	mp = page_dir + info->shift;

	if (info->free == 1) {

		/* Page became non-full */

		/* Insert in address order */
		while (*mp && (*mp)->next && (*mp)->next->page < info->page)
			mp = &(*mp)->next;
		info->next = *mp;
		*mp = info;
		return;
	}

	if (info->free != info->total)
		return;

	/* Find & remove this page in the queue */
	while (*mp != info) {
		mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
		if (!*mp)
			wrterror("(ES): Not on queue.\n");
#endif /* MALLOC_EXTRA_SANITY */
	}
	*mp = info->next;

	/* Free the page & the info structure if need be */
	page_dir[idx] = MALLOC_FIRST;
	vp = info->page;		/* Order is important ! */
	if (vp != (void *)info)
		ifree(info);
	ifree(vp);
}

static void
ifree(void *ptr)
{
	struct pginfo *info;
	size_t idx;

	/* This is legal */
	if (ptr == NULL)
		return;

	/* If we're already sinking, don't make matters any worse. */
	if (suicide)
		return;

	idx = ptr2idx(ptr);

	if (idx < malloc_pageshift) {
		wrtwarning("junk pointer, too low to make sense.\n");
		return;
	}

	if (idx > last_idx) {
		wrtwarning("junk pointer, too high to make sense.\n");
		return;
	}

	info = page_dir[idx];

	if (info < MALLOC_MAGIC)
		free_pages(ptr, idx, info);
	else
		free_bytes(ptr, idx, info);
	return;
}

static int malloc_active;	/* Recursion flag for public interface. */
static unsigned malloc_started;	/* Set when initialization has been done */
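
/*
 * Illustrative sketch (editorial, hypothetical handler): a signal
 * handler that allocates can re-enter malloc while a lock is held and
 * malloc_active is set.  Per the comment in pubrealloc() below, a
 * non-recursive lock would deadlock; where the lock does let the call
 * through, malloc_active makes it fail with EINVAL instead of
 * corrupting allocator state.
 */
#if 0
static void
handler(int sig)
{
	void *p = malloc(32);	/* nested call: NULL, errno == EINVAL */
	(void)p;
	(void)sig;
}
#endif
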
static void *
pubrealloc(void *ptr, size_t size, const char *func)
{
	void *r;
	int err = 0;

	/*
	 * If a thread is inside our code with a functional lock held, and then
	 * catches a signal which calls us again, we would get a deadlock if
	 * the lock is not of a recursive type.
	 */
	_MALLOC_LOCK();
	malloc_func = func;
	if (malloc_active > 0) {
		if (malloc_active == 1) {
			wrtwarning("recursive call\n");
			malloc_active = 2;
		}
		_MALLOC_UNLOCK();
		errno = EINVAL;
		return (NULL);
	}
	malloc_active = 1;

	if (!malloc_started) {
		if (ptr != NULL) {
			wrtwarning("malloc() has never been called\n");
			malloc_active = 0;
			_MALLOC_UNLOCK();
			errno = EINVAL;
			return (NULL);
		}
		malloc_init();
		malloc_started = 1;
	}

	if (ptr == ZEROSIZEPTR)
		ptr = NULL;
	if (malloc_sysv && !size) {
		if (ptr != NULL)
			ifree(ptr);
		r = NULL;
	} else if (!size) {
		if (ptr != NULL)
			ifree(ptr);
		r = ZEROSIZEPTR;
	} else if (ptr == NULL) {
		r = imalloc(size);
		err = (r == NULL);
	} else {
		r = irealloc(ptr, size);
		err = (r == NULL);
	}
	UTRACE(ptr, size, r);
	malloc_active = 0;
	_MALLOC_UNLOCK();
	if (malloc_xmalloc && err)
		wrterror("out of memory\n");
	if (err)
		errno = ENOMEM;
	return (r);
}

/*
 * These are the public exported interface routines.
 */

void *
malloc(size_t size)
{

	return pubrealloc(NULL, size, " in malloc():");
}

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int err;
	void *result;

	if (!malloc_started) {
		malloc_init();
		malloc_started = 1;
	}
	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
	    alignment > malloc_pagesize)
		return EINVAL;

	/*
	 * (size | alignment) is enough to assure the requested alignment,
	 * since the allocator always allocates power-of-two blocks.
	 */
	err = errno;	/* Protect errno against changes in pubrealloc(). */
	result = pubrealloc(NULL, (size | alignment), " in posix_memalign()");
	errno = err;

	if (result == NULL)
		return ENOMEM;

	*memptr = result;
	return 0;
}

void *
calloc(size_t num, size_t size)
{
	void *ret;

	if (size != 0 && (num * size) / size != num) {
		/* size_t overflow. */
		errno = ENOMEM;
		return (NULL);
	}

	ret = pubrealloc(NULL, num * size, " in calloc():");

	if (ret != NULL)
		memset(ret, 0, num * size);

	return ret;
}

void
free(void *ptr)
{

	pubrealloc(ptr, 0, " in free():");
}

void *
realloc(void *ptr, size_t size)
{

	return pubrealloc(ptr, size, " in realloc():");
}

/*
 * Begin library-private functions, used by threading libraries for protection
 * of malloc during fork().  These functions are only called if the program is
 * running in threaded mode, so there is no need to check whether the program
 * is threaded here.
 */

void
_malloc_prefork(void)
{

	_MALLOC_LOCK();
}

void
_malloc_postfork(void)
{

	_MALLOC_UNLOCK();
}
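
/*
 * Worked example (editorial): posix_memalign(&p, 256, 100) above
 * requests pubrealloc(NULL, 100 | 256, ...) == 356 bytes.  The chunk
 * allocator rounds that up to the next power of two, a 512-byte chunk,
 * and chunks of size 2^k within a page are naturally 2^k-aligned, so
 * the result is 256-aligned as required:
 */
#if 0
	void *p;
	int rc = posix_memalign(&p, 256, 100);	/* p gets a 512-byte chunk */
#endif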