/*	$OpenBSD: malloc.c,v 1.174 2015/04/06 09:18:51 tedu Exp $	*/
/*
 * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net>
 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return. Poul-Henning Kamp
 */

/* #define MALLOC_STATS */

#include <sys/types.h>
#include <sys/param.h>	/* PAGE_SHIFT ALIGN */
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#ifdef MALLOC_STATS
#include <sys/tree.h>
#include <fcntl.h>
#endif

#include "thread_private.h"

#if defined(__sparc__) && !defined(__sparcv9__)
#define MALLOC_PAGESHIFT	(13U)
#elif defined(__mips64__)
#define MALLOC_PAGESHIFT	(14U)
#else
#define MALLOC_PAGESHIFT	(PAGE_SHIFT)
#endif

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNK_MASK	15
#define MALLOC_INITIAL_REGIONS	512
#define MALLOC_DEFAULT_CACHE	64
#define MALLOC_CHUNK_LISTS	4

/*
 * When the P option is active, we move allocations between half a page
 * and a whole page towards the end, subject to alignment constraints.
 * This is the extra headroom we allow. Set to zero to be the most
 * strict.
 */
#define MALLOC_LEEWAY		0

#define PAGEROUND(x)	(((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)

/*
 * What to use for Junk. This is the byte value we use to fill with
 * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
 * and SOME_FREEJUNK right before free.
 */

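/*
 * Illustrative note (not part of the original source): with the 'J'
 * option, a use-after-free or use of uninitialized memory tends to
 * show up as memory full of these marker bytes.  A minimal sketch,
 * assuming the defaults below:
 *
 *	char *p = malloc(16);	// with 'J': p[0..15] == 0xd0 (SOME_JUNK)
 *	free(p);		// with 'J': p[0..15] == 0xdf (SOME_FREEJUNK)
 *
 * Reading 0xd0d0d0d0 or 0xdfdfdfdf out of a pointer or length field is
 * therefore a strong hint of missing initialization or of a dangling
 * pointer, respectively.
 */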
#define SOME_JUNK		0xd0	/* as in "Duh" :-) */
#define SOME_FREEJUNK		0xdf

#define MMAP(sz)	mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

#define MMAPA(a,sz)	mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

#define MQUERY(a, sz)	mquery((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, (off_t)0)

#define _MALLOC_LEAVE() if (__isthreaded) do { \
	malloc_active--; \
	_MALLOC_UNLOCK(); \
} while (0)
#define _MALLOC_ENTER() if (__isthreaded) do { \
	_MALLOC_LOCK(); \
	malloc_active++; \
} while (0)

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
#ifdef MALLOC_STATS
	void *f;		/* where allocated from */
#endif
};

LIST_HEAD(chunk_head, chunk_info);

struct dir_info {
	u_int32_t canary1;
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_free;		/* number of free slots */
					/* lists of free chunk info structs */
	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
					/* lists of chunks with free slots */
	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
	size_t free_regions_size;	/* free pages cached */
					/* free pages cache */
	struct region_info free_regions[MALLOC_MAXCACHE];
					/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
	size_t rbytesused;		/* random bytes used */
	u_char rbytes[32];		/* random bytes */
	u_short chunk_start;
#ifdef MALLOC_STATS
	size_t inserts;
	size_t insert_collisions;
	size_t finds;
	size_t find_collisions;
	size_t deletes;
	size_t delete_moves;
	size_t cheap_realloc_tries;
	size_t cheap_reallocs;
	size_t malloc_used;		/* bytes allocated */
	size_t malloc_guarded;		/* bytes used for guards */
#define STATS_ADD(x,y)	((x) += (y))
#define STATS_SUB(x,y)	((x) -= (y))
#define STATS_INC(x)	((x)++)
#define STATS_ZERO(x)	((x) = 0)
#define STATS_SETF(x,y)	((x)->f = (y))
#else
#define STATS_ADD(x,y)	/* nothing */
#define STATS_SUB(x,y)	/* nothing */
#define STATS_INC(x)	/* nothing */
#define STATS_ZERO(x)	/* nothing */
#define STATS_SETF(x,y)	/* nothing */
#endif /* MALLOC_STATS */
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
			~MALLOC_PAGEMASK)

/*
 * This structure describes a page worth of chunks.
 *
 * How many bits per u_short in the bitmap
 */
#define MALLOC_BITS	(NBBY * sizeof(u_short))
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;			/* pointer to the page */
	u_int32_t canary;
	u_short size;			/* size of this page's chunks */
	u_short shift;			/* how far to shift for this size */
	u_short free;			/* how many free chunks */
	u_short total;			/* how many chunks */
					/* which chunks are free */
	u_short bits[1];
};

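/*
 * Worked example of the bitmap sizing (illustrative, assuming
 * 4096-byte pages): a chunk_info for the smallest size class tracks
 * MALLOC_PAGESIZE / MALLOC_MINSIZE = 4096 / 16 = 256 chunks, so its
 * bitmap needs 256 / MALLOC_BITS = 256 / 16 = 16 u_shorts.  The
 * flexible bits[1] member is therefore over-allocated by
 * alloc_chunk_info() below to hold the extra bitmap words.
 */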

struct malloc_readonly {
	struct dir_info *malloc_pool;	/* Main bookkeeping information */
	int	malloc_abort;		/* abort() on error */
	int	malloc_freenow;		/* Free quickly - disable chunk rnd */
	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
	int	malloc_hint;		/* call madvise on free pages? */
	int	malloc_junk;		/* junk fill? */
	int	malloc_move;		/* move allocations to end of page? */
	int	malloc_realloc;		/* always realloc? */
	int	malloc_xmalloc;		/* xmalloc behaviour? */
	size_t	malloc_guard;		/* use guard pages after allocations? */
	u_int	malloc_cache;		/* free pages we cache */
#ifdef MALLOC_STATS
	int	malloc_stats;		/* dump statistics at end */
#endif
	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
};

/* This object is mapped PROT_READ after initialisation to prevent tampering */
static union {
	struct malloc_readonly mopts;
	u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
#define mopts	malloc_readonly.mopts
#define getpool() mopts.malloc_pool

char		*malloc_options;	/* compile-time options */
static char	*malloc_func;		/* current function */
static int	malloc_active;		/* status of malloc */

static u_char getrbyte(struct dir_info *d);

extern char	*__progname;

#ifdef MALLOC_STATS
void malloc_dump(int);
static void malloc_exit(void);
#define CALLER	__builtin_return_address(0)
#else
#define CALLER	NULL
#endif

/* low bits of r->p determine size: 0 means >= page size and r->size holding
 * the real size, otherwise r->size is a shift count, or 1 for malloc(0)
 */
#define REALSIZE(sz, r)						\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))

static inline size_t
hash(void *p)
{
	size_t sum;
	uintptr_t u;

	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u;
	sum = (sum << 7) - sum + (u >> 16);
#ifdef __LP64__
	sum = (sum << 7) - sum + (u >> 32);
	sum = (sum << 7) - sum + (u >> 48);
#endif
	return sum;
}

static void
wrterror(char *msg, void *p)
{
	char		*q = " error: ";
	struct iovec	iov[7];
	char		pidbuf[20];
	char		buf[20];
	int		saved_errno = errno;

	iov[0].iov_base = __progname;
	iov[0].iov_len = strlen(__progname);
	iov[1].iov_base = pidbuf;
	snprintf(pidbuf, sizeof(pidbuf), "(%d) in ", getpid());
	iov[1].iov_len = strlen(pidbuf);
	iov[2].iov_base = malloc_func;
	iov[2].iov_len = strlen(malloc_func);
	iov[3].iov_base = q;
	iov[3].iov_len = strlen(q);
	iov[4].iov_base = msg;
	iov[4].iov_len = strlen(msg);
	iov[5].iov_base = buf;
	if (p == NULL)
		iov[5].iov_len = 0;
	else {
		snprintf(buf, sizeof(buf), " %p", p);
		iov[5].iov_len = strlen(buf);
	}
	iov[6].iov_base = "\n";
	iov[6].iov_len = 1;
	writev(STDERR_FILENO, iov, 7);

#ifdef MALLOC_STATS
	if (mopts.malloc_stats)
		malloc_dump(STDERR_FILENO);
#endif /* MALLOC_STATS */

	errno = saved_errno;
	if (mopts.malloc_abort)
		abort();
}

static void
rbytes_init(struct dir_info *d)
{
	arc4random_buf(d->rbytes, sizeof(d->rbytes));
	/* add 1 to account for using d->rbytes[0] */
	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
}

static inline u_char
getrbyte(struct dir_info *d)
{
	u_char x;

	if (d->rbytesused >= sizeof(d->rbytes))
		rbytes_init(d);
	x = d->rbytes[d->rbytesused++];
	return x;
}

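/*
 * Worked example of the REALSIZE() encoding above (illustrative only):
 * for a page-sized or larger allocation the low bits of r->p are 0 and
 * r->size holds the byte count.  For a chunk page the low bits hold
 * (bucket + 1), so e.g.
 *
 *	r->p == page | 6	REALSIZE() yields 1 << 5 == 32
 *	r->p == page | 1	REALSIZE() yields 0 (malloc(0) chunk)
 *
 * which matches omalloc_make_chunks() below inserting chunk pages as
 * (uintptr_t)pp | (bits + 1).
 */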

/*
 * Cache maintenance. We keep at most malloc_cache pages cached.
 * If the cache is becoming full, unmap pages in the cache for real,
 * and then add the region to the cache.
 * As opposed to the regular region data structure, the sizes in the
 * cache are in MALLOC_PAGESIZE units.
 */
static void
unmap(struct dir_info *d, void *p, size_t sz)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	size_t rsz, tounmap;
	struct region_info *r;
	u_int i, offset;

	if (sz != PAGEROUND(sz)) {
		wrterror("munmap round", NULL);
		return;
	}

	if (psz > mopts.malloc_cache) {
		i = munmap(p, sz);
		if (i)
			wrterror("munmap", p);
		STATS_SUB(d->malloc_used, sz);
		return;
	}
	tounmap = 0;
	rsz = mopts.malloc_cache - d->free_regions_size;
	if (psz > rsz)
		tounmap = psz - rsz;
	offset = getrbyte(d);
	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror("munmap", r->p);
			r->p = NULL;
			if (tounmap > r->size)
				tounmap -= r->size;
			else
				tounmap = 0;
			d->free_regions_size -= r->size;
			r->size = 0;
			STATS_SUB(d->malloc_used, rsz);
		}
	}
	if (tounmap > 0)
		wrterror("malloc cache underflow", NULL);
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p == NULL) {
			if (mopts.malloc_hint)
				madvise(p, sz, MADV_FREE);
			if (mopts.malloc_freeunmap)
				mprotect(p, sz, PROT_NONE);
			r->p = p;
			r->size = psz;
			d->free_regions_size += psz;
			break;
		}
	}
	if (i == mopts.malloc_cache)
		wrterror("malloc free slot lost", NULL);
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror("malloc cache overflow", NULL);
}

static void
zapcacheregion(struct dir_info *d, void *p, size_t len)
{
	u_int i;
	struct region_info *r;
	size_t rsz;

	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[i];
		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror("munmap", r->p);
			r->p = NULL;
			d->free_regions_size -= r->size;
			r->size = 0;
			STATS_SUB(d->malloc_used, rsz);
		}
	}
}

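/*
 * Worked example of the cache bookkeeping in unmap() above (numbers
 * assumed for illustration): with malloc_cache == 64 pages and
 * free_regions_size == 62, freeing a 3-page region gives
 *
 *	tounmap = psz - (malloc_cache - free_regions_size)
 *	        = 3 - (64 - 62) = 1
 *
 * so at least one cached page is really munmap()ed, starting at a
 * random slot, before the new region is parked in the cache.
 */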

static void *
map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	struct region_info *r, *big = NULL;
	u_int i, offset;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt", NULL);
	if (sz != PAGEROUND(sz)) {
		wrterror("map round", NULL);
		return MAP_FAILED;
	}
	if (!hint && psz > d->free_regions_size) {
		_MALLOC_LEAVE();
		p = MMAP(sz);
		_MALLOC_ENTER();
		if (p != MAP_FAILED)
			STATS_ADD(d->malloc_used, sz);
		/* zero fill not needed */
		return p;
	}
	offset = getrbyte(d);
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			if (hint && r->p != hint)
				continue;
			if (r->size == psz) {
				p = r->p;
				r->p = NULL;
				r->size = 0;
				d->free_regions_size -= psz;
				if (mopts.malloc_freeunmap)
					mprotect(p, sz, PROT_READ | PROT_WRITE);
				if (mopts.malloc_hint)
					madvise(p, sz, MADV_NORMAL);
				if (zero_fill)
					memset(p, 0, sz);
				else if (mopts.malloc_junk == 2 &&
				    mopts.malloc_freeunmap)
					memset(p, SOME_FREEJUNK, sz);
				return p;
			} else if (r->size > psz)
				big = r;
		}
	}
	if (big != NULL) {
		r = big;
		p = r->p;
		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
		if (mopts.malloc_freeunmap)
			mprotect(p, sz, PROT_READ | PROT_WRITE);
		if (mopts.malloc_hint)
			madvise(p, sz, MADV_NORMAL);
		r->size -= psz;
		d->free_regions_size -= psz;
		if (zero_fill)
			memset(p, 0, sz);
		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
			memset(p, SOME_FREEJUNK, sz);
		return p;
	}
	if (hint)
		return MAP_FAILED;
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror("malloc cache", NULL);
	_MALLOC_LEAVE();
	p = MMAP(sz);
	_MALLOC_ENTER();
	if (p != MAP_FAILED)
		STATS_ADD(d->malloc_used, sz);
	/* zero fill not needed */
	return p;
}

/*
 * Initialize a dir_info, which should have been cleared by caller
 */
static int
omalloc_init(struct dir_info **dp)
{
	char *p, b[64];
	int i, j;
	size_t d_avail, regioninfo_size;
	struct dir_info *d;

	/*
	 * Default options
	 */
	mopts.malloc_abort = 1;
	mopts.malloc_junk = 1;
	mopts.malloc_move = 1;
	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;

	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0:
			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
			if (j <= 0)
				continue;
			b[j] = '\0';
			p = b;
			break;
		case 1:
			if (issetugid() == 0)
				p = getenv("MALLOC_OPTIONS");
			else
				continue;
			break;
		case 2:
			p = malloc_options;
			break;
		default:
			p = NULL;
		}

		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case '>':
				mopts.malloc_cache <<= 1;
				if (mopts.malloc_cache > MALLOC_MAXCACHE)
					mopts.malloc_cache = MALLOC_MAXCACHE;
				break;
			case '<':
				mopts.malloc_cache >>= 1;
				break;
			case 'a':
				mopts.malloc_abort = 0;
				break;
			case 'A':
				mopts.malloc_abort = 1;
				break;
#ifdef MALLOC_STATS
			case 'd':
				mopts.malloc_stats = 0;
				break;
			case 'D':
				mopts.malloc_stats = 1;
				break;
#endif /* MALLOC_STATS */
			case 'f':
				mopts.malloc_freenow = 0;
				mopts.malloc_freeunmap = 0;
				break;
			case 'F':
				mopts.malloc_freenow = 1;
				mopts.malloc_freeunmap = 1;
				break;
			case 'g':
				mopts.malloc_guard = 0;
				break;
			case 'G':
				mopts.malloc_guard = MALLOC_PAGESIZE;
				break;
			case 'h':
				mopts.malloc_hint = 0;
				break;
			case 'H':
				mopts.malloc_hint = 1;
				break;
			case 'j':
				mopts.malloc_junk = 0;
				break;
			case 'J':
				mopts.malloc_junk = 2;
				break;
			case 'n':
			case 'N':
				break;
			case 'p':
				mopts.malloc_move = 0;
				break;
			case 'P':
				mopts.malloc_move = 1;
				break;
			case 'r':
				mopts.malloc_realloc = 0;
				break;
			case 'R':
				mopts.malloc_realloc = 1;
				break;
			case 's':
				mopts.malloc_freeunmap = mopts.malloc_junk = 0;
				mopts.malloc_guard = 0;
				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
				break;
			case 'S':
				mopts.malloc_freeunmap = 1;
				mopts.malloc_junk = 2;
				mopts.malloc_guard = MALLOC_PAGESIZE;
				mopts.malloc_cache = 0;
				break;
			case 'u':
				mopts.malloc_freeunmap = 0;
				break;
			case 'U':
				mopts.malloc_freeunmap = 1;
				break;
			case 'x':
				mopts.malloc_xmalloc = 0;
				break;
			case 'X':
				mopts.malloc_xmalloc = 1;
				break;
			default: {
				static const char q[] = "malloc() warning: "
				    "unknown char in MALLOC_OPTIONS\n";
				write(STDERR_FILENO, q, sizeof(q) - 1);
				break;
			}
			}
		}
	}

#ifdef MALLOC_STATS
	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
		static const char q[] = "malloc() warning: atexit(2) failed."
		    " Will not be able to dump stats on exit\n";
		write(STDERR_FILENO, q, sizeof(q) - 1);
	}
#endif /* MALLOC_STATS */

	while ((mopts.malloc_canary = arc4random()) == 0)
		;

	/*
	 * Allocate dir_info with a guard page on either side. Also
	 * randomise offset inside the page at which the dir_info
	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
	 */
	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
		return -1;
	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
	mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
	    MALLOC_PAGESIZE, PROT_NONE);
	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));

	rbytes_init(d);
	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
	regioninfo_size = d->regions_total * sizeof(struct region_info);
	d->r = MMAP(regioninfo_size);
	if (d->r == MAP_FAILED) {
		wrterror("malloc init mmap failed", NULL);
		d->regions_total = 0;
		return 1;
	}
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		LIST_INIT(&d->chunk_info_list[i]);
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
			LIST_INIT(&d->chunk_dir[i][j]);
	}
	STATS_ADD(d->malloc_used, regioninfo_size);
	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
	d->canary2 = ~d->canary1;

	*dp = d;

	/*
	 * Options have been set and will never be reset.
	 * Prevent further tampering with them.
	 */
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);

	return 0;
}

static int
omalloc_grow(struct dir_info *d)
{
	size_t newtotal;
	size_t newsize;
	size_t mask;
	size_t i;
	struct region_info *p;

	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
		return 1;

	newtotal = d->regions_total * 2;
	newsize = newtotal * sizeof(struct region_info);
	mask = newtotal - 1;

	p = MMAP(newsize);
	if (p == MAP_FAILED)
		return 1;

	STATS_ADD(d->malloc_used, newsize);
	memset(p, 0, newsize);
	STATS_ZERO(d->inserts);
	STATS_ZERO(d->insert_collisions);
	for (i = 0; i < d->regions_total; i++) {
		void *q = d->r[i].p;
		if (q != NULL) {
			size_t index = hash(q) & mask;
			STATS_INC(d->inserts);
			while (p[index].p != NULL) {
				index = (index - 1) & mask;
				STATS_INC(d->insert_collisions);
			}
			p[index] = d->r[i];
		}
	}
	/* avoid pages containing meta info to end up in cache */
	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
		wrterror("munmap", d->r);
	else
		STATS_SUB(d->malloc_used,
		    d->regions_total * sizeof(struct region_info));
	d->regions_free = d->regions_free + d->regions_total;
	d->regions_total = newtotal;
	d->r = p;
	return 0;
}

static struct chunk_info *
alloc_chunk_info(struct dir_info *d, int bits)
{
	struct chunk_info *p;
	size_t size, count;

	if (bits == 0)
		count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
	else
		count = MALLOC_PAGESIZE >> bits;

	size = howmany(count, MALLOC_BITS);
	size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
	size = ALIGN(size);

	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
		char *q;
		int i;

		q = MMAP(MALLOC_PAGESIZE);
		if (q == MAP_FAILED)
			return NULL;
		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
		count = MALLOC_PAGESIZE / size;
		for (i = 0; i < count; i++, q += size)
			LIST_INSERT_HEAD(&d->chunk_info_list[bits],
			    (struct chunk_info *)q, entries);
	}
	p = LIST_FIRST(&d->chunk_info_list[bits]);
	LIST_REMOVE(p, entries);
	memset(p, 0, size);
	p->canary = d->canary1;
	return p;
}

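/*
 * Sketch of the region-table probing used by insert(), find() and
 * delete() below (assumed values, illustration only): the table size
 * is always a power of two and collisions probe downwards, e.g. with
 * regions_total == 512:
 *
 *	mask  = 512 - 1;
 *	index = hash(p) & mask;		// start slot
 *	index = (index - 1) & mask;	// next slot on collision
 *
 * find() masks the query pointer with MASK_POINTER() first, so any
 * pointer into a chunk page resolves to the region entry recorded for
 * that page; delete() closes the resulting gaps with Knuth's
 * algorithm R.
 */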

/*
 * The hashtable uses the assumption that p is never NULL. This holds since
 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
 */
static int
insert(struct dir_info *d, void *p, size_t sz, void *f)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	STATS_INC(d->inserts);
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
		STATS_INC(d->insert_collisions);
	}
	d->r[index].p = p;
	d->r[index].size = sz;
#ifdef MALLOC_STATS
	d->r[index].f = f;
#endif
	d->regions_free--;
	return 0;
}

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt", NULL);
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	STATS_INC(d->finds);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
		STATS_INC(d->find_collisions);
	}
	return (q == p && r != NULL) ? &d->r[index] : NULL;
}

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror("regions_total not 2^x", NULL);
	d->regions_free++;
	STATS_INC(getpool()->deletes);

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			STATS_INC(getpool()->delete_moves);
			break;
		}
	}
}

/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
{
	struct chunk_info *bp;
	void *pp;
	int i, k;

	/* Allocate a new bucket */
	pp = map(d, NULL, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;

	bp = alloc_chunk_info(d, bits);
	if (bp == NULL) {
		unmap(d, pp, MALLOC_PAGESIZE);
		return NULL;
	}

	/* memory protect the page allocated in the malloc(0) case */
	if (bits == 0) {
		bp->size = 0;
		bp->shift = 1;
		i = MALLOC_MINSIZE - 1;
		while (i >>= 1)
			bp->shift++;
		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
		bp->page = pp;

		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
		if (k < 0) {
			unmap(d, pp, MALLOC_PAGESIZE);
			LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
			return NULL;
		}
	} else {
		bp->size = 1U << bits;
		bp->shift = bits;
		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
		bp->page = pp;
	}

	/* set all valid bits in the bitmap */
	k = bp->total;
	i = 0;

	/* Do a bunch at a time */
	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
		bp->bits[i / MALLOC_BITS] = (u_short)~0U;

	for (; i < k; i++)
		bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);

	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);

	bits++;
	if ((uintptr_t)pp & bits)
		wrterror("pp & bits", pp);

	insert(d, (void *)((uintptr_t)pp | bits),
	    (uintptr_t)bp, NULL);
	return bp;
}

/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t size, void *f)
{
	int i, j, listnum;
	size_t k;
	u_short u, *lp;
	struct chunk_info *bp;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt", NULL);
	/* Don't bother with anything less than this */
	/* unless we have a malloc(0) request */
	if (size != 0 && size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;

	/* Find the right bucket */
	if (size == 0)
		j = 0;
	else {
		j = MALLOC_MINSHIFT;
		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
		while (i >>= 1)
			j++;
	}

	listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
	/* If it's empty, make a page more of that size chunks */
	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
		bp = omalloc_make_chunks(d, j, listnum);
		if (bp == NULL)
			return NULL;
	}

	if (bp->canary != d->canary1)
		wrterror("chunk info corrupted", NULL);

	i = d->chunk_start;
	if (bp->free > 1)
		i += getrbyte(d);
	if (i >= bp->total)
		i &= bp->total - 1;
	for (;;) {
		for (;;) {
			lp = &bp->bits[i / MALLOC_BITS];
			if (!*lp) {
				i += MALLOC_BITS;
				i &= ~(MALLOC_BITS - 1);
				if (i >= bp->total)
					i = 0;
			} else
				break;
		}
		k = i % MALLOC_BITS;
		u = 1 << k;
		if (*lp & u)
			break;
		if (++i >= bp->total)
			i = 0;
	}
	d->chunk_start += i + 1;
#ifdef MALLOC_STATS
	if (i == 0) {
		struct region_info *r = find(d, bp->page);
		r->f = f;
	}
#endif

	*lp ^= u;

	/* If there are no more free, remove from free-list */
	if (!--bp->free)
		LIST_REMOVE(bp, entries);

	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;
	k <<= bp->shift;

	if (mopts.malloc_junk == 2 && bp->size > 0)
		memset((char *)bp->page + k, SOME_JUNK, bp->size);
	return ((char *)bp->page + k);
}

static uint32_t
find_chunknum(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_info *info;
	uint32_t chunknum;

	info = (struct chunk_info *)r->size;
	if (info->canary != d->canary1)
		wrterror("chunk info corrupted", NULL);

	/* Find the chunk number on the page */
	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;

	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1)) {
		wrterror("modified chunk-pointer", ptr);
		return -1;
	}
	if (info->bits[chunknum / MALLOC_BITS] &
	    (1U << (chunknum % MALLOC_BITS))) {
		wrterror("chunk is already free", ptr);
		return -1;
	}
	return chunknum;
}

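/*
 * Worked example of the bucket search in malloc_bytes() above (request
 * size assumed for illustration): for size == 24,
 *
 *	j = MALLOC_MINSHIFT;			// 4
 *	i = (24 - 1) >> (MALLOC_MINSHIFT - 1);	// 23 >> 3 == 2
 *	while (i >>= 1) j++;			// one iteration, j == 5
 *
 * so the request is served from a page of 1 << 5 == 32-byte chunks,
 * starting the bitmap scan at a randomised position.
 */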

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_head *mp;
	struct chunk_info *info;
	uint32_t chunknum;
	int listnum;

	info = (struct chunk_info *)r->size;
	if ((chunknum = find_chunknum(d, r, ptr)) == -1)
		return;

	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
	info->free++;

	if (info->free == 1) {
		/* Page became non-full */
		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
		if (info->size != 0)
			mp = &d->chunk_dir[info->shift][listnum];
		else
			mp = &d->chunk_dir[0][listnum];

		LIST_INSERT_HEAD(mp, info, entries);
		return;
	}

	if (info->free != info->total)
		return;

	LIST_REMOVE(info, entries);

	if (info->size == 0 && !mopts.malloc_freeunmap)
		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE);

	delete(d, r);
	if (info->size != 0)
		mp = &d->chunk_info_list[info->shift];
	else
		mp = &d->chunk_info_list[0];
	LIST_INSERT_HEAD(mp, info, entries);
}

static void *
omalloc(size_t sz, int zero_fill, void *f)
{
	struct dir_info *pool = getpool();
	void *p;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
			errno = ENOMEM;
			return NULL;
		}
		sz += mopts.malloc_guard;
		psz = PAGEROUND(sz);
		p = map(pool, NULL, psz, zero_fill);
		if (p == MAP_FAILED) {
			errno = ENOMEM;
			return NULL;
		}
		if (insert(pool, p, sz, f)) {
			unmap(pool, p, psz);
			errno = ENOMEM;
			return NULL;
		}
		if (mopts.malloc_guard) {
			if (mprotect((char *)p + psz - mopts.malloc_guard,
			    mopts.malloc_guard, PROT_NONE))
				wrterror("mprotect", NULL);
			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
		}

		if (mopts.malloc_move &&
		    sz - mopts.malloc_guard < MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			/* fill whole allocation */
			if (mopts.malloc_junk == 2)
				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
			/* shift towards the end */
			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
			/* fill zeros if needed and overwritten above */
			if (zero_fill && mopts.malloc_junk == 2)
				memset(p, 0, sz - mopts.malloc_guard);
		} else {
			if (mopts.malloc_junk == 2) {
				if (zero_fill)
					memset((char *)p + sz - mopts.malloc_guard,
					    SOME_JUNK, psz - sz);
				else
					memset(p, SOME_JUNK,
					    psz - mopts.malloc_guard);
			}
		}

	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(pool, sz, f);
		if (zero_fill && p != NULL && sz > 0)
			memset(p, 0, sz);
	}

	return p;
}

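/*
 * Worked example of the large-allocation path in omalloc() above
 * (sizes assumed for illustration, 4096-byte pages, 'G' enabled so
 * malloc_guard == 4096): a request for 5000 bytes gives
 *
 *	sz  = 5000 + 4096 = 9096
 *	psz = PAGEROUND(9096) = 12288	(three pages)
 *
 * and the last page is mprotect()ed PROT_NONE as the guard.  A request
 * between MALLOC_MAXCHUNK and a page (say 3000 bytes) is additionally
 * shifted towards the end of its page when the P option is active, so
 * an overrun runs into the guard sooner.
 */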

/*
 * Common function for handling recursion. Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
static void
malloc_recurse(void)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror("recursive call", NULL);
	}
	malloc_active--;
	_MALLOC_UNLOCK();
	errno = EDEADLK;
}

static int
malloc_init(void)
{
	if (omalloc_init(&mopts.malloc_pool)) {
		_MALLOC_UNLOCK();
		if (mopts.malloc_xmalloc)
			wrterror("out of memory", NULL);
		errno = ENOMEM;
		return -1;
	}
	return 0;
}

void *
malloc(size_t size)
{
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	malloc_func = "malloc():";
	if (getpool() == NULL) {
		if (malloc_init() != 0)
			return NULL;
	}

	if (malloc_active++) {
		malloc_recurse();
		return NULL;
	}
	r = omalloc(size, 0, CALLER);
	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror("out of memory", NULL);
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}

static void
ofree(void *p)
{
	struct dir_info *pool = getpool();
	struct region_info *r;
	size_t sz;

	r = find(pool, p);
	if (r == NULL) {
		wrterror("bogus pointer (double free?)", p);
		return;
	}
	REALSIZE(sz, r);
	if (sz > MALLOC_MAXCHUNK) {
		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			if (r->p != p) {
				wrterror("bogus pointer", p);
				return;
			}
		} else {
#if notyetbecause_of_realloc
			/* shifted towards the end */
			if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
			    MALLOC_MINSIZE - sz - mopts.malloc_guard) &
			    ~(MALLOC_MINSIZE-1))) {
			}
#endif
			p = r->p;
		}
		if (mopts.malloc_guard) {
			if (sz < mopts.malloc_guard)
				wrterror("guard size", NULL);
			if (!mopts.malloc_freeunmap) {
				if (mprotect((char *)p + PAGEROUND(sz) -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror("mprotect", NULL);
			}
			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
		}
		if (mopts.malloc_junk && !mopts.malloc_freeunmap) {
			size_t amt = mopts.malloc_junk == 1 ? MALLOC_MAXCHUNK :
			    PAGEROUND(sz) - mopts.malloc_guard;
			memset(p, SOME_FREEJUNK, amt);
		}
		unmap(pool, p, PAGEROUND(sz));
		delete(pool, r);
	} else {
		void *tmp;
		int i;

		if (mopts.malloc_junk && sz > 0)
			memset(p, SOME_FREEJUNK, sz);
		if (!mopts.malloc_freenow) {
			if (find_chunknum(pool, r, p) == -1)
				return;
			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
			tmp = p;
			p = pool->delayed_chunks[i];
			if (tmp == p) {
				wrterror("double free", p);
				return;
			}
			pool->delayed_chunks[i] = tmp;
		}
		if (p != NULL) {
			r = find(pool, p);
			if (r == NULL) {
				wrterror("bogus pointer (double free?)", p);
				return;
			}
			free_bytes(pool, r, p);
		}
	}
}

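/*
 * Sketch of the delayed-free behaviour in ofree() above (illustrative,
 * default options): a freshly freed chunk is swapped into a random
 * slot of delayed_chunks[] and whatever previously occupied that slot
 * is the chunk that actually gets released:
 *
 *	i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;	// 0..15
 *	tmp = p;
 *	p = pool->delayed_chunks[i];	// an older free, or NULL
 *	pool->delayed_chunks[i] = tmp;
 *
 * so the most recently freed chunk is never the next one reused, and a
 * rapid double free of the same pointer is caught when both calls pick
 * the same slot.
 */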

void
free(void *ptr)
{
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	_MALLOC_LOCK();
	malloc_func = "free():";
	if (getpool() == NULL) {
		_MALLOC_UNLOCK();
		wrterror("free() called before allocation", NULL);
		return;
	}
	if (malloc_active++) {
		malloc_recurse();
		return;
	}
	ofree(ptr);
	malloc_active--;
	_MALLOC_UNLOCK();
	errno = saved_errno;
}

static void *
orealloc(void *p, size_t newsz, void *f)
{
	struct dir_info *pool = getpool();
	struct region_info *r;
	size_t oldsz, goldsz, gnewsz;
	void *q;

	if (p == NULL)
		return omalloc(newsz, 0, f);

	r = find(pool, p);
	if (r == NULL) {
		wrterror("bogus pointer (double free?)", p);
		return NULL;
	}
	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	REALSIZE(oldsz, r);
	goldsz = oldsz;
	if (oldsz > MALLOC_MAXCHUNK) {
		if (oldsz < mopts.malloc_guard)
			wrterror("guard size", NULL);
		oldsz -= mopts.malloc_guard;
	}

	gnewsz = newsz;
	if (gnewsz > MALLOC_MAXCHUNK)
		gnewsz += mopts.malloc_guard;

	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
	    !mopts.malloc_realloc) {
		size_t roldsz = PAGEROUND(goldsz);
		size_t rnewsz = PAGEROUND(gnewsz);

		if (rnewsz > roldsz) {
			if (!mopts.malloc_guard) {
				void *hint = (char *)p + roldsz;
				size_t needed = rnewsz - roldsz;

				STATS_INC(pool->cheap_realloc_tries);
				q = map(pool, hint, needed, 0);
				if (q == hint)
					goto gotit;
				zapcacheregion(pool, hint, needed);
				q = MQUERY(hint, needed);
				if (q == hint)
					q = MMAPA(hint, needed);
				else
					q = MAP_FAILED;
				if (q == hint) {
gotit:
					STATS_ADD(pool->malloc_used, needed);
					if (mopts.malloc_junk == 2)
						memset(q, SOME_JUNK, needed);
					r->size = newsz;
					STATS_SETF(r, f);
					STATS_INC(pool->cheap_reallocs);
					return p;
				} else if (q != MAP_FAILED) {
					if (munmap(q, needed))
						wrterror("munmap", q);
				}
			}
		} else if (rnewsz < roldsz) {
			if (mopts.malloc_guard) {
				if (mprotect((char *)p + roldsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror("mprotect", NULL);
				if (mprotect((char *)p + rnewsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_NONE))
					wrterror("mprotect", NULL);
			}
			unmap(pool, (char *)p + rnewsz, roldsz - rnewsz);
			r->size = gnewsz;
			STATS_SETF(r, f);
			return p;
		} else {
			if (newsz > oldsz && mopts.malloc_junk == 2)
				memset((char *)p + newsz, SOME_JUNK,
				    rnewsz - mopts.malloc_guard - newsz);
			r->size = gnewsz;
			STATS_SETF(r, f);
			return p;
		}
	}
	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
		if (mopts.malloc_junk == 2 && newsz > 0)
			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
		STATS_SETF(r, f);
		return p;
	} else if (newsz != oldsz || mopts.malloc_realloc) {
		q = omalloc(newsz, 0, f);
		if (q == NULL)
			return NULL;
		if (newsz != 0 && oldsz != 0)
			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
		ofree(p);
		return q;
	} else {
		STATS_SETF(r, f);
		return p;
	}
}

void *
realloc(void *ptr, size_t size)
{
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	malloc_func = "realloc():";
	if (getpool() == NULL) {
		if (malloc_init() != 0)
			return NULL;
	}
	if (malloc_active++) {
		malloc_recurse();
		return NULL;
	}
	r = orealloc(ptr, size, CALLER);

	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror("out of memory", NULL);
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))

void *
calloc(size_t nmemb, size_t size)
{
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	malloc_func = "calloc():";
	if (getpool() == NULL) {
		if (malloc_init() != 0)
			return NULL;
	}
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		_MALLOC_UNLOCK();
		if (mopts.malloc_xmalloc)
			wrterror("out of memory", NULL);
		errno = ENOMEM;
		return NULL;
	}

	if (malloc_active++) {
		malloc_recurse();
		return NULL;
	}

	size *= nmemb;
	r = omalloc(size, 1, CALLER);

	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror("out of memory", NULL);
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}

static void *
mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
{
	char *p, *q;

	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0) {
		wrterror("mapalign bad alignment", NULL);
		return MAP_FAILED;
	}
	if (sz != PAGEROUND(sz)) {
		wrterror("mapalign round", NULL);
		return MAP_FAILED;
	}

	/* Allocate sz + alignment bytes of memory, which must include a
	 * subrange of size bytes that is properly aligned. Unmap the
	 * other bytes, and then return that subrange.
	 */

	/* We need sz + alignment to fit into a size_t. */
	if (alignment > SIZE_MAX - sz)
		return MAP_FAILED;

	p = map(d, NULL, sz + alignment, zero_fill);
	if (p == MAP_FAILED)
		return MAP_FAILED;
	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
	if (q != p) {
		if (munmap(p, q - p))
			wrterror("munmap", p);
	}
	if (munmap(q + sz, alignment - (q - p)))
		wrterror("munmap", q + sz);
	STATS_SUB(d->malloc_used, alignment);

	return q;
}

static void *
omemalign(size_t alignment, size_t sz, int zero_fill, void *f)
{
	struct dir_info *pool = getpool();
	size_t psz;
	void *p;

	if (alignment <= MALLOC_PAGESIZE) {
		/*
		 * max(size, alignment) is enough to assure the requested alignment,
		 * since the allocator always allocates power-of-two blocks.
		 */
		if (sz < alignment)
			sz = alignment;
		return omalloc(sz, zero_fill, f);
	}

	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	sz += mopts.malloc_guard;
	psz = PAGEROUND(sz);

	p = mapalign(pool, alignment, psz, zero_fill);
	if (p == NULL) {
		errno = ENOMEM;
		return NULL;
	}

	if (insert(pool, p, sz, f)) {
		unmap(pool, p, psz);
		errno = ENOMEM;
		return NULL;
	}

	if (mopts.malloc_guard) {
		if (mprotect((char *)p + psz - mopts.malloc_guard,
		    mopts.malloc_guard, PROT_NONE))
			wrterror("mprotect", NULL);
		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
	}

	if (mopts.malloc_junk == 2) {
		if (zero_fill)
			memset((char *)p + sz - mopts.malloc_guard,
			    SOME_JUNK, psz - sz);
		else
			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
	}

	return p;
}

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int res, saved_errno = errno;
	void *r;

	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
		return EINVAL;

	_MALLOC_LOCK();
	malloc_func = "posix_memalign():";
	if (getpool() == NULL) {
		if (malloc_init() != 0)
			goto err;
	}
	if (malloc_active++) {
		malloc_recurse();
		goto err;
	}
	r = omemalign(alignment, size, 0, CALLER);
	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL) {
		if (mopts.malloc_xmalloc) {
			wrterror("out of memory", NULL);
			errno = ENOMEM;
		}
		goto err;
	}
	errno = saved_errno;
	*memptr = r;
	return 0;

err:
	res = errno;
	errno = saved_errno;
	return res;
}

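/*
 * Worked example of mapalign() above (sizes assumed for illustration):
 * for alignment == 8192 and a page-rounded sz == 4096, map() returns
 * 4096 + 8192 = 12288 bytes at some address p.  q is the first
 * 8192-aligned address in that range; the q - p bytes before q and the
 * alignment - (q - p) bytes after q + sz are munmap()ed again, leaving
 * exactly one aligned 4096-byte mapping.
 */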

#ifdef MALLOC_STATS

struct malloc_leak {
	void (*f)();
	size_t total_size;
	int count;
};

struct leaknode {
	RB_ENTRY(leaknode) entry;
	struct malloc_leak d;
};

static int
leakcmp(struct leaknode *e1, struct leaknode *e2)
{
	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
}

static RB_HEAD(leaktree, leaknode) leakhead;
RB_GENERATE_STATIC(leaktree, leaknode, entry, leakcmp)

static void
putleakinfo(void *f, size_t sz, int cnt)
{
	struct leaknode key, *p;
	static struct leaknode *page;
	static int used;

	if (cnt == 0)
		return;

	key.d.f = f;
	p = RB_FIND(leaktree, &leakhead, &key);
	if (p == NULL) {
		if (page == NULL ||
		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
			page = MMAP(MALLOC_PAGESIZE);
			if (page == MAP_FAILED)
				return;
			used = 0;
		}
		p = &page[used++];
		p->d.f = f;
		p->d.total_size = sz * cnt;
		p->d.count = cnt;
		RB_INSERT(leaktree, &leakhead, p);
	} else {
		p->d.total_size += sz * cnt;
		p->d.count += cnt;
	}
}

static struct malloc_leak *malloc_leaks;

static void
dump_leaks(int fd)
{
	struct leaknode *p;
	char buf[64];
	int i = 0;

	snprintf(buf, sizeof(buf), "Leak report\n");
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), " f sum # avg\n");
	write(fd, buf, strlen(buf));
	/* XXX only one page of summary */
	if (malloc_leaks == NULL)
		malloc_leaks = MMAP(MALLOC_PAGESIZE);
	if (malloc_leaks != MAP_FAILED)
		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
	RB_FOREACH(p, leaktree, &leakhead) {
		snprintf(buf, sizeof(buf), "%18p %7zu %6u %6zu\n", p->d.f,
		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
		write(fd, buf, strlen(buf));
		if (malloc_leaks == MAP_FAILED ||
		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
			continue;
		malloc_leaks[i].f = p->d.f;
		malloc_leaks[i].total_size = p->d.total_size;
		malloc_leaks[i].count = p->d.count;
		i++;
	}
}

static void
dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
{
	char buf[64];

	while (p != NULL) {
		snprintf(buf, sizeof(buf), "chunk %18p %18p %4d %d/%d\n",
		    p->page, ((p->bits[0] & 1) ? NULL : f),
		    p->size, p->free, p->total);
		write(fd, buf, strlen(buf));
		if (!fromfreelist) {
			if (p->bits[0] & 1)
				putleakinfo(NULL, p->size, p->total - p->free);
			else {
				putleakinfo(f, p->size, 1);
				putleakinfo(NULL, p->size,
				    p->total - p->free - 1);
			}
			break;
		}
		p = LIST_NEXT(p, entries);
		if (p != NULL) {
			snprintf(buf, sizeof(buf), " ");
			write(fd, buf, strlen(buf));
		}
	}
}

static void
dump_free_chunk_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i, j, count;
	struct chunk_info *p;

	snprintf(buf, sizeof(buf), "Free chunk structs:\n");
	write(fd, buf, strlen(buf));
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		count = 0;
		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
			count++;
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
			p = LIST_FIRST(&d->chunk_dir[i][j]);
			if (p == NULL && count == 0)
				continue;
			snprintf(buf, sizeof(buf), "%2d) %3d ", i, count);
			write(fd, buf, strlen(buf));
			if (p != NULL)
				dump_chunk(fd, p, NULL, 1);
			else
				write(fd, "\n", 1);
		}
	}
}

static void
dump_free_page_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i;

	snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
	    d->free_regions_size);
	write(fd, buf, strlen(buf));
	for (i = 0; i < mopts.malloc_cache; i++) {
		if (d->free_regions[i].p != NULL) {
			snprintf(buf, sizeof(buf), "%2d) ", i);
			write(fd, buf, strlen(buf));
			snprintf(buf, sizeof(buf), "free at %p: %zu\n",
			    d->free_regions[i].p, d->free_regions[i].size);
			write(fd, buf, strlen(buf));
		}
	}
}

static void
malloc_dump1(int fd, struct dir_info *d)
{
	char buf[100];
	size_t i, realsize;

	snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d);
	write(fd, buf, strlen(buf));
	if (d == NULL)
		return;
	snprintf(buf, sizeof(buf), "Region slots free %zu/%zu\n",
	    d->regions_free, d->regions_total);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Finds %zu/%zu\n", d->finds,
	    d->find_collisions);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Inserts %zu/%zu\n", d->inserts,
	    d->insert_collisions);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
	    d->delete_moves);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
	    d->cheap_reallocs, d->cheap_realloc_tries);
	write(fd, buf, strlen(buf));
	dump_free_chunk_info(fd, d);
	dump_free_page_info(fd, d);
	snprintf(buf, sizeof(buf),
	    "slot) hash d type page f size [free/n]\n");
	write(fd, buf, strlen(buf));
	for (i = 0; i < d->regions_total; i++) {
		if (d->r[i].p != NULL) {
			size_t h = hash(d->r[i].p) &
			    (d->regions_total - 1);
			snprintf(buf, sizeof(buf), "%4zx) #%4zx %zd ",
			    i, h, h - i);
			write(fd, buf, strlen(buf));
			REALSIZE(realsize, &d->r[i]);
			if (realsize > MALLOC_MAXCHUNK) {
				putleakinfo(d->r[i].f, realsize, 1);
				snprintf(buf, sizeof(buf),
				    "pages %12p %12p %zu\n", d->r[i].p,
				    d->r[i].f, realsize);
				write(fd, buf, strlen(buf));
			} else
				dump_chunk(fd,
				    (struct chunk_info *)d->r[i].size,
				    d->r[i].f, 0);
		}
	}
	snprintf(buf, sizeof(buf), "In use %zu\n", d->malloc_used);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Guarded %zu\n", d->malloc_guarded);
	write(fd, buf, strlen(buf));
	dump_leaks(fd);
	write(fd, "\n", 1);
}

void
malloc_dump(int fd)
{
	struct dir_info *pool = getpool();
	int i;
	void *p;
	struct region_info *r;
	int saved_errno = errno;

	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
		p = pool->delayed_chunks[i];
		if (p == NULL)
			continue;
		r = find(pool, p);
		if (r == NULL)
			wrterror("bogus pointer in malloc_dump", p);
		free_bytes(pool, r, p);
		pool->delayed_chunks[i] = NULL;
	}
	/* XXX leak when run multiple times */
	RB_INIT(&leakhead);
	malloc_dump1(fd, pool);
	errno = saved_errno;
}

static void
malloc_exit(void)
{
	static const char q[] = "malloc() warning: Couldn't dump stats\n";
	int save_errno = errno, fd;

	fd = open("malloc.out", O_RDWR|O_APPEND);
	if (fd != -1) {
		malloc_dump(fd);
		close(fd);
	} else
		write(STDERR_FILENO, q, sizeof(q) - 1);
	errno = save_errno;
}

#endif /* MALLOC_STATS */