1 /* $OpenBSD: malloc.c,v 1.214 2017/02/02 10:35:34 otto Exp $ */ 2 /* 3 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net> 4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org> 5 * Copyright (c) 2008 Damien Miller <djm@openbsd.org> 6 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21 /* 22 * If we meet some day, and you think this stuff is worth it, you 23 * can buy me a beer in return. Poul-Henning Kamp 24 */ 25 26 /* #define MALLOC_STATS */ 27 28 #include <sys/types.h> 29 #include <sys/param.h> /* PAGE_SHIFT ALIGN */ 30 #include <sys/queue.h> 31 #include <sys/mman.h> 32 #include <sys/uio.h> 33 #include <errno.h> 34 #include <stdarg.h> 35 #include <stdint.h> 36 #include <stdlib.h> 37 #include <string.h> 38 #include <stdio.h> 39 #include <unistd.h> 40 41 #ifdef MALLOC_STATS 42 #include <sys/tree.h> 43 #include <fcntl.h> 44 #endif 45 46 #include "thread_private.h" 47 #include <tib.h> 48 49 #if defined(__mips64__) 50 #define MALLOC_PAGESHIFT (14U) 51 #else 52 #define MALLOC_PAGESHIFT (PAGE_SHIFT) 53 #endif 54 55 #define MALLOC_MINSHIFT 4 56 #define MALLOC_MAXSHIFT (MALLOC_PAGESHIFT - 1) 57 #define MALLOC_PAGESIZE (1UL << MALLOC_PAGESHIFT) 58 #define MALLOC_MINSIZE (1UL << MALLOC_MINSHIFT) 59 #define MALLOC_PAGEMASK (MALLOC_PAGESIZE - 1) 60 #define MASK_POINTER(p) ((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK)) 61 62 #define MALLOC_MAXCHUNK (1 << MALLOC_MAXSHIFT) 63 #define MALLOC_MAXCACHE 256 64 #define MALLOC_DELAYED_CHUNK_MASK 15 65 #define MALLOC_INITIAL_REGIONS 512 66 #define MALLOC_DEFAULT_CACHE 64 67 #define MALLOC_CHUNK_LISTS 4 68 #define CHUNK_CHECK_LENGTH 32 69 70 /* 71 * We move allocations between half a page and a whole page towards the end, 72 * subject to alignment constraints. This is the extra headroom we allow. 73 * Set to zero to be the most strict. 74 */ 75 #define MALLOC_LEEWAY 0 76 #define MALLOC_MOVE_COND(sz) ((sz) - mopts.malloc_guard < \ 77 MALLOC_PAGESIZE - MALLOC_LEEWAY) 78 #define MALLOC_MOVE(p, sz) (((char *)(p)) + \ 79 ((MALLOC_PAGESIZE - MALLOC_LEEWAY - \ 80 ((sz) - mopts.malloc_guard)) & \ 81 ~(MALLOC_MINSIZE - 1))) 82 83 #define PAGEROUND(x) (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK) 84 85 /* 86 * What to use for Junk. This is the byte value we use to fill with 87 * when the 'J' option is enabled. Use SOME_JUNK right after alloc, 88 * and SOME_FREEJUNK right before free. 
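 *
 * Illustrative sketch only (not from the original comment): with junking
 * enabled, freed chunks are filled with SOME_FREEJUNK, so a use-after-free
 * tends to read that pattern back, e.g.
 *
 *	char *p = malloc(8);
 *	free(p);
 *	p[0] == (char)SOME_FREEJUNK	- likely, though reading freed
 *					  memory is undefined behaviour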
89 */ 90 #define SOME_JUNK 0xdb /* deadbeef */ 91 #define SOME_FREEJUNK 0xdf /* dead, free */ 92 93 #define MMAP(sz) mmap(NULL, (sz), PROT_READ | PROT_WRITE, \ 94 MAP_ANON | MAP_PRIVATE, -1, 0) 95 96 #define MMAPA(a,sz) mmap((a), (sz), PROT_READ | PROT_WRITE, \ 97 MAP_ANON | MAP_PRIVATE, -1, 0) 98 99 #define MQUERY(a, sz) mquery((a), (sz), PROT_READ | PROT_WRITE, \ 100 MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0) 101 102 struct region_info { 103 void *p; /* page; low bits used to mark chunks */ 104 uintptr_t size; /* size for pages, or chunk_info pointer */ 105 #ifdef MALLOC_STATS 106 void *f; /* where allocated from */ 107 #endif 108 }; 109 110 LIST_HEAD(chunk_head, chunk_info); 111 112 struct dir_info { 113 u_int32_t canary1; 114 int active; /* status of malloc */ 115 struct region_info *r; /* region slots */ 116 size_t regions_total; /* number of region slots */ 117 size_t regions_free; /* number of free slots */ 118 /* lists of free chunk info structs */ 119 struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1]; 120 /* lists of chunks with free slots */ 121 struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS]; 122 size_t free_regions_size; /* free pages cached */ 123 /* free pages cache */ 124 struct region_info free_regions[MALLOC_MAXCACHE]; 125 /* delayed free chunk slots */ 126 void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1]; 127 size_t rbytesused; /* random bytes used */ 128 char *func; /* current function */ 129 int mutex; 130 u_char rbytes[32]; /* random bytes */ 131 u_short chunk_start; 132 #ifdef MALLOC_STATS 133 size_t inserts; 134 size_t insert_collisions; 135 size_t finds; 136 size_t find_collisions; 137 size_t deletes; 138 size_t delete_moves; 139 size_t cheap_realloc_tries; 140 size_t cheap_reallocs; 141 size_t malloc_used; /* bytes allocated */ 142 size_t malloc_guarded; /* bytes used for guards */ 143 #define STATS_ADD(x,y) ((x) += (y)) 144 #define STATS_SUB(x,y) ((x) -= (y)) 145 #define STATS_INC(x) ((x)++) 146 #define STATS_ZERO(x) ((x) = 0) 147 #define STATS_SETF(x,y) ((x)->f = (y)) 148 #else 149 #define STATS_ADD(x,y) /* nothing */ 150 #define STATS_SUB(x,y) /* nothing */ 151 #define STATS_INC(x) /* nothing */ 152 #define STATS_ZERO(x) /* nothing */ 153 #define STATS_SETF(x,y) /* nothing */ 154 #endif /* MALLOC_STATS */ 155 u_int32_t canary2; 156 }; 157 #define DIR_INFO_RSZ ((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \ 158 ~MALLOC_PAGEMASK) 159 160 /* 161 * This structure describes a page worth of chunks. 162 * 163 * How many bits per u_short in the bitmap 164 */ 165 #define MALLOC_BITS (NBBY * sizeof(u_short)) 166 struct chunk_info { 167 LIST_ENTRY(chunk_info) entries; 168 void *page; /* pointer to the page */ 169 u_int32_t canary; 170 u_short size; /* size of this page's chunks */ 171 u_short shift; /* how far to shift for this size */ 172 u_short free; /* how many free chunks */ 173 u_short total; /* how many chunks */ 174 u_short offset; /* requested size table offset */ 175 /* which chunks are free */ 176 u_short bits[1]; 177 }; 178 179 struct malloc_readonly { 180 struct dir_info *malloc_pool[_MALLOC_MUTEXES]; /* Main bookkeeping information */ 181 int malloc_mt; /* multi-threaded mode? */ 182 int malloc_freenow; /* Free quickly - disable chunk rnd */ 183 int malloc_freeunmap; /* mprotect free pages PROT_NONE? */ 184 int malloc_junk; /* junk fill? */ 185 int malloc_realloc; /* always realloc? */ 186 int malloc_xmalloc; /* xmalloc behaviour? */ 187 int chunk_canaries; /* use canaries after chunks? 
*/ 188 u_int malloc_cache; /* free pages we cache */ 189 size_t malloc_guard; /* use guard pages after allocations? */ 190 #ifdef MALLOC_STATS 191 int malloc_stats; /* dump statistics at end */ 192 #endif 193 u_int32_t malloc_canary; /* Matched against ones in malloc_pool */ 194 }; 195 196 /* This object is mapped PROT_READ after initialisation to prevent tampering */ 197 static union { 198 struct malloc_readonly mopts; 199 u_char _pad[MALLOC_PAGESIZE]; 200 } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE))); 201 #define mopts malloc_readonly.mopts 202 203 char *malloc_options; /* compile-time options */ 204 205 static u_char getrbyte(struct dir_info *d); 206 static __dead void wrterror(struct dir_info *d, char *msg, ...) 207 __attribute__((__format__ (printf, 2, 3))); 208 static void fill_canary(char *ptr, size_t sz, size_t allocated); 209 210 #ifdef MALLOC_STATS 211 void malloc_dump(int, int, struct dir_info *); 212 PROTO_NORMAL(malloc_dump); 213 static void malloc_exit(void); 214 #define CALLER __builtin_return_address(0) 215 #else 216 #define CALLER NULL 217 #endif 218 219 /* low bits of r->p determine size: 0 means >= page size and r->size holding 220 * real size, otherwise low bits are a shift count, or 1 for malloc(0) 221 */ 222 #define REALSIZE(sz, r) \ 223 (sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK, \ 224 (sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1)))) 225 226 static inline void 227 _MALLOC_LEAVE(struct dir_info *d) 228 { 229 if (mopts.malloc_mt) { 230 d->active--; 231 _MALLOC_UNLOCK(d->mutex); 232 } 233 } 234 235 static inline void 236 _MALLOC_ENTER(struct dir_info *d) 237 { 238 if (mopts.malloc_mt) { 239 _MALLOC_LOCK(d->mutex); 240 d->active++; 241 } 242 } 243 244 static inline size_t 245 hash(void *p) 246 { 247 size_t sum; 248 uintptr_t u; 249 250 u = (uintptr_t)p >> MALLOC_PAGESHIFT; 251 sum = u; 252 sum = (sum << 7) - sum + (u >> 16); 253 #ifdef __LP64__ 254 sum = (sum << 7) - sum + (u >> 32); 255 sum = (sum << 7) - sum + (u >> 48); 256 #endif 257 return sum; 258 } 259 260 static inline 261 struct dir_info *getpool(void) 262 { 263 if (!mopts.malloc_mt) 264 return mopts.malloc_pool[0]; 265 else 266 return mopts.malloc_pool[TIB_GET()->tib_tid & 267 (_MALLOC_MUTEXES - 1)]; 268 } 269 270 static __dead void 271 wrterror(struct dir_info *d, char *msg, ...) 272 { 273 struct iovec iov[3]; 274 char pidbuf[80]; 275 char buf[80]; 276 int saved_errno = errno; 277 va_list ap; 278 279 iov[0].iov_base = pidbuf; 280 snprintf(pidbuf, sizeof(pidbuf), "%s(%d) in %s(): ", __progname, 281 getpid(), d->func ? 
d->func : "unknown"); 282 iov[0].iov_len = strlen(pidbuf); 283 iov[1].iov_base = buf; 284 va_start(ap, msg); 285 vsnprintf(buf, sizeof(buf), msg, ap); 286 va_end(ap); 287 iov[1].iov_len = strlen(buf); 288 iov[2].iov_base = "\n"; 289 iov[2].iov_len = 1; 290 writev(STDERR_FILENO, iov, 3); 291 292 #ifdef MALLOC_STATS 293 if (mopts.malloc_stats) { 294 int i; 295 296 for (i = 0; i < _MALLOC_MUTEXES; i++) 297 malloc_dump(STDERR_FILENO, i, mopts.malloc_pool[i]); 298 } 299 #endif /* MALLOC_STATS */ 300 301 errno = saved_errno; 302 303 abort(); 304 } 305 306 static void 307 rbytes_init(struct dir_info *d) 308 { 309 arc4random_buf(d->rbytes, sizeof(d->rbytes)); 310 /* add 1 to account for using d->rbytes[0] */ 311 d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2); 312 } 313 314 static inline u_char 315 getrbyte(struct dir_info *d) 316 { 317 u_char x; 318 319 if (d->rbytesused >= sizeof(d->rbytes)) 320 rbytes_init(d); 321 x = d->rbytes[d->rbytesused++]; 322 return x; 323 } 324 325 /* 326 * Cache maintenance. We keep at most malloc_cache pages cached. 327 * If the cache is becoming full, unmap pages in the cache for real, 328 * and then add the region to the cache 329 * Opposed to the regular region data structure, the sizes in the 330 * cache are in MALLOC_PAGESIZE units. 331 */ 332 static void 333 unmap(struct dir_info *d, void *p, size_t sz) 334 { 335 size_t psz = sz >> MALLOC_PAGESHIFT; 336 size_t rsz, tounmap; 337 struct region_info *r; 338 u_int i, offset; 339 340 if (sz != PAGEROUND(sz)) 341 wrterror(d, "munmap round"); 342 343 if (psz > mopts.malloc_cache) { 344 i = munmap(p, sz); 345 if (i) 346 wrterror(d, "munmap %p", p); 347 STATS_SUB(d->malloc_used, sz); 348 return; 349 } 350 tounmap = 0; 351 rsz = mopts.malloc_cache - d->free_regions_size; 352 if (psz > rsz) 353 tounmap = psz - rsz; 354 offset = getrbyte(d); 355 for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) { 356 r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)]; 357 if (r->p != NULL) { 358 rsz = r->size << MALLOC_PAGESHIFT; 359 if (munmap(r->p, rsz)) 360 wrterror(d, "munmap %p", r->p); 361 r->p = NULL; 362 if (tounmap > r->size) 363 tounmap -= r->size; 364 else 365 tounmap = 0; 366 d->free_regions_size -= r->size; 367 r->size = 0; 368 STATS_SUB(d->malloc_used, rsz); 369 } 370 } 371 if (tounmap > 0) 372 wrterror(d, "malloc cache underflow"); 373 for (i = 0; i < mopts.malloc_cache; i++) { 374 r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)]; 375 if (r->p == NULL) { 376 if (mopts.malloc_junk && !mopts.malloc_freeunmap) { 377 size_t amt = mopts.malloc_junk == 1 ? 
378 MALLOC_MAXCHUNK : sz; 379 memset(p, SOME_FREEJUNK, amt); 380 } 381 if (mopts.malloc_freeunmap) 382 mprotect(p, sz, PROT_NONE); 383 r->p = p; 384 r->size = psz; 385 d->free_regions_size += psz; 386 break; 387 } 388 } 389 if (i == mopts.malloc_cache) 390 wrterror(d, "malloc free slot lost"); 391 if (d->free_regions_size > mopts.malloc_cache) 392 wrterror(d, "malloc cache overflow"); 393 } 394 395 static void 396 zapcacheregion(struct dir_info *d, void *p, size_t len) 397 { 398 u_int i; 399 struct region_info *r; 400 size_t rsz; 401 402 for (i = 0; i < mopts.malloc_cache; i++) { 403 r = &d->free_regions[i]; 404 if (r->p >= p && r->p <= (void *)((char *)p + len)) { 405 rsz = r->size << MALLOC_PAGESHIFT; 406 if (munmap(r->p, rsz)) 407 wrterror(d, "munmap %p", r->p); 408 r->p = NULL; 409 d->free_regions_size -= r->size; 410 r->size = 0; 411 STATS_SUB(d->malloc_used, rsz); 412 } 413 } 414 } 415 416 static void * 417 map(struct dir_info *d, void *hint, size_t sz, int zero_fill) 418 { 419 size_t psz = sz >> MALLOC_PAGESHIFT; 420 struct region_info *r, *big = NULL; 421 u_int i, offset; 422 void *p; 423 424 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || 425 d->canary1 != ~d->canary2) 426 wrterror(d, "internal struct corrupt"); 427 if (sz != PAGEROUND(sz)) 428 wrterror(d, "map round"); 429 430 if (!hint && psz > d->free_regions_size) { 431 _MALLOC_LEAVE(d); 432 p = MMAP(sz); 433 _MALLOC_ENTER(d); 434 if (p != MAP_FAILED) 435 STATS_ADD(d->malloc_used, sz); 436 /* zero fill not needed */ 437 return p; 438 } 439 offset = getrbyte(d); 440 for (i = 0; i < mopts.malloc_cache; i++) { 441 r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)]; 442 if (r->p != NULL) { 443 if (hint && r->p != hint) 444 continue; 445 if (r->size == psz) { 446 p = r->p; 447 r->p = NULL; 448 r->size = 0; 449 d->free_regions_size -= psz; 450 if (mopts.malloc_freeunmap) 451 mprotect(p, sz, PROT_READ | PROT_WRITE); 452 if (zero_fill) 453 memset(p, 0, sz); 454 else if (mopts.malloc_junk == 2 && 455 mopts.malloc_freeunmap) 456 memset(p, SOME_FREEJUNK, sz); 457 return p; 458 } else if (r->size > psz) 459 big = r; 460 } 461 } 462 if (big != NULL) { 463 r = big; 464 p = r->p; 465 r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT); 466 if (mopts.malloc_freeunmap) 467 mprotect(p, sz, PROT_READ | PROT_WRITE); 468 r->size -= psz; 469 d->free_regions_size -= psz; 470 if (zero_fill) 471 memset(p, 0, sz); 472 else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap) 473 memset(p, SOME_FREEJUNK, sz); 474 return p; 475 } 476 if (hint) 477 return MAP_FAILED; 478 if (d->free_regions_size > mopts.malloc_cache) 479 wrterror(d, "malloc cache"); 480 _MALLOC_LEAVE(d); 481 p = MMAP(sz); 482 _MALLOC_ENTER(d); 483 if (p != MAP_FAILED) 484 STATS_ADD(d->malloc_used, sz); 485 /* zero fill not needed */ 486 return p; 487 } 488 489 static void 490 omalloc_parseopt(char opt) 491 { 492 switch (opt) { 493 case '>': 494 mopts.malloc_cache <<= 1; 495 if (mopts.malloc_cache > MALLOC_MAXCACHE) 496 mopts.malloc_cache = MALLOC_MAXCACHE; 497 break; 498 case '<': 499 mopts.malloc_cache >>= 1; 500 break; 501 case 'c': 502 mopts.chunk_canaries = 0; 503 break; 504 case 'C': 505 mopts.chunk_canaries = 1; 506 break; 507 #ifdef MALLOC_STATS 508 case 'd': 509 mopts.malloc_stats = 0; 510 break; 511 case 'D': 512 mopts.malloc_stats = 1; 513 break; 514 #endif /* MALLOC_STATS */ 515 case 'f': 516 mopts.malloc_freenow = 0; 517 mopts.malloc_freeunmap = 0; 518 break; 519 case 'F': 520 mopts.malloc_freenow = 1; 521 mopts.malloc_freeunmap = 1; 522 break; 523 
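	/*
	 * Illustrative note, not part of the original switch: each lowercase
	 * letter clears an option and its uppercase counterpart sets it, so
	 * a run such as
	 *
	 *	$ MALLOC_OPTIONS=FGJ ./prog
	 *
	 * enables freenow/freeunmap, guard pages and full junking, while
	 * 'S' (handled in omalloc_init() below) turns on C, G and J and
	 * disables the page cache in one go.
	 */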
case 'g': 524 mopts.malloc_guard = 0; 525 break; 526 case 'G': 527 mopts.malloc_guard = MALLOC_PAGESIZE; 528 break; 529 case 'j': 530 if (mopts.malloc_junk > 0) 531 mopts.malloc_junk--; 532 break; 533 case 'J': 534 if (mopts.malloc_junk < 2) 535 mopts.malloc_junk++; 536 break; 537 case 'r': 538 mopts.malloc_realloc = 0; 539 break; 540 case 'R': 541 mopts.malloc_realloc = 1; 542 break; 543 case 'u': 544 mopts.malloc_freeunmap = 0; 545 break; 546 case 'U': 547 mopts.malloc_freeunmap = 1; 548 break; 549 case 'x': 550 mopts.malloc_xmalloc = 0; 551 break; 552 case 'X': 553 mopts.malloc_xmalloc = 1; 554 break; 555 default: { 556 static const char q[] = "malloc() warning: " 557 "unknown char in MALLOC_OPTIONS\n"; 558 write(STDERR_FILENO, q, sizeof(q) - 1); 559 break; 560 } 561 } 562 } 563 564 static void 565 omalloc_init(void) 566 { 567 char *p, *q, b[64]; 568 int i, j; 569 570 /* 571 * Default options 572 */ 573 mopts.malloc_junk = 1; 574 mopts.malloc_cache = MALLOC_DEFAULT_CACHE; 575 576 for (i = 0; i < 3; i++) { 577 switch (i) { 578 case 0: 579 j = readlink("/etc/malloc.conf", b, sizeof b - 1); 580 if (j <= 0) 581 continue; 582 b[j] = '\0'; 583 p = b; 584 break; 585 case 1: 586 if (issetugid() == 0) 587 p = getenv("MALLOC_OPTIONS"); 588 else 589 continue; 590 break; 591 case 2: 592 p = malloc_options; 593 break; 594 default: 595 p = NULL; 596 } 597 598 for (; p != NULL && *p != '\0'; p++) { 599 switch (*p) { 600 case 'S': 601 for (q = "CGJ"; *q != '\0'; q++) 602 omalloc_parseopt(*q); 603 mopts.malloc_cache = 0; 604 break; 605 case 's': 606 for (q = "cgj"; *q != '\0'; q++) 607 omalloc_parseopt(*q); 608 mopts.malloc_cache = MALLOC_DEFAULT_CACHE; 609 break; 610 default: 611 omalloc_parseopt(*p); 612 break; 613 } 614 } 615 } 616 617 #ifdef MALLOC_STATS 618 if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) { 619 static const char q[] = "malloc() warning: atexit(2) failed." 620 " Will not be able to dump stats on exit\n"; 621 write(STDERR_FILENO, q, sizeof(q) - 1); 622 } 623 #endif /* MALLOC_STATS */ 624 625 while ((mopts.malloc_canary = arc4random()) == 0) 626 ; 627 } 628 629 /* 630 * Initialize a dir_info, which should have been cleared by caller 631 */ 632 static void 633 omalloc_poolinit(struct dir_info **dp) 634 { 635 void *p; 636 size_t d_avail, regioninfo_size; 637 struct dir_info *d; 638 int i, j; 639 640 /* 641 * Allocate dir_info with a guard page on either side. 
Also 642 * randomise offset inside the page at which the dir_info 643 * lies (subject to alignment by 1 << MALLOC_MINSHIFT) 644 */ 645 if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED) 646 wrterror(NULL, "malloc init mmap failed"); 647 mprotect(p, MALLOC_PAGESIZE, PROT_NONE); 648 mprotect((char *)p + MALLOC_PAGESIZE + DIR_INFO_RSZ, 649 MALLOC_PAGESIZE, PROT_NONE); 650 d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT; 651 d = (struct dir_info *)((char *)p + MALLOC_PAGESIZE + 652 (arc4random_uniform(d_avail) << MALLOC_MINSHIFT)); 653 654 rbytes_init(d); 655 d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS; 656 regioninfo_size = d->regions_total * sizeof(struct region_info); 657 d->r = MMAP(regioninfo_size); 658 if (d->r == MAP_FAILED) { 659 d->regions_total = 0; 660 wrterror(NULL, "malloc init mmap failed"); 661 } 662 for (i = 0; i <= MALLOC_MAXSHIFT; i++) { 663 LIST_INIT(&d->chunk_info_list[i]); 664 for (j = 0; j < MALLOC_CHUNK_LISTS; j++) 665 LIST_INIT(&d->chunk_dir[i][j]); 666 } 667 STATS_ADD(d->malloc_used, regioninfo_size); 668 d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d; 669 d->canary2 = ~d->canary1; 670 671 *dp = d; 672 } 673 674 static int 675 omalloc_grow(struct dir_info *d) 676 { 677 size_t newtotal; 678 size_t newsize; 679 size_t mask; 680 size_t i; 681 struct region_info *p; 682 683 if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2 ) 684 return 1; 685 686 newtotal = d->regions_total * 2; 687 newsize = newtotal * sizeof(struct region_info); 688 mask = newtotal - 1; 689 690 p = MMAP(newsize); 691 if (p == MAP_FAILED) 692 return 1; 693 694 STATS_ADD(d->malloc_used, newsize); 695 STATS_ZERO(d->inserts); 696 STATS_ZERO(d->insert_collisions); 697 for (i = 0; i < d->regions_total; i++) { 698 void *q = d->r[i].p; 699 if (q != NULL) { 700 size_t index = hash(q) & mask; 701 STATS_INC(d->inserts); 702 while (p[index].p != NULL) { 703 index = (index - 1) & mask; 704 STATS_INC(d->insert_collisions); 705 } 706 p[index] = d->r[i]; 707 } 708 } 709 /* avoid pages containing meta info to end up in cache */ 710 if (munmap(d->r, d->regions_total * sizeof(struct region_info))) 711 wrterror(d, "munmap %p", (void *)d->r); 712 else 713 STATS_SUB(d->malloc_used, 714 d->regions_total * sizeof(struct region_info)); 715 d->regions_free = d->regions_free + d->regions_total; 716 d->regions_total = newtotal; 717 d->r = p; 718 return 0; 719 } 720 721 static struct chunk_info * 722 alloc_chunk_info(struct dir_info *d, int bits) 723 { 724 struct chunk_info *p; 725 size_t size, count; 726 727 if (bits == 0) 728 count = MALLOC_PAGESIZE / MALLOC_MINSIZE; 729 else 730 count = MALLOC_PAGESIZE >> bits; 731 732 size = howmany(count, MALLOC_BITS); 733 size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short); 734 if (mopts.chunk_canaries) 735 size += count * sizeof(u_short); 736 size = ALIGN(size); 737 738 if (LIST_EMPTY(&d->chunk_info_list[bits])) { 739 char *q; 740 int i; 741 742 q = MMAP(MALLOC_PAGESIZE); 743 if (q == MAP_FAILED) 744 return NULL; 745 STATS_ADD(d->malloc_used, MALLOC_PAGESIZE); 746 count = MALLOC_PAGESIZE / size; 747 for (i = 0; i < count; i++, q += size) 748 LIST_INSERT_HEAD(&d->chunk_info_list[bits], 749 (struct chunk_info *)q, entries); 750 } 751 p = LIST_FIRST(&d->chunk_info_list[bits]); 752 LIST_REMOVE(p, entries); 753 memset(p, 0, size); 754 p->canary = d->canary1; 755 return p; 756 } 757 758 759 /* 760 * The hashtable uses the assumption that p is never NULL. 
This holds since 761 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ. 762 */ 763 static int 764 insert(struct dir_info *d, void *p, size_t sz, void *f) 765 { 766 size_t index; 767 size_t mask; 768 void *q; 769 770 if (d->regions_free * 4 < d->regions_total) { 771 if (omalloc_grow(d)) 772 return 1; 773 } 774 mask = d->regions_total - 1; 775 index = hash(p) & mask; 776 q = d->r[index].p; 777 STATS_INC(d->inserts); 778 while (q != NULL) { 779 index = (index - 1) & mask; 780 q = d->r[index].p; 781 STATS_INC(d->insert_collisions); 782 } 783 d->r[index].p = p; 784 d->r[index].size = sz; 785 #ifdef MALLOC_STATS 786 d->r[index].f = f; 787 #endif 788 d->regions_free--; 789 return 0; 790 } 791 792 static struct region_info * 793 find(struct dir_info *d, void *p) 794 { 795 size_t index; 796 size_t mask = d->regions_total - 1; 797 void *q, *r; 798 799 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || 800 d->canary1 != ~d->canary2) 801 wrterror(d, "internal struct corrupt"); 802 p = MASK_POINTER(p); 803 index = hash(p) & mask; 804 r = d->r[index].p; 805 q = MASK_POINTER(r); 806 STATS_INC(d->finds); 807 while (q != p && r != NULL) { 808 index = (index - 1) & mask; 809 r = d->r[index].p; 810 q = MASK_POINTER(r); 811 STATS_INC(d->find_collisions); 812 } 813 return (q == p && r != NULL) ? &d->r[index] : NULL; 814 } 815 816 static void 817 delete(struct dir_info *d, struct region_info *ri) 818 { 819 /* algorithm R, Knuth Vol III section 6.4 */ 820 size_t mask = d->regions_total - 1; 821 size_t i, j, r; 822 823 if (d->regions_total & (d->regions_total - 1)) 824 wrterror(d, "regions_total not 2^x"); 825 d->regions_free++; 826 STATS_INC(d->deletes); 827 828 i = ri - d->r; 829 for (;;) { 830 d->r[i].p = NULL; 831 d->r[i].size = 0; 832 j = i; 833 for (;;) { 834 i = (i - 1) & mask; 835 if (d->r[i].p == NULL) 836 return; 837 r = hash(d->r[i].p) & mask; 838 if ((i <= r && r < j) || (r < j && j < i) || 839 (j < i && i <= r)) 840 continue; 841 d->r[j] = d->r[i]; 842 STATS_INC(d->delete_moves); 843 break; 844 } 845 846 } 847 } 848 849 /* 850 * Allocate a page of chunks 851 */ 852 static struct chunk_info * 853 omalloc_make_chunks(struct dir_info *d, int bits, int listnum) 854 { 855 struct chunk_info *bp; 856 void *pp; 857 int i, k; 858 859 /* Allocate a new bucket */ 860 pp = map(d, NULL, MALLOC_PAGESIZE, 0); 861 if (pp == MAP_FAILED) 862 return NULL; 863 864 bp = alloc_chunk_info(d, bits); 865 if (bp == NULL) { 866 unmap(d, pp, MALLOC_PAGESIZE); 867 return NULL; 868 } 869 870 /* memory protect the page allocated in the malloc(0) case */ 871 if (bits == 0) { 872 bp->size = 0; 873 bp->shift = 1; 874 i = MALLOC_MINSIZE - 1; 875 while (i >>= 1) 876 bp->shift++; 877 bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift; 878 bp->page = pp; 879 880 k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE); 881 if (k < 0) { 882 unmap(d, pp, MALLOC_PAGESIZE); 883 LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries); 884 return NULL; 885 } 886 } else { 887 bp->size = 1U << bits; 888 bp->shift = bits; 889 bp->total = bp->free = MALLOC_PAGESIZE >> bits; 890 bp->offset = howmany(bp->total, MALLOC_BITS); 891 bp->page = pp; 892 } 893 894 /* set all valid bits in the bitmap */ 895 k = bp->total; 896 i = 0; 897 898 /* Do a bunch at a time */ 899 for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS) 900 bp->bits[i / MALLOC_BITS] = (u_short)~0U; 901 902 for (; i < k; i++) 903 bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS); 904 905 LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries); 906 907 bits++; 908 if 
((uintptr_t)pp & bits) 909 wrterror(d, "pp & bits %p", pp); 910 911 insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL); 912 return bp; 913 } 914 915 static int 916 find_chunksize(size_t size) 917 { 918 int i, j; 919 920 /* Don't bother with anything less than this */ 921 /* unless we have a malloc(0) requests */ 922 if (size != 0 && size < MALLOC_MINSIZE) 923 size = MALLOC_MINSIZE; 924 925 /* Find the right bucket */ 926 if (size == 0) 927 j = 0; 928 else { 929 j = MALLOC_MINSHIFT; 930 i = (size - 1) >> (MALLOC_MINSHIFT - 1); 931 while (i >>= 1) 932 j++; 933 } 934 return j; 935 } 936 937 /* 938 * Allocate a chunk 939 */ 940 static void * 941 malloc_bytes(struct dir_info *d, size_t size, void *f) 942 { 943 int i, j, listnum; 944 size_t k; 945 u_short u, *lp; 946 struct chunk_info *bp; 947 948 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || 949 d->canary1 != ~d->canary2) 950 wrterror(d, "internal struct corrupt"); 951 952 j = find_chunksize(size); 953 954 listnum = getrbyte(d) % MALLOC_CHUNK_LISTS; 955 /* If it's empty, make a page more of that size chunks */ 956 if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) { 957 bp = omalloc_make_chunks(d, j, listnum); 958 if (bp == NULL) 959 return NULL; 960 } 961 962 if (bp->canary != d->canary1) 963 wrterror(d, "chunk info corrupted"); 964 965 i = d->chunk_start; 966 if (bp->free > 1) 967 i += getrbyte(d); 968 if (i >= bp->total) 969 i &= bp->total - 1; 970 for (;;) { 971 for (;;) { 972 lp = &bp->bits[i / MALLOC_BITS]; 973 if (!*lp) { 974 i += MALLOC_BITS; 975 i &= ~(MALLOC_BITS - 1); 976 if (i >= bp->total) 977 i = 0; 978 } else 979 break; 980 } 981 k = i % MALLOC_BITS; 982 u = 1 << k; 983 if (*lp & u) 984 break; 985 if (++i >= bp->total) 986 i = 0; 987 } 988 d->chunk_start += i + 1; 989 #ifdef MALLOC_STATS 990 if (i == 0) { 991 struct region_info *r = find(d, bp->page); 992 r->f = f; 993 } 994 #endif 995 996 *lp ^= u; 997 998 /* If there are no more free, remove from free-list */ 999 if (!--bp->free) 1000 LIST_REMOVE(bp, entries); 1001 1002 /* Adjust to the real offset of that chunk */ 1003 k += (lp - bp->bits) * MALLOC_BITS; 1004 1005 if (mopts.chunk_canaries) 1006 bp->bits[bp->offset + k] = size; 1007 1008 k <<= bp->shift; 1009 1010 if (bp->size > 0) { 1011 if (mopts.malloc_junk == 2) 1012 memset((char *)bp->page + k, SOME_JUNK, bp->size); 1013 else if (mopts.chunk_canaries) 1014 fill_canary((char *)bp->page + k, size, bp->size); 1015 } 1016 return ((char *)bp->page + k); 1017 } 1018 1019 static void 1020 fill_canary(char *ptr, size_t sz, size_t allocated) 1021 { 1022 size_t check_sz = allocated - sz; 1023 1024 if (check_sz > CHUNK_CHECK_LENGTH) 1025 check_sz = CHUNK_CHECK_LENGTH; 1026 memset(ptr + sz, SOME_JUNK, check_sz); 1027 } 1028 1029 static void 1030 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated) 1031 { 1032 size_t check_sz = allocated - sz; 1033 u_char *p, *q; 1034 1035 if (check_sz > CHUNK_CHECK_LENGTH) 1036 check_sz = CHUNK_CHECK_LENGTH; 1037 p = ptr + sz; 1038 q = p + check_sz; 1039 1040 while (p < q) { 1041 if (*p++ != SOME_JUNK) { 1042 wrterror(d, "chunk canary corrupted %p %#tx@%#zx", 1043 ptr, p - ptr - 1, sz); 1044 } 1045 } 1046 } 1047 1048 static uint32_t 1049 find_chunknum(struct dir_info *d, struct region_info *r, void *ptr, int check) 1050 { 1051 struct chunk_info *info; 1052 uint32_t chunknum; 1053 1054 info = (struct chunk_info *)r->size; 1055 if (info->canary != d->canary1) 1056 wrterror(d, "chunk info corrupted"); 1057 1058 /* Find the chunk number on 
the page */ 1059 chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift; 1060 if (check && info->size > 0) { 1061 validate_canary(d, ptr, info->bits[info->offset + chunknum], 1062 info->size); 1063 } 1064 1065 if ((uintptr_t)ptr & ((1U << (info->shift)) - 1)) 1066 wrterror(d, "modified chunk-pointer %p", ptr); 1067 if (info->bits[chunknum / MALLOC_BITS] & 1068 (1U << (chunknum % MALLOC_BITS))) 1069 wrterror(d, "chunk is already free %p", ptr); 1070 return chunknum; 1071 } 1072 1073 /* 1074 * Free a chunk, and possibly the page it's on, if the page becomes empty. 1075 */ 1076 static void 1077 free_bytes(struct dir_info *d, struct region_info *r, void *ptr) 1078 { 1079 struct chunk_head *mp; 1080 struct chunk_info *info; 1081 uint32_t chunknum; 1082 int listnum; 1083 1084 info = (struct chunk_info *)r->size; 1085 chunknum = find_chunknum(d, r, ptr, 0); 1086 1087 info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS); 1088 info->free++; 1089 1090 if (info->free == 1) { 1091 /* Page became non-full */ 1092 listnum = getrbyte(d) % MALLOC_CHUNK_LISTS; 1093 if (info->size != 0) 1094 mp = &d->chunk_dir[info->shift][listnum]; 1095 else 1096 mp = &d->chunk_dir[0][listnum]; 1097 1098 LIST_INSERT_HEAD(mp, info, entries); 1099 return; 1100 } 1101 1102 if (info->free != info->total) 1103 return; 1104 1105 LIST_REMOVE(info, entries); 1106 1107 if (info->size == 0 && !mopts.malloc_freeunmap) 1108 mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE); 1109 unmap(d, info->page, MALLOC_PAGESIZE); 1110 1111 delete(d, r); 1112 if (info->size != 0) 1113 mp = &d->chunk_info_list[info->shift]; 1114 else 1115 mp = &d->chunk_info_list[0]; 1116 LIST_INSERT_HEAD(mp, info, entries); 1117 } 1118 1119 1120 1121 static void * 1122 omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f) 1123 { 1124 void *p; 1125 size_t psz; 1126 1127 if (sz > MALLOC_MAXCHUNK) { 1128 if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { 1129 errno = ENOMEM; 1130 return NULL; 1131 } 1132 sz += mopts.malloc_guard; 1133 psz = PAGEROUND(sz); 1134 p = map(pool, NULL, psz, zero_fill); 1135 if (p == MAP_FAILED) { 1136 errno = ENOMEM; 1137 return NULL; 1138 } 1139 if (insert(pool, p, sz, f)) { 1140 unmap(pool, p, psz); 1141 errno = ENOMEM; 1142 return NULL; 1143 } 1144 if (mopts.malloc_guard) { 1145 if (mprotect((char *)p + psz - mopts.malloc_guard, 1146 mopts.malloc_guard, PROT_NONE)) 1147 wrterror(pool, "mprotect"); 1148 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard); 1149 } 1150 1151 if (MALLOC_MOVE_COND(sz)) { 1152 /* fill whole allocation */ 1153 if (mopts.malloc_junk == 2) 1154 memset(p, SOME_JUNK, psz - mopts.malloc_guard); 1155 /* shift towards the end */ 1156 p = MALLOC_MOVE(p, sz); 1157 /* fill zeros if needed and overwritten above */ 1158 if (zero_fill && mopts.malloc_junk == 2) 1159 memset(p, 0, sz - mopts.malloc_guard); 1160 } else { 1161 if (mopts.malloc_junk == 2) { 1162 if (zero_fill) 1163 memset((char *)p + sz - mopts.malloc_guard, 1164 SOME_JUNK, psz - sz); 1165 else 1166 memset(p, SOME_JUNK, 1167 psz - mopts.malloc_guard); 1168 } 1169 else if (mopts.chunk_canaries) 1170 fill_canary(p, sz - mopts.malloc_guard, 1171 psz - mopts.malloc_guard); 1172 } 1173 1174 } else { 1175 /* takes care of SOME_JUNK */ 1176 p = malloc_bytes(pool, sz, f); 1177 if (zero_fill && p != NULL && sz > 0) 1178 memset(p, 0, sz); 1179 } 1180 1181 return p; 1182 } 1183 1184 /* 1185 * Common function for handling recursion. 
Only 1186 * print the error message once, to avoid making the problem 1187 * potentially worse. 1188 */ 1189 static void 1190 malloc_recurse(struct dir_info *d) 1191 { 1192 static int noprint; 1193 1194 if (noprint == 0) { 1195 noprint = 1; 1196 wrterror(d, "recursive call"); 1197 } 1198 d->active--; 1199 _MALLOC_UNLOCK(d->mutex); 1200 errno = EDEADLK; 1201 } 1202 1203 void 1204 _malloc_init(int from_rthreads) 1205 { 1206 int i, max; 1207 struct dir_info *d; 1208 1209 _MALLOC_LOCK(0); 1210 if (!from_rthreads && mopts.malloc_pool[0]) { 1211 _MALLOC_UNLOCK(0); 1212 return; 1213 } 1214 if (!mopts.malloc_canary) 1215 omalloc_init(); 1216 1217 max = from_rthreads ? _MALLOC_MUTEXES : 1; 1218 if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) 1219 mprotect(&malloc_readonly, sizeof(malloc_readonly), 1220 PROT_READ | PROT_WRITE); 1221 for (i = 0; i < max; i++) { 1222 if (mopts.malloc_pool[i]) 1223 continue; 1224 omalloc_poolinit(&d); 1225 d->mutex = i; 1226 mopts.malloc_pool[i] = d; 1227 } 1228 1229 if (from_rthreads) 1230 mopts.malloc_mt = 1; 1231 1232 /* 1233 * Options have been set and will never be reset. 1234 * Prevent further tampering with them. 1235 */ 1236 if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) 1237 mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ); 1238 _MALLOC_UNLOCK(0); 1239 } 1240 DEF_STRONG(_malloc_init); 1241 1242 void * 1243 malloc(size_t size) 1244 { 1245 void *r; 1246 struct dir_info *d; 1247 int saved_errno = errno; 1248 1249 d = getpool(); 1250 if (d == NULL) { 1251 _malloc_init(0); 1252 d = getpool(); 1253 } 1254 _MALLOC_LOCK(d->mutex); 1255 d->func = "malloc"; 1256 1257 if (d->active++) { 1258 malloc_recurse(d); 1259 return NULL; 1260 } 1261 r = omalloc(d, size, 0, CALLER); 1262 d->active--; 1263 _MALLOC_UNLOCK(d->mutex); 1264 if (r == NULL && mopts.malloc_xmalloc) 1265 wrterror(d, "out of memory"); 1266 if (r != NULL) 1267 errno = saved_errno; 1268 return r; 1269 } 1270 /*DEF_STRONG(malloc);*/ 1271 1272 static void 1273 validate_junk(struct dir_info *pool, void *p) 1274 { 1275 struct region_info *r; 1276 size_t byte, sz; 1277 1278 if (p == NULL) 1279 return; 1280 r = find(pool, p); 1281 if (r == NULL) 1282 wrterror(pool, "bogus pointer in validate_junk %p", p); 1283 REALSIZE(sz, r); 1284 if (sz > CHUNK_CHECK_LENGTH) 1285 sz = CHUNK_CHECK_LENGTH; 1286 for (byte = 0; byte < sz; byte++) { 1287 if (((unsigned char *)p)[byte] != SOME_FREEJUNK) 1288 wrterror(pool, "use after free %p", p); 1289 } 1290 } 1291 1292 static void 1293 ofree(struct dir_info *argpool, void *p) 1294 { 1295 struct dir_info *pool; 1296 struct region_info *r; 1297 size_t sz; 1298 int i; 1299 1300 pool = argpool; 1301 r = find(pool, p); 1302 if (r == NULL) { 1303 if (mopts.malloc_mt) { 1304 for (i = 0; i < _MALLOC_MUTEXES; i++) { 1305 if (i == argpool->mutex) 1306 continue; 1307 pool->active--; 1308 _MALLOC_UNLOCK(pool->mutex); 1309 pool = mopts.malloc_pool[i]; 1310 _MALLOC_LOCK(pool->mutex); 1311 pool->active++; 1312 r = find(pool, p); 1313 if (r != NULL) 1314 break; 1315 } 1316 } 1317 if (r == NULL) 1318 wrterror(pool, "bogus pointer (double free?) 
%p", p); 1319 } 1320 1321 REALSIZE(sz, r); 1322 if (sz > MALLOC_MAXCHUNK) { 1323 if (!MALLOC_MOVE_COND(sz)) { 1324 if (r->p != p) 1325 wrterror(pool, "bogus pointer %p", p); 1326 if (mopts.chunk_canaries) 1327 validate_canary(pool, p, 1328 sz - mopts.malloc_guard, 1329 PAGEROUND(sz - mopts.malloc_guard)); 1330 } else { 1331 /* shifted towards the end */ 1332 if (p != MALLOC_MOVE(r->p, sz)) 1333 wrterror(pool, "bogus moved pointer %p", p); 1334 p = r->p; 1335 } 1336 if (mopts.malloc_guard) { 1337 if (sz < mopts.malloc_guard) 1338 wrterror(pool, "guard size"); 1339 if (!mopts.malloc_freeunmap) { 1340 if (mprotect((char *)p + PAGEROUND(sz) - 1341 mopts.malloc_guard, mopts.malloc_guard, 1342 PROT_READ | PROT_WRITE)) 1343 wrterror(pool, "mprotect"); 1344 } 1345 STATS_SUB(pool->malloc_guarded, mopts.malloc_guard); 1346 } 1347 unmap(pool, p, PAGEROUND(sz)); 1348 delete(pool, r); 1349 } else { 1350 void *tmp; 1351 int i; 1352 1353 /* Delayed free or canaries? Extra check */ 1354 if (!mopts.malloc_freenow || mopts.chunk_canaries) 1355 find_chunknum(pool, r, p, mopts.chunk_canaries); 1356 if (!mopts.malloc_freenow) { 1357 if (mopts.malloc_junk && sz > 0) 1358 memset(p, SOME_FREEJUNK, sz); 1359 i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK; 1360 tmp = p; 1361 p = pool->delayed_chunks[i]; 1362 if (tmp == p) 1363 wrterror(pool, "double free %p", tmp); 1364 if (mopts.malloc_junk) 1365 validate_junk(pool, p); 1366 pool->delayed_chunks[i] = tmp; 1367 } else { 1368 if (mopts.malloc_junk && sz > 0) 1369 memset(p, SOME_FREEJUNK, sz); 1370 } 1371 if (p != NULL) { 1372 r = find(pool, p); 1373 if (r == NULL) 1374 wrterror(pool, 1375 "bogus pointer (double free?) %p", p); 1376 free_bytes(pool, r, p); 1377 } 1378 } 1379 1380 if (argpool != pool) { 1381 pool->active--; 1382 _MALLOC_UNLOCK(pool->mutex); 1383 _MALLOC_LOCK(argpool->mutex); 1384 argpool->active++; 1385 } 1386 } 1387 1388 void 1389 free(void *ptr) 1390 { 1391 struct dir_info *d; 1392 int saved_errno = errno; 1393 1394 /* This is legal. */ 1395 if (ptr == NULL) 1396 return; 1397 1398 d = getpool(); 1399 if (d == NULL) 1400 wrterror(d, "free() called before allocation"); 1401 _MALLOC_LOCK(d->mutex); 1402 d->func = "free"; 1403 if (d->active++) { 1404 malloc_recurse(d); 1405 return; 1406 } 1407 ofree(d, ptr); 1408 d->active--; 1409 _MALLOC_UNLOCK(d->mutex); 1410 errno = saved_errno; 1411 } 1412 /*DEF_STRONG(free);*/ 1413 1414 1415 static void * 1416 orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f) 1417 { 1418 struct dir_info *pool; 1419 struct region_info *r; 1420 struct chunk_info *info; 1421 size_t oldsz, goldsz, gnewsz; 1422 void *q, *ret; 1423 int i; 1424 uint32_t chunknum; 1425 1426 pool = argpool; 1427 1428 if (p == NULL) 1429 return omalloc(pool, newsz, 0, f); 1430 1431 r = find(pool, p); 1432 if (r == NULL) { 1433 if (mopts.malloc_mt) { 1434 for (i = 0; i < _MALLOC_MUTEXES; i++) { 1435 if (i == argpool->mutex) 1436 continue; 1437 pool->active--; 1438 _MALLOC_UNLOCK(pool->mutex); 1439 pool = mopts.malloc_pool[i]; 1440 _MALLOC_LOCK(pool->mutex); 1441 pool->active++; 1442 r = find(pool, p); 1443 if (r != NULL) 1444 break; 1445 } 1446 } 1447 if (r == NULL) 1448 wrterror(pool, "bogus pointer (double free?) 
%p", p); 1449 } 1450 if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { 1451 errno = ENOMEM; 1452 ret = NULL; 1453 goto done; 1454 } 1455 1456 REALSIZE(oldsz, r); 1457 if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) { 1458 chunknum = find_chunknum(pool, r, p, 0); 1459 info = (struct chunk_info *)r->size; 1460 } 1461 1462 goldsz = oldsz; 1463 if (oldsz > MALLOC_MAXCHUNK) { 1464 if (oldsz < mopts.malloc_guard) 1465 wrterror(pool, "guard size"); 1466 oldsz -= mopts.malloc_guard; 1467 } 1468 1469 gnewsz = newsz; 1470 if (gnewsz > MALLOC_MAXCHUNK) 1471 gnewsz += mopts.malloc_guard; 1472 1473 if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && 1474 !mopts.malloc_realloc) { 1475 /* First case: from n pages sized allocation to m pages sized 1476 allocation, m > n */ 1477 size_t roldsz = PAGEROUND(goldsz); 1478 size_t rnewsz = PAGEROUND(gnewsz); 1479 1480 if (rnewsz > roldsz) { 1481 /* try to extend existing region */ 1482 if (!mopts.malloc_guard) { 1483 void *hint = (char *)r->p + roldsz; 1484 size_t needed = rnewsz - roldsz; 1485 1486 STATS_INC(pool->cheap_realloc_tries); 1487 q = map(pool, hint, needed, 0); 1488 if (q == hint) 1489 goto gotit; 1490 zapcacheregion(pool, hint, needed); 1491 q = MQUERY(hint, needed); 1492 if (q == hint) 1493 q = MMAPA(hint, needed); 1494 else 1495 q = MAP_FAILED; 1496 if (q == hint) { 1497 gotit: 1498 STATS_ADD(pool->malloc_used, needed); 1499 if (mopts.malloc_junk == 2) 1500 memset(q, SOME_JUNK, needed); 1501 r->size = gnewsz; 1502 if (r->p != p) { 1503 /* old pointer is moved */ 1504 memmove(r->p, p, oldsz); 1505 p = r->p; 1506 } 1507 if (mopts.chunk_canaries) 1508 fill_canary(p, newsz, 1509 PAGEROUND(newsz)); 1510 STATS_SETF(r, f); 1511 STATS_INC(pool->cheap_reallocs); 1512 ret = p; 1513 goto done; 1514 } else if (q != MAP_FAILED) { 1515 if (munmap(q, needed)) 1516 wrterror(pool, "munmap %p", q); 1517 } 1518 } 1519 } else if (rnewsz < roldsz) { 1520 /* shrink number of pages */ 1521 if (mopts.malloc_guard) { 1522 if (mprotect((char *)r->p + roldsz - 1523 mopts.malloc_guard, mopts.malloc_guard, 1524 PROT_READ | PROT_WRITE)) 1525 wrterror(pool, "mprotect"); 1526 if (mprotect((char *)r->p + rnewsz - 1527 mopts.malloc_guard, mopts.malloc_guard, 1528 PROT_NONE)) 1529 wrterror(pool, "mprotect"); 1530 } 1531 unmap(pool, (char *)r->p + rnewsz, roldsz - rnewsz); 1532 r->size = gnewsz; 1533 if (MALLOC_MOVE_COND(gnewsz)) { 1534 void *pp = MALLOC_MOVE(r->p, gnewsz); 1535 memmove(pp, p, newsz); 1536 p = pp; 1537 } else if (mopts.chunk_canaries) 1538 fill_canary(p, newsz, PAGEROUND(newsz)); 1539 STATS_SETF(r, f); 1540 ret = p; 1541 goto done; 1542 } else { 1543 /* number of pages remains the same */ 1544 void *pp = r->p; 1545 1546 r->size = gnewsz; 1547 if (MALLOC_MOVE_COND(gnewsz)) 1548 pp = MALLOC_MOVE(r->p, gnewsz); 1549 if (p != pp) { 1550 memmove(pp, p, oldsz < newsz ? 
oldsz : newsz); 1551 p = pp; 1552 } 1553 if (p == r->p) { 1554 if (newsz > oldsz && mopts.malloc_junk == 2) 1555 memset((char *)p + newsz, SOME_JUNK, 1556 rnewsz - mopts.malloc_guard - 1557 newsz); 1558 if (mopts.chunk_canaries) 1559 fill_canary(p, newsz, PAGEROUND(newsz)); 1560 } 1561 STATS_SETF(r, f); 1562 ret = p; 1563 goto done; 1564 } 1565 } 1566 if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 && 1567 newsz <= MALLOC_MAXCHUNK && newsz > 0 && 1568 1 << find_chunksize(newsz) == oldsz && !mopts.malloc_realloc) { 1569 /* do not reallocate if new size fits good in existing chunk */ 1570 if (mopts.malloc_junk == 2) 1571 memset((char *)p + newsz, SOME_JUNK, oldsz - newsz); 1572 if (mopts.chunk_canaries) { 1573 info->bits[info->offset + chunknum] = newsz; 1574 fill_canary(p, newsz, info->size); 1575 } 1576 STATS_SETF(r, f); 1577 ret = p; 1578 } else if (newsz != oldsz || mopts.malloc_realloc) { 1579 /* create new allocation */ 1580 q = omalloc(pool, newsz, 0, f); 1581 if (q == NULL) { 1582 ret = NULL; 1583 goto done; 1584 } 1585 if (newsz != 0 && oldsz != 0) 1586 memcpy(q, p, oldsz < newsz ? oldsz : newsz); 1587 ofree(pool, p); 1588 ret = q; 1589 } else { 1590 /* oldsz == newsz */ 1591 if (newsz != 0) 1592 wrterror(pool, "realloc internal inconsistency"); 1593 STATS_SETF(r, f); 1594 ret = p; 1595 } 1596 done: 1597 if (argpool != pool) { 1598 pool->active--; 1599 _MALLOC_UNLOCK(pool->mutex); 1600 _MALLOC_LOCK(argpool->mutex); 1601 argpool->active++; 1602 } 1603 return ret; 1604 } 1605 1606 void * 1607 realloc(void *ptr, size_t size) 1608 { 1609 struct dir_info *d; 1610 void *r; 1611 int saved_errno = errno; 1612 1613 d = getpool(); 1614 if (d == NULL) { 1615 _malloc_init(0); 1616 d = getpool(); 1617 } 1618 _MALLOC_LOCK(d->mutex); 1619 d->func = "realloc"; 1620 if (d->active++) { 1621 malloc_recurse(d); 1622 return NULL; 1623 } 1624 r = orealloc(d, ptr, size, CALLER); 1625 1626 d->active--; 1627 _MALLOC_UNLOCK(d->mutex); 1628 if (r == NULL && mopts.malloc_xmalloc) 1629 wrterror(d, "out of memory"); 1630 if (r != NULL) 1631 errno = saved_errno; 1632 return r; 1633 } 1634 /*DEF_STRONG(realloc);*/ 1635 1636 1637 /* 1638 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX 1639 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW 1640 */ 1641 #define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4)) 1642 1643 void * 1644 calloc(size_t nmemb, size_t size) 1645 { 1646 struct dir_info *d; 1647 void *r; 1648 int saved_errno = errno; 1649 1650 d = getpool(); 1651 if (d == NULL) { 1652 _malloc_init(0); 1653 d = getpool(); 1654 } 1655 _MALLOC_LOCK(d->mutex); 1656 d->func = "calloc"; 1657 if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && 1658 nmemb > 0 && SIZE_MAX / nmemb < size) { 1659 _MALLOC_UNLOCK(d->mutex); 1660 if (mopts.malloc_xmalloc) 1661 wrterror(d, "out of memory"); 1662 errno = ENOMEM; 1663 return NULL; 1664 } 1665 1666 if (d->active++) { 1667 malloc_recurse(d); 1668 return NULL; 1669 } 1670 1671 size *= nmemb; 1672 r = omalloc(d, size, 1, CALLER); 1673 1674 d->active--; 1675 _MALLOC_UNLOCK(d->mutex); 1676 if (r == NULL && mopts.malloc_xmalloc) 1677 wrterror(d, "out of memory"); 1678 if (r != NULL) 1679 errno = saved_errno; 1680 return r; 1681 } 1682 /*DEF_STRONG(calloc);*/ 1683 1684 static void * 1685 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill) 1686 { 1687 char *p, *q; 1688 1689 if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0) 1690 wrterror(d, "mapalign bad alignment"); 1691 if (sz != PAGEROUND(sz)) 1692 wrterror(d, "mapalign round"); 1693 
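	/*
	 * Worked example for the trimming described below (illustrative
	 * numbers only, not from the original source): with sz = 8192 and
	 * alignment = 16384 we map 8192 + 16384 = 24576 bytes at some p.
	 * q is p rounded up to the next 16384-byte boundary, so
	 * 0 <= q - p < 16384; the q - p bytes in front and the
	 * 16384 - (q - p) bytes past q + sz are unmapped again, leaving
	 * exactly the aligned range [q, q + 8192).
	 */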
1694 /* Allocate sz + alignment bytes of memory, which must include a 1695 * subrange of size bytes that is properly aligned. Unmap the 1696 * other bytes, and then return that subrange. 1697 */ 1698 1699 /* We need sz + alignment to fit into a size_t. */ 1700 if (alignment > SIZE_MAX - sz) 1701 return MAP_FAILED; 1702 1703 p = map(d, NULL, sz + alignment, zero_fill); 1704 if (p == MAP_FAILED) 1705 return MAP_FAILED; 1706 q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1)); 1707 if (q != p) { 1708 if (munmap(p, q - p)) 1709 wrterror(d, "munmap %p", p); 1710 } 1711 if (munmap(q + sz, alignment - (q - p))) 1712 wrterror(d, "munmap %p", q + sz); 1713 STATS_SUB(d->malloc_used, alignment); 1714 1715 return q; 1716 } 1717 1718 static void * 1719 omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill, void *f) 1720 { 1721 size_t psz; 1722 void *p; 1723 1724 if (alignment <= MALLOC_PAGESIZE) { 1725 /* 1726 * max(size, alignment) is enough to assure the requested alignment, 1727 * since the allocator always allocates power-of-two blocks. 1728 */ 1729 if (sz < alignment) 1730 sz = alignment; 1731 return omalloc(pool, sz, zero_fill, f); 1732 } 1733 1734 if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { 1735 errno = ENOMEM; 1736 return NULL; 1737 } 1738 1739 sz += mopts.malloc_guard; 1740 psz = PAGEROUND(sz); 1741 1742 p = mapalign(pool, alignment, psz, zero_fill); 1743 if (p == NULL) { 1744 errno = ENOMEM; 1745 return NULL; 1746 } 1747 1748 if (insert(pool, p, sz, f)) { 1749 unmap(pool, p, psz); 1750 errno = ENOMEM; 1751 return NULL; 1752 } 1753 1754 if (mopts.malloc_guard) { 1755 if (mprotect((char *)p + psz - mopts.malloc_guard, 1756 mopts.malloc_guard, PROT_NONE)) 1757 wrterror(pool, "mprotect"); 1758 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard); 1759 } 1760 1761 if (mopts.malloc_junk == 2) { 1762 if (zero_fill) 1763 memset((char *)p + sz - mopts.malloc_guard, 1764 SOME_JUNK, psz - sz); 1765 else 1766 memset(p, SOME_JUNK, psz - mopts.malloc_guard); 1767 } 1768 1769 return p; 1770 } 1771 1772 int 1773 posix_memalign(void **memptr, size_t alignment, size_t size) 1774 { 1775 struct dir_info *d; 1776 int res, saved_errno = errno; 1777 void *r; 1778 1779 /* Make sure that alignment is a large enough power of 2. */ 1780 if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *)) 1781 return EINVAL; 1782 1783 d = getpool(); 1784 if (d == NULL) { 1785 _malloc_init(0); 1786 d = getpool(); 1787 } 1788 _MALLOC_LOCK(d->mutex); 1789 d->func = "posix_memalign"; 1790 if (d->active++) { 1791 malloc_recurse(d); 1792 goto err; 1793 } 1794 r = omemalign(d, alignment, size, 0, CALLER); 1795 d->active--; 1796 _MALLOC_UNLOCK(d->mutex); 1797 if (r == NULL) { 1798 if (mopts.malloc_xmalloc) 1799 wrterror(d, "out of memory"); 1800 goto err; 1801 } 1802 errno = saved_errno; 1803 *memptr = r; 1804 return 0; 1805 1806 err: 1807 res = errno; 1808 errno = saved_errno; 1809 return res; 1810 } 1811 /*DEF_STRONG(posix_memalign);*/ 1812 1813 #ifdef MALLOC_STATS 1814 1815 struct malloc_leak { 1816 void (*f)(); 1817 size_t total_size; 1818 int count; 1819 }; 1820 1821 struct leaknode { 1822 RB_ENTRY(leaknode) entry; 1823 struct malloc_leak d; 1824 }; 1825 1826 static int 1827 leakcmp(struct leaknode *e1, struct leaknode *e2) 1828 { 1829 return e1->d.f < e2->d.f ? 
-1 : e1->d.f > e2->d.f; 1830 } 1831 1832 static RB_HEAD(leaktree, leaknode) leakhead; 1833 RB_GENERATE_STATIC(leaktree, leaknode, entry, leakcmp) 1834 1835 static void 1836 putleakinfo(void *f, size_t sz, int cnt) 1837 { 1838 struct leaknode key, *p; 1839 static struct leaknode *page; 1840 static int used; 1841 1842 if (cnt == 0 || page == MAP_FAILED) 1843 return; 1844 1845 key.d.f = f; 1846 p = RB_FIND(leaktree, &leakhead, &key); 1847 if (p == NULL) { 1848 if (page == NULL || 1849 used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) { 1850 page = MMAP(MALLOC_PAGESIZE); 1851 if (page == MAP_FAILED) 1852 return; 1853 used = 0; 1854 } 1855 p = &page[used++]; 1856 p->d.f = f; 1857 p->d.total_size = sz * cnt; 1858 p->d.count = cnt; 1859 RB_INSERT(leaktree, &leakhead, p); 1860 } else { 1861 p->d.total_size += sz * cnt; 1862 p->d.count += cnt; 1863 } 1864 } 1865 1866 static struct malloc_leak *malloc_leaks; 1867 1868 static void 1869 writestr(int fd, const char *p) 1870 { 1871 write(fd, p, strlen(p)); 1872 } 1873 1874 static void 1875 dump_leaks(int fd) 1876 { 1877 struct leaknode *p; 1878 char buf[64]; 1879 int i = 0; 1880 1881 writestr(fd, "Leak report\n"); 1882 writestr(fd, " f sum # avg\n"); 1883 /* XXX only one page of summary */ 1884 if (malloc_leaks == NULL) 1885 malloc_leaks = MMAP(MALLOC_PAGESIZE); 1886 if (malloc_leaks != MAP_FAILED) 1887 memset(malloc_leaks, 0, MALLOC_PAGESIZE); 1888 RB_FOREACH(p, leaktree, &leakhead) { 1889 snprintf(buf, sizeof(buf), "%18p %7zu %6u %6zu\n", p->d.f, 1890 p->d.total_size, p->d.count, p->d.total_size / p->d.count); 1891 write(fd, buf, strlen(buf)); 1892 if (malloc_leaks == MAP_FAILED || 1893 i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak)) 1894 continue; 1895 malloc_leaks[i].f = p->d.f; 1896 malloc_leaks[i].total_size = p->d.total_size; 1897 malloc_leaks[i].count = p->d.count; 1898 i++; 1899 } 1900 } 1901 1902 static void 1903 dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist) 1904 { 1905 char buf[64]; 1906 1907 while (p != NULL) { 1908 snprintf(buf, sizeof(buf), "chunk %18p %18p %4d %d/%d\n", 1909 p->page, ((p->bits[0] & 1) ? 
NULL : f), 1910 p->size, p->free, p->total); 1911 write(fd, buf, strlen(buf)); 1912 if (!fromfreelist) { 1913 if (p->bits[0] & 1) 1914 putleakinfo(NULL, p->size, p->total - p->free); 1915 else { 1916 putleakinfo(f, p->size, 1); 1917 putleakinfo(NULL, p->size, 1918 p->total - p->free - 1); 1919 } 1920 break; 1921 } 1922 p = LIST_NEXT(p, entries); 1923 if (p != NULL) 1924 writestr(fd, " "); 1925 } 1926 } 1927 1928 static void 1929 dump_free_chunk_info(int fd, struct dir_info *d) 1930 { 1931 char buf[64]; 1932 int i, j, count; 1933 struct chunk_info *p; 1934 1935 writestr(fd, "Free chunk structs:\n"); 1936 for (i = 0; i <= MALLOC_MAXSHIFT; i++) { 1937 count = 0; 1938 LIST_FOREACH(p, &d->chunk_info_list[i], entries) 1939 count++; 1940 for (j = 0; j < MALLOC_CHUNK_LISTS; j++) { 1941 p = LIST_FIRST(&d->chunk_dir[i][j]); 1942 if (p == NULL && count == 0) 1943 continue; 1944 snprintf(buf, sizeof(buf), "%2d) %3d ", i, count); 1945 write(fd, buf, strlen(buf)); 1946 if (p != NULL) 1947 dump_chunk(fd, p, NULL, 1); 1948 else 1949 write(fd, "\n", 1); 1950 } 1951 } 1952 1953 } 1954 1955 static void 1956 dump_free_page_info(int fd, struct dir_info *d) 1957 { 1958 char buf[64]; 1959 int i; 1960 1961 snprintf(buf, sizeof(buf), "Free pages cached: %zu\n", 1962 d->free_regions_size); 1963 write(fd, buf, strlen(buf)); 1964 for (i = 0; i < mopts.malloc_cache; i++) { 1965 if (d->free_regions[i].p != NULL) { 1966 snprintf(buf, sizeof(buf), "%2d) ", i); 1967 write(fd, buf, strlen(buf)); 1968 snprintf(buf, sizeof(buf), "free at %p: %zu\n", 1969 d->free_regions[i].p, d->free_regions[i].size); 1970 write(fd, buf, strlen(buf)); 1971 } 1972 } 1973 } 1974 1975 static void 1976 malloc_dump1(int fd, int poolno, struct dir_info *d) 1977 { 1978 char buf[100]; 1979 size_t i, realsize; 1980 1981 snprintf(buf, sizeof(buf), "Malloc dir of %s pool %d at %p\n", __progname, poolno, d); 1982 write(fd, buf, strlen(buf)); 1983 if (d == NULL) 1984 return; 1985 snprintf(buf, sizeof(buf), "Region slots free %zu/%zu\n", 1986 d->regions_free, d->regions_total); 1987 write(fd, buf, strlen(buf)); 1988 snprintf(buf, sizeof(buf), "Finds %zu/%zu\n", d->finds, 1989 d->find_collisions); 1990 write(fd, buf, strlen(buf)); 1991 snprintf(buf, sizeof(buf), "Inserts %zu/%zu\n", d->inserts, 1992 d->insert_collisions); 1993 write(fd, buf, strlen(buf)); 1994 snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes, 1995 d->delete_moves); 1996 write(fd, buf, strlen(buf)); 1997 snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n", 1998 d->cheap_reallocs, d->cheap_realloc_tries); 1999 write(fd, buf, strlen(buf)); 2000 snprintf(buf, sizeof(buf), "In use %zu\n", d->malloc_used); 2001 write(fd, buf, strlen(buf)); 2002 snprintf(buf, sizeof(buf), "Guarded %zu\n", d->malloc_guarded); 2003 write(fd, buf, strlen(buf)); 2004 dump_free_chunk_info(fd, d); 2005 dump_free_page_info(fd, d); 2006 writestr(fd, 2007 "slot) hash d type page f size [free/n]\n"); 2008 for (i = 0; i < d->regions_total; i++) { 2009 if (d->r[i].p != NULL) { 2010 size_t h = hash(d->r[i].p) & 2011 (d->regions_total - 1); 2012 snprintf(buf, sizeof(buf), "%4zx) #%4zx %zd ", 2013 i, h, h - i); 2014 write(fd, buf, strlen(buf)); 2015 REALSIZE(realsize, &d->r[i]); 2016 if (realsize > MALLOC_MAXCHUNK) { 2017 putleakinfo(d->r[i].f, realsize, 1); 2018 snprintf(buf, sizeof(buf), 2019 "pages %18p %18p %zu\n", d->r[i].p, 2020 d->r[i].f, realsize); 2021 write(fd, buf, strlen(buf)); 2022 } else 2023 dump_chunk(fd, 2024 (struct chunk_info *)d->r[i].size, 2025 d->r[i].f, 0); 2026 } 2027 } 2028 dump_leaks(fd); 
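	/*
	 * Note (added for clarity, not in the original): in the leak report
	 * written just above, each line shows the recorded caller address
	 * (f), the total bytes still allocated from that call site (sum),
	 * the number of such allocations (#) and their average size (avg).
	 * Call sites are only recorded when CALLER is compiled in, i.e.
	 * when MALLOC_STATS is defined.
	 */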
2029 write(fd, "\n", 1); 2030 } 2031 2032 void 2033 malloc_dump(int fd, int poolno, struct dir_info *pool) 2034 { 2035 int i; 2036 void *p; 2037 struct region_info *r; 2038 int saved_errno = errno; 2039 2040 if (pool == NULL) 2041 return; 2042 for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) { 2043 p = pool->delayed_chunks[i]; 2044 if (p == NULL) 2045 continue; 2046 r = find(pool, p); 2047 if (r == NULL) 2048 wrterror(pool, "bogus pointer in malloc_dump %p", p); 2049 free_bytes(pool, r, p); 2050 pool->delayed_chunks[i] = NULL; 2051 } 2052 /* XXX leak when run multiple times */ 2053 RB_INIT(&leakhead); 2054 malloc_dump1(fd, poolno, pool); 2055 errno = saved_errno; 2056 } 2057 DEF_WEAK(malloc_dump); 2058 2059 static void 2060 malloc_exit(void) 2061 { 2062 static const char q[] = "malloc() warning: Couldn't dump stats\n"; 2063 int save_errno = errno, fd, i; 2064 char buf[100]; 2065 2066 fd = open("malloc.out", O_RDWR|O_APPEND); 2067 if (fd != -1) { 2068 snprintf(buf, sizeof(buf), "******** Start dump %s *******\n", 2069 __progname); 2070 write(fd, buf, strlen(buf)); 2071 snprintf(buf, sizeof(buf), 2072 "MT=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n", 2073 mopts.malloc_mt, mopts.malloc_freenow, 2074 mopts.malloc_freeunmap, mopts.malloc_junk, 2075 mopts.malloc_realloc, mopts.malloc_xmalloc, 2076 mopts.chunk_canaries, mopts.malloc_cache, 2077 mopts.malloc_guard); 2078 write(fd, buf, strlen(buf)); 2079 2080 for (i = 0; i < _MALLOC_MUTEXES; i++) 2081 malloc_dump(fd, i, mopts.malloc_pool[i]); 2082 snprintf(buf, sizeof(buf), "******** End dump %s *******\n", 2083 __progname); 2084 write(fd, buf, strlen(buf)); 2085 close(fd); 2086 } else 2087 write(STDERR_FILENO, q, sizeof(q) - 1); 2088 errno = save_errno; 2089 } 2090 2091 #endif /* MALLOC_STATS */ 2092
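#if 0
/*
 * Stand-alone sketch of the overflow check calloc() performs above; kept
 * under #if 0 since it is illustrative only and not part of the allocator.
 * With MUL_NO_OVERFLOW == 2^(sizeof(size_t) * 4), a product of two factors
 * that are both below that bound cannot exceed SIZE_MAX, so the relatively
 * expensive division is only done when one factor is large.
 */
static int
mul_would_overflow(size_t nmemb, size_t size)
{
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size)
		return 1;	/* nmemb * size would wrap past SIZE_MAX */
	return 0;
}
#endif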