1 /* $OpenBSD: malloc.c,v 1.259 2019/01/10 18:47:05 otto Exp $ */ 2 /* 3 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net> 4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org> 5 * Copyright (c) 2008 Damien Miller <djm@openbsd.org> 6 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21 /* 22 * If we meet some day, and you think this stuff is worth it, you 23 * can buy me a beer in return. Poul-Henning Kamp 24 */ 25 26 /* #define MALLOC_STATS */ 27 28 #include <sys/types.h> 29 #include <sys/queue.h> 30 #include <sys/mman.h> 31 #include <sys/sysctl.h> 32 #include <uvm/uvmexp.h> 33 #include <errno.h> 34 #include <stdarg.h> 35 #include <stdint.h> 36 #include <stdio.h> 37 #include <stdlib.h> 38 #include <string.h> 39 #include <unistd.h> 40 41 #ifdef MALLOC_STATS 42 #include <sys/tree.h> 43 #include <fcntl.h> 44 #endif 45 46 #include "thread_private.h" 47 #include <tib.h> 48 49 #define MALLOC_PAGESHIFT _MAX_PAGE_SHIFT 50 51 #define MALLOC_MINSHIFT 4 52 #define MALLOC_MAXSHIFT (MALLOC_PAGESHIFT - 1) 53 #define MALLOC_PAGESIZE (1UL << MALLOC_PAGESHIFT) 54 #define MALLOC_MINSIZE (1UL << MALLOC_MINSHIFT) 55 #define MALLOC_PAGEMASK (MALLOC_PAGESIZE - 1) 56 #define MASK_POINTER(p) ((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK)) 57 58 #define MALLOC_MAXCHUNK (1 << MALLOC_MAXSHIFT) 59 #define MALLOC_MAXCACHE 256 60 #define MALLOC_DELAYED_CHUNK_MASK 15 61 #ifdef MALLOC_STATS 62 #define MALLOC_INITIAL_REGIONS 512 63 #else 64 #define MALLOC_INITIAL_REGIONS (MALLOC_PAGESIZE / sizeof(struct region_info)) 65 #endif 66 #define MALLOC_DEFAULT_CACHE 64 67 #define MALLOC_CHUNK_LISTS 4 68 #define CHUNK_CHECK_LENGTH 32 69 70 /* 71 * We move allocations between half a page and a whole page towards the end, 72 * subject to alignment constraints. This is the extra headroom we allow. 73 * Set to zero to be the most strict. 74 */ 75 #define MALLOC_LEEWAY 0 76 #define MALLOC_MOVE_COND(sz) ((sz) - mopts.malloc_guard < \ 77 MALLOC_PAGESIZE - MALLOC_LEEWAY) 78 #define MALLOC_MOVE(p, sz) (((char *)(p)) + \ 79 ((MALLOC_PAGESIZE - MALLOC_LEEWAY - \ 80 ((sz) - mopts.malloc_guard)) & \ 81 ~(MALLOC_MINSIZE - 1))) 82 83 #define PAGEROUND(x) (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK) 84 85 /* 86 * What to use for Junk. This is the byte value we use to fill with 87 * when the 'J' option is enabled. Use SOME_JUNK right after alloc, 88 * and SOME_FREEJUNK right before free. 
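 * For example, a chunk parked in the delayed-free ring is expected to
 * still hold 0xdf bytes when it is finally recycled (validate_junk()
 * reports "use after free" otherwise), and the canary bytes written
 * past a requested size are 0xdb, later checked by validate_canary().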
89 */ 90 #define SOME_JUNK 0xdb /* deadbeef */ 91 #define SOME_FREEJUNK 0xdf /* dead, free */ 92 93 #define MMAP(sz) mmap(NULL, (sz), PROT_READ | PROT_WRITE, \ 94 MAP_ANON | MAP_PRIVATE, -1, 0) 95 96 #define MMAPNONE(sz) mmap(NULL, (sz), PROT_NONE, \ 97 MAP_ANON | MAP_PRIVATE, -1, 0) 98 99 #define MMAPA(a,sz) mmap((a), (sz), PROT_READ | PROT_WRITE, \ 100 MAP_ANON | MAP_PRIVATE, -1, 0) 101 102 #define MQUERY(a, sz) mquery((a), (sz), PROT_READ | PROT_WRITE, \ 103 MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0) 104 105 struct region_info { 106 void *p; /* page; low bits used to mark chunks */ 107 uintptr_t size; /* size for pages, or chunk_info pointer */ 108 #ifdef MALLOC_STATS 109 void *f; /* where allocated from */ 110 #endif 111 }; 112 113 LIST_HEAD(chunk_head, chunk_info); 114 115 struct dir_info { 116 u_int32_t canary1; 117 int active; /* status of malloc */ 118 struct region_info *r; /* region slots */ 119 size_t regions_total; /* number of region slots */ 120 size_t regions_free; /* number of free slots */ 121 /* lists of free chunk info structs */ 122 struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1]; 123 /* lists of chunks with free slots */ 124 struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS]; 125 size_t free_regions_size; /* free pages cached */ 126 /* free pages cache */ 127 struct region_info free_regions[MALLOC_MAXCACHE]; 128 /* delayed free chunk slots */ 129 u_int rotor; 130 void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1]; 131 size_t rbytesused; /* random bytes used */ 132 char *func; /* current function */ 133 int mutex; 134 u_char rbytes[32]; /* random bytes */ 135 #ifdef MALLOC_STATS 136 size_t inserts; 137 size_t insert_collisions; 138 size_t finds; 139 size_t find_collisions; 140 size_t deletes; 141 size_t delete_moves; 142 size_t cheap_realloc_tries; 143 size_t cheap_reallocs; 144 size_t malloc_used; /* bytes allocated */ 145 size_t malloc_guarded; /* bytes used for guards */ 146 size_t pool_searches; /* searches for pool */ 147 size_t other_pool; /* searches in other pool */ 148 #define STATS_ADD(x,y) ((x) += (y)) 149 #define STATS_SUB(x,y) ((x) -= (y)) 150 #define STATS_INC(x) ((x)++) 151 #define STATS_ZERO(x) ((x) = 0) 152 #define STATS_SETF(x,y) ((x)->f = (y)) 153 #else 154 #define STATS_ADD(x,y) /* nothing */ 155 #define STATS_SUB(x,y) /* nothing */ 156 #define STATS_INC(x) /* nothing */ 157 #define STATS_ZERO(x) /* nothing */ 158 #define STATS_SETF(x,y) /* nothing */ 159 #endif /* MALLOC_STATS */ 160 u_int32_t canary2; 161 }; 162 #define DIR_INFO_RSZ ((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \ 163 ~MALLOC_PAGEMASK) 164 165 /* 166 * This structure describes a page worth of chunks. 167 * 168 * How many bits per u_short in the bitmap 169 */ 170 #define MALLOC_BITS (NBBY * sizeof(u_short)) 171 struct chunk_info { 172 LIST_ENTRY(chunk_info) entries; 173 void *page; /* pointer to the page */ 174 u_short canary; 175 u_short size; /* size of this page's chunks */ 176 u_short shift; /* how far to shift for this size */ 177 u_short free; /* how many free chunks */ 178 u_short total; /* how many chunks */ 179 u_short offset; /* requested size table offset */ 180 u_short bits[1]; /* which chunks are free */ 181 }; 182 183 struct malloc_readonly { 184 /* Main bookkeeping information */ 185 struct dir_info *malloc_pool[_MALLOC_MUTEXES]; 186 u_int malloc_mutexes; /* how much in actual use? */ 187 int malloc_mt; /* multi-threaded mode? 
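(set by _malloc_init() when called with from_rthreads != 0)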
*/ 188 int malloc_freecheck; /* Extensive double free check */ 189 int malloc_freeunmap; /* mprotect free pages PROT_NONE? */ 190 int malloc_junk; /* junk fill? */ 191 int malloc_realloc; /* always realloc? */ 192 int malloc_xmalloc; /* xmalloc behaviour? */ 193 int chunk_canaries; /* use canaries after chunks? */ 194 int internal_funcs; /* use better recallocarray/freezero? */ 195 u_int malloc_cache; /* free pages we cache */ 196 size_t malloc_guard; /* use guard pages after allocations? */ 197 #ifdef MALLOC_STATS 198 int malloc_stats; /* dump statistics at end */ 199 #endif 200 u_int32_t malloc_canary; /* Matched against ones in malloc_pool */ 201 }; 202 203 /* This object is mapped PROT_READ after initialisation to prevent tampering */ 204 static union { 205 struct malloc_readonly mopts; 206 u_char _pad[MALLOC_PAGESIZE]; 207 } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE))); 208 #define mopts malloc_readonly.mopts 209 210 char *malloc_options; /* compile-time options */ 211 212 static __dead void wrterror(struct dir_info *d, char *msg, ...) 213 __attribute__((__format__ (printf, 2, 3))); 214 215 #ifdef MALLOC_STATS 216 void malloc_dump(int, int, struct dir_info *); 217 PROTO_NORMAL(malloc_dump); 218 void malloc_gdump(int); 219 PROTO_NORMAL(malloc_gdump); 220 static void malloc_exit(void); 221 #define CALLER __builtin_return_address(0) 222 #else 223 #define CALLER NULL 224 #endif 225 226 /* low bits of r->p determine size: 0 means >= page size and r->size holding 227 * real size, otherwise low bits are a shift count, or 1 for malloc(0) 228 */ 229 #define REALSIZE(sz, r) \ 230 (sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK, \ 231 (sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1)))) 232 233 static inline void 234 _MALLOC_LEAVE(struct dir_info *d) 235 { 236 if (mopts.malloc_mt) { 237 d->active--; 238 _MALLOC_UNLOCK(d->mutex); 239 } 240 } 241 242 static inline void 243 _MALLOC_ENTER(struct dir_info *d) 244 { 245 if (mopts.malloc_mt) { 246 _MALLOC_LOCK(d->mutex); 247 d->active++; 248 } 249 } 250 251 static inline size_t 252 hash(void *p) 253 { 254 size_t sum; 255 uintptr_t u; 256 257 u = (uintptr_t)p >> MALLOC_PAGESHIFT; 258 sum = u; 259 sum = (sum << 7) - sum + (u >> 16); 260 #ifdef __LP64__ 261 sum = (sum << 7) - sum + (u >> 32); 262 sum = (sum << 7) - sum + (u >> 48); 263 #endif 264 return sum; 265 } 266 267 static inline struct dir_info * 268 getpool(void) 269 { 270 if (!mopts.malloc_mt) 271 return mopts.malloc_pool[0]; 272 else 273 return mopts.malloc_pool[TIB_GET()->tib_tid & 274 (mopts.malloc_mutexes - 1)]; 275 } 276 277 static __dead void 278 wrterror(struct dir_info *d, char *msg, ...) 279 { 280 int saved_errno = errno; 281 va_list ap; 282 283 dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname, 284 getpid(), (d != NULL && d->func) ? 
d->func : "unknown"); 285 va_start(ap, msg); 286 vdprintf(STDERR_FILENO, msg, ap); 287 va_end(ap); 288 dprintf(STDERR_FILENO, "\n"); 289 290 #ifdef MALLOC_STATS 291 if (mopts.malloc_stats) 292 malloc_gdump(STDERR_FILENO); 293 #endif /* MALLOC_STATS */ 294 295 errno = saved_errno; 296 297 abort(); 298 } 299 300 static void 301 rbytes_init(struct dir_info *d) 302 { 303 arc4random_buf(d->rbytes, sizeof(d->rbytes)); 304 /* add 1 to account for using d->rbytes[0] */ 305 d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2); 306 } 307 308 static inline u_char 309 getrbyte(struct dir_info *d) 310 { 311 u_char x; 312 313 if (d->rbytesused >= sizeof(d->rbytes)) 314 rbytes_init(d); 315 x = d->rbytes[d->rbytesused++]; 316 return x; 317 } 318 319 static void 320 omalloc_parseopt(char opt) 321 { 322 switch (opt) { 323 case '+': 324 mopts.malloc_mutexes <<= 1; 325 if (mopts.malloc_mutexes > _MALLOC_MUTEXES) 326 mopts.malloc_mutexes = _MALLOC_MUTEXES; 327 break; 328 case '-': 329 mopts.malloc_mutexes >>= 1; 330 if (mopts.malloc_mutexes < 1) 331 mopts.malloc_mutexes = 1; 332 break; 333 case '>': 334 mopts.malloc_cache <<= 1; 335 if (mopts.malloc_cache > MALLOC_MAXCACHE) 336 mopts.malloc_cache = MALLOC_MAXCACHE; 337 break; 338 case '<': 339 mopts.malloc_cache >>= 1; 340 break; 341 case 'c': 342 mopts.chunk_canaries = 0; 343 break; 344 case 'C': 345 mopts.chunk_canaries = 1; 346 break; 347 #ifdef MALLOC_STATS 348 case 'd': 349 mopts.malloc_stats = 0; 350 break; 351 case 'D': 352 mopts.malloc_stats = 1; 353 break; 354 #endif /* MALLOC_STATS */ 355 case 'f': 356 mopts.malloc_freecheck = 0; 357 mopts.malloc_freeunmap = 0; 358 break; 359 case 'F': 360 mopts.malloc_freecheck = 1; 361 mopts.malloc_freeunmap = 1; 362 break; 363 case 'g': 364 mopts.malloc_guard = 0; 365 break; 366 case 'G': 367 mopts.malloc_guard = MALLOC_PAGESIZE; 368 break; 369 case 'j': 370 if (mopts.malloc_junk > 0) 371 mopts.malloc_junk--; 372 break; 373 case 'J': 374 if (mopts.malloc_junk < 2) 375 mopts.malloc_junk++; 376 break; 377 case 'r': 378 mopts.malloc_realloc = 0; 379 break; 380 case 'R': 381 mopts.malloc_realloc = 1; 382 break; 383 case 'u': 384 mopts.malloc_freeunmap = 0; 385 break; 386 case 'U': 387 mopts.malloc_freeunmap = 1; 388 break; 389 case 'x': 390 mopts.malloc_xmalloc = 0; 391 break; 392 case 'X': 393 mopts.malloc_xmalloc = 1; 394 break; 395 default: 396 dprintf(STDERR_FILENO, "malloc() warning: " 397 "unknown char in MALLOC_OPTIONS\n"); 398 break; 399 } 400 } 401 402 static void 403 omalloc_init(void) 404 { 405 char *p, *q, b[16]; 406 int i, j, mib[2]; 407 size_t sb; 408 409 /* 410 * Default options 411 */ 412 mopts.malloc_mutexes = 8; 413 mopts.malloc_junk = 1; 414 mopts.malloc_cache = MALLOC_DEFAULT_CACHE; 415 416 for (i = 0; i < 3; i++) { 417 switch (i) { 418 case 0: 419 mib[0] = CTL_VM; 420 mib[1] = VM_MALLOC_CONF; 421 sb = sizeof(b); 422 j = sysctl(mib, 2, b, &sb, NULL, 0); 423 if (j != 0) 424 continue; 425 p = b; 426 break; 427 case 1: 428 if (issetugid() == 0) 429 p = getenv("MALLOC_OPTIONS"); 430 else 431 continue; 432 break; 433 case 2: 434 p = malloc_options; 435 break; 436 default: 437 p = NULL; 438 } 439 440 for (; p != NULL && *p != '\0'; p++) { 441 switch (*p) { 442 case 'S': 443 for (q = "CFGJ"; *q != '\0'; q++) 444 omalloc_parseopt(*q); 445 mopts.malloc_cache = 0; 446 break; 447 case 's': 448 for (q = "cfgj"; *q != '\0'; q++) 449 omalloc_parseopt(*q); 450 mopts.malloc_cache = MALLOC_DEFAULT_CACHE; 451 break; 452 default: 453 omalloc_parseopt(*p); 454 break; 455 } 456 } 457 } 458 459 #ifdef MALLOC_STATS 
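	/*
	 * With option 'D', register an atexit(3) handler so the statistics
	 * dump is appended to ./malloc.out at exit; malloc_exit() opens it
	 * without O_CREAT, so the file must already exist.
	 */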
460 if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) { 461 dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed." 462 " Will not be able to dump stats on exit\n"); 463 } 464 #endif /* MALLOC_STATS */ 465 466 while ((mopts.malloc_canary = arc4random()) == 0) 467 ; 468 } 469 470 static void 471 omalloc_poolinit(struct dir_info **dp) 472 { 473 char *p; 474 size_t d_avail, regioninfo_size; 475 struct dir_info *d; 476 int i, j; 477 478 /* 479 * Allocate dir_info with a guard page on either side. Also 480 * randomise offset inside the page at which the dir_info 481 * lies (subject to alignment by 1 << MALLOC_MINSHIFT) 482 */ 483 if ((p = MMAPNONE(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED) 484 wrterror(NULL, "malloc init mmap failed"); 485 mprotect(p + MALLOC_PAGESIZE, DIR_INFO_RSZ, PROT_READ | PROT_WRITE); 486 d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT; 487 d = (struct dir_info *)(p + MALLOC_PAGESIZE + 488 (arc4random_uniform(d_avail) << MALLOC_MINSHIFT)); 489 490 rbytes_init(d); 491 d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS; 492 regioninfo_size = d->regions_total * sizeof(struct region_info); 493 d->r = MMAP(regioninfo_size); 494 if (d->r == MAP_FAILED) { 495 d->regions_total = 0; 496 wrterror(NULL, "malloc init mmap failed"); 497 } 498 for (i = 0; i <= MALLOC_MAXSHIFT; i++) { 499 LIST_INIT(&d->chunk_info_list[i]); 500 for (j = 0; j < MALLOC_CHUNK_LISTS; j++) 501 LIST_INIT(&d->chunk_dir[i][j]); 502 } 503 STATS_ADD(d->malloc_used, regioninfo_size + 3 * MALLOC_PAGESIZE); 504 d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d; 505 d->canary2 = ~d->canary1; 506 507 *dp = d; 508 } 509 510 static int 511 omalloc_grow(struct dir_info *d) 512 { 513 size_t newtotal; 514 size_t newsize; 515 size_t mask; 516 size_t i; 517 struct region_info *p; 518 519 if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2) 520 return 1; 521 522 newtotal = d->regions_total * 2; 523 newsize = newtotal * sizeof(struct region_info); 524 mask = newtotal - 1; 525 526 p = MMAP(newsize); 527 if (p == MAP_FAILED) 528 return 1; 529 530 STATS_ADD(d->malloc_used, newsize); 531 STATS_ZERO(d->inserts); 532 STATS_ZERO(d->insert_collisions); 533 for (i = 0; i < d->regions_total; i++) { 534 void *q = d->r[i].p; 535 if (q != NULL) { 536 size_t index = hash(q) & mask; 537 STATS_INC(d->inserts); 538 while (p[index].p != NULL) { 539 index = (index - 1) & mask; 540 STATS_INC(d->insert_collisions); 541 } 542 p[index] = d->r[i]; 543 } 544 } 545 /* avoid pages containing meta info to end up in cache */ 546 if (munmap(d->r, d->regions_total * sizeof(struct region_info))) 547 wrterror(d, "munmap %p", (void *)d->r); 548 else 549 STATS_SUB(d->malloc_used, 550 d->regions_total * sizeof(struct region_info)); 551 d->regions_free = d->regions_free + d->regions_total; 552 d->regions_total = newtotal; 553 d->r = p; 554 return 0; 555 } 556 557 /* 558 * The hashtable uses the assumption that p is never NULL. This holds since 559 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ. 
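 *
 * The table is open-addressed: the home slot is hash(p) & (regions_total - 1)
 * and collisions probe downwards, (index - 1) & mask, until a free slot
 * (insert) or the matching masked pointer (find) is reached.  When fewer
 * than a quarter of the slots are free, omalloc_grow() doubles the table
 * and rehashes every entry; delete() closes up probe chains with algorithm R
 * (Knuth vol. III) instead of leaving tombstones.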
560 */ 561 static int 562 insert(struct dir_info *d, void *p, size_t sz, void *f) 563 { 564 size_t index; 565 size_t mask; 566 void *q; 567 568 if (d->regions_free * 4 < d->regions_total) { 569 if (omalloc_grow(d)) 570 return 1; 571 } 572 mask = d->regions_total - 1; 573 index = hash(p) & mask; 574 q = d->r[index].p; 575 STATS_INC(d->inserts); 576 while (q != NULL) { 577 index = (index - 1) & mask; 578 q = d->r[index].p; 579 STATS_INC(d->insert_collisions); 580 } 581 d->r[index].p = p; 582 d->r[index].size = sz; 583 #ifdef MALLOC_STATS 584 d->r[index].f = f; 585 #endif 586 d->regions_free--; 587 return 0; 588 } 589 590 static struct region_info * 591 find(struct dir_info *d, void *p) 592 { 593 size_t index; 594 size_t mask = d->regions_total - 1; 595 void *q, *r; 596 597 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || 598 d->canary1 != ~d->canary2) 599 wrterror(d, "internal struct corrupt"); 600 p = MASK_POINTER(p); 601 index = hash(p) & mask; 602 r = d->r[index].p; 603 q = MASK_POINTER(r); 604 STATS_INC(d->finds); 605 while (q != p && r != NULL) { 606 index = (index - 1) & mask; 607 r = d->r[index].p; 608 q = MASK_POINTER(r); 609 STATS_INC(d->find_collisions); 610 } 611 return (q == p && r != NULL) ? &d->r[index] : NULL; 612 } 613 614 static void 615 delete(struct dir_info *d, struct region_info *ri) 616 { 617 /* algorithm R, Knuth Vol III section 6.4 */ 618 size_t mask = d->regions_total - 1; 619 size_t i, j, r; 620 621 if (d->regions_total & (d->regions_total - 1)) 622 wrterror(d, "regions_total not 2^x"); 623 d->regions_free++; 624 STATS_INC(d->deletes); 625 626 i = ri - d->r; 627 for (;;) { 628 d->r[i].p = NULL; 629 d->r[i].size = 0; 630 j = i; 631 for (;;) { 632 i = (i - 1) & mask; 633 if (d->r[i].p == NULL) 634 return; 635 r = hash(d->r[i].p) & mask; 636 if ((i <= r && r < j) || (r < j && j < i) || 637 (j < i && i <= r)) 638 continue; 639 d->r[j] = d->r[i]; 640 STATS_INC(d->delete_moves); 641 break; 642 } 643 644 } 645 } 646 647 /* 648 * Cache maintenance. We keep at most malloc_cache pages cached. 649 * If the cache is becoming full, unmap pages in the cache for real, 650 * and then add the region to the cache 651 * Opposed to the regular region data structure, the sizes in the 652 * cache are in MALLOC_PAGESIZE units. 
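 *
 * For example, with the default cache of 64 pages, freeing a 3-page region
 * records { p, 3 } in a free_regions slot chosen from a random offset; if
 * caching it would exceed malloc_cache pages, enough older cached regions
 * are munmap(2)'d first.  map() later satisfies page requests from this
 * cache, re-enabling PROT_READ|PROT_WRITE (when freeunmap is set) and
 * junk- or zero-filling the pages as requested.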
653 */ 654 static void 655 unmap(struct dir_info *d, void *p, size_t sz, size_t clear, int junk) 656 { 657 size_t psz = sz >> MALLOC_PAGESHIFT; 658 size_t rsz; 659 struct region_info *r; 660 u_int i, offset, mask; 661 662 if (sz != PAGEROUND(sz)) 663 wrterror(d, "munmap round"); 664 665 rsz = mopts.malloc_cache - d->free_regions_size; 666 667 /* 668 * normally the cache holds recently freed regions, but if the region 669 * to unmap is larger than the cache size or we're clearing and the 670 * cache is full, just munmap 671 */ 672 if (psz > mopts.malloc_cache || (clear > 0 && rsz == 0)) { 673 i = munmap(p, sz); 674 if (i) 675 wrterror(d, "munmap %p", p); 676 STATS_SUB(d->malloc_used, sz); 677 return; 678 } 679 offset = getrbyte(d); 680 mask = mopts.malloc_cache - 1; 681 if (psz > rsz) { 682 size_t tounmap = psz - rsz; 683 for (i = 0; ; i++) { 684 r = &d->free_regions[(i + offset) & mask]; 685 if (r->p != NULL) { 686 rsz = r->size << MALLOC_PAGESHIFT; 687 if (munmap(r->p, rsz)) 688 wrterror(d, "munmap %p", r->p); 689 r->p = NULL; 690 if (tounmap > r->size) 691 tounmap -= r->size; 692 else 693 tounmap = 0; 694 d->free_regions_size -= r->size; 695 STATS_SUB(d->malloc_used, rsz); 696 if (tounmap == 0) { 697 offset = i; 698 break; 699 } 700 } 701 } 702 } 703 for (i = 0; ; i++) { 704 r = &d->free_regions[(i + offset) & mask]; 705 if (r->p == NULL) { 706 if (clear > 0) 707 memset(p, 0, clear); 708 if (junk && !mopts.malloc_freeunmap) { 709 size_t amt = junk == 1 ? MALLOC_MAXCHUNK : sz; 710 memset(p, SOME_FREEJUNK, amt); 711 } 712 if (mopts.malloc_freeunmap) 713 mprotect(p, sz, PROT_NONE); 714 r->p = p; 715 r->size = psz; 716 d->free_regions_size += psz; 717 break; 718 } 719 } 720 if (d->free_regions_size > mopts.malloc_cache) 721 wrterror(d, "malloc cache overflow"); 722 } 723 724 static void 725 zapcacheregion(struct dir_info *d, void *p, size_t len) 726 { 727 u_int i; 728 struct region_info *r; 729 size_t rsz; 730 731 for (i = 0; i < mopts.malloc_cache; i++) { 732 r = &d->free_regions[i]; 733 if (r->p >= p && r->p <= (void *)((char *)p + len)) { 734 rsz = r->size << MALLOC_PAGESHIFT; 735 if (munmap(r->p, rsz)) 736 wrterror(d, "munmap %p", r->p); 737 r->p = NULL; 738 d->free_regions_size -= r->size; 739 STATS_SUB(d->malloc_used, rsz); 740 } 741 } 742 } 743 744 static void * 745 map(struct dir_info *d, void *hint, size_t sz, int zero_fill) 746 { 747 size_t psz = sz >> MALLOC_PAGESHIFT; 748 struct region_info *r, *big = NULL; 749 u_int i; 750 void *p; 751 752 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || 753 d->canary1 != ~d->canary2) 754 wrterror(d, "internal struct corrupt"); 755 if (sz != PAGEROUND(sz)) 756 wrterror(d, "map round"); 757 758 if (hint == NULL && psz > d->free_regions_size) { 759 _MALLOC_LEAVE(d); 760 p = MMAP(sz); 761 _MALLOC_ENTER(d); 762 if (p != MAP_FAILED) 763 STATS_ADD(d->malloc_used, sz); 764 /* zero fill not needed */ 765 return p; 766 } 767 for (i = 0; i < mopts.malloc_cache; i++) { 768 r = &d->free_regions[(i + d->rotor) & (mopts.malloc_cache - 1)]; 769 if (r->p != NULL) { 770 if (hint != NULL && r->p != hint) 771 continue; 772 if (r->size == psz) { 773 p = r->p; 774 r->p = NULL; 775 d->free_regions_size -= psz; 776 if (mopts.malloc_freeunmap) 777 mprotect(p, sz, PROT_READ | PROT_WRITE); 778 if (zero_fill) 779 memset(p, 0, sz); 780 else if (mopts.malloc_junk == 2 && 781 mopts.malloc_freeunmap) 782 memset(p, SOME_FREEJUNK, sz); 783 d->rotor += i + 1; 784 return p; 785 } else if (r->size > psz) 786 big = r; 787 } 788 } 789 if (big != NULL) { 790 r = 
big; 791 p = r->p; 792 r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT); 793 if (mopts.malloc_freeunmap) 794 mprotect(p, sz, PROT_READ | PROT_WRITE); 795 r->size -= psz; 796 d->free_regions_size -= psz; 797 if (zero_fill) 798 memset(p, 0, sz); 799 else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap) 800 memset(p, SOME_FREEJUNK, sz); 801 return p; 802 } 803 if (hint != NULL) 804 return MAP_FAILED; 805 if (d->free_regions_size > mopts.malloc_cache) 806 wrterror(d, "malloc cache"); 807 _MALLOC_LEAVE(d); 808 p = MMAP(sz); 809 _MALLOC_ENTER(d); 810 if (p != MAP_FAILED) 811 STATS_ADD(d->malloc_used, sz); 812 /* zero fill not needed */ 813 return p; 814 } 815 816 static void 817 init_chunk_info(struct dir_info *d, struct chunk_info *p, int bits) 818 { 819 int i; 820 821 if (bits == 0) { 822 p->shift = MALLOC_MINSHIFT; 823 p->total = p->free = MALLOC_PAGESIZE >> p->shift; 824 p->size = 0; 825 p->offset = 0xdead; 826 } else { 827 p->shift = bits; 828 p->total = p->free = MALLOC_PAGESIZE >> p->shift; 829 p->size = 1U << bits; 830 p->offset = howmany(p->total, MALLOC_BITS); 831 } 832 p->canary = (u_short)d->canary1; 833 834 /* set all valid bits in the bitmap */ 835 i = p->total - 1; 836 memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS)); 837 p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1; 838 } 839 840 static struct chunk_info * 841 alloc_chunk_info(struct dir_info *d, int bits) 842 { 843 struct chunk_info *p; 844 845 if (LIST_EMPTY(&d->chunk_info_list[bits])) { 846 size_t size, count, i; 847 char *q; 848 849 if (bits == 0) 850 count = MALLOC_PAGESIZE / MALLOC_MINSIZE; 851 else 852 count = MALLOC_PAGESIZE >> bits; 853 854 size = howmany(count, MALLOC_BITS); 855 size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short); 856 if (mopts.chunk_canaries) 857 size += count * sizeof(u_short); 858 size = _ALIGN(size); 859 860 q = MMAP(MALLOC_PAGESIZE); 861 if (q == MAP_FAILED) 862 return NULL; 863 STATS_ADD(d->malloc_used, MALLOC_PAGESIZE); 864 count = MALLOC_PAGESIZE / size; 865 866 for (i = 0; i < count; i++, q += size) { 867 p = (struct chunk_info *)q; 868 LIST_INSERT_HEAD(&d->chunk_info_list[bits], p, entries); 869 } 870 } 871 p = LIST_FIRST(&d->chunk_info_list[bits]); 872 LIST_REMOVE(p, entries); 873 if (p->shift == 0) 874 init_chunk_info(d, p, bits); 875 return p; 876 } 877 878 /* 879 * Allocate a page of chunks 880 */ 881 static struct chunk_info * 882 omalloc_make_chunks(struct dir_info *d, int bits, int listnum) 883 { 884 struct chunk_info *bp; 885 void *pp; 886 887 /* Allocate a new bucket */ 888 pp = map(d, NULL, MALLOC_PAGESIZE, 0); 889 if (pp == MAP_FAILED) 890 return NULL; 891 892 /* memory protect the page allocated in the malloc(0) case */ 893 if (bits == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) < 0) 894 goto err; 895 896 bp = alloc_chunk_info(d, bits); 897 if (bp == NULL) 898 goto err; 899 bp->page = pp; 900 901 if (insert(d, (void *)((uintptr_t)pp | (bits + 1)), (uintptr_t)bp, 902 NULL)) 903 goto err; 904 LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries); 905 return bp; 906 907 err: 908 unmap(d, pp, MALLOC_PAGESIZE, 0, mopts.malloc_junk); 909 return NULL; 910 } 911 912 static int 913 find_chunksize(size_t size) 914 { 915 int r; 916 917 /* malloc(0) is special */ 918 if (size == 0) 919 return 0; 920 921 if (size < MALLOC_MINSIZE) 922 size = MALLOC_MINSIZE; 923 size--; 924 925 r = MALLOC_MINSHIFT; 926 while (size >> r) 927 r++; 928 return r; 929 } 930 931 static void 932 fill_canary(char *ptr, size_t sz, size_t allocated) 933 { 934 size_t 
check_sz = allocated - sz; 935 936 if (check_sz > CHUNK_CHECK_LENGTH) 937 check_sz = CHUNK_CHECK_LENGTH; 938 memset(ptr + sz, SOME_JUNK, check_sz); 939 } 940 941 /* 942 * Allocate a chunk 943 */ 944 static void * 945 malloc_bytes(struct dir_info *d, size_t size, void *f) 946 { 947 u_int i, r; 948 int j, listnum; 949 size_t k; 950 u_short *lp; 951 struct chunk_info *bp; 952 void *p; 953 954 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || 955 d->canary1 != ~d->canary2) 956 wrterror(d, "internal struct corrupt"); 957 958 j = find_chunksize(size); 959 960 r = ((u_int)getrbyte(d) << 8) | getrbyte(d); 961 listnum = r % MALLOC_CHUNK_LISTS; 962 /* If it's empty, make a page more of that size chunks */ 963 if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) { 964 bp = omalloc_make_chunks(d, j, listnum); 965 if (bp == NULL) 966 return NULL; 967 } 968 969 if (bp->canary != (u_short)d->canary1) 970 wrterror(d, "chunk info corrupted"); 971 972 i = (r / MALLOC_CHUNK_LISTS) & (bp->total - 1); 973 974 /* start somewhere in a short */ 975 lp = &bp->bits[i / MALLOC_BITS]; 976 if (*lp) { 977 j = i % MALLOC_BITS; 978 k = ffs(*lp >> j); 979 if (k != 0) { 980 k += j - 1; 981 goto found; 982 } 983 } 984 /* no bit halfway, go to next full short */ 985 i /= MALLOC_BITS; 986 for (;;) { 987 if (++i >= bp->total / MALLOC_BITS) 988 i = 0; 989 lp = &bp->bits[i]; 990 if (*lp) { 991 k = ffs(*lp) - 1; 992 break; 993 } 994 } 995 found: 996 #ifdef MALLOC_STATS 997 if (i == 0 && k == 0) { 998 struct region_info *r = find(d, bp->page); 999 r->f = f; 1000 } 1001 #endif 1002 1003 *lp ^= 1 << k; 1004 1005 /* If there are no more free, remove from free-list */ 1006 if (--bp->free == 0) 1007 LIST_REMOVE(bp, entries); 1008 1009 /* Adjust to the real offset of that chunk */ 1010 k += (lp - bp->bits) * MALLOC_BITS; 1011 1012 if (mopts.chunk_canaries && size > 0) 1013 bp->bits[bp->offset + k] = size; 1014 1015 k <<= bp->shift; 1016 1017 p = (char *)bp->page + k; 1018 if (bp->size > 0) { 1019 if (mopts.malloc_junk == 2) 1020 memset(p, SOME_JUNK, bp->size); 1021 else if (mopts.chunk_canaries) 1022 fill_canary(p, size, bp->size); 1023 } 1024 return p; 1025 } 1026 1027 static void 1028 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated) 1029 { 1030 size_t check_sz = allocated - sz; 1031 u_char *p, *q; 1032 1033 if (check_sz > CHUNK_CHECK_LENGTH) 1034 check_sz = CHUNK_CHECK_LENGTH; 1035 p = ptr + sz; 1036 q = p + check_sz; 1037 1038 while (p < q) { 1039 if (*p != SOME_JUNK) { 1040 wrterror(d, "chunk canary corrupted %p %#tx@%#zx%s", 1041 ptr, p - ptr, sz, 1042 *p == SOME_FREEJUNK ? " (double free?)" : ""); 1043 } 1044 p++; 1045 } 1046 } 1047 1048 static uint32_t 1049 find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check) 1050 { 1051 uint32_t chunknum; 1052 1053 if (info->canary != (u_short)d->canary1) 1054 wrterror(d, "chunk info corrupted"); 1055 1056 /* Find the chunk number on the page */ 1057 chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift; 1058 1059 if ((uintptr_t)ptr & ((1U << (info->shift)) - 1)) 1060 wrterror(d, "modified chunk-pointer %p", ptr); 1061 if (info->bits[chunknum / MALLOC_BITS] & 1062 (1U << (chunknum % MALLOC_BITS))) 1063 wrterror(d, "chunk is already free %p", ptr); 1064 if (check && info->size > 0) { 1065 validate_canary(d, ptr, info->bits[info->offset + chunknum], 1066 info->size); 1067 } 1068 return chunknum; 1069 } 1070 1071 /* 1072 * Free a chunk, and possibly the page it's on, if the page becomes empty. 
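 *
 * The chunk's bit in the bitmap is set again and info->free is incremented;
 * a page that goes from full to non-full is linked back onto a random
 * chunk_dir list, and a page whose chunks are all free is unmapped and its
 * chunk_info recycled onto chunk_info_list.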
1073 */ 1074 static void 1075 free_bytes(struct dir_info *d, struct region_info *r, void *ptr) 1076 { 1077 struct chunk_head *mp; 1078 struct chunk_info *info; 1079 uint32_t chunknum; 1080 int listnum; 1081 1082 info = (struct chunk_info *)r->size; 1083 chunknum = find_chunknum(d, info, ptr, 0); 1084 1085 info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS); 1086 info->free++; 1087 1088 if (info->free == 1) { 1089 /* Page became non-full */ 1090 listnum = getrbyte(d) % MALLOC_CHUNK_LISTS; 1091 if (info->size != 0) 1092 mp = &d->chunk_dir[info->shift][listnum]; 1093 else 1094 mp = &d->chunk_dir[0][listnum]; 1095 1096 LIST_INSERT_HEAD(mp, info, entries); 1097 return; 1098 } 1099 1100 if (info->free != info->total) 1101 return; 1102 1103 LIST_REMOVE(info, entries); 1104 1105 if (info->size == 0 && !mopts.malloc_freeunmap) 1106 mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE); 1107 unmap(d, info->page, MALLOC_PAGESIZE, 0, 0); 1108 1109 delete(d, r); 1110 if (info->size != 0) 1111 mp = &d->chunk_info_list[info->shift]; 1112 else 1113 mp = &d->chunk_info_list[0]; 1114 LIST_INSERT_HEAD(mp, info, entries); 1115 } 1116 1117 1118 1119 static void * 1120 omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f) 1121 { 1122 void *p; 1123 size_t psz; 1124 1125 if (sz > MALLOC_MAXCHUNK) { 1126 if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { 1127 errno = ENOMEM; 1128 return NULL; 1129 } 1130 sz += mopts.malloc_guard; 1131 psz = PAGEROUND(sz); 1132 p = map(pool, NULL, psz, zero_fill); 1133 if (p == MAP_FAILED) { 1134 errno = ENOMEM; 1135 return NULL; 1136 } 1137 if (insert(pool, p, sz, f)) { 1138 unmap(pool, p, psz, 0, 0); 1139 errno = ENOMEM; 1140 return NULL; 1141 } 1142 if (mopts.malloc_guard) { 1143 if (mprotect((char *)p + psz - mopts.malloc_guard, 1144 mopts.malloc_guard, PROT_NONE)) 1145 wrterror(pool, "mprotect"); 1146 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard); 1147 } 1148 1149 if (MALLOC_MOVE_COND(sz)) { 1150 /* fill whole allocation */ 1151 if (mopts.malloc_junk == 2) 1152 memset(p, SOME_JUNK, psz - mopts.malloc_guard); 1153 /* shift towards the end */ 1154 p = MALLOC_MOVE(p, sz); 1155 /* fill zeros if needed and overwritten above */ 1156 if (zero_fill && mopts.malloc_junk == 2) 1157 memset(p, 0, sz - mopts.malloc_guard); 1158 } else { 1159 if (mopts.malloc_junk == 2) { 1160 if (zero_fill) 1161 memset((char *)p + sz - mopts.malloc_guard, 1162 SOME_JUNK, psz - sz); 1163 else 1164 memset(p, SOME_JUNK, 1165 psz - mopts.malloc_guard); 1166 } else if (mopts.chunk_canaries) 1167 fill_canary(p, sz - mopts.malloc_guard, 1168 psz - mopts.malloc_guard); 1169 } 1170 1171 } else { 1172 /* takes care of SOME_JUNK */ 1173 p = malloc_bytes(pool, sz, f); 1174 if (zero_fill && p != NULL && sz > 0) 1175 memset(p, 0, sz); 1176 } 1177 1178 return p; 1179 } 1180 1181 /* 1182 * Common function for handling recursion. Only 1183 * print the error message once, to avoid making the problem 1184 * potentially worse. 
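 *
 * Recursion is detected by d->active already being non-zero on entry
 * (each entry point does d->active++); after this runs the caller fails
 * with errno set to EDEADLK.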
1185 */ 1186 static void 1187 malloc_recurse(struct dir_info *d) 1188 { 1189 static int noprint; 1190 1191 if (noprint == 0) { 1192 noprint = 1; 1193 wrterror(d, "recursive call"); 1194 } 1195 d->active--; 1196 _MALLOC_UNLOCK(d->mutex); 1197 errno = EDEADLK; 1198 } 1199 1200 void 1201 _malloc_init(int from_rthreads) 1202 { 1203 int i, max; 1204 struct dir_info *d; 1205 1206 _MALLOC_LOCK(0); 1207 if (!from_rthreads && mopts.malloc_pool[0]) { 1208 _MALLOC_UNLOCK(0); 1209 return; 1210 } 1211 if (!mopts.malloc_canary) 1212 omalloc_init(); 1213 1214 max = from_rthreads ? mopts.malloc_mutexes : 1; 1215 if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) 1216 mprotect(&malloc_readonly, sizeof(malloc_readonly), 1217 PROT_READ | PROT_WRITE); 1218 for (i = 0; i < max; i++) { 1219 if (mopts.malloc_pool[i]) 1220 continue; 1221 omalloc_poolinit(&d); 1222 d->mutex = i; 1223 mopts.malloc_pool[i] = d; 1224 } 1225 1226 if (from_rthreads) 1227 mopts.malloc_mt = 1; 1228 else 1229 mopts.internal_funcs = 1; 1230 1231 /* 1232 * Options have been set and will never be reset. 1233 * Prevent further tampering with them. 1234 */ 1235 if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) 1236 mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ); 1237 _MALLOC_UNLOCK(0); 1238 } 1239 DEF_STRONG(_malloc_init); 1240 1241 void * 1242 malloc(size_t size) 1243 { 1244 void *r; 1245 struct dir_info *d; 1246 int saved_errno = errno; 1247 1248 d = getpool(); 1249 if (d == NULL) { 1250 _malloc_init(0); 1251 d = getpool(); 1252 } 1253 _MALLOC_LOCK(d->mutex); 1254 d->func = "malloc"; 1255 1256 if (d->active++) { 1257 malloc_recurse(d); 1258 return NULL; 1259 } 1260 r = omalloc(d, size, 0, CALLER); 1261 d->active--; 1262 _MALLOC_UNLOCK(d->mutex); 1263 if (r == NULL && mopts.malloc_xmalloc) 1264 wrterror(d, "out of memory"); 1265 if (r != NULL) 1266 errno = saved_errno; 1267 return r; 1268 } 1269 /*DEF_STRONG(malloc);*/ 1270 1271 static void 1272 validate_junk(struct dir_info *pool, void *p) 1273 { 1274 struct region_info *r; 1275 size_t byte, sz; 1276 1277 if (p == NULL) 1278 return; 1279 r = find(pool, p); 1280 if (r == NULL) 1281 wrterror(pool, "bogus pointer in validate_junk %p", p); 1282 REALSIZE(sz, r); 1283 if (sz > CHUNK_CHECK_LENGTH) 1284 sz = CHUNK_CHECK_LENGTH; 1285 for (byte = 0; byte < sz; byte++) { 1286 if (((unsigned char *)p)[byte] != SOME_FREEJUNK) 1287 wrterror(pool, "use after free %p", p); 1288 } 1289 } 1290 1291 1292 static struct region_info * 1293 findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool, 1294 char **saved_function) 1295 { 1296 struct dir_info *pool = argpool; 1297 struct region_info *r = find(pool, p); 1298 1299 STATS_INC(pool->pool_searches); 1300 if (r == NULL) { 1301 if (mopts.malloc_mt) { 1302 int i; 1303 1304 STATS_INC(pool->other_pool); 1305 for (i = 1; i < mopts.malloc_mutexes; i++) { 1306 int j = (argpool->mutex + i) & 1307 (mopts.malloc_mutexes - 1); 1308 1309 pool->active--; 1310 _MALLOC_UNLOCK(pool->mutex); 1311 pool = mopts.malloc_pool[j]; 1312 _MALLOC_LOCK(pool->mutex); 1313 pool->active++; 1314 r = find(pool, p); 1315 if (r != NULL) { 1316 *saved_function = pool->func; 1317 pool->func = argpool->func; 1318 break; 1319 } 1320 } 1321 } 1322 if (r == NULL) 1323 wrterror(argpool, "bogus pointer (double free?) 
%p", p); 1324 } 1325 *foundpool = pool; 1326 return r; 1327 } 1328 1329 static void 1330 ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz) 1331 { 1332 struct region_info *r; 1333 struct dir_info *pool; 1334 char *saved_function; 1335 size_t sz; 1336 1337 r = findpool(p, *argpool, &pool, &saved_function); 1338 1339 REALSIZE(sz, r); 1340 if (check) { 1341 if (sz <= MALLOC_MAXCHUNK) { 1342 if (mopts.chunk_canaries && sz > 0) { 1343 struct chunk_info *info = 1344 (struct chunk_info *)r->size; 1345 uint32_t chunknum = 1346 find_chunknum(pool, info, p, 0); 1347 1348 if (info->bits[info->offset + chunknum] < argsz) 1349 wrterror(pool, "recorded size %hu" 1350 " < %zu", 1351 info->bits[info->offset + chunknum], 1352 argsz); 1353 } else { 1354 if (sz < argsz) 1355 wrterror(pool, "chunk size %zu < %zu", 1356 sz, argsz); 1357 } 1358 } else if (sz - mopts.malloc_guard < argsz) { 1359 wrterror(pool, "recorded size %zu < %zu", 1360 sz - mopts.malloc_guard, argsz); 1361 } 1362 } 1363 if (sz > MALLOC_MAXCHUNK) { 1364 if (!MALLOC_MOVE_COND(sz)) { 1365 if (r->p != p) 1366 wrterror(pool, "bogus pointer %p", p); 1367 if (mopts.chunk_canaries) 1368 validate_canary(pool, p, 1369 sz - mopts.malloc_guard, 1370 PAGEROUND(sz - mopts.malloc_guard)); 1371 } else { 1372 /* shifted towards the end */ 1373 if (p != MALLOC_MOVE(r->p, sz)) 1374 wrterror(pool, "bogus moved pointer %p", p); 1375 p = r->p; 1376 } 1377 if (mopts.malloc_guard) { 1378 if (sz < mopts.malloc_guard) 1379 wrterror(pool, "guard size"); 1380 if (!mopts.malloc_freeunmap) { 1381 if (mprotect((char *)p + PAGEROUND(sz) - 1382 mopts.malloc_guard, mopts.malloc_guard, 1383 PROT_READ | PROT_WRITE)) 1384 wrterror(pool, "mprotect"); 1385 } 1386 STATS_SUB(pool->malloc_guarded, mopts.malloc_guard); 1387 } 1388 unmap(pool, p, PAGEROUND(sz), clear ? argsz : 0, 1389 mopts.malloc_junk); 1390 delete(pool, r); 1391 } else { 1392 /* Validate and optionally canary check */ 1393 struct chunk_info *info = (struct chunk_info *)r->size; 1394 find_chunknum(pool, info, p, mopts.chunk_canaries); 1395 if (!clear) { 1396 void *tmp; 1397 int i; 1398 1399 if (mopts.malloc_freecheck) { 1400 for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++) 1401 if (p == pool->delayed_chunks[i]) 1402 wrterror(pool, 1403 "double free %p", p); 1404 } 1405 if (mopts.malloc_junk && sz > 0) 1406 memset(p, SOME_FREEJUNK, sz); 1407 i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK; 1408 tmp = p; 1409 p = pool->delayed_chunks[i]; 1410 if (tmp == p) 1411 wrterror(pool, "double free %p", tmp); 1412 pool->delayed_chunks[i] = tmp; 1413 if (mopts.malloc_junk) 1414 validate_junk(pool, p); 1415 } else if (argsz > 0) 1416 memset(p, 0, argsz); 1417 if (p != NULL) { 1418 r = find(pool, p); 1419 if (r == NULL) 1420 wrterror(pool, 1421 "bogus pointer (double free?) %p", p); 1422 free_bytes(pool, r, p); 1423 } 1424 } 1425 1426 if (*argpool != pool) { 1427 pool->func = saved_function; 1428 *argpool = pool; 1429 } 1430 } 1431 1432 void 1433 free(void *ptr) 1434 { 1435 struct dir_info *d; 1436 int saved_errno = errno; 1437 1438 /* This is legal. 
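	 * free(NULL) is defined to be a no-op, so return before any pool
	 * is looked up or locked.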
*/ 1439 if (ptr == NULL) 1440 return; 1441 1442 d = getpool(); 1443 if (d == NULL) 1444 wrterror(d, "free() called before allocation"); 1445 _MALLOC_LOCK(d->mutex); 1446 d->func = "free"; 1447 if (d->active++) { 1448 malloc_recurse(d); 1449 return; 1450 } 1451 ofree(&d, ptr, 0, 0, 0); 1452 d->active--; 1453 _MALLOC_UNLOCK(d->mutex); 1454 errno = saved_errno; 1455 } 1456 /*DEF_STRONG(free);*/ 1457 1458 static void 1459 freezero_p(void *ptr, size_t sz) 1460 { 1461 explicit_bzero(ptr, sz); 1462 free(ptr); 1463 } 1464 1465 void 1466 freezero(void *ptr, size_t sz) 1467 { 1468 struct dir_info *d; 1469 int saved_errno = errno; 1470 1471 /* This is legal. */ 1472 if (ptr == NULL) 1473 return; 1474 1475 if (!mopts.internal_funcs) { 1476 freezero_p(ptr, sz); 1477 return; 1478 } 1479 1480 d = getpool(); 1481 if (d == NULL) 1482 wrterror(d, "freezero() called before allocation"); 1483 _MALLOC_LOCK(d->mutex); 1484 d->func = "freezero"; 1485 if (d->active++) { 1486 malloc_recurse(d); 1487 return; 1488 } 1489 ofree(&d, ptr, 1, 1, sz); 1490 d->active--; 1491 _MALLOC_UNLOCK(d->mutex); 1492 errno = saved_errno; 1493 } 1494 DEF_WEAK(freezero); 1495 1496 static void * 1497 orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f) 1498 { 1499 struct region_info *r; 1500 struct dir_info *pool; 1501 char *saved_function; 1502 struct chunk_info *info; 1503 size_t oldsz, goldsz, gnewsz; 1504 void *q, *ret; 1505 uint32_t chunknum; 1506 1507 if (p == NULL) 1508 return omalloc(*argpool, newsz, 0, f); 1509 1510 if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { 1511 errno = ENOMEM; 1512 return NULL; 1513 } 1514 1515 r = findpool(p, *argpool, &pool, &saved_function); 1516 1517 REALSIZE(oldsz, r); 1518 if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) { 1519 info = (struct chunk_info *)r->size; 1520 chunknum = find_chunknum(pool, info, p, 0); 1521 } 1522 1523 goldsz = oldsz; 1524 if (oldsz > MALLOC_MAXCHUNK) { 1525 if (oldsz < mopts.malloc_guard) 1526 wrterror(pool, "guard size"); 1527 oldsz -= mopts.malloc_guard; 1528 } 1529 1530 gnewsz = newsz; 1531 if (gnewsz > MALLOC_MAXCHUNK) 1532 gnewsz += mopts.malloc_guard; 1533 1534 if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && 1535 !mopts.malloc_realloc) { 1536 /* First case: from n pages sized allocation to m pages sized 1537 allocation, m > n */ 1538 size_t roldsz = PAGEROUND(goldsz); 1539 size_t rnewsz = PAGEROUND(gnewsz); 1540 1541 if (rnewsz > roldsz) { 1542 /* try to extend existing region */ 1543 if (!mopts.malloc_guard) { 1544 void *hint = (char *)r->p + roldsz; 1545 size_t needed = rnewsz - roldsz; 1546 1547 STATS_INC(pool->cheap_realloc_tries); 1548 q = map(pool, hint, needed, 0); 1549 if (q == hint) 1550 goto gotit; 1551 zapcacheregion(pool, hint, needed); 1552 q = MQUERY(hint, needed); 1553 if (q == hint) 1554 q = MMAPA(hint, needed); 1555 else 1556 q = MAP_FAILED; 1557 if (q == hint) { 1558 gotit: 1559 STATS_ADD(pool->malloc_used, needed); 1560 if (mopts.malloc_junk == 2) 1561 memset(q, SOME_JUNK, needed); 1562 r->size = gnewsz; 1563 if (r->p != p) { 1564 /* old pointer is moved */ 1565 memmove(r->p, p, oldsz); 1566 p = r->p; 1567 } 1568 if (mopts.chunk_canaries) 1569 fill_canary(p, newsz, 1570 PAGEROUND(newsz)); 1571 STATS_SETF(r, f); 1572 STATS_INC(pool->cheap_reallocs); 1573 ret = p; 1574 goto done; 1575 } else if (q != MAP_FAILED) { 1576 if (munmap(q, needed)) 1577 wrterror(pool, "munmap %p", q); 1578 } 1579 } 1580 } else if (rnewsz < roldsz) { 1581 /* shrink number of pages */ 1582 if (mopts.malloc_guard) { 1583 if 
(mprotect((char *)r->p + roldsz - 1584 mopts.malloc_guard, mopts.malloc_guard, 1585 PROT_READ | PROT_WRITE)) 1586 wrterror(pool, "mprotect"); 1587 if (mprotect((char *)r->p + rnewsz - 1588 mopts.malloc_guard, mopts.malloc_guard, 1589 PROT_NONE)) 1590 wrterror(pool, "mprotect"); 1591 } 1592 unmap(pool, (char *)r->p + rnewsz, roldsz - rnewsz, 0, 1593 mopts.malloc_junk); 1594 r->size = gnewsz; 1595 if (MALLOC_MOVE_COND(gnewsz)) { 1596 void *pp = MALLOC_MOVE(r->p, gnewsz); 1597 memmove(pp, p, newsz); 1598 p = pp; 1599 } else if (mopts.chunk_canaries) 1600 fill_canary(p, newsz, PAGEROUND(newsz)); 1601 STATS_SETF(r, f); 1602 ret = p; 1603 goto done; 1604 } else { 1605 /* number of pages remains the same */ 1606 void *pp = r->p; 1607 1608 r->size = gnewsz; 1609 if (MALLOC_MOVE_COND(gnewsz)) 1610 pp = MALLOC_MOVE(r->p, gnewsz); 1611 if (p != pp) { 1612 memmove(pp, p, oldsz < newsz ? oldsz : newsz); 1613 p = pp; 1614 } 1615 if (p == r->p) { 1616 if (newsz > oldsz && mopts.malloc_junk == 2) 1617 memset((char *)p + newsz, SOME_JUNK, 1618 rnewsz - mopts.malloc_guard - 1619 newsz); 1620 if (mopts.chunk_canaries) 1621 fill_canary(p, newsz, PAGEROUND(newsz)); 1622 } 1623 STATS_SETF(r, f); 1624 ret = p; 1625 goto done; 1626 } 1627 } 1628 if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 && 1629 newsz <= MALLOC_MAXCHUNK && newsz > 0 && 1630 1 << find_chunksize(newsz) == oldsz && !mopts.malloc_realloc) { 1631 /* do not reallocate if new size fits good in existing chunk */ 1632 if (mopts.malloc_junk == 2) 1633 memset((char *)p + newsz, SOME_JUNK, oldsz - newsz); 1634 if (mopts.chunk_canaries) { 1635 info->bits[info->offset + chunknum] = newsz; 1636 fill_canary(p, newsz, info->size); 1637 } 1638 STATS_SETF(r, f); 1639 ret = p; 1640 } else if (newsz != oldsz || mopts.malloc_realloc) { 1641 /* create new allocation */ 1642 q = omalloc(pool, newsz, 0, f); 1643 if (q == NULL) { 1644 ret = NULL; 1645 goto done; 1646 } 1647 if (newsz != 0 && oldsz != 0) 1648 memcpy(q, p, oldsz < newsz ? 
oldsz : newsz); 1649 ofree(&pool, p, 0, 0, 0); 1650 ret = q; 1651 } else { 1652 /* oldsz == newsz */ 1653 if (newsz != 0) 1654 wrterror(pool, "realloc internal inconsistency"); 1655 STATS_SETF(r, f); 1656 ret = p; 1657 } 1658 done: 1659 if (*argpool != pool) { 1660 pool->func = saved_function; 1661 *argpool = pool; 1662 } 1663 return ret; 1664 } 1665 1666 void * 1667 realloc(void *ptr, size_t size) 1668 { 1669 struct dir_info *d; 1670 void *r; 1671 int saved_errno = errno; 1672 1673 d = getpool(); 1674 if (d == NULL) { 1675 _malloc_init(0); 1676 d = getpool(); 1677 } 1678 _MALLOC_LOCK(d->mutex); 1679 d->func = "realloc"; 1680 if (d->active++) { 1681 malloc_recurse(d); 1682 return NULL; 1683 } 1684 r = orealloc(&d, ptr, size, CALLER); 1685 1686 d->active--; 1687 _MALLOC_UNLOCK(d->mutex); 1688 if (r == NULL && mopts.malloc_xmalloc) 1689 wrterror(d, "out of memory"); 1690 if (r != NULL) 1691 errno = saved_errno; 1692 return r; 1693 } 1694 /*DEF_STRONG(realloc);*/ 1695 1696 1697 /* 1698 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX 1699 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW 1700 */ 1701 #define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4)) 1702 1703 void * 1704 calloc(size_t nmemb, size_t size) 1705 { 1706 struct dir_info *d; 1707 void *r; 1708 int saved_errno = errno; 1709 1710 d = getpool(); 1711 if (d == NULL) { 1712 _malloc_init(0); 1713 d = getpool(); 1714 } 1715 _MALLOC_LOCK(d->mutex); 1716 d->func = "calloc"; 1717 if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && 1718 nmemb > 0 && SIZE_MAX / nmemb < size) { 1719 _MALLOC_UNLOCK(d->mutex); 1720 if (mopts.malloc_xmalloc) 1721 wrterror(d, "out of memory"); 1722 errno = ENOMEM; 1723 return NULL; 1724 } 1725 1726 if (d->active++) { 1727 malloc_recurse(d); 1728 return NULL; 1729 } 1730 1731 size *= nmemb; 1732 r = omalloc(d, size, 1, CALLER); 1733 1734 d->active--; 1735 _MALLOC_UNLOCK(d->mutex); 1736 if (r == NULL && mopts.malloc_xmalloc) 1737 wrterror(d, "out of memory"); 1738 if (r != NULL) 1739 errno = saved_errno; 1740 return r; 1741 } 1742 /*DEF_STRONG(calloc);*/ 1743 1744 static void * 1745 orecallocarray(struct dir_info **argpool, void *p, size_t oldsize, 1746 size_t newsize, void *f) 1747 { 1748 struct region_info *r; 1749 struct dir_info *pool; 1750 char *saved_function; 1751 void *newptr; 1752 size_t sz; 1753 1754 if (p == NULL) 1755 return omalloc(*argpool, newsize, 1, f); 1756 1757 if (oldsize == newsize) 1758 return p; 1759 1760 r = findpool(p, *argpool, &pool, &saved_function); 1761 1762 REALSIZE(sz, r); 1763 if (sz <= MALLOC_MAXCHUNK) { 1764 if (mopts.chunk_canaries && sz > 0) { 1765 struct chunk_info *info = (struct chunk_info *)r->size; 1766 uint32_t chunknum = find_chunknum(pool, info, p, 0); 1767 1768 if (info->bits[info->offset + chunknum] != oldsize) 1769 wrterror(pool, "recorded old size %hu != %zu", 1770 info->bits[info->offset + chunknum], 1771 oldsize); 1772 } 1773 } else if (oldsize != sz - mopts.malloc_guard) 1774 wrterror(pool, "recorded old size %zu != %zu", 1775 sz - mopts.malloc_guard, oldsize); 1776 1777 newptr = omalloc(pool, newsize, 0, f); 1778 if (newptr == NULL) 1779 goto done; 1780 1781 if (newsize > oldsize) { 1782 memcpy(newptr, p, oldsize); 1783 memset((char *)newptr + oldsize, 0, newsize - oldsize); 1784 } else 1785 memcpy(newptr, p, newsize); 1786 1787 ofree(&pool, p, 1, 0, oldsize); 1788 1789 done: 1790 if (*argpool != pool) { 1791 pool->func = saved_function; 1792 *argpool = pool; 1793 } 1794 1795 return newptr; 1796 } 1797 1798 static void * 1799 recallocarray_p(void 
*ptr, size_t oldnmemb, size_t newnmemb, size_t size) 1800 { 1801 size_t oldsize, newsize; 1802 void *newptr; 1803 1804 if (ptr == NULL) 1805 return calloc(newnmemb, size); 1806 1807 if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && 1808 newnmemb > 0 && SIZE_MAX / newnmemb < size) { 1809 errno = ENOMEM; 1810 return NULL; 1811 } 1812 newsize = newnmemb * size; 1813 1814 if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && 1815 oldnmemb > 0 && SIZE_MAX / oldnmemb < size) { 1816 errno = EINVAL; 1817 return NULL; 1818 } 1819 oldsize = oldnmemb * size; 1820 1821 /* 1822 * Don't bother too much if we're shrinking just a bit, 1823 * we do not shrink for series of small steps, oh well. 1824 */ 1825 if (newsize <= oldsize) { 1826 size_t d = oldsize - newsize; 1827 1828 if (d < oldsize / 2 && d < MALLOC_PAGESIZE) { 1829 memset((char *)ptr + newsize, 0, d); 1830 return ptr; 1831 } 1832 } 1833 1834 newptr = malloc(newsize); 1835 if (newptr == NULL) 1836 return NULL; 1837 1838 if (newsize > oldsize) { 1839 memcpy(newptr, ptr, oldsize); 1840 memset((char *)newptr + oldsize, 0, newsize - oldsize); 1841 } else 1842 memcpy(newptr, ptr, newsize); 1843 1844 explicit_bzero(ptr, oldsize); 1845 free(ptr); 1846 1847 return newptr; 1848 } 1849 1850 void * 1851 recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size) 1852 { 1853 struct dir_info *d; 1854 size_t oldsize = 0, newsize; 1855 void *r; 1856 int saved_errno = errno; 1857 1858 if (!mopts.internal_funcs) 1859 return recallocarray_p(ptr, oldnmemb, newnmemb, size); 1860 1861 d = getpool(); 1862 if (d == NULL) { 1863 _malloc_init(0); 1864 d = getpool(); 1865 } 1866 1867 _MALLOC_LOCK(d->mutex); 1868 d->func = "recallocarray"; 1869 1870 if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && 1871 newnmemb > 0 && SIZE_MAX / newnmemb < size) { 1872 _MALLOC_UNLOCK(d->mutex); 1873 if (mopts.malloc_xmalloc) 1874 wrterror(d, "out of memory"); 1875 errno = ENOMEM; 1876 return NULL; 1877 } 1878 newsize = newnmemb * size; 1879 1880 if (ptr != NULL) { 1881 if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && 1882 oldnmemb > 0 && SIZE_MAX / oldnmemb < size) { 1883 _MALLOC_UNLOCK(d->mutex); 1884 errno = EINVAL; 1885 return NULL; 1886 } 1887 oldsize = oldnmemb * size; 1888 } 1889 1890 if (d->active++) { 1891 malloc_recurse(d); 1892 return NULL; 1893 } 1894 1895 r = orecallocarray(&d, ptr, oldsize, newsize, CALLER); 1896 1897 d->active--; 1898 _MALLOC_UNLOCK(d->mutex); 1899 if (r == NULL && mopts.malloc_xmalloc) 1900 wrterror(d, "out of memory"); 1901 if (r != NULL) 1902 errno = saved_errno; 1903 return r; 1904 } 1905 DEF_WEAK(recallocarray); 1906 1907 1908 static void * 1909 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill) 1910 { 1911 char *p, *q; 1912 1913 if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0) 1914 wrterror(d, "mapalign bad alignment"); 1915 if (sz != PAGEROUND(sz)) 1916 wrterror(d, "mapalign round"); 1917 1918 /* Allocate sz + alignment bytes of memory, which must include a 1919 * subrange of size bytes that is properly aligned. Unmap the 1920 * other bytes, and then return that subrange. 1921 */ 1922 1923 /* We need sz + alignment to fit into a size_t. 
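	 * The test below is the overflow-safe form of
	 * sz + alignment > SIZE_MAX.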
*/ 1924 if (alignment > SIZE_MAX - sz) 1925 return MAP_FAILED; 1926 1927 p = map(d, NULL, sz + alignment, zero_fill); 1928 if (p == MAP_FAILED) 1929 return MAP_FAILED; 1930 q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1)); 1931 if (q != p) { 1932 if (munmap(p, q - p)) 1933 wrterror(d, "munmap %p", p); 1934 } 1935 if (munmap(q + sz, alignment - (q - p))) 1936 wrterror(d, "munmap %p", q + sz); 1937 STATS_SUB(d->malloc_used, alignment); 1938 1939 return q; 1940 } 1941 1942 static void * 1943 omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill, 1944 void *f) 1945 { 1946 size_t psz; 1947 void *p; 1948 1949 /* If between half a page and a page, avoid MALLOC_MOVE. */ 1950 if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE) 1951 sz = MALLOC_PAGESIZE; 1952 if (alignment <= MALLOC_PAGESIZE) { 1953 /* 1954 * max(size, alignment) is enough to assure the requested 1955 * alignment, since the allocator always allocates 1956 * power-of-two blocks. 1957 */ 1958 if (sz < alignment) 1959 sz = alignment; 1960 return omalloc(pool, sz, zero_fill, f); 1961 } 1962 1963 if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { 1964 errno = ENOMEM; 1965 return NULL; 1966 } 1967 1968 sz += mopts.malloc_guard; 1969 psz = PAGEROUND(sz); 1970 1971 p = mapalign(pool, alignment, psz, zero_fill); 1972 if (p == MAP_FAILED) { 1973 errno = ENOMEM; 1974 return NULL; 1975 } 1976 1977 if (insert(pool, p, sz, f)) { 1978 unmap(pool, p, psz, 0, 0); 1979 errno = ENOMEM; 1980 return NULL; 1981 } 1982 1983 if (mopts.malloc_guard) { 1984 if (mprotect((char *)p + psz - mopts.malloc_guard, 1985 mopts.malloc_guard, PROT_NONE)) 1986 wrterror(pool, "mprotect"); 1987 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard); 1988 } 1989 1990 if (mopts.malloc_junk == 2) { 1991 if (zero_fill) 1992 memset((char *)p + sz - mopts.malloc_guard, 1993 SOME_JUNK, psz - sz); 1994 else 1995 memset(p, SOME_JUNK, psz - mopts.malloc_guard); 1996 } else if (mopts.chunk_canaries) 1997 fill_canary(p, sz - mopts.malloc_guard, 1998 psz - mopts.malloc_guard); 1999 2000 return p; 2001 } 2002 2003 int 2004 posix_memalign(void **memptr, size_t alignment, size_t size) 2005 { 2006 struct dir_info *d; 2007 int res, saved_errno = errno; 2008 void *r; 2009 2010 /* Make sure that alignment is a large enough power of 2. */ 2011 if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *)) 2012 return EINVAL; 2013 2014 d = getpool(); 2015 if (d == NULL) { 2016 _malloc_init(0); 2017 d = getpool(); 2018 } 2019 _MALLOC_LOCK(d->mutex); 2020 d->func = "posix_memalign"; 2021 if (d->active++) { 2022 malloc_recurse(d); 2023 goto err; 2024 } 2025 r = omemalign(d, alignment, size, 0, CALLER); 2026 d->active--; 2027 _MALLOC_UNLOCK(d->mutex); 2028 if (r == NULL) { 2029 if (mopts.malloc_xmalloc) 2030 wrterror(d, "out of memory"); 2031 goto err; 2032 } 2033 errno = saved_errno; 2034 *memptr = r; 2035 return 0; 2036 2037 err: 2038 res = errno; 2039 errno = saved_errno; 2040 return res; 2041 } 2042 /*DEF_STRONG(posix_memalign);*/ 2043 2044 void * 2045 aligned_alloc(size_t alignment, size_t size) 2046 { 2047 struct dir_info *d; 2048 int saved_errno = errno; 2049 void *r; 2050 2051 /* Make sure that alignment is a positive power of 2. 
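	 * A power of two has exactly one bit set, so (x - 1) & x == 0; the
	 * explicit alignment == 0 test rejects zero, for which that also
	 * holds.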
*/ 2052 if (((alignment - 1) & alignment) != 0 || alignment == 0) { 2053 errno = EINVAL; 2054 return NULL; 2055 }; 2056 /* Per spec, size should be a multiple of alignment */ 2057 if ((size & (alignment - 1)) != 0) { 2058 errno = EINVAL; 2059 return NULL; 2060 } 2061 2062 d = getpool(); 2063 if (d == NULL) { 2064 _malloc_init(0); 2065 d = getpool(); 2066 } 2067 _MALLOC_LOCK(d->mutex); 2068 d->func = "aligned_alloc"; 2069 if (d->active++) { 2070 malloc_recurse(d); 2071 return NULL; 2072 } 2073 r = omemalign(d, alignment, size, 0, CALLER); 2074 d->active--; 2075 _MALLOC_UNLOCK(d->mutex); 2076 if (r == NULL) { 2077 if (mopts.malloc_xmalloc) 2078 wrterror(d, "out of memory"); 2079 return NULL; 2080 } 2081 errno = saved_errno; 2082 return r; 2083 } 2084 /*DEF_STRONG(aligned_alloc);*/ 2085 2086 #ifdef MALLOC_STATS 2087 2088 struct malloc_leak { 2089 void *f; 2090 size_t total_size; 2091 int count; 2092 }; 2093 2094 struct leaknode { 2095 RBT_ENTRY(leaknode) entry; 2096 struct malloc_leak d; 2097 }; 2098 2099 static inline int 2100 leakcmp(const struct leaknode *e1, const struct leaknode *e2) 2101 { 2102 return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f; 2103 } 2104 2105 static RBT_HEAD(leaktree, leaknode) leakhead; 2106 RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp); 2107 RBT_GENERATE(leaktree, leaknode, entry, leakcmp); 2108 2109 static void 2110 putleakinfo(void *f, size_t sz, int cnt) 2111 { 2112 struct leaknode key, *p; 2113 static struct leaknode *page; 2114 static int used; 2115 2116 if (cnt == 0 || page == MAP_FAILED) 2117 return; 2118 2119 key.d.f = f; 2120 p = RBT_FIND(leaktree, &leakhead, &key); 2121 if (p == NULL) { 2122 if (page == NULL || 2123 used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) { 2124 page = MMAP(MALLOC_PAGESIZE); 2125 if (page == MAP_FAILED) 2126 return; 2127 used = 0; 2128 } 2129 p = &page[used++]; 2130 p->d.f = f; 2131 p->d.total_size = sz * cnt; 2132 p->d.count = cnt; 2133 RBT_INSERT(leaktree, &leakhead, p); 2134 } else { 2135 p->d.total_size += sz * cnt; 2136 p->d.count += cnt; 2137 } 2138 } 2139 2140 static struct malloc_leak *malloc_leaks; 2141 2142 static void 2143 dump_leaks(int fd) 2144 { 2145 struct leaknode *p; 2146 int i = 0; 2147 2148 dprintf(fd, "Leak report\n"); 2149 dprintf(fd, " f sum # avg\n"); 2150 /* XXX only one page of summary */ 2151 if (malloc_leaks == NULL) 2152 malloc_leaks = MMAP(MALLOC_PAGESIZE); 2153 if (malloc_leaks != MAP_FAILED) 2154 memset(malloc_leaks, 0, MALLOC_PAGESIZE); 2155 RBT_FOREACH(p, leaktree, &leakhead) { 2156 dprintf(fd, "%18p %7zu %6u %6zu\n", p->d.f, 2157 p->d.total_size, p->d.count, p->d.total_size / p->d.count); 2158 if (malloc_leaks == MAP_FAILED || 2159 i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak)) 2160 continue; 2161 malloc_leaks[i].f = p->d.f; 2162 malloc_leaks[i].total_size = p->d.total_size; 2163 malloc_leaks[i].count = p->d.count; 2164 i++; 2165 } 2166 } 2167 2168 static void 2169 dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist) 2170 { 2171 while (p != NULL) { 2172 dprintf(fd, "chunk %18p %18p %4d %d/%d\n", 2173 p->page, ((p->bits[0] & 1) ? 
NULL : f), 2174 p->size, p->free, p->total); 2175 if (!fromfreelist) { 2176 if (p->bits[0] & 1) 2177 putleakinfo(NULL, p->size, p->total - p->free); 2178 else { 2179 putleakinfo(f, p->size, 1); 2180 putleakinfo(NULL, p->size, 2181 p->total - p->free - 1); 2182 } 2183 break; 2184 } 2185 p = LIST_NEXT(p, entries); 2186 if (p != NULL) 2187 dprintf(fd, " "); 2188 } 2189 } 2190 2191 static void 2192 dump_free_chunk_info(int fd, struct dir_info *d) 2193 { 2194 int i, j, count; 2195 struct chunk_info *p; 2196 2197 dprintf(fd, "Free chunk structs:\n"); 2198 for (i = 0; i <= MALLOC_MAXSHIFT; i++) { 2199 count = 0; 2200 LIST_FOREACH(p, &d->chunk_info_list[i], entries) 2201 count++; 2202 for (j = 0; j < MALLOC_CHUNK_LISTS; j++) { 2203 p = LIST_FIRST(&d->chunk_dir[i][j]); 2204 if (p == NULL && count == 0) 2205 continue; 2206 dprintf(fd, "%2d) %3d ", i, count); 2207 if (p != NULL) 2208 dump_chunk(fd, p, NULL, 1); 2209 else 2210 dprintf(fd, "\n"); 2211 } 2212 } 2213 2214 } 2215 2216 static void 2217 dump_free_page_info(int fd, struct dir_info *d) 2218 { 2219 int i; 2220 2221 dprintf(fd, "Free pages cached: %zu\n", d->free_regions_size); 2222 for (i = 0; i < mopts.malloc_cache; i++) { 2223 if (d->free_regions[i].p != NULL) { 2224 dprintf(fd, "%2d) ", i); 2225 dprintf(fd, "free at %p: %zu\n", 2226 d->free_regions[i].p, d->free_regions[i].size); 2227 } 2228 } 2229 } 2230 2231 static void 2232 malloc_dump1(int fd, int poolno, struct dir_info *d) 2233 { 2234 size_t i, realsize; 2235 2236 dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d); 2237 if (d == NULL) 2238 return; 2239 dprintf(fd, "Region slots free %zu/%zu\n", 2240 d->regions_free, d->regions_total); 2241 dprintf(fd, "Finds %zu/%zu\n", d->finds, d->find_collisions); 2242 dprintf(fd, "Inserts %zu/%zu\n", d->inserts, d->insert_collisions); 2243 dprintf(fd, "Deletes %zu/%zu\n", d->deletes, d->delete_moves); 2244 dprintf(fd, "Cheap reallocs %zu/%zu\n", 2245 d->cheap_reallocs, d->cheap_realloc_tries); 2246 dprintf(fd, "Other pool searches %zu/%zu\n", 2247 d->other_pool, d->pool_searches); 2248 dprintf(fd, "In use %zu\n", d->malloc_used); 2249 dprintf(fd, "Guarded %zu\n", d->malloc_guarded); 2250 dump_free_chunk_info(fd, d); 2251 dump_free_page_info(fd, d); 2252 dprintf(fd, 2253 "slot) hash d type page f size [free/n]\n"); 2254 for (i = 0; i < d->regions_total; i++) { 2255 if (d->r[i].p != NULL) { 2256 size_t h = hash(d->r[i].p) & 2257 (d->regions_total - 1); 2258 dprintf(fd, "%4zx) #%4zx %zd ", 2259 i, h, h - i); 2260 REALSIZE(realsize, &d->r[i]); 2261 if (realsize > MALLOC_MAXCHUNK) { 2262 putleakinfo(d->r[i].f, realsize, 1); 2263 dprintf(fd, 2264 "pages %18p %18p %zu\n", d->r[i].p, 2265 d->r[i].f, realsize); 2266 } else 2267 dump_chunk(fd, 2268 (struct chunk_info *)d->r[i].size, 2269 d->r[i].f, 0); 2270 } 2271 } 2272 dump_leaks(fd); 2273 dprintf(fd, "\n"); 2274 } 2275 2276 void 2277 malloc_dump(int fd, int poolno, struct dir_info *pool) 2278 { 2279 int i; 2280 void *p; 2281 struct region_info *r; 2282 int saved_errno = errno; 2283 2284 if (pool == NULL) 2285 return; 2286 for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) { 2287 p = pool->delayed_chunks[i]; 2288 if (p == NULL) 2289 continue; 2290 r = find(pool, p); 2291 if (r == NULL) 2292 wrterror(pool, "bogus pointer in malloc_dump %p", p); 2293 free_bytes(pool, r, p); 2294 pool->delayed_chunks[i] = NULL; 2295 } 2296 /* XXX leak when run multiple times */ 2297 RBT_INIT(leaktree, &leakhead); 2298 malloc_dump1(fd, poolno, pool); 2299 errno = saved_errno; 2300 } 2301 
DEF_WEAK(malloc_dump); 2302 2303 void 2304 malloc_gdump(int fd) 2305 { 2306 int i; 2307 int saved_errno = errno; 2308 2309 for (i = 0; i < mopts.malloc_mutexes; i++) 2310 malloc_dump(fd, i, mopts.malloc_pool[i]); 2311 2312 errno = saved_errno; 2313 } 2314 DEF_WEAK(malloc_gdump); 2315 2316 static void 2317 malloc_exit(void) 2318 { 2319 int save_errno = errno, fd, i; 2320 2321 fd = open("malloc.out", O_RDWR|O_APPEND); 2322 if (fd != -1) { 2323 dprintf(fd, "******** Start dump %s *******\n", __progname); 2324 dprintf(fd, 2325 "MT=%d M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n", 2326 mopts.malloc_mt, mopts.malloc_mutexes, 2327 mopts.internal_funcs, mopts.malloc_freecheck, 2328 mopts.malloc_freeunmap, mopts.malloc_junk, 2329 mopts.malloc_realloc, mopts.malloc_xmalloc, 2330 mopts.chunk_canaries, mopts.malloc_cache, 2331 mopts.malloc_guard); 2332 2333 for (i = 0; i < mopts.malloc_mutexes; i++) 2334 malloc_dump(fd, i, mopts.malloc_pool[i]); 2335 dprintf(fd, "******** End dump %s *******\n", __progname); 2336 close(fd); 2337 } else 2338 dprintf(STDERR_FILENO, 2339 "malloc() warning: Couldn't dump stats\n"); 2340 errno = save_errno; 2341 } 2342 2343 #endif /* MALLOC_STATS */ 2344
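/*
 * Example: option sources are parsed in order: the vm.malloc_conf sysctl,
 * then the MALLOC_OPTIONS environment variable (skipped for set-user-ID
 * and set-group-ID programs), then the global malloc_options variable, so
 * later sources override earlier ones.  For instance,
 *
 *	$ env MALLOC_OPTIONS=CFGJ ./prog
 *
 * enables chunk canaries (C), freecheck/freeunmap (F), guard pages (G) and
 * extra junking (J) for a single run; 'S' is shorthand for "CFGJ" plus
 * disabling the page cache.
 */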