1 /* $OpenBSD: malloc.c,v 1.285 2023/06/04 06:58:33 otto Exp $ */ 2 /* 3 * Copyright (c) 2008, 2010, 2011, 2016, 2023 Otto Moerbeek <otto@drijf.net> 4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org> 5 * Copyright (c) 2008 Damien Miller <djm@openbsd.org> 6 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21 /* 22 * If we meet some day, and you think this stuff is worth it, you 23 * can buy me a beer in return. Poul-Henning Kamp 24 */ 25 26 #ifndef MALLOC_SMALL 27 #define MALLOC_STATS 28 #endif 29 30 #include <sys/types.h> 31 #include <sys/queue.h> 32 #include <sys/mman.h> 33 #include <sys/sysctl.h> 34 #include <uvm/uvmexp.h> 35 #include <errno.h> 36 #include <stdarg.h> 37 #include <stdint.h> 38 #include <stdio.h> 39 #include <stdlib.h> 40 #include <string.h> 41 #include <unistd.h> 42 43 #ifdef MALLOC_STATS 44 #include <sys/tree.h> 45 #include <sys/ktrace.h> 46 #include <dlfcn.h> 47 #endif 48 49 #include "thread_private.h" 50 #include <tib.h> 51 52 #define MALLOC_PAGESHIFT _MAX_PAGE_SHIFT 53 54 #define MALLOC_MINSHIFT 4 55 #define MALLOC_MAXSHIFT (MALLOC_PAGESHIFT - 1) 56 #define MALLOC_PAGESIZE (1UL << MALLOC_PAGESHIFT) 57 #define MALLOC_MINSIZE (1UL << MALLOC_MINSHIFT) 58 #define MALLOC_PAGEMASK (MALLOC_PAGESIZE - 1) 59 #define MASK_POINTER(p) ((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK)) 60 61 #define MALLOC_MAXCHUNK (1 << MALLOC_MAXSHIFT) 62 #define MALLOC_MAXCACHE 256 63 #define MALLOC_DELAYED_CHUNK_MASK 15 64 #ifdef MALLOC_STATS 65 #define MALLOC_INITIAL_REGIONS 512 66 #else 67 #define MALLOC_INITIAL_REGIONS (MALLOC_PAGESIZE / sizeof(struct region_info)) 68 #endif 69 #define MALLOC_DEFAULT_CACHE 64 70 #define MALLOC_CHUNK_LISTS 4 71 #define CHUNK_CHECK_LENGTH 32 72 73 #define B2SIZE(b) ((b) * MALLOC_MINSIZE) 74 #define B2ALLOC(b) ((b) == 0 ? MALLOC_MINSIZE : \ 75 (b) * MALLOC_MINSIZE) 76 #define BUCKETS (MALLOC_MAXCHUNK / MALLOC_MINSIZE) 77 78 /* 79 * We move allocations between half a page and a whole page towards the end, 80 * subject to alignment constraints. This is the extra headroom we allow. 81 * Set to zero to be the most strict. 82 */ 83 #define MALLOC_LEEWAY 0 84 #define MALLOC_MOVE_COND(sz) ((sz) - mopts.malloc_guard < \ 85 MALLOC_PAGESIZE - MALLOC_LEEWAY) 86 #define MALLOC_MOVE(p, sz) (((char *)(p)) + \ 87 ((MALLOC_PAGESIZE - MALLOC_LEEWAY - \ 88 ((sz) - mopts.malloc_guard)) & \ 89 ~(MALLOC_MINSIZE - 1))) 90 91 #define PAGEROUND(x) (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK) 92 93 /* 94 * What to use for Junk. This is the byte value we use to fill with 95 * when the 'J' option is enabled. Use SOME_JUNK right after alloc, 96 * and SOME_FREEJUNK right before free. 
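 *
 * junk_free() below writes SOME_FREEJUNK_ULL words into freed memory and
 * validate_junk() re-checks those words before the memory is handed out
 * again or evicted from a page cache, so a stray write into freed memory,
 * e.g.
 *
 *	char *p = malloc(32);
 *	free(p);
 *	p[0] = 'x';
 *
 * is typically reported as "write after free".  SOME_JUNK in fresh
 * allocations likewise helps catch reads of uninitialized memory.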
97 */ 98 #define SOME_JUNK 0xdb /* deadbeef */ 99 #define SOME_FREEJUNK 0xdf /* dead, free */ 100 #define SOME_FREEJUNK_ULL 0xdfdfdfdfdfdfdfdfULL 101 102 #define MMAP(sz,f) mmap(NULL, (sz), PROT_READ | PROT_WRITE, \ 103 MAP_ANON | MAP_PRIVATE | (f), -1, 0) 104 105 #define MMAPNONE(sz,f) mmap(NULL, (sz), PROT_NONE, \ 106 MAP_ANON | MAP_PRIVATE | (f), -1, 0) 107 108 #define MMAPA(a,sz,f) mmap((a), (sz), PROT_READ | PROT_WRITE, \ 109 MAP_ANON | MAP_PRIVATE | (f), -1, 0) 110 111 struct region_info { 112 void *p; /* page; low bits used to mark chunks */ 113 uintptr_t size; /* size for pages, or chunk_info pointer */ 114 #ifdef MALLOC_STATS 115 void *f; /* where allocated from */ 116 #endif 117 }; 118 119 LIST_HEAD(chunk_head, chunk_info); 120 121 /* 122 * Two caches, one for "small" regions, one for "big". 123 * Small cache is an array per size, big cache is one array with different 124 * sized regions 125 */ 126 #define MAX_SMALLCACHEABLE_SIZE 32 127 #define MAX_BIGCACHEABLE_SIZE 512 128 /* If the total # of pages is larger than this, evict before inserting */ 129 #define BIGCACHE_FILL(sz) (MAX_BIGCACHEABLE_SIZE * (sz) / 4) 130 131 struct smallcache { 132 void **pages; 133 ushort length; 134 ushort max; 135 }; 136 137 struct bigcache { 138 void *page; 139 size_t psize; 140 }; 141 142 struct dir_info { 143 u_int32_t canary1; 144 int active; /* status of malloc */ 145 struct region_info *r; /* region slots */ 146 size_t regions_total; /* number of region slots */ 147 size_t regions_free; /* number of free slots */ 148 size_t rbytesused; /* random bytes used */ 149 char *func; /* current function */ 150 int malloc_junk; /* junk fill? */ 151 int mmap_flag; /* extra flag for mmap */ 152 int mutex; 153 int malloc_mt; /* multi-threaded mode? */ 154 /* lists of free chunk info structs */ 155 struct chunk_head chunk_info_list[BUCKETS + 1]; 156 /* lists of chunks with free slots */ 157 struct chunk_head chunk_dir[BUCKETS + 1][MALLOC_CHUNK_LISTS]; 158 /* delayed free chunk slots */ 159 void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1]; 160 u_char rbytes[32]; /* random bytes */ 161 /* free pages cache */ 162 struct smallcache smallcache[MAX_SMALLCACHEABLE_SIZE]; 163 size_t bigcache_used; 164 size_t bigcache_size; 165 struct bigcache *bigcache; 166 void *chunk_pages; 167 size_t chunk_pages_used; 168 #ifdef MALLOC_STATS 169 size_t inserts; 170 size_t insert_collisions; 171 size_t finds; 172 size_t find_collisions; 173 size_t deletes; 174 size_t delete_moves; 175 size_t cheap_realloc_tries; 176 size_t cheap_reallocs; 177 size_t malloc_used; /* bytes allocated */ 178 size_t malloc_guarded; /* bytes used for guards */ 179 size_t pool_searches; /* searches for pool */ 180 size_t other_pool; /* searches in other pool */ 181 #define STATS_ADD(x,y) ((x) += (y)) 182 #define STATS_SUB(x,y) ((x) -= (y)) 183 #define STATS_INC(x) ((x)++) 184 #define STATS_ZERO(x) ((x) = 0) 185 #define STATS_SETF(x,y) ((x)->f = (y)) 186 #else 187 #define STATS_ADD(x,y) /* nothing */ 188 #define STATS_SUB(x,y) /* nothing */ 189 #define STATS_INC(x) /* nothing */ 190 #define STATS_ZERO(x) /* nothing */ 191 #define STATS_SETF(x,y) /* nothing */ 192 #endif /* MALLOC_STATS */ 193 u_int32_t canary2; 194 }; 195 196 static void unmap(struct dir_info *d, void *p, size_t sz, size_t clear); 197 198 /* 199 * This structure describes a page worth of chunks. 
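 *
 * bits[] starts out as a bitmap with one bit per chunk; a set bit means
 * the chunk is free.  With chunk canaries enabled the same array also
 * records each chunk's requested size, starting at index 'offset'.
 * For example, assuming a 4 KB page and the 16-byte bucket:
 * total = 4096 / 16 = 256 chunks, the bitmap occupies
 * 256 / MALLOC_BITS = 16 u_shorts, and offset = 16, so the recorded
 * sizes live in bits[16] onwards.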
200 * 201 * How many bits per u_short in the bitmap 202 */ 203 #define MALLOC_BITS (NBBY * sizeof(u_short)) 204 struct chunk_info { 205 LIST_ENTRY(chunk_info) entries; 206 void *page; /* pointer to the page */ 207 u_short canary; 208 u_short bucket; 209 u_short free; /* how many free chunks */ 210 u_short total; /* how many chunks */ 211 u_short offset; /* requested size table offset */ 212 u_short bits[1]; /* which chunks are free */ 213 }; 214 215 struct malloc_readonly { 216 /* Main bookkeeping information */ 217 struct dir_info *malloc_pool[_MALLOC_MUTEXES]; 218 u_int malloc_mutexes; /* how much in actual use? */ 219 int malloc_freecheck; /* Extensive double free check */ 220 int malloc_freeunmap; /* mprotect free pages PROT_NONE? */ 221 int def_malloc_junk; /* junk fill? */ 222 int malloc_realloc; /* always realloc? */ 223 int malloc_xmalloc; /* xmalloc behaviour? */ 224 u_int chunk_canaries; /* use canaries after chunks? */ 225 int internal_funcs; /* use better recallocarray/freezero? */ 226 u_int def_maxcache; /* free pages we cache */ 227 u_int junk_loc; /* variation in location of junk */ 228 size_t malloc_guard; /* use guard pages after allocations? */ 229 #ifdef MALLOC_STATS 230 int malloc_stats; /* dump leak report at end */ 231 int malloc_verbose; /* dump verbose statistics at end */ 232 #define DO_STATS mopts.malloc_stats 233 #else 234 #define DO_STATS 0 235 #endif 236 u_int32_t malloc_canary; /* Matched against ones in pool */ 237 }; 238 239 240 /* This object is mapped PROT_READ after initialisation to prevent tampering */ 241 static union { 242 struct malloc_readonly mopts; 243 u_char _pad[MALLOC_PAGESIZE]; 244 } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE))) 245 __attribute__((section(".openbsd.mutable"))); 246 #define mopts malloc_readonly.mopts 247 248 char *malloc_options; /* compile-time options */ 249 250 static __dead void wrterror(struct dir_info *d, char *msg, ...) 251 __attribute__((__format__ (printf, 2, 3))); 252 253 #ifdef MALLOC_STATS 254 void malloc_dump(void); 255 PROTO_NORMAL(malloc_dump); 256 static void malloc_exit(void); 257 #endif 258 #define CALLER (DO_STATS ? __builtin_return_address(0) : NULL) 259 260 /* low bits of r->p determine size: 0 means >= page size and r->size holding 261 * real size, otherwise low bits is the bucket + 1 262 */ 263 #define REALSIZE(sz, r) \ 264 (sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK, \ 265 (sz) = ((sz) == 0 ? (r)->size : B2SIZE((sz) - 1)) 266 267 static inline size_t 268 hash(void *p) 269 { 270 size_t sum; 271 uintptr_t u; 272 273 u = (uintptr_t)p >> MALLOC_PAGESHIFT; 274 sum = u; 275 sum = (sum << 7) - sum + (u >> 16); 276 #ifdef __LP64__ 277 sum = (sum << 7) - sum + (u >> 32); 278 sum = (sum << 7) - sum + (u >> 48); 279 #endif 280 return sum; 281 } 282 283 static inline struct dir_info * 284 getpool(void) 285 { 286 if (mopts.malloc_pool[1] == NULL || !mopts.malloc_pool[1]->malloc_mt) 287 return mopts.malloc_pool[1]; 288 else /* first one reserved for special pool */ 289 return mopts.malloc_pool[1 + TIB_GET()->tib_tid % 290 (mopts.malloc_mutexes - 1)]; 291 } 292 293 static __dead void 294 wrterror(struct dir_info *d, char *msg, ...) 295 { 296 int saved_errno = errno; 297 va_list ap; 298 299 dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname, 300 getpid(), (d != NULL && d->func) ? 
d->func : "unknown"); 301 va_start(ap, msg); 302 vdprintf(STDERR_FILENO, msg, ap); 303 va_end(ap); 304 dprintf(STDERR_FILENO, "\n"); 305 306 #ifdef MALLOC_STATS 307 if (DO_STATS && mopts.malloc_verbose) 308 malloc_dump(); 309 #endif 310 311 errno = saved_errno; 312 313 abort(); 314 } 315 316 static void 317 rbytes_init(struct dir_info *d) 318 { 319 arc4random_buf(d->rbytes, sizeof(d->rbytes)); 320 /* add 1 to account for using d->rbytes[0] */ 321 d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2); 322 } 323 324 static inline u_char 325 getrbyte(struct dir_info *d) 326 { 327 u_char x; 328 329 if (d->rbytesused >= sizeof(d->rbytes)) 330 rbytes_init(d); 331 x = d->rbytes[d->rbytesused++]; 332 return x; 333 } 334 335 static void 336 omalloc_parseopt(char opt) 337 { 338 switch (opt) { 339 case '+': 340 mopts.malloc_mutexes <<= 1; 341 if (mopts.malloc_mutexes > _MALLOC_MUTEXES) 342 mopts.malloc_mutexes = _MALLOC_MUTEXES; 343 break; 344 case '-': 345 mopts.malloc_mutexes >>= 1; 346 if (mopts.malloc_mutexes < 2) 347 mopts.malloc_mutexes = 2; 348 break; 349 case '>': 350 mopts.def_maxcache <<= 1; 351 if (mopts.def_maxcache > MALLOC_MAXCACHE) 352 mopts.def_maxcache = MALLOC_MAXCACHE; 353 break; 354 case '<': 355 mopts.def_maxcache >>= 1; 356 break; 357 case 'c': 358 mopts.chunk_canaries = 0; 359 break; 360 case 'C': 361 mopts.chunk_canaries = 1; 362 break; 363 #ifdef MALLOC_STATS 364 case 'd': 365 mopts.malloc_stats = 0; 366 break; 367 case 'D': 368 mopts.malloc_stats = 1; 369 break; 370 #endif /* MALLOC_STATS */ 371 case 'f': 372 mopts.malloc_freecheck = 0; 373 mopts.malloc_freeunmap = 0; 374 break; 375 case 'F': 376 mopts.malloc_freecheck = 1; 377 mopts.malloc_freeunmap = 1; 378 break; 379 case 'g': 380 mopts.malloc_guard = 0; 381 break; 382 case 'G': 383 mopts.malloc_guard = MALLOC_PAGESIZE; 384 break; 385 case 'j': 386 if (mopts.def_malloc_junk > 0) 387 mopts.def_malloc_junk--; 388 break; 389 case 'J': 390 if (mopts.def_malloc_junk < 2) 391 mopts.def_malloc_junk++; 392 break; 393 case 'r': 394 mopts.malloc_realloc = 0; 395 break; 396 case 'R': 397 mopts.malloc_realloc = 1; 398 break; 399 case 'u': 400 mopts.malloc_freeunmap = 0; 401 break; 402 case 'U': 403 mopts.malloc_freeunmap = 1; 404 break; 405 #ifdef MALLOC_STATS 406 case 'v': 407 mopts.malloc_verbose = 0; 408 break; 409 case 'V': 410 mopts.malloc_verbose = 1; 411 break; 412 #endif /* MALLOC_STATS */ 413 case 'x': 414 mopts.malloc_xmalloc = 0; 415 break; 416 case 'X': 417 mopts.malloc_xmalloc = 1; 418 break; 419 default: 420 dprintf(STDERR_FILENO, "malloc() warning: " 421 "unknown char in MALLOC_OPTIONS\n"); 422 break; 423 } 424 } 425 426 static void 427 omalloc_init(void) 428 { 429 char *p, *q, b[16]; 430 int i, j; 431 const int mib[2] = { CTL_VM, VM_MALLOC_CONF }; 432 size_t sb; 433 434 /* 435 * Default options 436 */ 437 mopts.malloc_mutexes = 8; 438 mopts.def_malloc_junk = 1; 439 mopts.def_maxcache = MALLOC_DEFAULT_CACHE; 440 441 for (i = 0; i < 3; i++) { 442 switch (i) { 443 case 0: 444 sb = sizeof(b); 445 j = sysctl(mib, 2, b, &sb, NULL, 0); 446 if (j != 0) 447 continue; 448 p = b; 449 break; 450 case 1: 451 if (issetugid() == 0) 452 p = getenv("MALLOC_OPTIONS"); 453 else 454 continue; 455 break; 456 case 2: 457 p = malloc_options; 458 break; 459 default: 460 p = NULL; 461 } 462 463 for (; p != NULL && *p != '\0'; p++) { 464 switch (*p) { 465 case 'S': 466 for (q = "CFGJ"; *q != '\0'; q++) 467 omalloc_parseopt(*q); 468 mopts.def_maxcache = 0; 469 break; 470 case 's': 471 for (q = "cfgj"; *q != '\0'; q++) 472 
omalloc_parseopt(*q); 473 mopts.def_maxcache = MALLOC_DEFAULT_CACHE; 474 break; 475 default: 476 omalloc_parseopt(*p); 477 break; 478 } 479 } 480 } 481 482 #ifdef MALLOC_STATS 483 if (DO_STATS && (atexit(malloc_exit) == -1)) { 484 dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed." 485 " Will not be able to dump stats on exit\n"); 486 } 487 #endif 488 489 while ((mopts.malloc_canary = arc4random()) == 0) 490 ; 491 mopts.junk_loc = arc4random(); 492 if (mopts.chunk_canaries) 493 do { 494 mopts.chunk_canaries = arc4random(); 495 } while ((u_char)mopts.chunk_canaries == 0 || 496 (u_char)mopts.chunk_canaries == SOME_FREEJUNK); 497 } 498 499 static void 500 omalloc_poolinit(struct dir_info *d, int mmap_flag) 501 { 502 int i, j; 503 504 d->r = NULL; 505 d->rbytesused = sizeof(d->rbytes); 506 d->regions_free = d->regions_total = 0; 507 for (i = 0; i <= BUCKETS; i++) { 508 LIST_INIT(&d->chunk_info_list[i]); 509 for (j = 0; j < MALLOC_CHUNK_LISTS; j++) 510 LIST_INIT(&d->chunk_dir[i][j]); 511 } 512 d->mmap_flag = mmap_flag; 513 d->malloc_junk = mopts.def_malloc_junk; 514 d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d; 515 d->canary2 = ~d->canary1; 516 } 517 518 static int 519 omalloc_grow(struct dir_info *d) 520 { 521 size_t newtotal; 522 size_t newsize; 523 size_t mask; 524 size_t i, oldpsz; 525 struct region_info *p; 526 527 if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2) 528 return 1; 529 530 newtotal = d->regions_total == 0 ? MALLOC_INITIAL_REGIONS : 531 d->regions_total * 2; 532 newsize = PAGEROUND(newtotal * sizeof(struct region_info)); 533 mask = newtotal - 1; 534 535 /* Don't use cache here, we don't want user uaf touch this */ 536 p = MMAP(newsize, d->mmap_flag); 537 if (p == MAP_FAILED) 538 return 1; 539 540 STATS_ADD(d->malloc_used, newsize); 541 STATS_ZERO(d->inserts); 542 STATS_ZERO(d->insert_collisions); 543 for (i = 0; i < d->regions_total; i++) { 544 void *q = d->r[i].p; 545 if (q != NULL) { 546 size_t index = hash(q) & mask; 547 STATS_INC(d->inserts); 548 while (p[index].p != NULL) { 549 index = (index - 1) & mask; 550 STATS_INC(d->insert_collisions); 551 } 552 p[index] = d->r[i]; 553 } 554 } 555 556 if (d->regions_total > 0) { 557 oldpsz = PAGEROUND(d->regions_total * sizeof(struct region_info)); 558 /* clear to avoid meta info ending up in the cache */ 559 unmap(d, d->r, oldpsz, oldpsz); 560 } 561 d->regions_free += newtotal - d->regions_total; 562 d->regions_total = newtotal; 563 d->r = p; 564 return 0; 565 } 566 567 /* 568 * The hashtable uses the assumption that p is never NULL. This holds since 569 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ. 
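 *
 * Since a NULL slot therefore always terminates a probe sequence, lookup
 * is a plain open-addressing scan, roughly:
 *
 *	i = hash(p) & (regions_total - 1);
 *	while (r[i].p != NULL && MASK_POINTER(r[i].p) != p)
 *		i = (i - 1) & (regions_total - 1);
 *
 * and delete() re-packs displaced entries (Knuth's algorithm R) instead
 * of leaving tombstones, so the invariant is preserved.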
570 */ 571 static int 572 insert(struct dir_info *d, void *p, size_t sz, void *f) 573 { 574 size_t index; 575 size_t mask; 576 void *q; 577 578 if (d->regions_free * 4 < d->regions_total || d->regions_total == 0) { 579 if (omalloc_grow(d)) 580 return 1; 581 } 582 mask = d->regions_total - 1; 583 index = hash(p) & mask; 584 q = d->r[index].p; 585 STATS_INC(d->inserts); 586 while (q != NULL) { 587 index = (index - 1) & mask; 588 q = d->r[index].p; 589 STATS_INC(d->insert_collisions); 590 } 591 d->r[index].p = p; 592 d->r[index].size = sz; 593 STATS_SETF(&d->r[index], f); 594 d->regions_free--; 595 return 0; 596 } 597 598 static struct region_info * 599 find(struct dir_info *d, void *p) 600 { 601 size_t index; 602 size_t mask = d->regions_total - 1; 603 void *q, *r; 604 605 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || 606 d->canary1 != ~d->canary2) 607 wrterror(d, "internal struct corrupt"); 608 if (d->r == NULL) 609 return NULL; 610 p = MASK_POINTER(p); 611 index = hash(p) & mask; 612 r = d->r[index].p; 613 q = MASK_POINTER(r); 614 STATS_INC(d->finds); 615 while (q != p && r != NULL) { 616 index = (index - 1) & mask; 617 r = d->r[index].p; 618 q = MASK_POINTER(r); 619 STATS_INC(d->find_collisions); 620 } 621 return (q == p && r != NULL) ? &d->r[index] : NULL; 622 } 623 624 static void 625 delete(struct dir_info *d, struct region_info *ri) 626 { 627 /* algorithm R, Knuth Vol III section 6.4 */ 628 size_t mask = d->regions_total - 1; 629 size_t i, j, r; 630 631 if (d->regions_total & (d->regions_total - 1)) 632 wrterror(d, "regions_total not 2^x"); 633 d->regions_free++; 634 STATS_INC(d->deletes); 635 636 i = ri - d->r; 637 for (;;) { 638 d->r[i].p = NULL; 639 d->r[i].size = 0; 640 j = i; 641 for (;;) { 642 i = (i - 1) & mask; 643 if (d->r[i].p == NULL) 644 return; 645 r = hash(d->r[i].p) & mask; 646 if ((i <= r && r < j) || (r < j && j < i) || 647 (j < i && i <= r)) 648 continue; 649 d->r[j] = d->r[i]; 650 STATS_INC(d->delete_moves); 651 break; 652 } 653 654 } 655 } 656 657 static inline void 658 junk_free(int junk, void *p, size_t sz) 659 { 660 size_t i, step = 1; 661 uint64_t *lp = p; 662 663 if (junk == 0 || sz == 0) 664 return; 665 sz /= sizeof(uint64_t); 666 if (junk == 1) { 667 if (sz > MALLOC_PAGESIZE / sizeof(uint64_t)) 668 sz = MALLOC_PAGESIZE / sizeof(uint64_t); 669 step = sz / 4; 670 if (step == 0) 671 step = 1; 672 } 673 /* Do not always put the free junk bytes in the same spot. 674 There is modulo bias here, but we ignore that. */ 675 for (i = mopts.junk_loc % step; i < sz; i += step) 676 lp[i] = SOME_FREEJUNK_ULL; 677 } 678 679 static inline void 680 validate_junk(struct dir_info *pool, void *p, size_t sz) 681 { 682 size_t i, step = 1; 683 uint64_t *lp = p; 684 685 if (pool->malloc_junk == 0 || sz == 0) 686 return; 687 sz /= sizeof(uint64_t); 688 if (pool->malloc_junk == 1) { 689 if (sz > MALLOC_PAGESIZE / sizeof(uint64_t)) 690 sz = MALLOC_PAGESIZE / sizeof(uint64_t); 691 step = sz / 4; 692 if (step == 0) 693 step = 1; 694 } 695 /* see junk_free */ 696 for (i = mopts.junk_loc % step; i < sz; i += step) { 697 if (lp[i] != SOME_FREEJUNK_ULL) 698 wrterror(pool, "write after free %p", p); 699 } 700 } 701 702 703 /* 704 * Cache maintenance. 705 * Opposed to the regular region data structure, the sizes in the 706 * cache are in MALLOC_PAGESIZE units. 
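 *
 * For example, with 4 KB pages a freed 3-page (12 KB) region has
 * psz == 3 and is kept in smallcache[2], a 100-page region is a
 * candidate for the big cache (MAX_SMALLCACHEABLE_SIZE < psz <=
 * MAX_BIGCACHEABLE_SIZE), and anything larger is munmap()ed right away.
 * Pages sitting in a cache are either junk-filled or mprotect()ed
 * PROT_NONE, depending on whether free-unmap ('U'/'F') is enabled.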
707 */ 708 static void 709 unmap(struct dir_info *d, void *p, size_t sz, size_t clear) 710 { 711 size_t psz = sz >> MALLOC_PAGESHIFT; 712 void *r; 713 u_short i; 714 struct smallcache *cache; 715 716 if (sz != PAGEROUND(sz) || psz == 0) 717 wrterror(d, "munmap round"); 718 719 if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE && 720 psz <= MAX_BIGCACHEABLE_SIZE) { 721 u_short base = getrbyte(d); 722 u_short j; 723 724 /* don't look through all slots */ 725 for (j = 0; j < d->bigcache_size / 4; j++) { 726 i = (base + j) & (d->bigcache_size - 1); 727 if (d->bigcache_used < 728 BIGCACHE_FILL(d->bigcache_size)) { 729 if (d->bigcache[i].psize == 0) 730 break; 731 } else { 732 if (d->bigcache[i].psize != 0) 733 break; 734 } 735 } 736 /* if we didn't find a preferred slot, use random one */ 737 if (d->bigcache[i].psize != 0) { 738 size_t tmp; 739 740 r = d->bigcache[i].page; 741 d->bigcache_used -= d->bigcache[i].psize; 742 tmp = d->bigcache[i].psize << MALLOC_PAGESHIFT; 743 if (!mopts.malloc_freeunmap) 744 validate_junk(d, r, tmp); 745 if (munmap(r, tmp)) 746 wrterror(d, "munmap %p", r); 747 STATS_SUB(d->malloc_used, tmp); 748 } 749 750 if (clear > 0) 751 explicit_bzero(p, clear); 752 if (mopts.malloc_freeunmap) { 753 if (mprotect(p, sz, PROT_NONE)) 754 wrterror(d, "mprotect %p", r); 755 } else 756 junk_free(d->malloc_junk, p, sz); 757 d->bigcache[i].page = p; 758 d->bigcache[i].psize = psz; 759 d->bigcache_used += psz; 760 return; 761 } 762 if (psz > MAX_SMALLCACHEABLE_SIZE || d->smallcache[psz - 1].max == 0) { 763 if (munmap(p, sz)) 764 wrterror(d, "munmap %p", p); 765 STATS_SUB(d->malloc_used, sz); 766 return; 767 } 768 cache = &d->smallcache[psz - 1]; 769 if (cache->length == cache->max) { 770 int fresh; 771 /* use a random slot */ 772 i = getrbyte(d) & (cache->max - 1); 773 r = cache->pages[i]; 774 fresh = (uintptr_t)r & 1; 775 *(uintptr_t*)&r &= ~1ULL; 776 if (!fresh && !mopts.malloc_freeunmap) 777 validate_junk(d, r, sz); 778 if (munmap(r, sz)) 779 wrterror(d, "munmap %p", r); 780 STATS_SUB(d->malloc_used, sz); 781 cache->length--; 782 } else 783 i = cache->length; 784 785 /* fill slot */ 786 if (clear > 0) 787 explicit_bzero(p, clear); 788 if (mopts.malloc_freeunmap) 789 mprotect(p, sz, PROT_NONE); 790 else 791 junk_free(d->malloc_junk, p, sz); 792 cache->pages[i] = p; 793 cache->length++; 794 } 795 796 static void * 797 map(struct dir_info *d, size_t sz, int zero_fill) 798 { 799 size_t psz = sz >> MALLOC_PAGESHIFT; 800 u_short i; 801 void *p; 802 struct smallcache *cache; 803 804 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || 805 d->canary1 != ~d->canary2) 806 wrterror(d, "internal struct corrupt"); 807 if (sz != PAGEROUND(sz) || psz == 0) 808 wrterror(d, "map round"); 809 810 811 if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE && 812 psz <= MAX_BIGCACHEABLE_SIZE) { 813 size_t base = getrbyte(d); 814 size_t cached = d->bigcache_used; 815 ushort j; 816 817 for (j = 0; j < d->bigcache_size && cached >= psz; j++) { 818 i = (j + base) & (d->bigcache_size - 1); 819 if (d->bigcache[i].psize == psz) { 820 p = d->bigcache[i].page; 821 d->bigcache_used -= psz; 822 d->bigcache[i].page = NULL; 823 d->bigcache[i].psize = 0; 824 825 if (!mopts.malloc_freeunmap) 826 validate_junk(d, p, sz); 827 if (mopts.malloc_freeunmap) 828 mprotect(p, sz, PROT_READ | PROT_WRITE); 829 if (zero_fill) 830 memset(p, 0, sz); 831 else if (mopts.malloc_freeunmap) 832 junk_free(d->malloc_junk, p, sz); 833 return p; 834 } 835 cached -= d->bigcache[i].psize; 836 } 837 } 838 if (psz <= 
MAX_SMALLCACHEABLE_SIZE && d->smallcache[psz - 1].max > 0) { 839 cache = &d->smallcache[psz - 1]; 840 if (cache->length > 0) { 841 int fresh; 842 if (cache->length == 1) 843 p = cache->pages[--cache->length]; 844 else { 845 i = getrbyte(d) % cache->length; 846 p = cache->pages[i]; 847 cache->pages[i] = cache->pages[--cache->length]; 848 } 849 /* check if page was not junked, i.e. "fresh 850 we use the lsb of the pointer for that */ 851 fresh = (uintptr_t)p & 1UL; 852 *(uintptr_t*)&p &= ~1UL; 853 if (!fresh && !mopts.malloc_freeunmap) 854 validate_junk(d, p, sz); 855 if (mopts.malloc_freeunmap) 856 mprotect(p, sz, PROT_READ | PROT_WRITE); 857 if (zero_fill) 858 memset(p, 0, sz); 859 else if (mopts.malloc_freeunmap) 860 junk_free(d->malloc_junk, p, sz); 861 return p; 862 } 863 if (psz <= 1) { 864 p = MMAP(cache->max * sz, d->mmap_flag); 865 if (p != MAP_FAILED) { 866 STATS_ADD(d->malloc_used, cache->max * sz); 867 cache->length = cache->max - 1; 868 for (i = 0; i < cache->max - 1; i++) { 869 void *q = (char*)p + i * sz; 870 cache->pages[i] = q; 871 /* mark pointer in slot as not junked */ 872 *(uintptr_t*)&cache->pages[i] |= 1UL; 873 } 874 if (mopts.malloc_freeunmap) 875 mprotect(p, (cache->max - 1) * sz, 876 PROT_NONE); 877 p = (char*)p + (cache->max - 1) * sz; 878 /* zero fill not needed, freshly mmapped */ 879 return p; 880 } 881 } 882 883 } 884 p = MMAP(sz, d->mmap_flag); 885 if (p != MAP_FAILED) 886 STATS_ADD(d->malloc_used, sz); 887 /* zero fill not needed */ 888 return p; 889 } 890 891 static void 892 init_chunk_info(struct dir_info *d, struct chunk_info *p, u_int bucket) 893 { 894 u_int i; 895 896 p->bucket = bucket; 897 p->total = p->free = MALLOC_PAGESIZE / B2ALLOC(bucket); 898 p->offset = bucket == 0 ? 0xdead : howmany(p->total, MALLOC_BITS); 899 p->canary = (u_short)d->canary1; 900 901 /* set all valid bits in the bitmap */ 902 i = p->total - 1; 903 memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS)); 904 p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1; 905 } 906 907 static struct chunk_info * 908 alloc_chunk_info(struct dir_info *d, u_int bucket) 909 { 910 struct chunk_info *p; 911 912 if (LIST_EMPTY(&d->chunk_info_list[bucket])) { 913 const size_t chunk_pages = 64; 914 size_t size, count, i; 915 char *q; 916 917 count = MALLOC_PAGESIZE / B2ALLOC(bucket); 918 919 size = howmany(count, MALLOC_BITS); 920 size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short); 921 if (mopts.chunk_canaries) 922 size += count * sizeof(u_short); 923 size = _ALIGN(size); 924 count = MALLOC_PAGESIZE / size; 925 926 /* Don't use cache here, we don't want user uaf touch this */ 927 if (d->chunk_pages_used == chunk_pages || 928 d->chunk_pages == NULL) { 929 q = MMAP(MALLOC_PAGESIZE * chunk_pages, d->mmap_flag); 930 if (q == MAP_FAILED) 931 return NULL; 932 d->chunk_pages = q; 933 d->chunk_pages_used = 0; 934 STATS_ADD(d->malloc_used, MALLOC_PAGESIZE * 935 chunk_pages); 936 } 937 q = (char *)d->chunk_pages + d->chunk_pages_used * 938 MALLOC_PAGESIZE; 939 d->chunk_pages_used++; 940 941 for (i = 0; i < count; i++, q += size) { 942 p = (struct chunk_info *)q; 943 LIST_INSERT_HEAD(&d->chunk_info_list[bucket], p, entries); 944 } 945 } 946 p = LIST_FIRST(&d->chunk_info_list[bucket]); 947 LIST_REMOVE(p, entries); 948 if (p->total == 0) 949 init_chunk_info(d, p, bucket); 950 return p; 951 } 952 953 /* 954 * Allocate a page of chunks 955 */ 956 static struct chunk_info * 957 omalloc_make_chunks(struct dir_info *d, u_int bucket, u_int listnum) 958 { 959 struct chunk_info *bp; 960 void *pp; 
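
	/*
	 * Map a fresh page, take a chunk_info header for this bucket and
	 * register the page in the region table with the bucket number
	 * encoded in the low bits of the key ((uintptr_t)pp | (bucket + 1))
	 * and the chunk_info pointer stored in the size field; find() and
	 * REALSIZE() undo that encoding on lookup.
	 */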
961 962 /* Allocate a new bucket */ 963 pp = map(d, MALLOC_PAGESIZE, 0); 964 if (pp == MAP_FAILED) 965 return NULL; 966 967 /* memory protect the page allocated in the malloc(0) case */ 968 if (bucket == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) == -1) 969 goto err; 970 971 bp = alloc_chunk_info(d, bucket); 972 if (bp == NULL) 973 goto err; 974 bp->page = pp; 975 976 if (insert(d, (void *)((uintptr_t)pp | (bucket + 1)), (uintptr_t)bp, 977 NULL)) 978 goto err; 979 LIST_INSERT_HEAD(&d->chunk_dir[bucket][listnum], bp, entries); 980 981 if (bucket > 0 && d->malloc_junk != 0) 982 memset(pp, SOME_FREEJUNK, MALLOC_PAGESIZE); 983 984 return bp; 985 986 err: 987 unmap(d, pp, MALLOC_PAGESIZE, 0); 988 return NULL; 989 } 990 991 static inline unsigned int 992 lb(u_int x) 993 { 994 /* I need an extension just for integer-length (: */ 995 return (sizeof(int) * CHAR_BIT - 1) - __builtin_clz(x); 996 } 997 998 /* https://pvk.ca/Blog/2015/06/27/linear-log-bucketing-fast-versatile-simple/ 999 via Tony Finch */ 1000 static inline unsigned int 1001 bin_of(unsigned int size) 1002 { 1003 const unsigned int linear = 6; 1004 const unsigned int subbin = 2; 1005 1006 unsigned int mask, range, rounded, sub_index, rounded_size; 1007 unsigned int n_bits, shift; 1008 1009 n_bits = lb(size | (1U << linear)); 1010 shift = n_bits - subbin; 1011 mask = (1ULL << shift) - 1; 1012 rounded = size + mask; /* XXX: overflow. */ 1013 sub_index = rounded >> shift; 1014 range = n_bits - linear; 1015 1016 rounded_size = rounded & ~mask; 1017 return rounded_size; 1018 } 1019 1020 static inline u_short 1021 find_bucket(u_short size) 1022 { 1023 /* malloc(0) is special */ 1024 if (size == 0) 1025 return 0; 1026 if (size < MALLOC_MINSIZE) 1027 size = MALLOC_MINSIZE; 1028 if (mopts.def_maxcache != 0) 1029 size = bin_of(size); 1030 return howmany(size, MALLOC_MINSIZE); 1031 } 1032 1033 static void 1034 fill_canary(char *ptr, size_t sz, size_t allocated) 1035 { 1036 size_t check_sz = allocated - sz; 1037 1038 if (check_sz > CHUNK_CHECK_LENGTH) 1039 check_sz = CHUNK_CHECK_LENGTH; 1040 memset(ptr + sz, mopts.chunk_canaries, check_sz); 1041 } 1042 1043 /* 1044 * Allocate a chunk 1045 */ 1046 static void * 1047 malloc_bytes(struct dir_info *d, size_t size, void *f) 1048 { 1049 u_int i, r, bucket, listnum; 1050 size_t k; 1051 u_short *lp; 1052 struct chunk_info *bp; 1053 void *p; 1054 1055 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || 1056 d->canary1 != ~d->canary2) 1057 wrterror(d, "internal struct corrupt"); 1058 1059 bucket = find_bucket(size); 1060 1061 r = ((u_int)getrbyte(d) << 8) | getrbyte(d); 1062 listnum = r % MALLOC_CHUNK_LISTS; 1063 1064 /* If it's empty, make a page more of that size chunks */ 1065 if ((bp = LIST_FIRST(&d->chunk_dir[bucket][listnum])) == NULL) { 1066 bp = omalloc_make_chunks(d, bucket, listnum); 1067 if (bp == NULL) 1068 return NULL; 1069 } 1070 1071 if (bp->canary != (u_short)d->canary1) 1072 wrterror(d, "chunk info corrupted"); 1073 1074 /* bias, as bp->total is not a power of 2 */ 1075 i = (r / MALLOC_CHUNK_LISTS) % bp->total; 1076 1077 /* potentially start somewhere in a short */ 1078 lp = &bp->bits[i / MALLOC_BITS]; 1079 if (*lp) { 1080 int j = i % MALLOC_BITS; /* j must be signed */ 1081 k = ffs(*lp >> j); 1082 if (k != 0) { 1083 k += j - 1; 1084 goto found; 1085 } 1086 } 1087 /* no bit halfway, go to next full short */ 1088 i /= MALLOC_BITS; 1089 for (;;) { 1090 if (++i >= howmany(bp->total, MALLOC_BITS)) 1091 i = 0; 1092 lp = &bp->bits[i]; 1093 if (*lp) { 1094 k = ffs(*lp) - 1; 1095 
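			/* ffs() is 1-based, hence the -1 for a 0-based
			   bit index (same adjustment as k += j - 1 above) */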
break; 1096 } 1097 } 1098 found: 1099 if (i == 0 && k == 0 && DO_STATS) { 1100 struct region_info *r = find(d, bp->page); 1101 STATS_SETF(r, f); 1102 } 1103 1104 *lp ^= 1 << k; 1105 1106 /* If there are no more free, remove from free-list */ 1107 if (--bp->free == 0) 1108 LIST_REMOVE(bp, entries); 1109 1110 /* Adjust to the real offset of that chunk */ 1111 k += (lp - bp->bits) * MALLOC_BITS; 1112 1113 if (mopts.chunk_canaries && size > 0) 1114 bp->bits[bp->offset + k] = size; 1115 1116 k *= B2ALLOC(bp->bucket); 1117 1118 p = (char *)bp->page + k; 1119 if (bp->bucket > 0) { 1120 validate_junk(d, p, B2SIZE(bp->bucket)); 1121 if (mopts.chunk_canaries) 1122 fill_canary(p, size, B2SIZE(bp->bucket)); 1123 } 1124 return p; 1125 } 1126 1127 static void 1128 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated) 1129 { 1130 size_t check_sz = allocated - sz; 1131 u_char *p, *q; 1132 1133 if (check_sz > CHUNK_CHECK_LENGTH) 1134 check_sz = CHUNK_CHECK_LENGTH; 1135 p = ptr + sz; 1136 q = p + check_sz; 1137 1138 while (p < q) { 1139 if (*p != (u_char)mopts.chunk_canaries && *p != SOME_JUNK) { 1140 wrterror(d, "canary corrupted %p %#tx@%#zx%s", 1141 ptr, p - ptr, sz, 1142 *p == SOME_FREEJUNK ? " (double free?)" : ""); 1143 } 1144 p++; 1145 } 1146 } 1147 1148 static uint32_t 1149 find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check) 1150 { 1151 uint32_t chunknum; 1152 1153 if (info->canary != (u_short)d->canary1) 1154 wrterror(d, "chunk info corrupted"); 1155 1156 /* Find the chunk number on the page */ 1157 chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) / B2ALLOC(info->bucket); 1158 1159 if ((uintptr_t)ptr & (MALLOC_MINSIZE - 1)) 1160 wrterror(d, "modified chunk-pointer %p", ptr); 1161 if (info->bits[chunknum / MALLOC_BITS] & 1162 (1U << (chunknum % MALLOC_BITS))) 1163 wrterror(d, "double free %p", ptr); 1164 if (check && info->bucket > 0) { 1165 validate_canary(d, ptr, info->bits[info->offset + chunknum], 1166 B2SIZE(info->bucket)); 1167 } 1168 return chunknum; 1169 } 1170 1171 /* 1172 * Free a chunk, and possibly the page it's on, if the page becomes empty. 
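 *
 * Three cases, in the order checked below:
 *  - the page was full and now has its first free chunk: put it back on
 *    a randomly chosen chunk_dir list so it can serve allocations again;
 *  - the page is still partially in use: just flip the bit and return;
 *  - the last chunk was freed: unmap the page and recycle the chunk_info
 *    struct onto chunk_info_list.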
1173 */ 1174 static void 1175 free_bytes(struct dir_info *d, struct region_info *r, void *ptr) 1176 { 1177 struct chunk_head *mp; 1178 struct chunk_info *info; 1179 uint32_t chunknum; 1180 uint32_t listnum; 1181 1182 info = (struct chunk_info *)r->size; 1183 chunknum = find_chunknum(d, info, ptr, 0); 1184 1185 if (chunknum == 0) 1186 STATS_SETF(r, NULL); 1187 1188 info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS); 1189 info->free++; 1190 1191 if (info->free == 1) { 1192 /* Page became non-full */ 1193 listnum = getrbyte(d) % MALLOC_CHUNK_LISTS; 1194 mp = &d->chunk_dir[info->bucket][listnum]; 1195 LIST_INSERT_HEAD(mp, info, entries); 1196 return; 1197 } 1198 1199 if (info->free != info->total) 1200 return; 1201 1202 LIST_REMOVE(info, entries); 1203 1204 if (info->bucket == 0 && !mopts.malloc_freeunmap) 1205 mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE); 1206 unmap(d, info->page, MALLOC_PAGESIZE, 0); 1207 1208 delete(d, r); 1209 mp = &d->chunk_info_list[info->bucket]; 1210 LIST_INSERT_HEAD(mp, info, entries); 1211 } 1212 1213 1214 1215 static void * 1216 omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f) 1217 { 1218 void *p; 1219 size_t psz; 1220 1221 if (sz > MALLOC_MAXCHUNK) { 1222 if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { 1223 errno = ENOMEM; 1224 return NULL; 1225 } 1226 sz += mopts.malloc_guard; 1227 psz = PAGEROUND(sz); 1228 p = map(pool, psz, zero_fill); 1229 if (p == MAP_FAILED) { 1230 errno = ENOMEM; 1231 return NULL; 1232 } 1233 if (insert(pool, p, sz, f)) { 1234 unmap(pool, p, psz, 0); 1235 errno = ENOMEM; 1236 return NULL; 1237 } 1238 if (mopts.malloc_guard) { 1239 if (mprotect((char *)p + psz - mopts.malloc_guard, 1240 mopts.malloc_guard, PROT_NONE)) 1241 wrterror(pool, "mprotect"); 1242 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard); 1243 } 1244 1245 if (MALLOC_MOVE_COND(sz)) { 1246 /* fill whole allocation */ 1247 if (pool->malloc_junk == 2) 1248 memset(p, SOME_JUNK, psz - mopts.malloc_guard); 1249 /* shift towards the end */ 1250 p = MALLOC_MOVE(p, sz); 1251 /* fill zeros if needed and overwritten above */ 1252 if (zero_fill && pool->malloc_junk == 2) 1253 memset(p, 0, sz - mopts.malloc_guard); 1254 } else { 1255 if (pool->malloc_junk == 2) { 1256 if (zero_fill) 1257 memset((char *)p + sz - 1258 mopts.malloc_guard, SOME_JUNK, 1259 psz - sz); 1260 else 1261 memset(p, SOME_JUNK, 1262 psz - mopts.malloc_guard); 1263 } else if (mopts.chunk_canaries) 1264 fill_canary(p, sz - mopts.malloc_guard, 1265 psz - mopts.malloc_guard); 1266 } 1267 1268 } else { 1269 /* takes care of SOME_JUNK */ 1270 p = malloc_bytes(pool, sz, f); 1271 if (zero_fill && p != NULL && sz > 0) 1272 memset(p, 0, sz); 1273 } 1274 1275 return p; 1276 } 1277 1278 /* 1279 * Common function for handling recursion. Only 1280 * print the error message once, to avoid making the problem 1281 * potentially worse. 
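 *
 * "Recursion" here means re-entering the allocator on a pool whose
 * 'active' count is already nonzero, for instance an allocation
 * attempted from a signal handler that interrupted malloc() in the same
 * thread; wrterror() reports it as a "recursive call".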
1282 */ 1283 static void 1284 malloc_recurse(struct dir_info *d) 1285 { 1286 static int noprint; 1287 1288 if (noprint == 0) { 1289 noprint = 1; 1290 wrterror(d, "recursive call"); 1291 } 1292 d->active--; 1293 _MALLOC_UNLOCK(d->mutex); 1294 errno = EDEADLK; 1295 } 1296 1297 void 1298 _malloc_init(int from_rthreads) 1299 { 1300 u_int i, j, nmutexes; 1301 struct dir_info *d; 1302 1303 _MALLOC_LOCK(1); 1304 if (!from_rthreads && mopts.malloc_pool[1]) { 1305 _MALLOC_UNLOCK(1); 1306 return; 1307 } 1308 if (!mopts.malloc_canary) { 1309 char *p; 1310 size_t sz, d_avail; 1311 1312 omalloc_init(); 1313 /* 1314 * Allocate dir_infos with a guard page on either side. Also 1315 * randomise offset inside the page at which the dir_infos 1316 * lay (subject to alignment by 1 << MALLOC_MINSHIFT) 1317 */ 1318 sz = mopts.malloc_mutexes * sizeof(*d) + 2 * MALLOC_PAGESIZE; 1319 if ((p = MMAPNONE(sz, 0)) == MAP_FAILED) 1320 wrterror(NULL, "malloc_init mmap1 failed"); 1321 if (mprotect(p + MALLOC_PAGESIZE, mopts.malloc_mutexes * sizeof(*d), 1322 PROT_READ | PROT_WRITE)) 1323 wrterror(NULL, "malloc_init mprotect1 failed"); 1324 if (mimmutable(p, sz)) 1325 wrterror(NULL, "malloc_init mimmutable1 failed"); 1326 d_avail = (((mopts.malloc_mutexes * sizeof(*d) + MALLOC_PAGEMASK) & 1327 ~MALLOC_PAGEMASK) - (mopts.malloc_mutexes * sizeof(*d))) >> 1328 MALLOC_MINSHIFT; 1329 d = (struct dir_info *)(p + MALLOC_PAGESIZE + 1330 (arc4random_uniform(d_avail) << MALLOC_MINSHIFT)); 1331 STATS_ADD(d[1].malloc_used, sz); 1332 for (i = 0; i < mopts.malloc_mutexes; i++) 1333 mopts.malloc_pool[i] = &d[i]; 1334 mopts.internal_funcs = 1; 1335 if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) { 1336 if (mprotect(&malloc_readonly, sizeof(malloc_readonly), 1337 PROT_READ)) 1338 wrterror(NULL, "malloc_init mprotect r/o failed"); 1339 if (mimmutable(&malloc_readonly, sizeof(malloc_readonly))) 1340 wrterror(NULL, "malloc_init mimmutable r/o failed"); 1341 } 1342 } 1343 1344 nmutexes = from_rthreads ? 
mopts.malloc_mutexes : 2; 1345 for (i = 0; i < nmutexes; i++) { 1346 d = mopts.malloc_pool[i]; 1347 d->malloc_mt = from_rthreads; 1348 if (d->canary1 == ~d->canary2) 1349 continue; 1350 if (i == 0) { 1351 omalloc_poolinit(d, MAP_CONCEAL); 1352 d->malloc_junk = 2; 1353 d->bigcache_size = 0; 1354 for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) 1355 d->smallcache[j].max = 0; 1356 } else { 1357 size_t sz = 0; 1358 1359 omalloc_poolinit(d, 0); 1360 d->malloc_junk = mopts.def_malloc_junk; 1361 d->bigcache_size = mopts.def_maxcache; 1362 for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) { 1363 d->smallcache[j].max = 1364 mopts.def_maxcache >> (j / 8); 1365 sz += d->smallcache[j].max * sizeof(void *); 1366 } 1367 sz += d->bigcache_size * sizeof(struct bigcache); 1368 if (sz > 0) { 1369 void *p = MMAP(sz, 0); 1370 if (p == MAP_FAILED) 1371 wrterror(NULL, 1372 "malloc_init mmap2 failed"); 1373 if (mimmutable(p, sz)) 1374 wrterror(NULL, "malloc_init mimmutable2 failed"); 1375 for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) { 1376 d->smallcache[j].pages = p; 1377 p = (char *)p + d->smallcache[j].max * 1378 sizeof(void *); 1379 } 1380 d->bigcache = p; 1381 } 1382 } 1383 d->mutex = i; 1384 } 1385 1386 _MALLOC_UNLOCK(1); 1387 } 1388 DEF_STRONG(_malloc_init); 1389 1390 #define PROLOGUE(p, fn) \ 1391 d = (p); \ 1392 if (d == NULL) { \ 1393 _malloc_init(0); \ 1394 d = (p); \ 1395 } \ 1396 _MALLOC_LOCK(d->mutex); \ 1397 d->func = fn; \ 1398 if (d->active++) { \ 1399 malloc_recurse(d); \ 1400 return NULL; \ 1401 } \ 1402 1403 #define EPILOGUE() \ 1404 d->active--; \ 1405 _MALLOC_UNLOCK(d->mutex); \ 1406 if (r == NULL && mopts.malloc_xmalloc) \ 1407 wrterror(d, "out of memory"); \ 1408 if (r != NULL) \ 1409 errno = saved_errno; \ 1410 1411 void * 1412 malloc(size_t size) 1413 { 1414 void *r; 1415 struct dir_info *d; 1416 int saved_errno = errno; 1417 1418 PROLOGUE(getpool(), "malloc") 1419 r = omalloc(d, size, 0, CALLER); 1420 EPILOGUE() 1421 return r; 1422 } 1423 DEF_STRONG(malloc); 1424 1425 void * 1426 malloc_conceal(size_t size) 1427 { 1428 void *r; 1429 struct dir_info *d; 1430 int saved_errno = errno; 1431 1432 PROLOGUE(mopts.malloc_pool[0], "malloc_conceal") 1433 r = omalloc(d, size, 0, CALLER); 1434 EPILOGUE() 1435 return r; 1436 } 1437 DEF_WEAK(malloc_conceal); 1438 1439 static struct region_info * 1440 findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool, 1441 char **saved_function) 1442 { 1443 struct dir_info *pool = argpool; 1444 struct region_info *r = find(pool, p); 1445 1446 STATS_INC(pool->pool_searches); 1447 if (r == NULL) { 1448 u_int i, nmutexes; 1449 1450 nmutexes = mopts.malloc_pool[1]->malloc_mt ? mopts.malloc_mutexes : 2; 1451 STATS_INC(pool->other_pool); 1452 for (i = 1; i < nmutexes; i++) { 1453 u_int j = (argpool->mutex + i) & (nmutexes - 1); 1454 1455 pool->active--; 1456 _MALLOC_UNLOCK(pool->mutex); 1457 pool = mopts.malloc_pool[j]; 1458 _MALLOC_LOCK(pool->mutex); 1459 pool->active++; 1460 r = find(pool, p); 1461 if (r != NULL) { 1462 *saved_function = pool->func; 1463 pool->func = argpool->func; 1464 break; 1465 } 1466 } 1467 if (r == NULL) 1468 wrterror(argpool, "bogus pointer (double free?) 
%p", p); 1469 } 1470 *foundpool = pool; 1471 return r; 1472 } 1473 1474 static void 1475 ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz) 1476 { 1477 struct region_info *r; 1478 struct dir_info *pool; 1479 char *saved_function; 1480 size_t sz; 1481 1482 r = findpool(p, *argpool, &pool, &saved_function); 1483 1484 REALSIZE(sz, r); 1485 if (pool->mmap_flag) { 1486 clear = 1; 1487 if (!check) { 1488 argsz = sz; 1489 if (sz > MALLOC_MAXCHUNK) 1490 argsz -= mopts.malloc_guard; 1491 } 1492 } 1493 if (check) { 1494 if (sz <= MALLOC_MAXCHUNK) { 1495 if (mopts.chunk_canaries && sz > 0) { 1496 struct chunk_info *info = 1497 (struct chunk_info *)r->size; 1498 uint32_t chunknum = 1499 find_chunknum(pool, info, p, 0); 1500 1501 if (info->bits[info->offset + chunknum] < argsz) 1502 wrterror(pool, "recorded size %hu" 1503 " < %zu", 1504 info->bits[info->offset + chunknum], 1505 argsz); 1506 } else { 1507 if (sz < argsz) 1508 wrterror(pool, "chunk size %zu < %zu", 1509 sz, argsz); 1510 } 1511 } else if (sz - mopts.malloc_guard < argsz) { 1512 wrterror(pool, "recorded size %zu < %zu", 1513 sz - mopts.malloc_guard, argsz); 1514 } 1515 } 1516 if (sz > MALLOC_MAXCHUNK) { 1517 if (!MALLOC_MOVE_COND(sz)) { 1518 if (r->p != p) 1519 wrterror(pool, "bogus pointer %p", p); 1520 if (mopts.chunk_canaries) 1521 validate_canary(pool, p, 1522 sz - mopts.malloc_guard, 1523 PAGEROUND(sz - mopts.malloc_guard)); 1524 } else { 1525 /* shifted towards the end */ 1526 if (p != MALLOC_MOVE(r->p, sz)) 1527 wrterror(pool, "bogus moved pointer %p", p); 1528 p = r->p; 1529 } 1530 if (mopts.malloc_guard) { 1531 if (sz < mopts.malloc_guard) 1532 wrterror(pool, "guard size"); 1533 if (!mopts.malloc_freeunmap) { 1534 if (mprotect((char *)p + PAGEROUND(sz) - 1535 mopts.malloc_guard, mopts.malloc_guard, 1536 PROT_READ | PROT_WRITE)) 1537 wrterror(pool, "mprotect"); 1538 } 1539 STATS_SUB(pool->malloc_guarded, mopts.malloc_guard); 1540 } 1541 unmap(pool, p, PAGEROUND(sz), clear ? argsz : 0); 1542 delete(pool, r); 1543 } else { 1544 void *tmp; 1545 u_int i; 1546 1547 /* Validate and optionally canary check */ 1548 struct chunk_info *info = (struct chunk_info *)r->size; 1549 if (B2SIZE(info->bucket) != sz) 1550 wrterror(pool, "internal struct corrupt"); 1551 find_chunknum(pool, info, p, mopts.chunk_canaries); 1552 1553 if (mopts.malloc_freecheck) { 1554 for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++) { 1555 tmp = pool->delayed_chunks[i]; 1556 if (tmp == p) 1557 wrterror(pool, 1558 "double free %p", p); 1559 if (tmp != NULL) { 1560 size_t tmpsz; 1561 1562 r = find(pool, tmp); 1563 if (r == NULL) 1564 wrterror(pool, 1565 "bogus pointer (" 1566 "double free?) %p", tmp); 1567 REALSIZE(tmpsz, r); 1568 validate_junk(pool, tmp, tmpsz); 1569 } 1570 } 1571 } 1572 1573 if (clear && argsz > 0) 1574 explicit_bzero(p, argsz); 1575 junk_free(pool->malloc_junk, p, sz); 1576 1577 i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK; 1578 tmp = p; 1579 p = pool->delayed_chunks[i]; 1580 if (tmp == p) 1581 wrterror(pool, "double free %p", p); 1582 pool->delayed_chunks[i] = tmp; 1583 if (p != NULL) { 1584 r = find(pool, p); 1585 if (r == NULL) 1586 wrterror(pool, 1587 "bogus pointer (double free?) 
%p", p); 1588 if (!mopts.malloc_freecheck) { 1589 REALSIZE(sz, r); 1590 validate_junk(pool, p, sz); 1591 } 1592 free_bytes(pool, r, p); 1593 } 1594 } 1595 1596 if (*argpool != pool) { 1597 pool->func = saved_function; 1598 *argpool = pool; 1599 } 1600 } 1601 1602 void 1603 free(void *ptr) 1604 { 1605 struct dir_info *d; 1606 int saved_errno = errno; 1607 1608 /* This is legal. */ 1609 if (ptr == NULL) 1610 return; 1611 1612 d = getpool(); 1613 if (d == NULL) 1614 wrterror(d, "free() called before allocation"); 1615 _MALLOC_LOCK(d->mutex); 1616 d->func = "free"; 1617 if (d->active++) { 1618 malloc_recurse(d); 1619 return; 1620 } 1621 ofree(&d, ptr, 0, 0, 0); 1622 d->active--; 1623 _MALLOC_UNLOCK(d->mutex); 1624 errno = saved_errno; 1625 } 1626 DEF_STRONG(free); 1627 1628 static void 1629 freezero_p(void *ptr, size_t sz) 1630 { 1631 explicit_bzero(ptr, sz); 1632 free(ptr); 1633 } 1634 1635 void 1636 freezero(void *ptr, size_t sz) 1637 { 1638 struct dir_info *d; 1639 int saved_errno = errno; 1640 1641 /* This is legal. */ 1642 if (ptr == NULL) 1643 return; 1644 1645 if (!mopts.internal_funcs) { 1646 freezero_p(ptr, sz); 1647 return; 1648 } 1649 1650 d = getpool(); 1651 if (d == NULL) 1652 wrterror(d, "freezero() called before allocation"); 1653 _MALLOC_LOCK(d->mutex); 1654 d->func = "freezero"; 1655 if (d->active++) { 1656 malloc_recurse(d); 1657 return; 1658 } 1659 ofree(&d, ptr, 1, 1, sz); 1660 d->active--; 1661 _MALLOC_UNLOCK(d->mutex); 1662 errno = saved_errno; 1663 } 1664 DEF_WEAK(freezero); 1665 1666 static void * 1667 orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f) 1668 { 1669 struct region_info *r; 1670 struct dir_info *pool; 1671 char *saved_function; 1672 struct chunk_info *info; 1673 size_t oldsz, goldsz, gnewsz; 1674 void *q, *ret; 1675 uint32_t chunknum; 1676 int forced; 1677 1678 if (p == NULL) 1679 return omalloc(*argpool, newsz, 0, f); 1680 1681 if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { 1682 errno = ENOMEM; 1683 return NULL; 1684 } 1685 1686 r = findpool(p, *argpool, &pool, &saved_function); 1687 1688 REALSIZE(oldsz, r); 1689 if (oldsz <= MALLOC_MAXCHUNK) { 1690 if (DO_STATS || mopts.chunk_canaries) { 1691 info = (struct chunk_info *)r->size; 1692 chunknum = find_chunknum(pool, info, p, 0); 1693 } 1694 } 1695 1696 goldsz = oldsz; 1697 if (oldsz > MALLOC_MAXCHUNK) { 1698 if (oldsz < mopts.malloc_guard) 1699 wrterror(pool, "guard size"); 1700 oldsz -= mopts.malloc_guard; 1701 } 1702 1703 gnewsz = newsz; 1704 if (gnewsz > MALLOC_MAXCHUNK) 1705 gnewsz += mopts.malloc_guard; 1706 1707 forced = mopts.malloc_realloc || pool->mmap_flag; 1708 if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && !forced) { 1709 /* First case: from n pages sized allocation to m pages sized 1710 allocation, m > n */ 1711 size_t roldsz = PAGEROUND(goldsz); 1712 size_t rnewsz = PAGEROUND(gnewsz); 1713 1714 if (rnewsz < roldsz && rnewsz > roldsz / 2 && 1715 roldsz - rnewsz < mopts.def_maxcache * MALLOC_PAGESIZE && 1716 !mopts.malloc_guard) { 1717 1718 ret = p; 1719 goto done; 1720 } 1721 1722 if (rnewsz > roldsz) { 1723 /* try to extend existing region */ 1724 if (!mopts.malloc_guard) { 1725 void *hint = (char *)r->p + roldsz; 1726 size_t needed = rnewsz - roldsz; 1727 1728 STATS_INC(pool->cheap_realloc_tries); 1729 q = MMAPA(hint, needed, MAP_FIXED | __MAP_NOREPLACE | pool->mmap_flag); 1730 if (q == hint) { 1731 STATS_ADD(pool->malloc_used, needed); 1732 if (pool->malloc_junk == 2) 1733 memset(q, SOME_JUNK, needed); 1734 r->size = gnewsz; 1735 if (r->p != p) { 
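					/*
					 * r->p != p when the old sub-page
					 * allocation had been shifted towards
					 * the end of its page by MALLOC_MOVE();
					 * the grown region no longer qualifies,
					 * so move the data back to the start.
					 */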
1736 /* old pointer is moved */ 1737 memmove(r->p, p, oldsz); 1738 p = r->p; 1739 } 1740 if (mopts.chunk_canaries) 1741 fill_canary(p, newsz, 1742 PAGEROUND(newsz)); 1743 STATS_SETF(r, f); 1744 STATS_INC(pool->cheap_reallocs); 1745 ret = p; 1746 goto done; 1747 } 1748 } 1749 } else if (rnewsz < roldsz) { 1750 /* shrink number of pages */ 1751 if (mopts.malloc_guard) { 1752 if (mprotect((char *)r->p + rnewsz - 1753 mopts.malloc_guard, mopts.malloc_guard, 1754 PROT_NONE)) 1755 wrterror(pool, "mprotect"); 1756 } 1757 if (munmap((char *)r->p + rnewsz, roldsz - rnewsz)) 1758 wrterror(pool, "munmap %p", (char *)r->p + 1759 rnewsz); 1760 STATS_SUB(pool->malloc_used, roldsz - rnewsz); 1761 r->size = gnewsz; 1762 if (MALLOC_MOVE_COND(gnewsz)) { 1763 void *pp = MALLOC_MOVE(r->p, gnewsz); 1764 memmove(pp, p, newsz); 1765 p = pp; 1766 } else if (mopts.chunk_canaries) 1767 fill_canary(p, newsz, PAGEROUND(newsz)); 1768 STATS_SETF(r, f); 1769 ret = p; 1770 goto done; 1771 } else { 1772 /* number of pages remains the same */ 1773 void *pp = r->p; 1774 1775 r->size = gnewsz; 1776 if (MALLOC_MOVE_COND(gnewsz)) 1777 pp = MALLOC_MOVE(r->p, gnewsz); 1778 if (p != pp) { 1779 memmove(pp, p, oldsz < newsz ? oldsz : newsz); 1780 p = pp; 1781 } 1782 if (p == r->p) { 1783 if (newsz > oldsz && pool->malloc_junk == 2) 1784 memset((char *)p + newsz, SOME_JUNK, 1785 rnewsz - mopts.malloc_guard - 1786 newsz); 1787 if (mopts.chunk_canaries) 1788 fill_canary(p, newsz, PAGEROUND(newsz)); 1789 } 1790 STATS_SETF(r, f); 1791 ret = p; 1792 goto done; 1793 } 1794 } 1795 if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 && 1796 newsz <= MALLOC_MAXCHUNK && newsz > 0 && 1797 !forced && find_bucket(newsz) == find_bucket(oldsz)) { 1798 /* do not reallocate if new size fits good in existing chunk */ 1799 if (pool->malloc_junk == 2) 1800 memset((char *)p + newsz, SOME_JUNK, oldsz - newsz); 1801 if (mopts.chunk_canaries) { 1802 info->bits[info->offset + chunknum] = newsz; 1803 fill_canary(p, newsz, B2SIZE(info->bucket)); 1804 } 1805 if (DO_STATS && chunknum == 0) 1806 STATS_SETF(r, f); 1807 ret = p; 1808 } else if (newsz != oldsz || forced) { 1809 /* create new allocation */ 1810 q = omalloc(pool, newsz, 0, f); 1811 if (q == NULL) { 1812 ret = NULL; 1813 goto done; 1814 } 1815 if (newsz != 0 && oldsz != 0) 1816 memcpy(q, p, oldsz < newsz ? 
oldsz : newsz); 1817 ofree(&pool, p, 0, 0, 0); 1818 ret = q; 1819 } else { 1820 /* oldsz == newsz */ 1821 if (newsz != 0) 1822 wrterror(pool, "realloc internal inconsistency"); 1823 if (DO_STATS && chunknum == 0) 1824 STATS_SETF(r, f); 1825 ret = p; 1826 } 1827 done: 1828 if (*argpool != pool) { 1829 pool->func = saved_function; 1830 *argpool = pool; 1831 } 1832 return ret; 1833 } 1834 1835 void * 1836 realloc(void *ptr, size_t size) 1837 { 1838 struct dir_info *d; 1839 void *r; 1840 int saved_errno = errno; 1841 1842 PROLOGUE(getpool(), "realloc") 1843 r = orealloc(&d, ptr, size, CALLER); 1844 EPILOGUE() 1845 return r; 1846 } 1847 DEF_STRONG(realloc); 1848 1849 /* 1850 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX 1851 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW 1852 */ 1853 #define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4)) 1854 1855 void * 1856 calloc(size_t nmemb, size_t size) 1857 { 1858 struct dir_info *d; 1859 void *r; 1860 int saved_errno = errno; 1861 1862 PROLOGUE(getpool(), "calloc") 1863 if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && 1864 nmemb > 0 && SIZE_MAX / nmemb < size) { 1865 d->active--; 1866 _MALLOC_UNLOCK(d->mutex); 1867 if (mopts.malloc_xmalloc) 1868 wrterror(d, "out of memory"); 1869 errno = ENOMEM; 1870 return NULL; 1871 } 1872 1873 size *= nmemb; 1874 r = omalloc(d, size, 1, CALLER); 1875 EPILOGUE() 1876 return r; 1877 } 1878 DEF_STRONG(calloc); 1879 1880 void * 1881 calloc_conceal(size_t nmemb, size_t size) 1882 { 1883 struct dir_info *d; 1884 void *r; 1885 int saved_errno = errno; 1886 1887 PROLOGUE(mopts.malloc_pool[0], "calloc_conceal") 1888 if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && 1889 nmemb > 0 && SIZE_MAX / nmemb < size) { 1890 d->active--; 1891 _MALLOC_UNLOCK(d->mutex); 1892 if (mopts.malloc_xmalloc) 1893 wrterror(d, "out of memory"); 1894 errno = ENOMEM; 1895 return NULL; 1896 } 1897 1898 size *= nmemb; 1899 r = omalloc(d, size, 1, CALLER); 1900 EPILOGUE() 1901 return r; 1902 } 1903 DEF_WEAK(calloc_conceal); 1904 1905 static void * 1906 orecallocarray(struct dir_info **argpool, void *p, size_t oldsize, 1907 size_t newsize, void *f) 1908 { 1909 struct region_info *r; 1910 struct dir_info *pool; 1911 char *saved_function; 1912 void *newptr; 1913 size_t sz; 1914 1915 if (p == NULL) 1916 return omalloc(*argpool, newsize, 1, f); 1917 1918 if (oldsize == newsize) 1919 return p; 1920 1921 r = findpool(p, *argpool, &pool, &saved_function); 1922 1923 REALSIZE(sz, r); 1924 if (sz <= MALLOC_MAXCHUNK) { 1925 if (mopts.chunk_canaries && sz > 0) { 1926 struct chunk_info *info = (struct chunk_info *)r->size; 1927 uint32_t chunknum = find_chunknum(pool, info, p, 0); 1928 1929 if (info->bits[info->offset + chunknum] != oldsize) 1930 wrterror(pool, "recorded size %hu != %zu", 1931 info->bits[info->offset + chunknum], 1932 oldsize); 1933 } else { 1934 if (sz < oldsize) 1935 wrterror(pool, "chunk size %zu < %zu", 1936 sz, oldsize); 1937 } 1938 } else { 1939 if (sz - mopts.malloc_guard < oldsize) 1940 wrterror(pool, "recorded size %zu < %zu", 1941 sz - mopts.malloc_guard, oldsize); 1942 if (oldsize < (sz - mopts.malloc_guard) / 2) 1943 wrterror(pool, "recorded size %zu inconsistent with %zu", 1944 sz - mopts.malloc_guard, oldsize); 1945 } 1946 1947 newptr = omalloc(pool, newsize, 0, f); 1948 if (newptr == NULL) 1949 goto done; 1950 1951 if (newsize > oldsize) { 1952 memcpy(newptr, p, oldsize); 1953 memset((char *)newptr + oldsize, 0, newsize - oldsize); 1954 } else 1955 memcpy(newptr, p, newsize); 1956 1957 ofree(&pool, p, 
1, 0, oldsize); 1958 1959 done: 1960 if (*argpool != pool) { 1961 pool->func = saved_function; 1962 *argpool = pool; 1963 } 1964 1965 return newptr; 1966 } 1967 1968 static void * 1969 recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size) 1970 { 1971 size_t oldsize, newsize; 1972 void *newptr; 1973 1974 if (ptr == NULL) 1975 return calloc(newnmemb, size); 1976 1977 if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && 1978 newnmemb > 0 && SIZE_MAX / newnmemb < size) { 1979 errno = ENOMEM; 1980 return NULL; 1981 } 1982 newsize = newnmemb * size; 1983 1984 if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && 1985 oldnmemb > 0 && SIZE_MAX / oldnmemb < size) { 1986 errno = EINVAL; 1987 return NULL; 1988 } 1989 oldsize = oldnmemb * size; 1990 1991 /* 1992 * Don't bother too much if we're shrinking just a bit, 1993 * we do not shrink for series of small steps, oh well. 1994 */ 1995 if (newsize <= oldsize) { 1996 size_t d = oldsize - newsize; 1997 1998 if (d < oldsize / 2 && d < MALLOC_PAGESIZE) { 1999 memset((char *)ptr + newsize, 0, d); 2000 return ptr; 2001 } 2002 } 2003 2004 newptr = malloc(newsize); 2005 if (newptr == NULL) 2006 return NULL; 2007 2008 if (newsize > oldsize) { 2009 memcpy(newptr, ptr, oldsize); 2010 memset((char *)newptr + oldsize, 0, newsize - oldsize); 2011 } else 2012 memcpy(newptr, ptr, newsize); 2013 2014 explicit_bzero(ptr, oldsize); 2015 free(ptr); 2016 2017 return newptr; 2018 } 2019 2020 void * 2021 recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size) 2022 { 2023 struct dir_info *d; 2024 size_t oldsize = 0, newsize; 2025 void *r; 2026 int saved_errno = errno; 2027 2028 if (!mopts.internal_funcs) 2029 return recallocarray_p(ptr, oldnmemb, newnmemb, size); 2030 2031 PROLOGUE(getpool(), "recallocarray") 2032 2033 if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && 2034 newnmemb > 0 && SIZE_MAX / newnmemb < size) { 2035 d->active--; 2036 _MALLOC_UNLOCK(d->mutex); 2037 if (mopts.malloc_xmalloc) 2038 wrterror(d, "out of memory"); 2039 errno = ENOMEM; 2040 return NULL; 2041 } 2042 newsize = newnmemb * size; 2043 2044 if (ptr != NULL) { 2045 if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && 2046 oldnmemb > 0 && SIZE_MAX / oldnmemb < size) { 2047 d->active--; 2048 _MALLOC_UNLOCK(d->mutex); 2049 errno = EINVAL; 2050 return NULL; 2051 } 2052 oldsize = oldnmemb * size; 2053 } 2054 2055 r = orecallocarray(&d, ptr, oldsize, newsize, CALLER); 2056 EPILOGUE() 2057 return r; 2058 } 2059 DEF_WEAK(recallocarray); 2060 2061 static void * 2062 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill) 2063 { 2064 char *p, *q; 2065 2066 if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0) 2067 wrterror(d, "mapalign bad alignment"); 2068 if (sz != PAGEROUND(sz)) 2069 wrterror(d, "mapalign round"); 2070 2071 /* Allocate sz + alignment bytes of memory, which must include a 2072 * subrange of size bytes that is properly aligned. Unmap the 2073 * other bytes, and then return that subrange. 2074 */ 2075 2076 /* We need sz + alignment to fit into a size_t. 
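   For example, with 4 KB pages, mapalign(d, 64 * 1024, 16 * 1024, 0)
   maps 80 KB, rounds the returned address up to the next 64 KB boundary,
   munmap()s the leading and trailing slop and returns the aligned 16 KB
   subrange.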
*/ 2077 if (alignment > SIZE_MAX - sz) 2078 return MAP_FAILED; 2079 2080 p = map(d, sz + alignment, zero_fill); 2081 if (p == MAP_FAILED) 2082 return MAP_FAILED; 2083 q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1)); 2084 if (q != p) { 2085 if (munmap(p, q - p)) 2086 wrterror(d, "munmap %p", p); 2087 } 2088 if (munmap(q + sz, alignment - (q - p))) 2089 wrterror(d, "munmap %p", q + sz); 2090 STATS_SUB(d->malloc_used, alignment); 2091 2092 return q; 2093 } 2094 2095 static void * 2096 omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill, 2097 void *f) 2098 { 2099 size_t psz; 2100 void *p; 2101 2102 /* If between half a page and a page, avoid MALLOC_MOVE. */ 2103 if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE) 2104 sz = MALLOC_PAGESIZE; 2105 if (alignment <= MALLOC_PAGESIZE) { 2106 size_t pof2; 2107 /* 2108 * max(size, alignment) rounded up to power of 2 is enough 2109 * to assure the requested alignment. Large regions are 2110 * always page aligned. 2111 */ 2112 if (sz < alignment) 2113 sz = alignment; 2114 if (sz < MALLOC_PAGESIZE) { 2115 pof2 = MALLOC_MINSIZE; 2116 while (pof2 < sz) 2117 pof2 <<= 1; 2118 } else 2119 pof2 = sz; 2120 return omalloc(pool, pof2, zero_fill, f); 2121 } 2122 2123 if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { 2124 errno = ENOMEM; 2125 return NULL; 2126 } 2127 2128 if (sz < MALLOC_PAGESIZE) 2129 sz = MALLOC_PAGESIZE; 2130 sz += mopts.malloc_guard; 2131 psz = PAGEROUND(sz); 2132 2133 p = mapalign(pool, alignment, psz, zero_fill); 2134 if (p == MAP_FAILED) { 2135 errno = ENOMEM; 2136 return NULL; 2137 } 2138 2139 if (insert(pool, p, sz, f)) { 2140 unmap(pool, p, psz, 0); 2141 errno = ENOMEM; 2142 return NULL; 2143 } 2144 2145 if (mopts.malloc_guard) { 2146 if (mprotect((char *)p + psz - mopts.malloc_guard, 2147 mopts.malloc_guard, PROT_NONE)) 2148 wrterror(pool, "mprotect"); 2149 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard); 2150 } 2151 2152 if (pool->malloc_junk == 2) { 2153 if (zero_fill) 2154 memset((char *)p + sz - mopts.malloc_guard, 2155 SOME_JUNK, psz - sz); 2156 else 2157 memset(p, SOME_JUNK, psz - mopts.malloc_guard); 2158 } else if (mopts.chunk_canaries) 2159 fill_canary(p, sz - mopts.malloc_guard, 2160 psz - mopts.malloc_guard); 2161 2162 return p; 2163 } 2164 2165 int 2166 posix_memalign(void **memptr, size_t alignment, size_t size) 2167 { 2168 struct dir_info *d; 2169 int res, saved_errno = errno; 2170 void *r; 2171 2172 /* Make sure that alignment is a large enough power of 2. */ 2173 if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *)) 2174 return EINVAL; 2175 2176 d = getpool(); 2177 if (d == NULL) { 2178 _malloc_init(0); 2179 d = getpool(); 2180 } 2181 _MALLOC_LOCK(d->mutex); 2182 d->func = "posix_memalign"; 2183 if (d->active++) { 2184 malloc_recurse(d); 2185 goto err; 2186 } 2187 r = omemalign(d, alignment, size, 0, CALLER); 2188 d->active--; 2189 _MALLOC_UNLOCK(d->mutex); 2190 if (r == NULL) { 2191 if (mopts.malloc_xmalloc) 2192 wrterror(d, "out of memory"); 2193 goto err; 2194 } 2195 errno = saved_errno; 2196 *memptr = r; 2197 return 0; 2198 2199 err: 2200 res = errno; 2201 errno = saved_errno; 2202 return res; 2203 } 2204 DEF_STRONG(posix_memalign); 2205 2206 void * 2207 aligned_alloc(size_t alignment, size_t size) 2208 { 2209 struct dir_info *d; 2210 int saved_errno = errno; 2211 void *r; 2212 2213 /* Make sure that alignment is a positive power of 2. 
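   alignment & (alignment - 1) is zero only when at most one bit is set,
   e.g. 16 & 15 == 0 but 24 & 23 != 0; zero passes that test as well,
   hence the explicit alignment == 0 check.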
*/ 2214 if (((alignment - 1) & alignment) != 0 || alignment == 0) { 2215 errno = EINVAL; 2216 return NULL; 2217 }; 2218 /* Per spec, size should be a multiple of alignment */ 2219 if ((size & (alignment - 1)) != 0) { 2220 errno = EINVAL; 2221 return NULL; 2222 } 2223 2224 PROLOGUE(getpool(), "aligned_alloc") 2225 r = omemalign(d, alignment, size, 0, CALLER); 2226 EPILOGUE() 2227 return r; 2228 } 2229 DEF_STRONG(aligned_alloc); 2230 2231 #ifdef MALLOC_STATS 2232 2233 static void 2234 ulog(const char *format, ...) 2235 { 2236 va_list ap; 2237 static char* buf; 2238 static size_t filled; 2239 int len; 2240 2241 if (buf == NULL) 2242 buf = MMAP(KTR_USER_MAXLEN, 0); 2243 if (buf == MAP_FAILED) 2244 return; 2245 2246 va_start(ap, format); 2247 len = vsnprintf(buf + filled, KTR_USER_MAXLEN - filled, format, ap); 2248 va_end(ap); 2249 if (len < 0) 2250 return; 2251 if (len > KTR_USER_MAXLEN - filled) 2252 len = KTR_USER_MAXLEN - filled; 2253 filled += len; 2254 if (filled > 0) { 2255 if (filled == KTR_USER_MAXLEN || buf[filled - 1] == '\n') { 2256 utrace("malloc", buf, filled); 2257 filled = 0; 2258 } 2259 } 2260 } 2261 2262 struct malloc_leak { 2263 void *f; 2264 size_t total_size; 2265 int count; 2266 }; 2267 2268 struct leaknode { 2269 RBT_ENTRY(leaknode) entry; 2270 struct malloc_leak d; 2271 }; 2272 2273 static inline int 2274 leakcmp(const struct leaknode *e1, const struct leaknode *e2) 2275 { 2276 return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f; 2277 } 2278 2279 RBT_HEAD(leaktree, leaknode); 2280 RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp); 2281 RBT_GENERATE(leaktree, leaknode, entry, leakcmp); 2282 2283 static void 2284 putleakinfo(struct leaktree *leaks, void *f, size_t sz, int cnt) 2285 { 2286 struct leaknode key, *p; 2287 static struct leaknode *page; 2288 static unsigned int used; 2289 2290 if (cnt == 0 || page == MAP_FAILED) 2291 return; 2292 2293 key.d.f = f; 2294 p = RBT_FIND(leaktree, leaks, &key); 2295 if (p == NULL) { 2296 if (page == NULL || 2297 used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) { 2298 page = MMAP(MALLOC_PAGESIZE, 0); 2299 if (page == MAP_FAILED) 2300 return; 2301 used = 0; 2302 } 2303 p = &page[used++]; 2304 p->d.f = f; 2305 p->d.total_size = sz * cnt; 2306 p->d.count = cnt; 2307 RBT_INSERT(leaktree, leaks, p); 2308 } else { 2309 p->d.total_size += sz * cnt; 2310 p->d.count += cnt; 2311 } 2312 } 2313 2314 static void 2315 dump_leaks(struct leaktree *leaks) 2316 { 2317 struct leaknode *p; 2318 2319 ulog("Leak report:\n"); 2320 ulog(" f sum # avg\n"); 2321 2322 RBT_FOREACH(p, leaktree, leaks) { 2323 Dl_info info; 2324 const char *caller = p->d.f; 2325 const char *object = "."; 2326 2327 if (caller != NULL) { 2328 if (dladdr(p->d.f, &info) != 0) { 2329 caller -= (uintptr_t)info.dli_fbase; 2330 object = info.dli_fname; 2331 } 2332 } 2333 ulog("%18p %7zu %6u %6zu addr2line -e %s %p\n", 2334 p->d.f, p->d.total_size, p->d.count, 2335 p->d.total_size / p->d.count, 2336 object, caller); 2337 } 2338 } 2339 2340 static void 2341 dump_chunk(struct leaktree* leaks, struct chunk_info *p, void *f, 2342 int fromfreelist) 2343 { 2344 while (p != NULL) { 2345 if (mopts.malloc_verbose) 2346 ulog("chunk %18p %18p %4zu %d/%d\n", 2347 p->page, ((p->bits[0] & 1) ? 
NULL : f), 2348 B2SIZE(p->bucket), p->free, p->total); 2349 if (!fromfreelist) { 2350 size_t sz = B2SIZE(p->bucket); 2351 if (p->bits[0] & 1) 2352 putleakinfo(leaks, NULL, sz, p->total - 2353 p->free); 2354 else { 2355 putleakinfo(leaks, f, sz, 1); 2356 putleakinfo(leaks, NULL, sz, 2357 p->total - p->free - 1); 2358 } 2359 break; 2360 } 2361 p = LIST_NEXT(p, entries); 2362 if (mopts.malloc_verbose && p != NULL) 2363 ulog(" ->"); 2364 } 2365 } 2366 2367 static void 2368 dump_free_chunk_info(struct dir_info *d, struct leaktree *leaks) 2369 { 2370 int i, j, count; 2371 struct chunk_info *p; 2372 2373 ulog("Free chunk structs:\n"); 2374 ulog("Bkt) #CI page" 2375 " f size free/n\n"); 2376 for (i = 0; i <= BUCKETS; i++) { 2377 count = 0; 2378 LIST_FOREACH(p, &d->chunk_info_list[i], entries) 2379 count++; 2380 for (j = 0; j < MALLOC_CHUNK_LISTS; j++) { 2381 p = LIST_FIRST(&d->chunk_dir[i][j]); 2382 if (p == NULL && count == 0) 2383 continue; 2384 if (j == 0) 2385 ulog("%3d) %3d ", i, count); 2386 else 2387 ulog(" "); 2388 if (p != NULL) 2389 dump_chunk(leaks, p, NULL, 1); 2390 else 2391 ulog(".\n"); 2392 } 2393 } 2394 2395 } 2396 2397 static void 2398 dump_free_page_info(struct dir_info *d) 2399 { 2400 struct smallcache *cache; 2401 size_t i, total = 0; 2402 2403 ulog("Cached in small cache:\n"); 2404 for (i = 0; i < MAX_SMALLCACHEABLE_SIZE; i++) { 2405 cache = &d->smallcache[i]; 2406 if (cache->length != 0) 2407 ulog("%zu(%u): %u = %zu\n", i + 1, cache->max, 2408 cache->length, cache->length * (i + 1)); 2409 total += cache->length * (i + 1); 2410 } 2411 2412 ulog("Cached in big cache: %zu/%zu\n", d->bigcache_used, 2413 d->bigcache_size); 2414 for (i = 0; i < d->bigcache_size; i++) { 2415 if (d->bigcache[i].psize != 0) 2416 ulog("%zu: %zu\n", i, d->bigcache[i].psize); 2417 total += d->bigcache[i].psize; 2418 } 2419 ulog("Free pages cached: %zu\n", total); 2420 } 2421 2422 static void 2423 malloc_dump1(int poolno, struct dir_info *d, struct leaktree *leaks) 2424 { 2425 size_t i, realsize; 2426 2427 if (mopts.malloc_verbose) { 2428 ulog("Malloc dir of %s pool %d at %p\n", __progname, poolno, d); 2429 ulog("MT=%d J=%d Fl=%x\n", d->malloc_mt, d->malloc_junk, 2430 d->mmap_flag); 2431 ulog("Region slots free %zu/%zu\n", 2432 d->regions_free, d->regions_total); 2433 ulog("Finds %zu/%zu\n", d->finds, d->find_collisions); 2434 ulog("Inserts %zu/%zu\n", d->inserts, d->insert_collisions); 2435 ulog("Deletes %zu/%zu\n", d->deletes, d->delete_moves); 2436 ulog("Cheap reallocs %zu/%zu\n", 2437 d->cheap_reallocs, d->cheap_realloc_tries); 2438 ulog("Other pool searches %zu/%zu\n", 2439 d->other_pool, d->pool_searches); 2440 ulog("In use %zu\n", d->malloc_used); 2441 ulog("Guarded %zu\n", d->malloc_guarded); 2442 dump_free_chunk_info(d, leaks); 2443 dump_free_page_info(d); 2444 ulog("Hash table:\n"); 2445 ulog("slot) hash d type page " 2446 "f size [free/n]\n"); 2447 } 2448 for (i = 0; i < d->regions_total; i++) { 2449 if (d->r[i].p != NULL) { 2450 size_t h = hash(d->r[i].p) & 2451 (d->regions_total - 1); 2452 if (mopts.malloc_verbose) 2453 ulog("%4zx) #%4zx %zd ", 2454 i, h, h - i); 2455 REALSIZE(realsize, &d->r[i]); 2456 if (realsize > MALLOC_MAXCHUNK) { 2457 putleakinfo(leaks, d->r[i].f, realsize, 1); 2458 if (mopts.malloc_verbose) 2459 ulog("pages %18p %18p %zu\n", d->r[i].p, 2460 d->r[i].f, realsize); 2461 } else 2462 dump_chunk(leaks, 2463 (struct chunk_info *)d->r[i].size, 2464 d->r[i].f, 0); 2465 } 2466 } 2467 if (mopts.malloc_verbose) 2468 ulog("\n"); 2469 } 2470 2471 static void 2472 malloc_dump0(int 
poolno, struct dir_info *pool, struct leaktree *leaks) 2473 { 2474 int i; 2475 void *p; 2476 struct region_info *r; 2477 2478 if (pool == NULL || pool->r == NULL) 2479 return; 2480 for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) { 2481 p = pool->delayed_chunks[i]; 2482 if (p == NULL) 2483 continue; 2484 r = find(pool, p); 2485 if (r == NULL) 2486 wrterror(pool, "bogus pointer in malloc_dump %p", p); 2487 free_bytes(pool, r, p); 2488 pool->delayed_chunks[i] = NULL; 2489 } 2490 malloc_dump1(poolno, pool, leaks); 2491 } 2492 2493 void 2494 malloc_dump(void) 2495 { 2496 int i; 2497 int saved_errno = errno; 2498 2499 /* XXX leak when run multiple times */ 2500 struct leaktree leaks = RBT_INITIALIZER(&leaks); 2501 2502 for (i = 0; i < mopts.malloc_mutexes; i++) 2503 malloc_dump0(i, mopts.malloc_pool[i], &leaks); 2504 2505 dump_leaks(&leaks); 2506 ulog("\n"); 2507 errno = saved_errno; 2508 } 2509 DEF_WEAK(malloc_dump); 2510 2511 static void 2512 malloc_exit(void) 2513 { 2514 int save_errno = errno; 2515 2516 ulog("******** Start dump %s *******\n", __progname); 2517 ulog("M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u " 2518 "G=%zu\n", 2519 mopts.malloc_mutexes, 2520 mopts.internal_funcs, mopts.malloc_freecheck, 2521 mopts.malloc_freeunmap, mopts.def_malloc_junk, 2522 mopts.malloc_realloc, mopts.malloc_xmalloc, 2523 mopts.chunk_canaries, mopts.def_maxcache, 2524 mopts.malloc_guard); 2525 2526 malloc_dump(); 2527 ulog("******** End dump %s *******\n", __progname); 2528 errno = save_errno; 2529 } 2530 2531 #endif /* MALLOC_STATS */ 2532
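/*
 * Minimal usage sketch, not part of this file's build: it illustrates the
 * recallocarray() contract implemented above (a NULL pointer acts like
 * calloc(), grown slots come back zeroed, the old buffer is cleared before
 * being freed) plus a C11 aligned_alloc() call.  The MALLOC_USAGE_EXAMPLE
 * guard and the malloc_usage_example() name are made-up, so the sketch is
 * never compiled into libc.
 */
#ifdef MALLOC_USAGE_EXAMPLE
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
malloc_usage_example(void)
{
	size_t i;
	int *v;
	void *p;

	/* With a NULL pointer this behaves like calloc(4, sizeof(int)). */
	v = recallocarray(NULL, 0, 4, sizeof(int));
	if (v == NULL)
		err(1, "recallocarray");
	for (i = 0; i < 4; i++)
		v[i] = (int)i + 1;

	/* Grow to 8 elements: slots 4..7 are guaranteed to be zero. */
	v = recallocarray(v, 4, 8, sizeof(int));
	if (v == NULL)
		err(1, "recallocarray");
	for (i = 4; i < 8; i++)
		if (v[i] != 0)
			printf("slot %zu not zeroed\n", i);

	/*
	 * recallocarray() cleared the old buffer before freeing it during
	 * the grow above; freezero() clears this one as well.
	 */
	freezero(v, 8 * sizeof(int));

	/* aligned_alloc(): size must be a multiple of the alignment. */
	p = aligned_alloc(64, 128);
	if (p == NULL)
		err(1, "aligned_alloc");
	free(p);
	return 0;
}
#endif /* MALLOC_USAGE_EXAMPLE */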