/*	$OpenBSD: malloc.c,v 1.185 2016/03/17 17:55:33 mmcc Exp $	*/
/*
 * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net>
 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return. Poul-Henning Kamp
 */

/* #define MALLOC_STATS */

#include <sys/types.h>
#include <sys/param.h>	/* PAGE_SHIFT ALIGN */
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#ifdef MALLOC_STATS
#include <sys/tree.h>
#include <fcntl.h>
#endif

#include "thread_private.h"

#if defined(__sparc__) && !defined(__sparcv9__)
#define MALLOC_PAGESHIFT	(13U)
#elif defined(__mips64__)
#define MALLOC_PAGESHIFT	(14U)
#else
#define MALLOC_PAGESHIFT	(PAGE_SHIFT)
#endif

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNK_MASK	15
#define MALLOC_INITIAL_REGIONS	512
#define MALLOC_DEFAULT_CACHE	64
#define MALLOC_CHUNK_LISTS	4

/*
 * When the P option is active, we move allocations between half a page
 * and a whole page towards the end, subject to alignment constraints.
 * This is the extra headroom we allow. Set to zero to be the most
 * strict.
 */
#define MALLOC_LEEWAY		0

#define PAGEROUND(x)	(((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)

/*
 * What to use for Junk. This is the byte value we use to fill with
 * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
 * and SOME_FREEJUNK right before free.
 */
#define SOME_JUNK		0xd0	/* as in "Duh" :-) */
#define SOME_FREEJUNK		0xdf

#define MMAP(sz)	mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

#define MMAPA(a,sz)	mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

#define MQUERY(a, sz)	mquery((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, (off_t)0)

#define _MALLOC_LEAVE(d) do { if (__isthreaded) {	\
	(d)->active--;					\
	_MALLOC_UNLOCK(); }				\
} while (0)
#define _MALLOC_ENTER(d) do { if (__isthreaded) {	\
	_MALLOC_LOCK();					\
	(d)->active++; }				\
} while (0)

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
#ifdef MALLOC_STATS
	void *f;		/* where allocated from */
#endif
};

LIST_HEAD(chunk_head, chunk_info);

struct dir_info {
	u_int32_t canary1;
	int active;			/* status of malloc */
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_free;		/* number of free slots */
					/* lists of free chunk info structs */
	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
					/* lists of chunks with free slots */
	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
	size_t free_regions_size;	/* free pages cached */
					/* free pages cache */
	struct region_info free_regions[MALLOC_MAXCACHE];
					/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
	size_t rbytesused;		/* random bytes used */
	char *func;			/* current function */
	u_char rbytes[32];		/* random bytes */
	u_short chunk_start;
#ifdef MALLOC_STATS
	size_t inserts;
	size_t insert_collisions;
	size_t finds;
	size_t find_collisions;
	size_t deletes;
	size_t delete_moves;
	size_t cheap_realloc_tries;
	size_t cheap_reallocs;
	size_t malloc_used;		/* bytes allocated */
	size_t malloc_guarded;		/* bytes used for guards */
#define STATS_ADD(x,y)	((x) += (y))
#define STATS_SUB(x,y)	((x) -= (y))
#define STATS_INC(x)	((x)++)
#define STATS_ZERO(x)	((x) = 0)
#define STATS_SETF(x,y)	((x)->f = (y))
#else
#define STATS_ADD(x,y)	/* nothing */
#define STATS_SUB(x,y)	/* nothing */
#define STATS_INC(x)	/* nothing */
#define STATS_ZERO(x)	/* nothing */
#define STATS_SETF(x,y)	/* nothing */
#endif /* MALLOC_STATS */
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
			~MALLOC_PAGEMASK)

/*
 * This structure describes a page worth of chunks.
 *
 * How many bits per u_short in the bitmap
 */
#define MALLOC_BITS		(NBBY * sizeof(u_short))
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;			/* pointer to the page */
	u_int32_t canary;
	u_short size;			/* size of this page's chunks */
	u_short shift;			/* how far to shift for this size */
	u_short free;			/* how many free chunks */
	u_short total;			/* how many chunks */
					/* which chunks are free */
	u_short bits[1];
};

struct malloc_readonly {
	struct dir_info *malloc_pool;	/* Main bookkeeping information */
	int	malloc_freenow;		/* Free quickly - disable chunk rnd */
	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
	int	malloc_hint;		/* call madvise on free pages? */
	int	malloc_junk;		/* junk fill? */
	int	malloc_move;		/* move allocations to end of page? */
	int	malloc_realloc;		/* always realloc? */
	int	malloc_xmalloc;		/* xmalloc behaviour? */
	size_t	malloc_canaries;	/* use canaries after chunks? */
	size_t	malloc_guard;		/* use guard pages after allocations? */
	u_int	malloc_cache;		/* free pages we cache */
#ifdef MALLOC_STATS
	int	malloc_stats;		/* dump statistics at end */
#endif
	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
	uintptr_t malloc_chunk_canary;
};

/* This object is mapped PROT_READ after initialisation to prevent tampering */
static union {
	struct malloc_readonly mopts;
	u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
#define mopts	malloc_readonly.mopts
#define getpool() mopts.malloc_pool

char		*malloc_options;	/* compile-time options */

static u_char getrbyte(struct dir_info *d);

#ifdef MALLOC_STATS
void	malloc_dump(int);
PROTO_NORMAL(malloc_dump);
static void malloc_exit(void);
#define CALLER	__builtin_return_address(0)
#else
#define CALLER	NULL
#endif

/* low bits of r->p determine size: 0 means >= page size and r->size holding
 * real size, otherwise r->size is a shift count, or 1 for malloc(0)
 */
#define REALSIZE(sz, r)						\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))

static inline size_t
hash(void *p)
{
	size_t sum;
	uintptr_t u;

	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u;
	sum = (sum << 7) - sum + (u >> 16);
#ifdef __LP64__
	sum = (sum << 7) - sum + (u >> 32);
	sum = (sum << 7) - sum + (u >> 48);
#endif
	return sum;
}

static void
wrterror(struct dir_info *d, char *msg, void *p)
{
	char		*q = " error: ";
	struct iovec	iov[7];
	char		pidbuf[20];
	char		buf[20];
	int		saved_errno = errno;

	iov[0].iov_base = __progname;
	iov[0].iov_len = strlen(__progname);
	iov[1].iov_base = pidbuf;
	snprintf(pidbuf, sizeof(pidbuf), "(%d) in ", getpid());
	iov[1].iov_len = strlen(pidbuf);
	if (d != NULL) {
		iov[2].iov_base = d->func;
		iov[2].iov_len = strlen(d->func);
	} else {
		iov[2].iov_base = "unknown";
		iov[2].iov_len = 7;
	}
	iov[3].iov_base = q;
	iov[3].iov_len = strlen(q);
	iov[4].iov_base = msg;
	iov[4].iov_len = strlen(msg);
	iov[5].iov_base = buf;
	if (p == NULL)
		iov[5].iov_len = 0;
	else {
		snprintf(buf, sizeof(buf), " %p", p);
		iov[5].iov_len = strlen(buf);
	}
	iov[6].iov_base = "\n";
	iov[6].iov_len = 1;
	writev(STDERR_FILENO, iov, 7);

#ifdef MALLOC_STATS
	if (mopts.malloc_stats)
		malloc_dump(STDERR_FILENO);
#endif /* MALLOC_STATS */

	errno = saved_errno;

	abort();
}

static void
rbytes_init(struct dir_info *d)
{
	arc4random_buf(d->rbytes, sizeof(d->rbytes));
	/* add 1 to account for using d->rbytes[0] */
	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
}

static inline u_char
getrbyte(struct dir_info *d)
{
	u_char x;

	if (d->rbytesused >= sizeof(d->rbytes))
		rbytes_init(d);
	x = d->rbytes[d->rbytesused++];
	return x;
}

/*
 * Cache maintenance. We keep at most malloc_cache pages cached.
 * If the cache is becoming full, unmap pages in the cache for real,
 * and then add the region to the cache.
 * As opposed to the regular region data structure, the sizes in the
 * cache are in MALLOC_PAGESIZE units.
 */
static void
unmap(struct dir_info *d, void *p, size_t sz)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	size_t rsz, tounmap;
	struct region_info *r;
	u_int i, offset;

	if (sz != PAGEROUND(sz)) {
		wrterror(d, "munmap round", NULL);
		return;
	}

	if (psz > mopts.malloc_cache) {
		i = munmap(p, sz);
		if (i)
			wrterror(d, "munmap", p);
		STATS_SUB(d->malloc_used, sz);
		return;
	}
	tounmap = 0;
	rsz = mopts.malloc_cache - d->free_regions_size;
	if (psz > rsz)
		tounmap = psz - rsz;
	offset = getrbyte(d);
	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror(d, "munmap", r->p);
			r->p = NULL;
			if (tounmap > r->size)
				tounmap -= r->size;
			else
				tounmap = 0;
			d->free_regions_size -= r->size;
			r->size = 0;
			STATS_SUB(d->malloc_used, rsz);
		}
	}
	if (tounmap > 0)
		wrterror(d, "malloc cache underflow", NULL);
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p == NULL) {
			if (mopts.malloc_hint)
				madvise(p, sz, MADV_FREE);
			if (mopts.malloc_freeunmap)
				mprotect(p, sz, PROT_NONE);
			r->p = p;
			r->size = psz;
			d->free_regions_size += psz;
			break;
		}
	}
	if (i == mopts.malloc_cache)
		wrterror(d, "malloc free slot lost", NULL);
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror(d, "malloc cache overflow", NULL);
}

static void
zapcacheregion(struct dir_info *d, void *p, size_t len)
{
	u_int i;
	struct region_info *r;
	size_t rsz;

	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[i];
		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror(d, "munmap", r->p);
			r->p = NULL;
			d->free_regions_size -= r->size;
			r->size = 0;
			STATS_SUB(d->malloc_used, rsz);
		}
	}
}

static void *
map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	struct region_info *r, *big = NULL;
	u_int i, offset;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt", NULL);
	if (sz != PAGEROUND(sz)) {
		wrterror(d, "map round", NULL);
		return MAP_FAILED;
	}
	if (!hint && psz > d->free_regions_size) {
		_MALLOC_LEAVE(d);
		p = MMAP(sz);
		_MALLOC_ENTER(d);
		if (p != MAP_FAILED)
			STATS_ADD(d->malloc_used, sz);
		/* zero fill not needed */
		return p;
	}
	offset = getrbyte(d);
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			if (hint && r->p != hint)
				continue;
			if (r->size == psz) {
				p = r->p;
				r->p = NULL;
				r->size = 0;
				d->free_regions_size -= psz;
				if (mopts.malloc_freeunmap)
					mprotect(p, sz, PROT_READ | PROT_WRITE);
				if (mopts.malloc_hint)
					madvise(p, sz, MADV_NORMAL);
				if (zero_fill)
					memset(p, 0, sz);
				else if (mopts.malloc_junk == 2 &&
				    mopts.malloc_freeunmap)
					memset(p, SOME_FREEJUNK, sz);
				return p;
			} else if (r->size > psz)
				big = r;
		}
	}
	if (big != NULL) {
		r = big;
		p = r->p;
		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
		if (mopts.malloc_freeunmap)
			mprotect(p, sz, PROT_READ | PROT_WRITE);
		if (mopts.malloc_hint)
			madvise(p, sz, MADV_NORMAL);
		r->size -= psz;
		d->free_regions_size -= psz;
		if (zero_fill)
			memset(p, 0, sz);
		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
			memset(p, SOME_FREEJUNK, sz);
		return p;
	}
	if (hint)
		return MAP_FAILED;
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror(d, "malloc cache", NULL);
	_MALLOC_LEAVE(d);
	p = MMAP(sz);
	_MALLOC_ENTER(d);
	if (p != MAP_FAILED)
		STATS_ADD(d->malloc_used, sz);
	/* zero fill not needed */
	return p;
}

static void
omalloc_parseopt(char opt)
{
	switch (opt) {
	case '>':
		mopts.malloc_cache <<= 1;
		if (mopts.malloc_cache > MALLOC_MAXCACHE)
			mopts.malloc_cache = MALLOC_MAXCACHE;
		break;
	case '<':
		mopts.malloc_cache >>= 1;
		break;
	case 'a':
	case 'A':
		/* ignored */
		break;
	case 'c':
		mopts.malloc_canaries = 0;
		break;
	case 'C':
		mopts.malloc_canaries = sizeof(void *);
		break;
#ifdef MALLOC_STATS
	case 'd':
		mopts.malloc_stats = 0;
		break;
	case 'D':
		mopts.malloc_stats = 1;
		break;
#endif /* MALLOC_STATS */
	case 'f':
		mopts.malloc_freenow = 0;
		mopts.malloc_freeunmap = 0;
		break;
	case 'F':
		mopts.malloc_freenow = 1;
		mopts.malloc_freeunmap = 1;
		break;
	case 'g':
		mopts.malloc_guard = 0;
		break;
	case 'G':
		mopts.malloc_guard = MALLOC_PAGESIZE;
		break;
	case 'h':
		mopts.malloc_hint = 0;
		break;
	case 'H':
		mopts.malloc_hint = 1;
		break;
	case 'j':
		mopts.malloc_junk = 0;
		break;
	case 'J':
		mopts.malloc_junk = 2;
		break;
	case 'n':
	case 'N':
		break;
	case 'p':
		mopts.malloc_move = 0;
		break;
	case 'P':
		mopts.malloc_move = 1;
		break;
	case 'r':
		mopts.malloc_realloc = 0;
		break;
	case 'R':
		mopts.malloc_realloc = 1;
		break;
	case 'u':
		mopts.malloc_freeunmap = 0;
		break;
	case 'U':
		mopts.malloc_freeunmap = 1;
		break;
	case 'x':
		mopts.malloc_xmalloc = 0;
		break;
	case 'X':
		mopts.malloc_xmalloc = 1;
		break;
	default: {
		static const char q[] = "malloc() warning: "
		    "unknown char in MALLOC_OPTIONS\n";
		write(STDERR_FILENO, q, sizeof(q) - 1);
		break;
	}
	}
}

/*
 * Initialize a dir_info, which should have been cleared by caller
 */
static int
omalloc_init(struct dir_info **dp)
{
	char *p, *q, b[64];
	int i, j;
	size_t d_avail, regioninfo_size;
	struct dir_info *d;

	/*
	 * Default options
	 */
	mopts.malloc_junk = 1;
	mopts.malloc_move = 1;
	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;

	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0:
			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
			if (j <= 0)
				continue;
			b[j] = '\0';
			p = b;
			break;
		case 1:
			if (issetugid() == 0)
				p = getenv("MALLOC_OPTIONS");
			else
				continue;
			break;
		case 2:
			p = malloc_options;
			break;
		default:
			p = NULL;
		}

		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case 'S':
				for (q = "FGJP"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.malloc_cache = 0;
				break;
			case 's':
				for (q = "fgj"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
				break;
			default:
				omalloc_parseopt(*p);
				break;
			}
		}
	}

#ifdef MALLOC_STATS
	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
		static const char q[] = "malloc() warning: atexit(2) failed."
		    " Will not be able to dump stats on exit\n";
		write(STDERR_FILENO, q, sizeof(q) - 1);
	}
#endif /* MALLOC_STATS */

	while ((mopts.malloc_canary = arc4random()) == 0)
		;

	arc4random_buf(&mopts.malloc_chunk_canary,
	    sizeof(mopts.malloc_chunk_canary));

	/*
	 * Allocate dir_info with a guard page on either side. Also
	 * randomise offset inside the page at which the dir_info
	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
	 */
	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
		return -1;
	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
	mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
	    MALLOC_PAGESIZE, PROT_NONE);
	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));

	rbytes_init(d);
	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
	regioninfo_size = d->regions_total * sizeof(struct region_info);
	d->r = MMAP(regioninfo_size);
	if (d->r == MAP_FAILED) {
		wrterror(NULL, "malloc init mmap failed", NULL);
		d->regions_total = 0;
		return 1;
	}
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		LIST_INIT(&d->chunk_info_list[i]);
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
			LIST_INIT(&d->chunk_dir[i][j]);
	}
	STATS_ADD(d->malloc_used, regioninfo_size);
	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
	d->canary2 = ~d->canary1;

	*dp = d;

	/*
	 * Options have been set and will never be reset.
	 * Prevent further tampering with them.
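	 * (The malloc_readonly page is flipped to PROT_READ just below.)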
	 */
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);

	return 0;
}

static int
omalloc_grow(struct dir_info *d)
{
	size_t newtotal;
	size_t newsize;
	size_t mask;
	size_t i;
	struct region_info *p;

	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
		return 1;

	newtotal = d->regions_total * 2;
	newsize = newtotal * sizeof(struct region_info);
	mask = newtotal - 1;

	p = MMAP(newsize);
	if (p == MAP_FAILED)
		return 1;

	STATS_ADD(d->malloc_used, newsize);
	memset(p, 0, newsize);
	STATS_ZERO(d->inserts);
	STATS_ZERO(d->insert_collisions);
	for (i = 0; i < d->regions_total; i++) {
		void *q = d->r[i].p;
		if (q != NULL) {
			size_t index = hash(q) & mask;
			STATS_INC(d->inserts);
			while (p[index].p != NULL) {
				index = (index - 1) & mask;
				STATS_INC(d->insert_collisions);
			}
			p[index] = d->r[i];
		}
	}
	/* keep pages containing meta info from ending up in the cache */
	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
		wrterror(d, "munmap", d->r);
	else
		STATS_SUB(d->malloc_used,
		    d->regions_total * sizeof(struct region_info));
	d->regions_free = d->regions_free + d->regions_total;
	d->regions_total = newtotal;
	d->r = p;
	return 0;
}

static struct chunk_info *
alloc_chunk_info(struct dir_info *d, int bits)
{
	struct chunk_info *p;
	size_t size, count;

	if (bits == 0)
		count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
	else
		count = MALLOC_PAGESIZE >> bits;

	size = howmany(count, MALLOC_BITS);
	size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
	size = ALIGN(size);

	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
		char *q;
		int i;

		q = MMAP(MALLOC_PAGESIZE);
		if (q == MAP_FAILED)
			return NULL;
		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
		count = MALLOC_PAGESIZE / size;
		for (i = 0; i < count; i++, q += size)
			LIST_INSERT_HEAD(&d->chunk_info_list[bits],
			    (struct chunk_info *)q, entries);
	}
	p = LIST_FIRST(&d->chunk_info_list[bits]);
	LIST_REMOVE(p, entries);
	memset(p, 0, size);
	p->canary = d->canary1;
	return p;
}


/*
 * The hashtable uses the assumption that p is never NULL. This holds since
 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
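 * The table is open addressed; lookups probe downwards from hash(p) & mask.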
 */
static int
insert(struct dir_info *d, void *p, size_t sz, void *f)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	STATS_INC(d->inserts);
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
		STATS_INC(d->insert_collisions);
	}
	d->r[index].p = p;
	d->r[index].size = sz;
#ifdef MALLOC_STATS
	d->r[index].f = f;
#endif
	d->regions_free--;
	return 0;
}

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt", NULL);
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	STATS_INC(d->finds);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
		STATS_INC(d->find_collisions);
	}
	return (q == p && r != NULL) ? &d->r[index] : NULL;
}

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror(d, "regions_total not 2^x", NULL);
	d->regions_free++;
	STATS_INC(d->deletes);

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			STATS_INC(d->delete_moves);
			break;
		}

	}
}

/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
{
	struct chunk_info *bp;
	void *pp;
	int i, k;

	/* Allocate a new bucket */
	pp = map(d, NULL, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;

	bp = alloc_chunk_info(d, bits);
	if (bp == NULL) {
		unmap(d, pp, MALLOC_PAGESIZE);
		return NULL;
	}

	/* memory protect the page allocated in the malloc(0) case */
	if (bits == 0) {
		bp->size = 0;
		bp->shift = 1;
		i = MALLOC_MINSIZE - 1;
		while (i >>= 1)
			bp->shift++;
		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
		bp->page = pp;

		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
		if (k < 0) {
			unmap(d, pp, MALLOC_PAGESIZE);
			LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
			return NULL;
		}
	} else {
		bp->size = 1U << bits;
		bp->shift = bits;
		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
		bp->page = pp;
	}

	/* set all valid bits in the bitmap */
	k = bp->total;
	i = 0;

	/* Do a bunch at a time */
	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
		bp->bits[i / MALLOC_BITS] = (u_short)~0U;

	for (; i < k; i++)
		bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);

	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);

	bits++;
	if ((uintptr_t)pp & bits)
		wrterror(d, "pp & bits", pp);

	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
	return bp;
}


/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t size, void *f)
{
	int		i, j, listnum;
	size_t		k;
	u_short		u, *lp;
	struct chunk_info *bp;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt", NULL);
	/* Don't bother with anything less than this */
	/* unless we have a malloc(0) request */
	if (size != 0 && size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;

	/* Find the right bucket */
	if (size == 0)
		j = 0;
	else {
		j = MALLOC_MINSHIFT;
		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
		while (i >>= 1)
			j++;
	}

	listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
	/* If it's empty, make a page more of that size chunks */
	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
		bp = omalloc_make_chunks(d, j, listnum);
		if (bp == NULL)
			return NULL;
	}

	if (bp->canary != d->canary1)
		wrterror(d, "chunk info corrupted", NULL);

	i = d->chunk_start;
	if (bp->free > 1)
		i += getrbyte(d);
	if (i >= bp->total)
		i &= bp->total - 1;
	for (;;) {
		for (;;) {
			lp = &bp->bits[i / MALLOC_BITS];
			if (!*lp) {
				i += MALLOC_BITS;
				i &= ~(MALLOC_BITS - 1);
				if (i >= bp->total)
					i = 0;
			} else
				break;
		}
		k = i % MALLOC_BITS;
		u = 1 << k;
		if (*lp & u)
			break;
		if (++i >= bp->total)
			i = 0;
	}
	d->chunk_start += i + 1;
#ifdef MALLOC_STATS
	if (i == 0) {
		struct region_info *r = find(d, bp->page);
		r->f = f;
	}
#endif

	*lp ^= u;

	/* If there are no more free, remove from free-list */
	if (!--bp->free)
		LIST_REMOVE(bp, entries);

	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;
	k <<= bp->shift;

	if (mopts.malloc_canaries && bp->size > 0) {
		char *end = (char *)bp->page + k + bp->size;
		uintptr_t *canary = (uintptr_t *)(end - mopts.malloc_canaries);
		*canary = mopts.malloc_chunk_canary ^ hash(canary);
	}

	if (mopts.malloc_junk == 2 && bp->size > 0)
		memset((char *)bp->page + k, SOME_JUNK,
		    bp->size - mopts.malloc_canaries);
	return ((char *)bp->page + k);
}

static uint32_t
find_chunknum(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_info *info;
	uint32_t chunknum;

	info = (struct chunk_info *)r->size;
	if (info->canary != d->canary1)
		wrterror(d, "chunk info corrupted", NULL);

	if (mopts.malloc_canaries && info->size > 0) {
		char *end = (char *)ptr + info->size;
		uintptr_t *canary = (uintptr_t *)(end - mopts.malloc_canaries);
		if (*canary != (mopts.malloc_chunk_canary ^ hash(canary)))
			wrterror(d, "chunk canary corrupted", ptr);
	}

	/* Find the chunk number on the page */
	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;

	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1)) {
		wrterror(d, "modified chunk-pointer", ptr);
		return -1;
	}
	if (info->bits[chunknum / MALLOC_BITS] &
	    (1U << (chunknum % MALLOC_BITS))) {
		wrterror(d, "chunk is already free", ptr);
		return -1;
	}
	return chunknum;
}

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
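 * Freed chunks are marked in the page's bitmap; once every chunk on the
 * page is free again, the page itself is handed back via unmap().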
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_head *mp;
	struct chunk_info *info;
	uint32_t chunknum;
	int listnum;

	info = (struct chunk_info *)r->size;
	if ((chunknum = find_chunknum(d, r, ptr)) == -1)
		return;

	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
	info->free++;

	if (info->free == 1) {
		/* Page became non-full */
		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
		if (info->size != 0)
			mp = &d->chunk_dir[info->shift][listnum];
		else
			mp = &d->chunk_dir[0][listnum];

		LIST_INSERT_HEAD(mp, info, entries);
		return;
	}

	if (info->free != info->total)
		return;

	LIST_REMOVE(info, entries);

	if (info->size == 0 && !mopts.malloc_freeunmap)
		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE);

	delete(d, r);
	if (info->size != 0)
		mp = &d->chunk_info_list[info->shift];
	else
		mp = &d->chunk_info_list[0];
	LIST_INSERT_HEAD(mp, info, entries);
}



static void *
omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
{
	void *p;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
			errno = ENOMEM;
			return NULL;
		}
		sz += mopts.malloc_guard;
		psz = PAGEROUND(sz);
		p = map(pool, NULL, psz, zero_fill);
		if (p == MAP_FAILED) {
			errno = ENOMEM;
			return NULL;
		}
		if (insert(pool, p, sz, f)) {
			unmap(pool, p, psz);
			errno = ENOMEM;
			return NULL;
		}
		if (mopts.malloc_guard) {
			if (mprotect((char *)p + psz - mopts.malloc_guard,
			    mopts.malloc_guard, PROT_NONE))
				wrterror(pool, "mprotect", NULL);
			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
		}

		if (mopts.malloc_move &&
		    sz - mopts.malloc_guard < MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			/* fill whole allocation */
			if (mopts.malloc_junk == 2)
				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
			/* shift towards the end */
			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
			/* fill zeros if needed and overwritten above */
			if (zero_fill && mopts.malloc_junk == 2)
				memset(p, 0, sz - mopts.malloc_guard);
		} else {
			if (mopts.malloc_junk == 2) {
				if (zero_fill)
					memset((char *)p + sz - mopts.malloc_guard,
					    SOME_JUNK, psz - sz);
				else
					memset(p, SOME_JUNK,
					    psz - mopts.malloc_guard);
			}
		}

	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(pool, sz, f);
		if (zero_fill && p != NULL && sz > 0)
			memset(p, 0, sz - mopts.malloc_canaries);
	}

	return p;
}

/*
 * Common function for handling recursion. Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
static void
malloc_recurse(struct dir_info *d)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror(d, "recursive call", NULL);
	}
	d->active--;
	_MALLOC_UNLOCK();
	errno = EDEADLK;
}

static int
malloc_init(void)
{
	if (omalloc_init(&mopts.malloc_pool)) {
		_MALLOC_UNLOCK();
		if (mopts.malloc_xmalloc)
			wrterror(NULL, "out of memory", NULL);
		errno = ENOMEM;
		return -1;
	}
	return 0;
}

void *
malloc(size_t size)
{
	void *r;
	struct dir_info *d;
	int saved_errno = errno;

	_MALLOC_LOCK();
	d = getpool();
	if (d == NULL) {
		if (malloc_init() != 0)
			return NULL;
		d = getpool();
	}
	d->func = "malloc():";

	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}
	if (size > 0 && size <= MALLOC_MAXCHUNK)
		size += mopts.malloc_canaries;
	r = omalloc(d, size, 0, CALLER);
	d->active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror(d, "out of memory", NULL);
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(malloc);*/

static void
validate_junk(struct dir_info *pool, void *p)
{
	struct region_info *r;
	size_t byte, sz;

	if (p == NULL)
		return;
	r = find(pool, p);
	if (r == NULL) {
		wrterror(pool, "bogus pointer in validate_junk", p);
		return;
	}
	REALSIZE(sz, r);
	if (sz > 0 && sz <= MALLOC_MAXCHUNK)
		sz -= mopts.malloc_canaries;
	if (sz > 32)
		sz = 32;
	for (byte = 0; byte < sz; byte++) {
		if (((unsigned char *)p)[byte] != SOME_FREEJUNK) {
			wrterror(pool, "use after free", p);
			return;
		}
	}
}

static void
ofree(struct dir_info *pool, void *p)
{
	struct region_info *r;
	size_t sz;

	r = find(pool, p);
	if (r == NULL) {
		wrterror(pool, "bogus pointer (double free?)", p);
		return;
	}
	REALSIZE(sz, r);
	if (sz > MALLOC_MAXCHUNK) {
		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			if (r->p != p) {
				wrterror(pool, "bogus pointer", p);
				return;
			}
		} else {
#if notyetbecause_of_realloc
			/* shifted towards the end */
			if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
			    MALLOC_MINSIZE - sz - mopts.malloc_guard) &
			    ~(MALLOC_MINSIZE-1))) {
			}
#endif
			p = r->p;
		}
		if (mopts.malloc_guard) {
			if (sz < mopts.malloc_guard)
				wrterror(pool, "guard size", NULL);
			if (!mopts.malloc_freeunmap) {
				if (mprotect((char *)p + PAGEROUND(sz) -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror(pool, "mprotect", NULL);
			}
			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
		}
		if (mopts.malloc_junk && !mopts.malloc_freeunmap) {
			size_t amt = mopts.malloc_junk == 1 ? MALLOC_MAXCHUNK :
			    PAGEROUND(sz) - mopts.malloc_guard;
			memset(p, SOME_FREEJUNK, amt);
		}
		unmap(pool, p, PAGEROUND(sz));
		delete(pool, r);
	} else {
		void *tmp;
		int i;

		if (mopts.malloc_junk && sz > 0)
			memset(p, SOME_FREEJUNK, sz - mopts.malloc_canaries);
		if (!mopts.malloc_freenow) {
			if (find_chunknum(pool, r, p) == -1)
				return;
			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
			tmp = p;
			p = pool->delayed_chunks[i];
			if (tmp == p) {
				wrterror(pool, "double free", p);
				return;
			}
			if (mopts.malloc_junk)
				validate_junk(pool, p);
			pool->delayed_chunks[i] = tmp;
		}
		if (p != NULL) {
			r = find(pool, p);
			if (r == NULL) {
				wrterror(pool, "bogus pointer (double free?)", p);
				return;
			}
			free_bytes(pool, r, p);
		}
	}
}

void
free(void *ptr)
{
	struct dir_info *d;
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	_MALLOC_LOCK();
	d = getpool();
	if (d == NULL) {
		_MALLOC_UNLOCK();
		wrterror(d, "free() called before allocation", NULL);
		return;
	}
	d->func = "free():";
	if (d->active++) {
		malloc_recurse(d);
		return;
	}
	ofree(d, ptr);
	d->active--;
	_MALLOC_UNLOCK();
	errno = saved_errno;
}
/*DEF_STRONG(free);*/


static void *
orealloc(struct dir_info *pool, void *p, size_t newsz, void *f)
{
	struct region_info *r;
	size_t oldsz, goldsz, gnewsz;
	void *q;

	if (p == NULL)
		return omalloc(pool, newsz, 0, f);

	r = find(pool, p);
	if (r == NULL) {
		wrterror(pool, "bogus pointer (double free?)", p);
		return NULL;
	}
	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	REALSIZE(oldsz, r);
	goldsz = oldsz;
	if (oldsz > MALLOC_MAXCHUNK) {
		if (oldsz < mopts.malloc_guard)
			wrterror(pool, "guard size", NULL);
		oldsz -= mopts.malloc_guard;
	}

	gnewsz = newsz;
	if (gnewsz > MALLOC_MAXCHUNK)
		gnewsz += mopts.malloc_guard;

	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
	    !mopts.malloc_realloc) {
		size_t roldsz = PAGEROUND(goldsz);
		size_t rnewsz = PAGEROUND(gnewsz);

		if (rnewsz > roldsz) {
			if (!mopts.malloc_guard) {
				void *hint = (char *)p + roldsz;
				size_t needed = rnewsz - roldsz;

				STATS_INC(pool->cheap_realloc_tries);
				q = map(pool, hint, needed, 0);
				if (q == hint)
					goto gotit;
				zapcacheregion(pool, hint, needed);
				q = MQUERY(hint, needed);
				if (q == hint)
					q = MMAPA(hint, needed);
				else
					q = MAP_FAILED;
				if (q == hint) {
gotit:
					STATS_ADD(pool->malloc_used, needed);
					if (mopts.malloc_junk == 2)
						memset(q, SOME_JUNK, needed);
					r->size = newsz;
					STATS_SETF(r, f);
					STATS_INC(pool->cheap_reallocs);
					return p;
				} else if (q != MAP_FAILED) {
					if (munmap(q, needed))
						wrterror(pool, "munmap", q);
				}
			}
		} else if (rnewsz < roldsz) {
			if (mopts.malloc_guard) {
				if (mprotect((char *)p + roldsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror(pool, "mprotect", NULL);
				if (mprotect((char *)p + rnewsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_NONE))
					wrterror(pool, "mprotect", NULL);
			}
			unmap(pool, (char *)p + rnewsz, roldsz - rnewsz);
			r->size = gnewsz;
			STATS_SETF(r, f);
			return p;
		} else {
			if (newsz > oldsz && mopts.malloc_junk == 2)
				memset((char *)p + newsz, SOME_JUNK,
				    rnewsz - mopts.malloc_guard - newsz);
			r->size = gnewsz;
			STATS_SETF(r, f);
			return p;
		}
	}
	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
		if (mopts.malloc_junk == 2 && newsz > 0) {
			size_t usable_oldsz = oldsz;
			if (oldsz <= MALLOC_MAXCHUNK)
				usable_oldsz -= mopts.malloc_canaries;
			if (newsz < usable_oldsz)
				memset((char *)p + newsz, SOME_JUNK,
				    usable_oldsz - newsz);
		}
		STATS_SETF(r, f);
		return p;
	} else if (newsz != oldsz || mopts.malloc_realloc) {
		q = omalloc(pool, newsz, 0, f);
		if (q == NULL)
			return NULL;
		if (newsz != 0 && oldsz != 0) {
			size_t copysz = oldsz < newsz ? oldsz : newsz;
			if (copysz <= MALLOC_MAXCHUNK)
				copysz -= mopts.malloc_canaries;
			memcpy(q, p, copysz);
		}
		ofree(pool, p);
		return q;
	} else {
		STATS_SETF(r, f);
		return p;
	}
}

void *
realloc(void *ptr, size_t size)
{
	struct dir_info *d;
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	d = getpool();
	if (d == NULL) {
		if (malloc_init() != 0)
			return NULL;
		d = getpool();
	}
	d->func = "realloc():";
	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}
	if (size > 0 && size <= MALLOC_MAXCHUNK)
		size += mopts.malloc_canaries;
	r = orealloc(d, ptr, size, CALLER);

	d->active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror(d, "out of memory", NULL);
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(realloc);*/


/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))

void *
calloc(size_t nmemb, size_t size)
{
	struct dir_info *d;
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	d = getpool();
	if (d == NULL) {
		if (malloc_init() != 0)
			return NULL;
		d = getpool();
	}
	d->func = "calloc():";
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		_MALLOC_UNLOCK();
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory", NULL);
		errno = ENOMEM;
		return NULL;
	}

	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}

	size *= nmemb;
	if (size > 0 && size <= MALLOC_MAXCHUNK)
		size += mopts.malloc_canaries;
	r = omalloc(d, size, 1, CALLER);

	d->active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror(d, "out of memory", NULL);
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(calloc);*/

static void *
mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
{
	char *p, *q;

	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0) {
		wrterror(d, "mapalign bad alignment", NULL);
		return MAP_FAILED;
	}
	if (sz != PAGEROUND(sz)) {
		wrterror(d, "mapalign round", NULL);
		return MAP_FAILED;
	}

	/* Allocate sz + alignment bytes of memory, which must include a
	 * subrange of size bytes that is properly aligned. Unmap the
	 * other bytes, and then return that subrange.
	 */

	/* We need sz + alignment to fit into a size_t. */
	if (alignment > SIZE_MAX - sz)
		return MAP_FAILED;

	p = map(d, NULL, sz + alignment, zero_fill);
	if (p == MAP_FAILED)
		return MAP_FAILED;
	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
	if (q != p) {
		if (munmap(p, q - p))
			wrterror(d, "munmap", p);
	}
	if (munmap(q + sz, alignment - (q - p)))
		wrterror(d, "munmap", q + sz);
	STATS_SUB(d->malloc_used, alignment);

	return q;
}

static void *
omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
    void *f)
{
	size_t psz;
	void *p;

	if (alignment <= MALLOC_PAGESIZE) {
		/*
		 * max(size, alignment) is enough to assure the requested
		 * alignment, since the allocator always allocates
		 * power-of-two blocks.
		 */
		if (sz < alignment)
			sz = alignment;
		return omalloc(pool, sz, zero_fill, f);
	}

	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	sz += mopts.malloc_guard;
	psz = PAGEROUND(sz);

	p = mapalign(pool, alignment, psz, zero_fill);
	if (p == NULL) {
		errno = ENOMEM;
		return NULL;
	}

	if (insert(pool, p, sz, f)) {
		unmap(pool, p, psz);
		errno = ENOMEM;
		return NULL;
	}

	if (mopts.malloc_guard) {
		if (mprotect((char *)p + psz - mopts.malloc_guard,
		    mopts.malloc_guard, PROT_NONE))
			wrterror(pool, "mprotect", NULL);
		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
	}

	if (mopts.malloc_junk == 2) {
		if (zero_fill)
			memset((char *)p + sz - mopts.malloc_guard,
			    SOME_JUNK, psz - sz);
		else
			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
	}

	return p;
}

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	struct dir_info *d;
	int res, saved_errno = errno;
	void *r;

	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
		return EINVAL;

	_MALLOC_LOCK();
	d = getpool();
	if (d == NULL) {
		if (malloc_init() != 0)
			goto err;
		d = getpool();
	}
	d->func = "posix_memalign():";
	if (d->active++) {
		malloc_recurse(d);
		goto err;
	}
	if (size > 0 && size <= MALLOC_MAXCHUNK)
		size += mopts.malloc_canaries;
	r = omemalign(d, alignment, size, 0, CALLER);
	d->active--;
	_MALLOC_UNLOCK();
	if (r == NULL) {
		if (mopts.malloc_xmalloc) {
			wrterror(d, "out of memory", NULL);
			errno = ENOMEM;
		}
		goto err;
	}
	errno = saved_errno;
	*memptr = r;
	return 0;

err:
	res = errno;
	errno = saved_errno;
	return res;
}
/*DEF_STRONG(posix_memalign);*/

#ifdef MALLOC_STATS

struct malloc_leak {
	void (*f)();
	size_t total_size;
	int count;
};

struct leaknode {
	RB_ENTRY(leaknode) entry;
	struct malloc_leak d;
};

static int
leakcmp(struct leaknode *e1, struct leaknode *e2)
{
	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
}

static RB_HEAD(leaktree, leaknode) leakhead;
RB_GENERATE_STATIC(leaktree, leaknode, entry, leakcmp)

static void
putleakinfo(void *f, size_t sz, int cnt)
{
	struct leaknode key, *p;
	static struct leaknode *page;
	static int used;

	if (cnt == 0)
		return;

	key.d.f = f;
	p = RB_FIND(leaktree, &leakhead, &key);
	if (p == NULL) {
		if (page == NULL ||
		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
			page = MMAP(MALLOC_PAGESIZE);
			if (page == MAP_FAILED)
				return;
			used = 0;
		}
		p = &page[used++];
		p->d.f = f;
		p->d.total_size = sz * cnt;
		p->d.count = cnt;
		RB_INSERT(leaktree, &leakhead, p);
	} else {
		p->d.total_size += sz * cnt;
		p->d.count += cnt;
	}
}

static struct malloc_leak *malloc_leaks;

static void
dump_leaks(int fd)
{
	struct leaknode *p;
	char buf[64];
	int i = 0;

	snprintf(buf, sizeof(buf), "Leak report\n");
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "                 f     sum      #    avg\n");
	write(fd, buf, strlen(buf));
	/* XXX only one page of summary */
	if (malloc_leaks == NULL)
		malloc_leaks = MMAP(MALLOC_PAGESIZE);
	if (malloc_leaks != MAP_FAILED)
		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
	RB_FOREACH(p, leaktree, &leakhead) {
		snprintf(buf, sizeof(buf), "%18p %7zu %6u %6zu\n", p->d.f,
		    p->d.total_size, p->d.count,
		    p->d.total_size / p->d.count);
		write(fd, buf, strlen(buf));
		if (malloc_leaks == MAP_FAILED ||
		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
			continue;
		malloc_leaks[i].f = p->d.f;
		malloc_leaks[i].total_size = p->d.total_size;
		malloc_leaks[i].count = p->d.count;
		i++;
	}
}

static void
dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
{
	char buf[64];

	while (p != NULL) {
		snprintf(buf, sizeof(buf), "chunk %18p %18p %4d %d/%d\n",
		    p->page, ((p->bits[0] & 1) ? NULL : f),
		    p->size, p->free, p->total);
		write(fd, buf, strlen(buf));
		if (!fromfreelist) {
			if (p->bits[0] & 1)
				putleakinfo(NULL, p->size, p->total - p->free);
			else {
				putleakinfo(f, p->size, 1);
				putleakinfo(NULL, p->size,
				    p->total - p->free - 1);
			}
			break;
		}
		p = LIST_NEXT(p, entries);
		if (p != NULL) {
			snprintf(buf, sizeof(buf), "        ");
			write(fd, buf, strlen(buf));
		}
	}
}

static void
dump_free_chunk_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i, j, count;
	struct chunk_info *p;

	snprintf(buf, sizeof(buf), "Free chunk structs:\n");
	write(fd, buf, strlen(buf));
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		count = 0;
		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
			count++;
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
			p = LIST_FIRST(&d->chunk_dir[i][j]);
			if (p == NULL && count == 0)
				continue;
			snprintf(buf, sizeof(buf), "%2d) %3d ", i, count);
			write(fd, buf, strlen(buf));
			if (p != NULL)
				dump_chunk(fd, p, NULL, 1);
			else
				write(fd, "\n", 1);
		}
	}

}

static void
dump_free_page_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i;

	snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
	    d->free_regions_size);
	write(fd, buf, strlen(buf));
	for (i = 0; i < mopts.malloc_cache; i++) {
		if (d->free_regions[i].p != NULL) {
			snprintf(buf, sizeof(buf), "%2d) ", i);
			write(fd, buf, strlen(buf));
			snprintf(buf, sizeof(buf), "free at %p: %zu\n",
			    d->free_regions[i].p, d->free_regions[i].size);
			write(fd, buf, strlen(buf));
		}
	}
}

static void
malloc_dump1(int fd, struct dir_info *d)
{
	char buf[100];
	size_t i, realsize;

	snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d);
	write(fd, buf, strlen(buf));
	if (d == NULL)
		return;
	snprintf(buf, sizeof(buf), "Region slots free %zu/%zu\n",
	    d->regions_free, d->regions_total);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Finds %zu/%zu\n", d->finds,
	    d->find_collisions);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Inserts %zu/%zu\n", d->inserts,
	    d->insert_collisions);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
	    d->delete_moves);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
	    d->cheap_reallocs, d->cheap_realloc_tries);
	write(fd, buf, strlen(buf));
	dump_free_chunk_info(fd, d);
	dump_free_page_info(fd, d);
	snprintf(buf, sizeof(buf),
	    "slot)  hash d  type         page            f size [free/n]\n");
	write(fd, buf, strlen(buf));
	for (i = 0; i < d->regions_total; i++) {
		if (d->r[i].p != NULL) {
			size_t h = hash(d->r[i].p) &
			    (d->regions_total - 1);
			snprintf(buf, sizeof(buf), "%4zx) #%4zx %zd ",
			    i, h, h - i);
			write(fd, buf, strlen(buf));
			REALSIZE(realsize, &d->r[i]);
			if (realsize > MALLOC_MAXCHUNK) {
				putleakinfo(d->r[i].f, realsize, 1);
				snprintf(buf, sizeof(buf),
				    "pages %12p %12p %zu\n", d->r[i].p,
				    d->r[i].f, realsize);
				write(fd, buf, strlen(buf));
			} else
				dump_chunk(fd,
				    (struct chunk_info *)d->r[i].size,
				    d->r[i].f, 0);
		}
	}
	snprintf(buf, sizeof(buf), "In use %zu\n", d->malloc_used);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Guarded %zu\n", d->malloc_guarded);
	write(fd, buf, strlen(buf));
	dump_leaks(fd);
	write(fd, "\n", 1);
}

void
malloc_dump(int fd)
{
	struct dir_info *pool = getpool();
	int i;
	void *p;
	struct region_info *r;
	int saved_errno = errno;

	if (pool == NULL)
		return;
	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
		p = pool->delayed_chunks[i];
		if (p == NULL)
			continue;
		r = find(pool, p);
		if (r == NULL) {
			wrterror(pool, "bogus pointer in malloc_dump", p);
			continue;
		}
		free_bytes(pool, r, p);
		pool->delayed_chunks[i] = NULL;
	}
	/* XXX leak when run multiple times */
	RB_INIT(&leakhead);
	malloc_dump1(fd, pool);
	errno = saved_errno;
}
DEF_WEAK(malloc_dump);

static void
malloc_exit(void)
{
	static const char q[] = "malloc() warning: Couldn't dump stats\n";
	int save_errno = errno, fd;

	fd = open("malloc.out", O_RDWR|O_APPEND);
	if (fd != -1) {
		malloc_dump(fd);
		close(fd);
	} else
		write(STDERR_FILENO, q, sizeof(q) - 1);
	errno = save_errno;
}

#endif /* MALLOC_STATS */