/*	$OpenBSD: malloc.c,v 1.176 2015/09/13 20:29:23 guenther Exp $	*/
/*
 * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net>
 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return. Poul-Henning Kamp
 */

/* #define MALLOC_STATS */

#include <sys/types.h>
#include <sys/param.h>	/* PAGE_SHIFT ALIGN */
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#ifdef MALLOC_STATS
#include <sys/tree.h>
#include <fcntl.h>
#endif

#include "thread_private.h"

#if defined(__sparc__) && !defined(__sparcv9__)
#define MALLOC_PAGESHIFT	(13U)
#elif defined(__mips64__)
#define MALLOC_PAGESHIFT	(14U)
#else
#define MALLOC_PAGESHIFT	(PAGE_SHIFT)
#endif

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNK_MASK	15
#define MALLOC_INITIAL_REGIONS	512
#define MALLOC_DEFAULT_CACHE	64
#define MALLOC_CHUNK_LISTS	4

/*
 * When the P option is active, we move allocations between half a page
 * and a whole page towards the end, subject to alignment constraints.
 * This is the extra headroom we allow. Set to zero to be the most
 * strict.
 */
#define MALLOC_LEEWAY		0

#define PAGEROUND(x)	(((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)

/*
 * What to use for Junk. This is the byte value we use to fill with
 * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
 * and SOME_FREEJUNK right before free.
 */
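/*
 * A guarded, illustrative sketch (hypothetical helper name, never
 * compiled): it only demonstrates the intent of the two byte values
 * defined just below. Memory that still reads as 0xd0 points at use of
 * uninitialised data; memory that reads as 0xdf points at use after free.
 */
#if 0
#include <assert.h>
#include <string.h>

static void
junk_example(void)
{
	unsigned char buf[16];

	memset(buf, 0xd0, sizeof(buf));	/* as done right after allocation */
	assert(buf[0] == 0xd0);		/* an uninitialised read sees 0xd0 */

	memset(buf, 0xdf, sizeof(buf));	/* as done right before free */
	assert(buf[0] == 0xdf);		/* a use-after-free read sees 0xdf */
}
#endif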
#define SOME_JUNK		0xd0	/* as in "Duh" :-) */
#define SOME_FREEJUNK		0xdf

#define MMAP(sz)	mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

#define MMAPA(a,sz)	mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

#define MQUERY(a, sz)	mquery((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, (off_t)0)

#define _MALLOC_LEAVE() if (__isthreaded) do { \
	malloc_active--; \
	_MALLOC_UNLOCK(); \
} while (0)
#define _MALLOC_ENTER() if (__isthreaded) do { \
	_MALLOC_LOCK(); \
	malloc_active++; \
} while (0)

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
#ifdef MALLOC_STATS
	void *f;		/* where allocated from */
#endif
};

LIST_HEAD(chunk_head, chunk_info);

struct dir_info {
	u_int32_t canary1;
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_free;		/* number of free slots */
					/* lists of free chunk info structs */
	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
					/* lists of chunks with free slots */
	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
	size_t free_regions_size;	/* free pages cached */
					/* free pages cache */
	struct region_info free_regions[MALLOC_MAXCACHE];
					/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
	size_t rbytesused;		/* random bytes used */
	u_char rbytes[32];		/* random bytes */
	u_short chunk_start;
#ifdef MALLOC_STATS
	size_t inserts;
	size_t insert_collisions;
	size_t finds;
	size_t find_collisions;
	size_t deletes;
	size_t delete_moves;
	size_t cheap_realloc_tries;
	size_t cheap_reallocs;
	size_t malloc_used;		/* bytes allocated */
	size_t malloc_guarded;		/* bytes used for guards */
#define STATS_ADD(x,y)	((x) += (y))
#define STATS_SUB(x,y)	((x) -= (y))
#define STATS_INC(x)	((x)++)
#define STATS_ZERO(x)	((x) = 0)
#define STATS_SETF(x,y)	((x)->f = (y))
#else
#define STATS_ADD(x,y)	/* nothing */
#define STATS_SUB(x,y)	/* nothing */
#define STATS_INC(x)	/* nothing */
#define STATS_ZERO(x)	/* nothing */
#define STATS_SETF(x,y)	/* nothing */
#endif /* MALLOC_STATS */
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
			~MALLOC_PAGEMASK)

/*
 * This structure describes a page worth of chunks.
 *
 * How many bits per u_short in the bitmap
 */
#define MALLOC_BITS	(NBBY * sizeof(u_short))
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;			/* pointer to the page */
	u_int32_t canary;
	u_short size;			/* size of this page's chunks */
	u_short shift;			/* how far to shift for this size */
	u_short free;			/* how many free chunks */
	u_short total;			/* how many chunks */
					/* which chunks are free */
	u_short bits[1];
};

struct malloc_readonly {
	struct dir_info *malloc_pool;	/* Main bookkeeping information */
	int malloc_abort;		/* abort() on error */
	int malloc_freenow;		/* Free quickly - disable chunk rnd */
	int malloc_freeunmap;		/* mprotect free pages PROT_NONE? */
	int malloc_hint;		/* call madvise on free pages? */
	int malloc_junk;		/* junk fill?
*/ 185 int malloc_move; /* move allocations to end of page? */ 186 int malloc_realloc; /* always realloc? */ 187 int malloc_xmalloc; /* xmalloc behaviour? */ 188 size_t malloc_guard; /* use guard pages after allocations? */ 189 u_int malloc_cache; /* free pages we cache */ 190 #ifdef MALLOC_STATS 191 int malloc_stats; /* dump statistics at end */ 192 #endif 193 u_int32_t malloc_canary; /* Matched against ones in malloc_pool */ 194 }; 195 196 /* This object is mapped PROT_READ after initialisation to prevent tampering */ 197 static union { 198 struct malloc_readonly mopts; 199 u_char _pad[MALLOC_PAGESIZE]; 200 } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE))); 201 #define mopts malloc_readonly.mopts 202 #define getpool() mopts.malloc_pool 203 204 char *malloc_options; /* compile-time options */ 205 static char *malloc_func; /* current function */ 206 static int malloc_active; /* status of malloc */ 207 208 static u_char getrbyte(struct dir_info *d); 209 210 extern char *__progname; 211 212 #ifdef MALLOC_STATS 213 void malloc_dump(int); 214 PROTO_NORMAL(malloc_dump); 215 static void malloc_exit(void); 216 #define CALLER __builtin_return_address(0) 217 #else 218 #define CALLER NULL 219 #endif 220 221 /* low bits of r->p determine size: 0 means >= page size and p->size holding 222 * real size, otherwise r->size is a shift count, or 1 for malloc(0) 223 */ 224 #define REALSIZE(sz, r) \ 225 (sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK, \ 226 (sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1)))) 227 228 static inline size_t 229 hash(void *p) 230 { 231 size_t sum; 232 uintptr_t u; 233 234 u = (uintptr_t)p >> MALLOC_PAGESHIFT; 235 sum = u; 236 sum = (sum << 7) - sum + (u >> 16); 237 #ifdef __LP64__ 238 sum = (sum << 7) - sum + (u >> 32); 239 sum = (sum << 7) - sum + (u >> 48); 240 #endif 241 return sum; 242 } 243 244 static void 245 wrterror(char *msg, void *p) 246 { 247 char *q = " error: "; 248 struct iovec iov[7]; 249 char pidbuf[20]; 250 char buf[20]; 251 int saved_errno = errno; 252 253 iov[0].iov_base = __progname; 254 iov[0].iov_len = strlen(__progname); 255 iov[1].iov_base = pidbuf; 256 snprintf(pidbuf, sizeof(pidbuf), "(%d) in ", getpid()); 257 iov[1].iov_len = strlen(pidbuf); 258 iov[2].iov_base = malloc_func; 259 iov[2].iov_len = strlen(malloc_func); 260 iov[3].iov_base = q; 261 iov[3].iov_len = strlen(q); 262 iov[4].iov_base = msg; 263 iov[4].iov_len = strlen(msg); 264 iov[5].iov_base = buf; 265 if (p == NULL) 266 iov[5].iov_len = 0; 267 else { 268 snprintf(buf, sizeof(buf), " %p", p); 269 iov[5].iov_len = strlen(buf); 270 } 271 iov[6].iov_base = "\n"; 272 iov[6].iov_len = 1; 273 writev(STDERR_FILENO, iov, 7); 274 275 #ifdef MALLOC_STATS 276 if (mopts.malloc_stats) 277 malloc_dump(STDERR_FILENO); 278 #endif /* MALLOC_STATS */ 279 280 errno = saved_errno; 281 if (mopts.malloc_abort) 282 abort(); 283 } 284 285 static void 286 rbytes_init(struct dir_info *d) 287 { 288 arc4random_buf(d->rbytes, sizeof(d->rbytes)); 289 /* add 1 to account for using d->rbytes[0] */ 290 d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2); 291 } 292 293 static inline u_char 294 getrbyte(struct dir_info *d) 295 { 296 u_char x; 297 298 if (d->rbytesused >= sizeof(d->rbytes)) 299 rbytes_init(d); 300 x = d->rbytes[d->rbytesused++]; 301 return x; 302 } 303 304 /* 305 * Cache maintenance. We keep at most malloc_cache pages cached. 
306 * If the cache is becoming full, unmap pages in the cache for real, 307 * and then add the region to the cache 308 * Opposed to the regular region data structure, the sizes in the 309 * cache are in MALLOC_PAGESIZE units. 310 */ 311 static void 312 unmap(struct dir_info *d, void *p, size_t sz) 313 { 314 size_t psz = sz >> MALLOC_PAGESHIFT; 315 size_t rsz, tounmap; 316 struct region_info *r; 317 u_int i, offset; 318 319 if (sz != PAGEROUND(sz)) { 320 wrterror("munmap round", NULL); 321 return; 322 } 323 324 if (psz > mopts.malloc_cache) { 325 i = munmap(p, sz); 326 if (i) 327 wrterror("munmap", p); 328 STATS_SUB(d->malloc_used, sz); 329 return; 330 } 331 tounmap = 0; 332 rsz = mopts.malloc_cache - d->free_regions_size; 333 if (psz > rsz) 334 tounmap = psz - rsz; 335 offset = getrbyte(d); 336 for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) { 337 r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)]; 338 if (r->p != NULL) { 339 rsz = r->size << MALLOC_PAGESHIFT; 340 if (munmap(r->p, rsz)) 341 wrterror("munmap", r->p); 342 r->p = NULL; 343 if (tounmap > r->size) 344 tounmap -= r->size; 345 else 346 tounmap = 0; 347 d->free_regions_size -= r->size; 348 r->size = 0; 349 STATS_SUB(d->malloc_used, rsz); 350 } 351 } 352 if (tounmap > 0) 353 wrterror("malloc cache underflow", NULL); 354 for (i = 0; i < mopts.malloc_cache; i++) { 355 r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)]; 356 if (r->p == NULL) { 357 if (mopts.malloc_hint) 358 madvise(p, sz, MADV_FREE); 359 if (mopts.malloc_freeunmap) 360 mprotect(p, sz, PROT_NONE); 361 r->p = p; 362 r->size = psz; 363 d->free_regions_size += psz; 364 break; 365 } 366 } 367 if (i == mopts.malloc_cache) 368 wrterror("malloc free slot lost", NULL); 369 if (d->free_regions_size > mopts.malloc_cache) 370 wrterror("malloc cache overflow", NULL); 371 } 372 373 static void 374 zapcacheregion(struct dir_info *d, void *p, size_t len) 375 { 376 u_int i; 377 struct region_info *r; 378 size_t rsz; 379 380 for (i = 0; i < mopts.malloc_cache; i++) { 381 r = &d->free_regions[i]; 382 if (r->p >= p && r->p <= (void *)((char *)p + len)) { 383 rsz = r->size << MALLOC_PAGESHIFT; 384 if (munmap(r->p, rsz)) 385 wrterror("munmap", r->p); 386 r->p = NULL; 387 d->free_regions_size -= r->size; 388 r->size = 0; 389 STATS_SUB(d->malloc_used, rsz); 390 } 391 } 392 } 393 394 static void * 395 map(struct dir_info *d, void *hint, size_t sz, int zero_fill) 396 { 397 size_t psz = sz >> MALLOC_PAGESHIFT; 398 struct region_info *r, *big = NULL; 399 u_int i, offset; 400 void *p; 401 402 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || 403 d->canary1 != ~d->canary2) 404 wrterror("internal struct corrupt", NULL); 405 if (sz != PAGEROUND(sz)) { 406 wrterror("map round", NULL); 407 return MAP_FAILED; 408 } 409 if (!hint && psz > d->free_regions_size) { 410 _MALLOC_LEAVE(); 411 p = MMAP(sz); 412 _MALLOC_ENTER(); 413 if (p != MAP_FAILED) 414 STATS_ADD(d->malloc_used, sz); 415 /* zero fill not needed */ 416 return p; 417 } 418 offset = getrbyte(d); 419 for (i = 0; i < mopts.malloc_cache; i++) { 420 r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)]; 421 if (r->p != NULL) { 422 if (hint && r->p != hint) 423 continue; 424 if (r->size == psz) { 425 p = r->p; 426 r->p = NULL; 427 r->size = 0; 428 d->free_regions_size -= psz; 429 if (mopts.malloc_freeunmap) 430 mprotect(p, sz, PROT_READ | PROT_WRITE); 431 if (mopts.malloc_hint) 432 madvise(p, sz, MADV_NORMAL); 433 if (zero_fill) 434 memset(p, 0, sz); 435 else if (mopts.malloc_junk == 2 && 436 
mopts.malloc_freeunmap) 437 memset(p, SOME_FREEJUNK, sz); 438 return p; 439 } else if (r->size > psz) 440 big = r; 441 } 442 } 443 if (big != NULL) { 444 r = big; 445 p = r->p; 446 r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT); 447 if (mopts.malloc_freeunmap) 448 mprotect(p, sz, PROT_READ | PROT_WRITE); 449 if (mopts.malloc_hint) 450 madvise(p, sz, MADV_NORMAL); 451 r->size -= psz; 452 d->free_regions_size -= psz; 453 if (zero_fill) 454 memset(p, 0, sz); 455 else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap) 456 memset(p, SOME_FREEJUNK, sz); 457 return p; 458 } 459 if (hint) 460 return MAP_FAILED; 461 if (d->free_regions_size > mopts.malloc_cache) 462 wrterror("malloc cache", NULL); 463 _MALLOC_LEAVE(); 464 p = MMAP(sz); 465 _MALLOC_ENTER(); 466 if (p != MAP_FAILED) 467 STATS_ADD(d->malloc_used, sz); 468 /* zero fill not needed */ 469 return p; 470 } 471 472 /* 473 * Initialize a dir_info, which should have been cleared by caller 474 */ 475 static int 476 omalloc_init(struct dir_info **dp) 477 { 478 char *p, b[64]; 479 int i, j; 480 size_t d_avail, regioninfo_size; 481 struct dir_info *d; 482 483 /* 484 * Default options 485 */ 486 mopts.malloc_abort = 1; 487 mopts.malloc_junk = 1; 488 mopts.malloc_move = 1; 489 mopts.malloc_cache = MALLOC_DEFAULT_CACHE; 490 491 for (i = 0; i < 3; i++) { 492 switch (i) { 493 case 0: 494 j = readlink("/etc/malloc.conf", b, sizeof b - 1); 495 if (j <= 0) 496 continue; 497 b[j] = '\0'; 498 p = b; 499 break; 500 case 1: 501 if (issetugid() == 0) 502 p = getenv("MALLOC_OPTIONS"); 503 else 504 continue; 505 break; 506 case 2: 507 p = malloc_options; 508 break; 509 default: 510 p = NULL; 511 } 512 513 for (; p != NULL && *p != '\0'; p++) { 514 switch (*p) { 515 case '>': 516 mopts.malloc_cache <<= 1; 517 if (mopts.malloc_cache > MALLOC_MAXCACHE) 518 mopts.malloc_cache = MALLOC_MAXCACHE; 519 break; 520 case '<': 521 mopts.malloc_cache >>= 1; 522 break; 523 case 'a': 524 mopts.malloc_abort = 0; 525 break; 526 case 'A': 527 mopts.malloc_abort = 1; 528 break; 529 #ifdef MALLOC_STATS 530 case 'd': 531 mopts.malloc_stats = 0; 532 break; 533 case 'D': 534 mopts.malloc_stats = 1; 535 break; 536 #endif /* MALLOC_STATS */ 537 case 'f': 538 mopts.malloc_freenow = 0; 539 mopts.malloc_freeunmap = 0; 540 break; 541 case 'F': 542 mopts.malloc_freenow = 1; 543 mopts.malloc_freeunmap = 1; 544 break; 545 case 'g': 546 mopts.malloc_guard = 0; 547 break; 548 case 'G': 549 mopts.malloc_guard = MALLOC_PAGESIZE; 550 break; 551 case 'h': 552 mopts.malloc_hint = 0; 553 break; 554 case 'H': 555 mopts.malloc_hint = 1; 556 break; 557 case 'j': 558 mopts.malloc_junk = 0; 559 break; 560 case 'J': 561 mopts.malloc_junk = 2; 562 break; 563 case 'n': 564 case 'N': 565 break; 566 case 'p': 567 mopts.malloc_move = 0; 568 break; 569 case 'P': 570 mopts.malloc_move = 1; 571 break; 572 case 'r': 573 mopts.malloc_realloc = 0; 574 break; 575 case 'R': 576 mopts.malloc_realloc = 1; 577 break; 578 case 's': 579 mopts.malloc_freeunmap = mopts.malloc_junk = 0; 580 mopts.malloc_guard = 0; 581 mopts.malloc_cache = MALLOC_DEFAULT_CACHE; 582 break; 583 case 'S': 584 mopts.malloc_freeunmap = 1; 585 mopts.malloc_junk = 2; 586 mopts.malloc_guard = MALLOC_PAGESIZE; 587 mopts.malloc_cache = 0; 588 break; 589 case 'u': 590 mopts.malloc_freeunmap = 0; 591 break; 592 case 'U': 593 mopts.malloc_freeunmap = 1; 594 break; 595 case 'x': 596 mopts.malloc_xmalloc = 0; 597 break; 598 case 'X': 599 mopts.malloc_xmalloc = 1; 600 break; 601 default: { 602 static const char q[] = "malloc() warning: " 603 "unknown char 
in MALLOC_OPTIONS\n"; 604 write(STDERR_FILENO, q, sizeof(q) - 1); 605 break; 606 } 607 } 608 } 609 } 610 611 #ifdef MALLOC_STATS 612 if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) { 613 static const char q[] = "malloc() warning: atexit(2) failed." 614 " Will not be able to dump stats on exit\n"; 615 write(STDERR_FILENO, q, sizeof(q) - 1); 616 } 617 #endif /* MALLOC_STATS */ 618 619 while ((mopts.malloc_canary = arc4random()) == 0) 620 ; 621 622 /* 623 * Allocate dir_info with a guard page on either side. Also 624 * randomise offset inside the page at which the dir_info 625 * lies (subject to alignment by 1 << MALLOC_MINSHIFT) 626 */ 627 if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED) 628 return -1; 629 mprotect(p, MALLOC_PAGESIZE, PROT_NONE); 630 mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ, 631 MALLOC_PAGESIZE, PROT_NONE); 632 d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT; 633 d = (struct dir_info *)(p + MALLOC_PAGESIZE + 634 (arc4random_uniform(d_avail) << MALLOC_MINSHIFT)); 635 636 rbytes_init(d); 637 d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS; 638 regioninfo_size = d->regions_total * sizeof(struct region_info); 639 d->r = MMAP(regioninfo_size); 640 if (d->r == MAP_FAILED) { 641 wrterror("malloc init mmap failed", NULL); 642 d->regions_total = 0; 643 return 1; 644 } 645 for (i = 0; i <= MALLOC_MAXSHIFT; i++) { 646 LIST_INIT(&d->chunk_info_list[i]); 647 for (j = 0; j < MALLOC_CHUNK_LISTS; j++) 648 LIST_INIT(&d->chunk_dir[i][j]); 649 } 650 STATS_ADD(d->malloc_used, regioninfo_size); 651 d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d; 652 d->canary2 = ~d->canary1; 653 654 *dp = d; 655 656 /* 657 * Options have been set and will never be reset. 658 * Prevent further tampering with them. 
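 *
 * The guarded sketch below (hypothetical names, never compiled) shows the
 * same idea in isolation: fill in a page-aligned, page-sized object while
 * it is still writable, then revoke write access with mprotect(2) so that
 * later stray writes fault instead of silently changing behaviour. A
 * 4096-byte page is assumed purely for the example.
 */
#if 0
#include <stdlib.h>
#include <sys/mman.h>

static struct {
	int	opt_a;
	int	opt_b;
	char	_pad[4096 - 2 * sizeof(int)];
} ro_config __attribute__((aligned(4096)));

static void
lock_config(void)
{
	ro_config.opt_a = 1;	/* set options while the page is writable */
	ro_config.opt_b = 2;
	if (mprotect(&ro_config, sizeof(ro_config), PROT_READ) == -1)
		abort();	/* could not lock the settings down */
}
#endif
/*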
659 */ 660 if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) 661 mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ); 662 663 return 0; 664 } 665 666 static int 667 omalloc_grow(struct dir_info *d) 668 { 669 size_t newtotal; 670 size_t newsize; 671 size_t mask; 672 size_t i; 673 struct region_info *p; 674 675 if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2 ) 676 return 1; 677 678 newtotal = d->regions_total * 2; 679 newsize = newtotal * sizeof(struct region_info); 680 mask = newtotal - 1; 681 682 p = MMAP(newsize); 683 if (p == MAP_FAILED) 684 return 1; 685 686 STATS_ADD(d->malloc_used, newsize); 687 memset(p, 0, newsize); 688 STATS_ZERO(d->inserts); 689 STATS_ZERO(d->insert_collisions); 690 for (i = 0; i < d->regions_total; i++) { 691 void *q = d->r[i].p; 692 if (q != NULL) { 693 size_t index = hash(q) & mask; 694 STATS_INC(d->inserts); 695 while (p[index].p != NULL) { 696 index = (index - 1) & mask; 697 STATS_INC(d->insert_collisions); 698 } 699 p[index] = d->r[i]; 700 } 701 } 702 /* avoid pages containing meta info to end up in cache */ 703 if (munmap(d->r, d->regions_total * sizeof(struct region_info))) 704 wrterror("munmap", d->r); 705 else 706 STATS_SUB(d->malloc_used, 707 d->regions_total * sizeof(struct region_info)); 708 d->regions_free = d->regions_free + d->regions_total; 709 d->regions_total = newtotal; 710 d->r = p; 711 return 0; 712 } 713 714 static struct chunk_info * 715 alloc_chunk_info(struct dir_info *d, int bits) 716 { 717 struct chunk_info *p; 718 size_t size, count; 719 720 if (bits == 0) 721 count = MALLOC_PAGESIZE / MALLOC_MINSIZE; 722 else 723 count = MALLOC_PAGESIZE >> bits; 724 725 size = howmany(count, MALLOC_BITS); 726 size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short); 727 size = ALIGN(size); 728 729 if (LIST_EMPTY(&d->chunk_info_list[bits])) { 730 char *q; 731 int i; 732 733 q = MMAP(MALLOC_PAGESIZE); 734 if (q == MAP_FAILED) 735 return NULL; 736 STATS_ADD(d->malloc_used, MALLOC_PAGESIZE); 737 count = MALLOC_PAGESIZE / size; 738 for (i = 0; i < count; i++, q += size) 739 LIST_INSERT_HEAD(&d->chunk_info_list[bits], 740 (struct chunk_info *)q, entries); 741 } 742 p = LIST_FIRST(&d->chunk_info_list[bits]); 743 LIST_REMOVE(p, entries); 744 memset(p, 0, size); 745 p->canary = d->canary1; 746 return p; 747 } 748 749 750 /* 751 * The hashtable uses the assumption that p is never NULL. This holds since 752 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ. 
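 *
 * The guarded sketch below (hypothetical toy_* names, never compiled)
 * shows the probing scheme insert() and find() share: the table size is a
 * power of two, a NULL slot terminates the search, and collisions are
 * resolved by stepping downwards with "& mask". The real code also grows
 * the table on demand and masks chunk bits out of the stored pointers.
 */
#if 0
struct slot {
	void	*p;
	size_t	 size;
};

static void
toy_insert(struct slot *table, size_t mask, void *p, size_t size)
{
	size_t i = hash(p) & mask;	/* hash() as defined above */

	while (table[i].p != NULL)	/* occupied: probe downwards */
		i = (i - 1) & mask;
	table[i].p = p;
	table[i].size = size;
}

static struct slot *
toy_find(struct slot *table, size_t mask, void *p)
{
	size_t i = hash(p) & mask;

	while (table[i].p != NULL) {
		if (table[i].p == p)
			return &table[i];
		i = (i - 1) & mask;
	}
	return NULL;			/* hit an empty slot: not present */
}
#endif
/*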
753 */ 754 static int 755 insert(struct dir_info *d, void *p, size_t sz, void *f) 756 { 757 size_t index; 758 size_t mask; 759 void *q; 760 761 if (d->regions_free * 4 < d->regions_total) { 762 if (omalloc_grow(d)) 763 return 1; 764 } 765 mask = d->regions_total - 1; 766 index = hash(p) & mask; 767 q = d->r[index].p; 768 STATS_INC(d->inserts); 769 while (q != NULL) { 770 index = (index - 1) & mask; 771 q = d->r[index].p; 772 STATS_INC(d->insert_collisions); 773 } 774 d->r[index].p = p; 775 d->r[index].size = sz; 776 #ifdef MALLOC_STATS 777 d->r[index].f = f; 778 #endif 779 d->regions_free--; 780 return 0; 781 } 782 783 static struct region_info * 784 find(struct dir_info *d, void *p) 785 { 786 size_t index; 787 size_t mask = d->regions_total - 1; 788 void *q, *r; 789 790 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || 791 d->canary1 != ~d->canary2) 792 wrterror("internal struct corrupt", NULL); 793 p = MASK_POINTER(p); 794 index = hash(p) & mask; 795 r = d->r[index].p; 796 q = MASK_POINTER(r); 797 STATS_INC(d->finds); 798 while (q != p && r != NULL) { 799 index = (index - 1) & mask; 800 r = d->r[index].p; 801 q = MASK_POINTER(r); 802 STATS_INC(d->find_collisions); 803 } 804 return (q == p && r != NULL) ? &d->r[index] : NULL; 805 } 806 807 static void 808 delete(struct dir_info *d, struct region_info *ri) 809 { 810 /* algorithm R, Knuth Vol III section 6.4 */ 811 size_t mask = d->regions_total - 1; 812 size_t i, j, r; 813 814 if (d->regions_total & (d->regions_total - 1)) 815 wrterror("regions_total not 2^x", NULL); 816 d->regions_free++; 817 STATS_INC(getpool()->deletes); 818 819 i = ri - d->r; 820 for (;;) { 821 d->r[i].p = NULL; 822 d->r[i].size = 0; 823 j = i; 824 for (;;) { 825 i = (i - 1) & mask; 826 if (d->r[i].p == NULL) 827 return; 828 r = hash(d->r[i].p) & mask; 829 if ((i <= r && r < j) || (r < j && j < i) || 830 (j < i && i <= r)) 831 continue; 832 d->r[j] = d->r[i]; 833 STATS_INC(getpool()->delete_moves); 834 break; 835 } 836 837 } 838 } 839 840 /* 841 * Allocate a page of chunks 842 */ 843 static struct chunk_info * 844 omalloc_make_chunks(struct dir_info *d, int bits, int listnum) 845 { 846 struct chunk_info *bp; 847 void *pp; 848 int i, k; 849 850 /* Allocate a new bucket */ 851 pp = map(d, NULL, MALLOC_PAGESIZE, 0); 852 if (pp == MAP_FAILED) 853 return NULL; 854 855 bp = alloc_chunk_info(d, bits); 856 if (bp == NULL) { 857 unmap(d, pp, MALLOC_PAGESIZE); 858 return NULL; 859 } 860 861 /* memory protect the page allocated in the malloc(0) case */ 862 if (bits == 0) { 863 bp->size = 0; 864 bp->shift = 1; 865 i = MALLOC_MINSIZE - 1; 866 while (i >>= 1) 867 bp->shift++; 868 bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift; 869 bp->page = pp; 870 871 k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE); 872 if (k < 0) { 873 unmap(d, pp, MALLOC_PAGESIZE); 874 LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries); 875 return NULL; 876 } 877 } else { 878 bp->size = 1U << bits; 879 bp->shift = bits; 880 bp->total = bp->free = MALLOC_PAGESIZE >> bits; 881 bp->page = pp; 882 } 883 884 /* set all valid bits in the bitmap */ 885 k = bp->total; 886 i = 0; 887 888 /* Do a bunch at a time */ 889 for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS) 890 bp->bits[i / MALLOC_BITS] = (u_short)~0U; 891 892 for (; i < k; i++) 893 bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS); 894 895 LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries); 896 897 bits++; 898 if ((uintptr_t)pp & bits) 899 wrterror("pp & bits", pp); 900 901 insert(d, (void *)((uintptr_t)pp | bits), 
(uintptr_t)bp, NULL); 902 return bp; 903 } 904 905 906 /* 907 * Allocate a chunk 908 */ 909 static void * 910 malloc_bytes(struct dir_info *d, size_t size, void *f) 911 { 912 int i, j, listnum; 913 size_t k; 914 u_short u, *lp; 915 struct chunk_info *bp; 916 917 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || 918 d->canary1 != ~d->canary2) 919 wrterror("internal struct corrupt", NULL); 920 /* Don't bother with anything less than this */ 921 /* unless we have a malloc(0) requests */ 922 if (size != 0 && size < MALLOC_MINSIZE) 923 size = MALLOC_MINSIZE; 924 925 /* Find the right bucket */ 926 if (size == 0) 927 j = 0; 928 else { 929 j = MALLOC_MINSHIFT; 930 i = (size - 1) >> (MALLOC_MINSHIFT - 1); 931 while (i >>= 1) 932 j++; 933 } 934 935 listnum = getrbyte(d) % MALLOC_CHUNK_LISTS; 936 /* If it's empty, make a page more of that size chunks */ 937 if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) { 938 bp = omalloc_make_chunks(d, j, listnum); 939 if (bp == NULL) 940 return NULL; 941 } 942 943 if (bp->canary != d->canary1) 944 wrterror("chunk info corrupted", NULL); 945 946 i = d->chunk_start; 947 if (bp->free > 1) 948 i += getrbyte(d); 949 if (i >= bp->total) 950 i &= bp->total - 1; 951 for (;;) { 952 for (;;) { 953 lp = &bp->bits[i / MALLOC_BITS]; 954 if (!*lp) { 955 i += MALLOC_BITS; 956 i &= ~(MALLOC_BITS - 1); 957 if (i >= bp->total) 958 i = 0; 959 } else 960 break; 961 } 962 k = i % MALLOC_BITS; 963 u = 1 << k; 964 if (*lp & u) 965 break; 966 if (++i >= bp->total) 967 i = 0; 968 } 969 d->chunk_start += i + 1; 970 #ifdef MALLOC_STATS 971 if (i == 0) { 972 struct region_info *r = find(d, bp->page); 973 r->f = f; 974 } 975 #endif 976 977 *lp ^= u; 978 979 /* If there are no more free, remove from free-list */ 980 if (!--bp->free) 981 LIST_REMOVE(bp, entries); 982 983 /* Adjust to the real offset of that chunk */ 984 k += (lp - bp->bits) * MALLOC_BITS; 985 k <<= bp->shift; 986 987 if (mopts.malloc_junk == 2 && bp->size > 0) 988 memset((char *)bp->page + k, SOME_JUNK, bp->size); 989 return ((char *)bp->page + k); 990 } 991 992 static uint32_t 993 find_chunknum(struct dir_info *d, struct region_info *r, void *ptr) 994 { 995 struct chunk_info *info; 996 uint32_t chunknum; 997 998 info = (struct chunk_info *)r->size; 999 if (info->canary != d->canary1) 1000 wrterror("chunk info corrupted", NULL); 1001 1002 /* Find the chunk number on the page */ 1003 chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift; 1004 1005 if ((uintptr_t)ptr & ((1U << (info->shift)) - 1)) { 1006 wrterror("modified chunk-pointer", ptr); 1007 return -1; 1008 } 1009 if (info->bits[chunknum / MALLOC_BITS] & 1010 (1U << (chunknum % MALLOC_BITS))) { 1011 wrterror("chunk is already free", ptr); 1012 return -1; 1013 } 1014 return chunknum; 1015 } 1016 1017 /* 1018 * Free a chunk, and possibly the page it's on, if the page becomes empty. 
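 *
 * A worked example of the chunk-number arithmetic used by find_chunknum()
 * and free_bytes(), with hypothetical values (a 4096-byte page of 64-byte
 * chunks, so shift == 6); guarded out, illustrative only.
 */
#if 0
static void
chunknum_example(void)
{
	uintptr_t page = 0x1000;			/* start of the page */
	uintptr_t ptr = page + 11 * 64;			/* 12th chunk on it */
	uint32_t chunknum = (ptr & MALLOC_PAGEMASK) >> 6;	/* == 11 */
	u_short bits[4] = { 0 };	/* 64 chunks, 16 bits per word */

	/* Freeing the chunk turns its "free" bit back on ... */
	bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);

	/* ... and a pointer that does not sit on a chunk boundary would be
	 * rejected by find_chunknum() as a "modified chunk-pointer". */
	if (ptr & ((1U << 6) - 1))
		abort();
}
#endif
/*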
1019 */ 1020 static void 1021 free_bytes(struct dir_info *d, struct region_info *r, void *ptr) 1022 { 1023 struct chunk_head *mp; 1024 struct chunk_info *info; 1025 uint32_t chunknum; 1026 int listnum; 1027 1028 info = (struct chunk_info *)r->size; 1029 if ((chunknum = find_chunknum(d, r, ptr)) == -1) 1030 return; 1031 1032 info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS); 1033 info->free++; 1034 1035 if (info->free == 1) { 1036 /* Page became non-full */ 1037 listnum = getrbyte(d) % MALLOC_CHUNK_LISTS; 1038 if (info->size != 0) 1039 mp = &d->chunk_dir[info->shift][listnum]; 1040 else 1041 mp = &d->chunk_dir[0][listnum]; 1042 1043 LIST_INSERT_HEAD(mp, info, entries); 1044 return; 1045 } 1046 1047 if (info->free != info->total) 1048 return; 1049 1050 LIST_REMOVE(info, entries); 1051 1052 if (info->size == 0 && !mopts.malloc_freeunmap) 1053 mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE); 1054 unmap(d, info->page, MALLOC_PAGESIZE); 1055 1056 delete(d, r); 1057 if (info->size != 0) 1058 mp = &d->chunk_info_list[info->shift]; 1059 else 1060 mp = &d->chunk_info_list[0]; 1061 LIST_INSERT_HEAD(mp, info, entries); 1062 } 1063 1064 1065 1066 static void * 1067 omalloc(size_t sz, int zero_fill, void *f) 1068 { 1069 struct dir_info *pool = getpool(); 1070 void *p; 1071 size_t psz; 1072 1073 if (sz > MALLOC_MAXCHUNK) { 1074 if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { 1075 errno = ENOMEM; 1076 return NULL; 1077 } 1078 sz += mopts.malloc_guard; 1079 psz = PAGEROUND(sz); 1080 p = map(pool, NULL, psz, zero_fill); 1081 if (p == MAP_FAILED) { 1082 errno = ENOMEM; 1083 return NULL; 1084 } 1085 if (insert(pool, p, sz, f)) { 1086 unmap(pool, p, psz); 1087 errno = ENOMEM; 1088 return NULL; 1089 } 1090 if (mopts.malloc_guard) { 1091 if (mprotect((char *)p + psz - mopts.malloc_guard, 1092 mopts.malloc_guard, PROT_NONE)) 1093 wrterror("mprotect", NULL); 1094 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard); 1095 } 1096 1097 if (mopts.malloc_move && 1098 sz - mopts.malloc_guard < MALLOC_PAGESIZE - 1099 MALLOC_LEEWAY) { 1100 /* fill whole allocation */ 1101 if (mopts.malloc_junk == 2) 1102 memset(p, SOME_JUNK, psz - mopts.malloc_guard); 1103 /* shift towards the end */ 1104 p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY - 1105 (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1)); 1106 /* fill zeros if needed and overwritten above */ 1107 if (zero_fill && mopts.malloc_junk == 2) 1108 memset(p, 0, sz - mopts.malloc_guard); 1109 } else { 1110 if (mopts.malloc_junk == 2) { 1111 if (zero_fill) 1112 memset((char *)p + sz - mopts.malloc_guard, 1113 SOME_JUNK, psz - sz); 1114 else 1115 memset(p, SOME_JUNK, 1116 psz - mopts.malloc_guard); 1117 } 1118 } 1119 1120 } else { 1121 /* takes care of SOME_JUNK */ 1122 p = malloc_bytes(pool, sz, f); 1123 if (zero_fill && p != NULL && sz > 0) 1124 memset(p, 0, sz); 1125 } 1126 1127 return p; 1128 } 1129 1130 /* 1131 * Common function for handling recursion. Only 1132 * print the error message once, to avoid making the problem 1133 * potentially worse. 
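 *
 * Shape of the guard, as a guarded sketch (hypothetical names, never
 * compiled): every entry point bumps a counter after taking the lock, so
 * re-entry, for instance from a signal handler interrupting malloc(), is
 * detected rather than deadlocking or corrupting the pool.
 */
#if 0
static int example_active;

static void *
guarded_entry(size_t size)
{
	void *r;

	_MALLOC_LOCK();
	if (example_active++) {
		example_active--;	/* what malloc_recurse() does below */
		_MALLOC_UNLOCK();
		return NULL;
	}
	r = NULL;			/* the real allocation work goes here */
	example_active--;
	_MALLOC_UNLOCK();
	return r;
}
#endif
/*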
1134 */ 1135 static void 1136 malloc_recurse(void) 1137 { 1138 static int noprint; 1139 1140 if (noprint == 0) { 1141 noprint = 1; 1142 wrterror("recursive call", NULL); 1143 } 1144 malloc_active--; 1145 _MALLOC_UNLOCK(); 1146 errno = EDEADLK; 1147 } 1148 1149 static int 1150 malloc_init(void) 1151 { 1152 if (omalloc_init(&mopts.malloc_pool)) { 1153 _MALLOC_UNLOCK(); 1154 if (mopts.malloc_xmalloc) 1155 wrterror("out of memory", NULL); 1156 errno = ENOMEM; 1157 return -1; 1158 } 1159 return 0; 1160 } 1161 1162 void * 1163 malloc(size_t size) 1164 { 1165 void *r; 1166 int saved_errno = errno; 1167 1168 _MALLOC_LOCK(); 1169 malloc_func = "malloc():"; 1170 if (getpool() == NULL) { 1171 if (malloc_init() != 0) 1172 return NULL; 1173 } 1174 1175 if (malloc_active++) { 1176 malloc_recurse(); 1177 return NULL; 1178 } 1179 r = omalloc(size, 0, CALLER); 1180 malloc_active--; 1181 _MALLOC_UNLOCK(); 1182 if (r == NULL && mopts.malloc_xmalloc) { 1183 wrterror("out of memory", NULL); 1184 errno = ENOMEM; 1185 } 1186 if (r != NULL) 1187 errno = saved_errno; 1188 return r; 1189 } 1190 /*DEF_STRONG(malloc);*/ 1191 1192 static void 1193 ofree(void *p) 1194 { 1195 struct dir_info *pool = getpool(); 1196 struct region_info *r; 1197 size_t sz; 1198 1199 r = find(pool, p); 1200 if (r == NULL) { 1201 wrterror("bogus pointer (double free?)", p); 1202 return; 1203 } 1204 REALSIZE(sz, r); 1205 if (sz > MALLOC_MAXCHUNK) { 1206 if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE - 1207 MALLOC_LEEWAY) { 1208 if (r->p != p) { 1209 wrterror("bogus pointer", p); 1210 return; 1211 } 1212 } else { 1213 #if notyetbecause_of_realloc 1214 /* shifted towards the end */ 1215 if (p != ((char *)r->p) + ((MALLOC_PAGESIZE - 1216 MALLOC_MINSIZE - sz - mopts.malloc_guard) & 1217 ~(MALLOC_MINSIZE-1))) { 1218 } 1219 #endif 1220 p = r->p; 1221 } 1222 if (mopts.malloc_guard) { 1223 if (sz < mopts.malloc_guard) 1224 wrterror("guard size", NULL); 1225 if (!mopts.malloc_freeunmap) { 1226 if (mprotect((char *)p + PAGEROUND(sz) - 1227 mopts.malloc_guard, mopts.malloc_guard, 1228 PROT_READ | PROT_WRITE)) 1229 wrterror("mprotect", NULL); 1230 } 1231 STATS_SUB(pool->malloc_guarded, mopts.malloc_guard); 1232 } 1233 if (mopts.malloc_junk && !mopts.malloc_freeunmap) { 1234 size_t amt = mopts.malloc_junk == 1 ? MALLOC_MAXCHUNK : 1235 PAGEROUND(sz) - mopts.malloc_guard; 1236 memset(p, SOME_FREEJUNK, amt); 1237 } 1238 unmap(pool, p, PAGEROUND(sz)); 1239 delete(pool, r); 1240 } else { 1241 void *tmp; 1242 int i; 1243 1244 if (mopts.malloc_junk && sz > 0) 1245 memset(p, SOME_FREEJUNK, sz); 1246 if (!mopts.malloc_freenow) { 1247 if (find_chunknum(pool, r, p) == -1) 1248 return; 1249 i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK; 1250 tmp = p; 1251 p = pool->delayed_chunks[i]; 1252 if (tmp == p) { 1253 wrterror("double free", p); 1254 return; 1255 } 1256 pool->delayed_chunks[i] = tmp; 1257 } 1258 if (p != NULL) { 1259 r = find(pool, p); 1260 if (r == NULL) { 1261 wrterror("bogus pointer (double free?)", p); 1262 return; 1263 } 1264 free_bytes(pool, r, p); 1265 } 1266 } 1267 } 1268 1269 void 1270 free(void *ptr) 1271 { 1272 int saved_errno = errno; 1273 1274 /* This is legal. 
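	 * ISO C guarantees this: if the argument is a null pointer, free()
	 * does nothing, so callers never need an "if (p != NULL)" guard of
	 * their own.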
*/ 1275 if (ptr == NULL) 1276 return; 1277 1278 _MALLOC_LOCK(); 1279 malloc_func = "free():"; 1280 if (getpool() == NULL) { 1281 _MALLOC_UNLOCK(); 1282 wrterror("free() called before allocation", NULL); 1283 return; 1284 } 1285 if (malloc_active++) { 1286 malloc_recurse(); 1287 return; 1288 } 1289 ofree(ptr); 1290 malloc_active--; 1291 _MALLOC_UNLOCK(); 1292 errno = saved_errno; 1293 } 1294 /*DEF_STRONG(free);*/ 1295 1296 1297 static void * 1298 orealloc(void *p, size_t newsz, void *f) 1299 { 1300 struct dir_info *pool = getpool(); 1301 struct region_info *r; 1302 size_t oldsz, goldsz, gnewsz; 1303 void *q; 1304 1305 if (p == NULL) 1306 return omalloc(newsz, 0, f); 1307 1308 r = find(pool, p); 1309 if (r == NULL) { 1310 wrterror("bogus pointer (double free?)", p); 1311 return NULL; 1312 } 1313 if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { 1314 errno = ENOMEM; 1315 return NULL; 1316 } 1317 1318 REALSIZE(oldsz, r); 1319 goldsz = oldsz; 1320 if (oldsz > MALLOC_MAXCHUNK) { 1321 if (oldsz < mopts.malloc_guard) 1322 wrterror("guard size", NULL); 1323 oldsz -= mopts.malloc_guard; 1324 } 1325 1326 gnewsz = newsz; 1327 if (gnewsz > MALLOC_MAXCHUNK) 1328 gnewsz += mopts.malloc_guard; 1329 1330 if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p && 1331 !mopts.malloc_realloc) { 1332 size_t roldsz = PAGEROUND(goldsz); 1333 size_t rnewsz = PAGEROUND(gnewsz); 1334 1335 if (rnewsz > roldsz) { 1336 if (!mopts.malloc_guard) { 1337 void *hint = (char *)p + roldsz; 1338 size_t needed = rnewsz - roldsz; 1339 1340 STATS_INC(pool->cheap_realloc_tries); 1341 q = map(pool, hint, needed, 0); 1342 if (q == hint) 1343 goto gotit; 1344 zapcacheregion(pool, hint, needed); 1345 q = MQUERY(hint, needed); 1346 if (q == hint) 1347 q = MMAPA(hint, needed); 1348 else 1349 q = MAP_FAILED; 1350 if (q == hint) { 1351 gotit: 1352 STATS_ADD(pool->malloc_used, needed); 1353 if (mopts.malloc_junk == 2) 1354 memset(q, SOME_JUNK, needed); 1355 r->size = newsz; 1356 STATS_SETF(r, f); 1357 STATS_INC(pool->cheap_reallocs); 1358 return p; 1359 } else if (q != MAP_FAILED) { 1360 if (munmap(q, needed)) 1361 wrterror("munmap", q); 1362 } 1363 } 1364 } else if (rnewsz < roldsz) { 1365 if (mopts.malloc_guard) { 1366 if (mprotect((char *)p + roldsz - 1367 mopts.malloc_guard, mopts.malloc_guard, 1368 PROT_READ | PROT_WRITE)) 1369 wrterror("mprotect", NULL); 1370 if (mprotect((char *)p + rnewsz - 1371 mopts.malloc_guard, mopts.malloc_guard, 1372 PROT_NONE)) 1373 wrterror("mprotect", NULL); 1374 } 1375 unmap(pool, (char *)p + rnewsz, roldsz - rnewsz); 1376 r->size = gnewsz; 1377 STATS_SETF(r, f); 1378 return p; 1379 } else { 1380 if (newsz > oldsz && mopts.malloc_junk == 2) 1381 memset((char *)p + newsz, SOME_JUNK, 1382 rnewsz - mopts.malloc_guard - newsz); 1383 r->size = gnewsz; 1384 STATS_SETF(r, f); 1385 return p; 1386 } 1387 } 1388 if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) { 1389 if (mopts.malloc_junk == 2 && newsz > 0) 1390 memset((char *)p + newsz, SOME_JUNK, oldsz - newsz); 1391 STATS_SETF(r, f); 1392 return p; 1393 } else if (newsz != oldsz || mopts.malloc_realloc) { 1394 q = omalloc(newsz, 0, f); 1395 if (q == NULL) 1396 return NULL; 1397 if (newsz != 0 && oldsz != 0) 1398 memcpy(q, p, oldsz < newsz ? 
oldsz : newsz); 1399 ofree(p); 1400 return q; 1401 } else { 1402 STATS_SETF(r, f); 1403 return p; 1404 } 1405 } 1406 1407 void * 1408 realloc(void *ptr, size_t size) 1409 { 1410 void *r; 1411 int saved_errno = errno; 1412 1413 _MALLOC_LOCK(); 1414 malloc_func = "realloc():"; 1415 if (getpool() == NULL) { 1416 if (malloc_init() != 0) 1417 return NULL; 1418 } 1419 if (malloc_active++) { 1420 malloc_recurse(); 1421 return NULL; 1422 } 1423 r = orealloc(ptr, size, CALLER); 1424 1425 malloc_active--; 1426 _MALLOC_UNLOCK(); 1427 if (r == NULL && mopts.malloc_xmalloc) { 1428 wrterror("out of memory", NULL); 1429 errno = ENOMEM; 1430 } 1431 if (r != NULL) 1432 errno = saved_errno; 1433 return r; 1434 } 1435 /*DEF_STRONG(realloc);*/ 1436 1437 1438 /* 1439 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX 1440 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW 1441 */ 1442 #define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4)) 1443 1444 void * 1445 calloc(size_t nmemb, size_t size) 1446 { 1447 void *r; 1448 int saved_errno = errno; 1449 1450 _MALLOC_LOCK(); 1451 malloc_func = "calloc():"; 1452 if (getpool() == NULL) { 1453 if (malloc_init() != 0) 1454 return NULL; 1455 } 1456 if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && 1457 nmemb > 0 && SIZE_MAX / nmemb < size) { 1458 _MALLOC_UNLOCK(); 1459 if (mopts.malloc_xmalloc) 1460 wrterror("out of memory", NULL); 1461 errno = ENOMEM; 1462 return NULL; 1463 } 1464 1465 if (malloc_active++) { 1466 malloc_recurse(); 1467 return NULL; 1468 } 1469 1470 size *= nmemb; 1471 r = omalloc(size, 1, CALLER); 1472 1473 malloc_active--; 1474 _MALLOC_UNLOCK(); 1475 if (r == NULL && mopts.malloc_xmalloc) { 1476 wrterror("out of memory", NULL); 1477 errno = ENOMEM; 1478 } 1479 if (r != NULL) 1480 errno = saved_errno; 1481 return r; 1482 } 1483 /*DEF_STRONG(calloc);*/ 1484 1485 static void * 1486 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill) 1487 { 1488 char *p, *q; 1489 1490 if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0) { 1491 wrterror("mapalign bad alignment", NULL); 1492 return MAP_FAILED; 1493 } 1494 if (sz != PAGEROUND(sz)) { 1495 wrterror("mapalign round", NULL); 1496 return MAP_FAILED; 1497 } 1498 1499 /* Allocate sz + alignment bytes of memory, which must include a 1500 * subrange of size bytes that is properly aligned. Unmap the 1501 * other bytes, and then return that subrange. 1502 */ 1503 1504 /* We need sz + alignment to fit into a size_t. */ 1505 if (alignment > SIZE_MAX - sz) 1506 return MAP_FAILED; 1507 1508 p = map(d, NULL, sz + alignment, zero_fill); 1509 if (p == MAP_FAILED) 1510 return MAP_FAILED; 1511 q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1)); 1512 if (q != p) { 1513 if (munmap(p, q - p)) 1514 wrterror("munmap", p); 1515 } 1516 if (munmap(q + sz, alignment - (q - p))) 1517 wrterror("munmap", q + sz); 1518 STATS_SUB(d->malloc_used, alignment); 1519 1520 return q; 1521 } 1522 1523 static void * 1524 omemalign(size_t alignment, size_t sz, int zero_fill, void *f) 1525 { 1526 struct dir_info *pool = getpool(); 1527 size_t psz; 1528 void *p; 1529 1530 if (alignment <= MALLOC_PAGESIZE) { 1531 /* 1532 * max(size, alignment) is enough to assure the requested alignment, 1533 * since the allocator always allocates power-of-two blocks. 
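		 * For example, omemalign(32, 24, ...) bumps the request to a
		 * 32-byte allocation; 32-byte chunks are carved out of their
		 * page at offsets that are multiples of 32, so the result is
		 * 32-byte aligned by construction. Alignments larger than a
		 * page take the mapalign() path below instead.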
1534 */ 1535 if (sz < alignment) 1536 sz = alignment; 1537 return omalloc(sz, zero_fill, f); 1538 } 1539 1540 if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { 1541 errno = ENOMEM; 1542 return NULL; 1543 } 1544 1545 sz += mopts.malloc_guard; 1546 psz = PAGEROUND(sz); 1547 1548 p = mapalign(pool, alignment, psz, zero_fill); 1549 if (p == NULL) { 1550 errno = ENOMEM; 1551 return NULL; 1552 } 1553 1554 if (insert(pool, p, sz, f)) { 1555 unmap(pool, p, psz); 1556 errno = ENOMEM; 1557 return NULL; 1558 } 1559 1560 if (mopts.malloc_guard) { 1561 if (mprotect((char *)p + psz - mopts.malloc_guard, 1562 mopts.malloc_guard, PROT_NONE)) 1563 wrterror("mprotect", NULL); 1564 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard); 1565 } 1566 1567 if (mopts.malloc_junk == 2) { 1568 if (zero_fill) 1569 memset((char *)p + sz - mopts.malloc_guard, 1570 SOME_JUNK, psz - sz); 1571 else 1572 memset(p, SOME_JUNK, psz - mopts.malloc_guard); 1573 } 1574 1575 return p; 1576 } 1577 1578 int 1579 posix_memalign(void **memptr, size_t alignment, size_t size) 1580 { 1581 int res, saved_errno = errno; 1582 void *r; 1583 1584 /* Make sure that alignment is a large enough power of 2. */ 1585 if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *)) 1586 return EINVAL; 1587 1588 _MALLOC_LOCK(); 1589 malloc_func = "posix_memalign():"; 1590 if (getpool() == NULL) { 1591 if (malloc_init() != 0) 1592 goto err; 1593 } 1594 if (malloc_active++) { 1595 malloc_recurse(); 1596 goto err; 1597 } 1598 r = omemalign(alignment, size, 0, CALLER); 1599 malloc_active--; 1600 _MALLOC_UNLOCK(); 1601 if (r == NULL) { 1602 if (mopts.malloc_xmalloc) { 1603 wrterror("out of memory", NULL); 1604 errno = ENOMEM; 1605 } 1606 goto err; 1607 } 1608 errno = saved_errno; 1609 *memptr = r; 1610 return 0; 1611 1612 err: 1613 res = errno; 1614 errno = saved_errno; 1615 return res; 1616 } 1617 /*DEF_STRONG(posix_memalign);*/ 1618 1619 #ifdef MALLOC_STATS 1620 1621 struct malloc_leak { 1622 void (*f)(); 1623 size_t total_size; 1624 int count; 1625 }; 1626 1627 struct leaknode { 1628 RB_ENTRY(leaknode) entry; 1629 struct malloc_leak d; 1630 }; 1631 1632 static int 1633 leakcmp(struct leaknode *e1, struct leaknode *e2) 1634 { 1635 return e1->d.f < e2->d.f ? 
-1 : e1->d.f > e2->d.f; 1636 } 1637 1638 static RB_HEAD(leaktree, leaknode) leakhead; 1639 RB_GENERATE_STATIC(leaktree, leaknode, entry, leakcmp) 1640 1641 static void 1642 putleakinfo(void *f, size_t sz, int cnt) 1643 { 1644 struct leaknode key, *p; 1645 static struct leaknode *page; 1646 static int used; 1647 1648 if (cnt == 0) 1649 return; 1650 1651 key.d.f = f; 1652 p = RB_FIND(leaktree, &leakhead, &key); 1653 if (p == NULL) { 1654 if (page == NULL || 1655 used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) { 1656 page = MMAP(MALLOC_PAGESIZE); 1657 if (page == MAP_FAILED) 1658 return; 1659 used = 0; 1660 } 1661 p = &page[used++]; 1662 p->d.f = f; 1663 p->d.total_size = sz * cnt; 1664 p->d.count = cnt; 1665 RB_INSERT(leaktree, &leakhead, p); 1666 } else { 1667 p->d.total_size += sz * cnt; 1668 p->d.count += cnt; 1669 } 1670 } 1671 1672 static struct malloc_leak *malloc_leaks; 1673 1674 static void 1675 dump_leaks(int fd) 1676 { 1677 struct leaknode *p; 1678 char buf[64]; 1679 int i = 0; 1680 1681 snprintf(buf, sizeof(buf), "Leak report\n"); 1682 write(fd, buf, strlen(buf)); 1683 snprintf(buf, sizeof(buf), " f sum # avg\n"); 1684 write(fd, buf, strlen(buf)); 1685 /* XXX only one page of summary */ 1686 if (malloc_leaks == NULL) 1687 malloc_leaks = MMAP(MALLOC_PAGESIZE); 1688 if (malloc_leaks != MAP_FAILED) 1689 memset(malloc_leaks, 0, MALLOC_PAGESIZE); 1690 RB_FOREACH(p, leaktree, &leakhead) { 1691 snprintf(buf, sizeof(buf), "%18p %7zu %6u %6zu\n", p->d.f, 1692 p->d.total_size, p->d.count, p->d.total_size / p->d.count); 1693 write(fd, buf, strlen(buf)); 1694 if (malloc_leaks == MAP_FAILED || 1695 i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak)) 1696 continue; 1697 malloc_leaks[i].f = p->d.f; 1698 malloc_leaks[i].total_size = p->d.total_size; 1699 malloc_leaks[i].count = p->d.count; 1700 i++; 1701 } 1702 } 1703 1704 static void 1705 dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist) 1706 { 1707 char buf[64]; 1708 1709 while (p != NULL) { 1710 snprintf(buf, sizeof(buf), "chunk %18p %18p %4d %d/%d\n", 1711 p->page, ((p->bits[0] & 1) ? 
NULL : f), 1712 p->size, p->free, p->total); 1713 write(fd, buf, strlen(buf)); 1714 if (!fromfreelist) { 1715 if (p->bits[0] & 1) 1716 putleakinfo(NULL, p->size, p->total - p->free); 1717 else { 1718 putleakinfo(f, p->size, 1); 1719 putleakinfo(NULL, p->size, 1720 p->total - p->free - 1); 1721 } 1722 break; 1723 } 1724 p = LIST_NEXT(p, entries); 1725 if (p != NULL) { 1726 snprintf(buf, sizeof(buf), " "); 1727 write(fd, buf, strlen(buf)); 1728 } 1729 } 1730 } 1731 1732 static void 1733 dump_free_chunk_info(int fd, struct dir_info *d) 1734 { 1735 char buf[64]; 1736 int i, j, count; 1737 struct chunk_info *p; 1738 1739 snprintf(buf, sizeof(buf), "Free chunk structs:\n"); 1740 write(fd, buf, strlen(buf)); 1741 for (i = 0; i <= MALLOC_MAXSHIFT; i++) { 1742 count = 0; 1743 LIST_FOREACH(p, &d->chunk_info_list[i], entries) 1744 count++; 1745 for (j = 0; j < MALLOC_CHUNK_LISTS; j++) { 1746 p = LIST_FIRST(&d->chunk_dir[i][j]); 1747 if (p == NULL && count == 0) 1748 continue; 1749 snprintf(buf, sizeof(buf), "%2d) %3d ", i, count); 1750 write(fd, buf, strlen(buf)); 1751 if (p != NULL) 1752 dump_chunk(fd, p, NULL, 1); 1753 else 1754 write(fd, "\n", 1); 1755 } 1756 } 1757 1758 } 1759 1760 static void 1761 dump_free_page_info(int fd, struct dir_info *d) 1762 { 1763 char buf[64]; 1764 int i; 1765 1766 snprintf(buf, sizeof(buf), "Free pages cached: %zu\n", 1767 d->free_regions_size); 1768 write(fd, buf, strlen(buf)); 1769 for (i = 0; i < mopts.malloc_cache; i++) { 1770 if (d->free_regions[i].p != NULL) { 1771 snprintf(buf, sizeof(buf), "%2d) ", i); 1772 write(fd, buf, strlen(buf)); 1773 snprintf(buf, sizeof(buf), "free at %p: %zu\n", 1774 d->free_regions[i].p, d->free_regions[i].size); 1775 write(fd, buf, strlen(buf)); 1776 } 1777 } 1778 } 1779 1780 static void 1781 malloc_dump1(int fd, struct dir_info *d) 1782 { 1783 char buf[100]; 1784 size_t i, realsize; 1785 1786 snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d); 1787 write(fd, buf, strlen(buf)); 1788 if (d == NULL) 1789 return; 1790 snprintf(buf, sizeof(buf), "Region slots free %zu/%zu\n", 1791 d->regions_free, d->regions_total); 1792 write(fd, buf, strlen(buf)); 1793 snprintf(buf, sizeof(buf), "Finds %zu/%zu\n", d->finds, 1794 d->find_collisions); 1795 write(fd, buf, strlen(buf)); 1796 snprintf(buf, sizeof(buf), "Inserts %zu/%zu\n", d->inserts, 1797 d->insert_collisions); 1798 write(fd, buf, strlen(buf)); 1799 snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes, 1800 d->delete_moves); 1801 write(fd, buf, strlen(buf)); 1802 snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n", 1803 d->cheap_reallocs, d->cheap_realloc_tries); 1804 write(fd, buf, strlen(buf)); 1805 dump_free_chunk_info(fd, d); 1806 dump_free_page_info(fd, d); 1807 snprintf(buf, sizeof(buf), 1808 "slot) hash d type page f size [free/n]\n"); 1809 write(fd, buf, strlen(buf)); 1810 for (i = 0; i < d->regions_total; i++) { 1811 if (d->r[i].p != NULL) { 1812 size_t h = hash(d->r[i].p) & 1813 (d->regions_total - 1); 1814 snprintf(buf, sizeof(buf), "%4zx) #%4zx %zd ", 1815 i, h, h - i); 1816 write(fd, buf, strlen(buf)); 1817 REALSIZE(realsize, &d->r[i]); 1818 if (realsize > MALLOC_MAXCHUNK) { 1819 putleakinfo(d->r[i].f, realsize, 1); 1820 snprintf(buf, sizeof(buf), 1821 "pages %12p %12p %zu\n", d->r[i].p, 1822 d->r[i].f, realsize); 1823 write(fd, buf, strlen(buf)); 1824 } else 1825 dump_chunk(fd, 1826 (struct chunk_info *)d->r[i].size, 1827 d->r[i].f, 0); 1828 } 1829 } 1830 snprintf(buf, sizeof(buf), "In use %zu\n", d->malloc_used); 1831 write(fd, buf, strlen(buf)); 
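	/* "In use" above counts every byte this pool has mmap()ed for data
	 * and meta data; the "Guarded" line below counts guard-page bytes
	 * only. */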
1832 snprintf(buf, sizeof(buf), "Guarded %zu\n", d->malloc_guarded); 1833 write(fd, buf, strlen(buf)); 1834 dump_leaks(fd); 1835 write(fd, "\n", 1); 1836 } 1837 1838 void 1839 malloc_dump(int fd) 1840 { 1841 struct dir_info *pool = getpool(); 1842 int i; 1843 void *p; 1844 struct region_info *r; 1845 int saved_errno = errno; 1846 1847 for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) { 1848 p = pool->delayed_chunks[i]; 1849 if (p == NULL) 1850 continue; 1851 r = find(pool, p); 1852 if (r == NULL) 1853 wrterror("bogus pointer in malloc_dump", p); 1854 free_bytes(pool, r, p); 1855 pool->delayed_chunks[i] = NULL; 1856 } 1857 /* XXX leak when run multiple times */ 1858 RB_INIT(&leakhead); 1859 malloc_dump1(fd, pool); 1860 errno = saved_errno; 1861 } 1862 DEF_WEAK(malloc_dump); 1863 1864 static void 1865 malloc_exit(void) 1866 { 1867 static const char q[] = "malloc() warning: Couldn't dump stats\n"; 1868 int save_errno = errno, fd; 1869 1870 fd = open("malloc.out", O_RDWR|O_APPEND); 1871 if (fd != -1) { 1872 malloc_dump(fd); 1873 close(fd); 1874 } else 1875 write(STDERR_FILENO, q, sizeof(q) - 1); 1876 errno = save_errno; 1877 } 1878 1879 #endif /* MALLOC_STATS */ 1880
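/*
 * Illustrative usage sketch (not part of the library, never compiled):
 * a tiny program exercising the entry points defined above. Behaviour
 * such as junking, guard pages or cache size depends on the settings
 * parsed in omalloc_init(), e.g. running with MALLOC_OPTIONS=S in the
 * environment selects the stricter 'S' combination.
 */
#if 0
#include <stdlib.h>

int
main(void)
{
	void *p, *q = NULL;

	p = malloc(100);		/* chunk allocation (< half a page) */
	p = realloc(p, 8192);		/* grows into a page-level allocation */
	free(p);

	p = calloc(4, 25);		/* overflow-checked, zero-filled */
	free(p);

	if (posix_memalign(&q, 64, 200) == 0)	/* 64-byte alignment */
		free(q);

	free(NULL);			/* explicitly allowed, does nothing */
	return 0;
}
#endif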