/*	$OpenBSD: malloc.c,v 1.170 2014/07/09 19:11:00 tedu Exp $	*/
/*
 * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net>
 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return. Poul-Henning Kamp
 */

/* #define MALLOC_STATS */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#ifdef MALLOC_STATS
#include <sys/tree.h>
#include <fcntl.h>
#endif

#include "thread_private.h"

#if defined(__sparc__) && !defined(__sparcv9__)
#define MALLOC_PAGESHIFT	(13U)
#elif defined(__mips64__)
#define MALLOC_PAGESHIFT	(14U)
#else
#define MALLOC_PAGESHIFT	(PAGE_SHIFT)
#endif

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNK_MASK	15
#define MALLOC_INITIAL_REGIONS	512
#define MALLOC_DEFAULT_CACHE	64
#define MALLOC_CHUNK_LISTS	4

/*
 * When the P option is active, we move allocations between half a page
 * and a whole page towards the end, subject to alignment constraints.
 * This is the extra headroom we allow. Set to zero to be the most
 * strict.
 */
#define MALLOC_LEEWAY		0

#define PAGEROUND(x)	(((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)

/*
 * What to use for Junk. This is the byte value we use to fill with
 * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
 * and SOME_FREEJUNK right before free.
 */
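/*
 * For illustration: with 'J' enabled, a use-after-free read of a
 * pointer-sized field typically comes back as 0xdfdfdfdf... (SOME_FREEJUNK
 * repeated), which on most platforms is not a mappable address, so buggy
 * code tends to crash early instead of silently reusing stale data.
 */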
#define SOME_JUNK		0xd0	/* as in "Duh" :-) */
#define SOME_FREEJUNK		0xdf

#define MMAP(sz)	mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

#define MMAPA(a,sz)	mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)

#define MQUERY(a, sz)	mquery((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, (off_t)0)

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
#ifdef MALLOC_STATS
	void *f;		/* where allocated from */
#endif
};

LIST_HEAD(chunk_head, chunk_info);

struct dir_info {
	u_int32_t canary1;
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_free;		/* number of free slots */
	/* lists of free chunk info structs */
	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
	/* lists of chunks with free slots */
	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
	size_t free_regions_size;	/* free pages cached */
	/* free pages cache */
	struct region_info free_regions[MALLOC_MAXCACHE];
	/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
	size_t rbytesused;		/* random bytes used */
	u_char rbytes[32];		/* random bytes */
	u_short chunk_start;
#ifdef MALLOC_STATS
	size_t inserts;
	size_t insert_collisions;
	size_t finds;
	size_t find_collisions;
	size_t deletes;
	size_t delete_moves;
	size_t cheap_realloc_tries;
	size_t cheap_reallocs;
	size_t malloc_used;		/* bytes allocated */
	size_t malloc_guarded;		/* bytes used for guards */
#define STATS_ADD(x,y)	((x) += (y))
#define STATS_SUB(x,y)	((x) -= (y))
#define STATS_INC(x)	((x)++)
#define STATS_ZERO(x)	((x) = 0)
#define STATS_SETF(x,y)	((x)->f = (y))
#else
#define STATS_ADD(x,y)	/* nothing */
#define STATS_SUB(x,y)	/* nothing */
#define STATS_INC(x)	/* nothing */
#define STATS_ZERO(x)	/* nothing */
#define STATS_SETF(x,y)	/* nothing */
#endif /* MALLOC_STATS */
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
    ~MALLOC_PAGEMASK)

/*
 * This structure describes a page worth of chunks.
 *
 * How many bits per u_short in the bitmap
 */
#define MALLOC_BITS	(NBBY * sizeof(u_short))
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;			/* pointer to the page */
	u_int32_t canary;
	u_short size;			/* size of this page's chunks */
	u_short shift;			/* how far to shift for this size */
	u_short free;			/* how many free chunks */
	u_short total;			/* how many chunks */
	/* which chunks are free */
	u_short bits[1];
};

struct malloc_readonly {
	struct dir_info *malloc_pool;	/* Main bookkeeping information */
	int	malloc_abort;		/* abort() on error */
	int	malloc_freenow;		/* Free quickly - disable chunk rnd */
	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
	int	malloc_hint;		/* call madvise on free pages? */
	int	malloc_junk;		/* junk fill? */
	int	malloc_move;		/* move allocations to end of page? */
	int	malloc_realloc;		/* always realloc? */
	int	malloc_xmalloc;		/* xmalloc behaviour? */
	size_t	malloc_guard;		/* use guard pages after allocations? */
	u_int	malloc_cache;		/* free pages we cache */
#ifdef MALLOC_STATS
	int	malloc_stats;		/* dump statistics at end */
#endif
	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
};

/* This object is mapped PROT_READ after initialisation to prevent tampering */
static union {
	struct malloc_readonly mopts;
	u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
#define mopts	malloc_readonly.mopts
#define getpool() mopts.malloc_pool

char		*malloc_options;	/* compile-time options */
static char	*malloc_func;		/* current function */
static int	malloc_active;		/* status of malloc */

static u_char getrbyte(struct dir_info *d);

extern char	*__progname;

#ifdef MALLOC_STATS
void malloc_dump(int);
static void malloc_exit(void);
#define CALLER	__builtin_return_address(0)
#else
#define CALLER	NULL
#endif

/* low bits of r->p determine size: 0 means >= page size and r->size holding
 * real size, otherwise r->size is a shift count, or 1 for malloc(0)
 */
#define REALSIZE(sz, r)						\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))

static inline size_t
hash(void *p)
{
	size_t sum;
	uintptr_t u;

	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u;
	sum = (sum << 7) - sum + (u >> 16);
#ifdef __LP64__
	sum = (sum << 7) - sum + (u >> 32);
	sum = (sum << 7) - sum + (u >> 48);
#endif
	return sum;
}

static void
wrterror(char *msg, void *p)
{
	char		*q = " error: ";
	struct iovec	iov[7];
	char		pidbuf[20];
	char		buf[20];
	int		saved_errno = errno;

	iov[0].iov_base = __progname;
	iov[0].iov_len = strlen(__progname);
	iov[1].iov_base = pidbuf;
	snprintf(pidbuf, sizeof(pidbuf), "(%d) in ", getpid());
	iov[1].iov_len = strlen(pidbuf);
	iov[2].iov_base = malloc_func;
	iov[2].iov_len = strlen(malloc_func);
	iov[3].iov_base = q;
	iov[3].iov_len = strlen(q);
	iov[4].iov_base = msg;
	iov[4].iov_len = strlen(msg);
	iov[5].iov_base = buf;
	if (p == NULL)
		iov[5].iov_len = 0;
	else {
		snprintf(buf, sizeof(buf), " %p", p);
		iov[5].iov_len = strlen(buf);
	}
	iov[6].iov_base = "\n";
	iov[6].iov_len = 1;
	writev(STDERR_FILENO, iov, 7);

#ifdef MALLOC_STATS
	if (mopts.malloc_stats)
		malloc_dump(STDERR_FILENO);
#endif /* MALLOC_STATS */

	errno = saved_errno;
	if (mopts.malloc_abort)
		abort();
}

static void
rbytes_init(struct dir_info *d)
{
	arc4random_buf(d->rbytes, sizeof(d->rbytes));
	/* add 1 to account for using d->rbytes[0] */
	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
}

static inline u_char
getrbyte(struct dir_info *d)
{
	u_char x;

	if (d->rbytesused >= sizeof(d->rbytes))
		rbytes_init(d);
	x = d->rbytes[d->rbytesused++];
	return x;
}

/*
 * Cache maintenance. We keep at most malloc_cache pages cached.
 * If the cache is becoming full, unmap pages in the cache for real,
 * and then add the region to the cache.
 * As opposed to the regular region data structure, the sizes in the
 * cache are in MALLOC_PAGESIZE units.
 */
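/*
 * For example, with the default MALLOC_DEFAULT_CACHE of 64 and 4K pages,
 * a cached free_regions[] entry with size 3 stands for a 3-page
 * (12288-byte) region, and at most 64 pages in total are kept cached.
 */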
static void
unmap(struct dir_info *d, void *p, size_t sz)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	size_t rsz, tounmap;
	struct region_info *r;
	u_int i, offset;

	if (sz != PAGEROUND(sz)) {
		wrterror("munmap round", NULL);
		return;
	}

	if (psz > mopts.malloc_cache) {
		if (munmap(p, sz))
			wrterror("munmap", p);
		STATS_SUB(d->malloc_used, sz);
		return;
	}
	tounmap = 0;
	rsz = mopts.malloc_cache - d->free_regions_size;
	if (psz > rsz)
		tounmap = psz - rsz;
	offset = getrbyte(d);
	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror("munmap", r->p);
			r->p = NULL;
			if (tounmap > r->size)
				tounmap -= r->size;
			else
				tounmap = 0;
			d->free_regions_size -= r->size;
			r->size = 0;
			STATS_SUB(d->malloc_used, rsz);
		}
	}
	if (tounmap > 0)
		wrterror("malloc cache underflow", NULL);
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p == NULL) {
			if (mopts.malloc_hint)
				madvise(p, sz, MADV_FREE);
			if (mopts.malloc_freeunmap)
				mprotect(p, sz, PROT_NONE);
			r->p = p;
			r->size = psz;
			d->free_regions_size += psz;
			break;
		}
	}
	if (i == mopts.malloc_cache)
		wrterror("malloc free slot lost", NULL);
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror("malloc cache overflow", NULL);
}

static void
zapcacheregion(struct dir_info *d, void *p, size_t len)
{
	u_int i;
	struct region_info *r;
	size_t rsz;

	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[i];
		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror("munmap", r->p);
			r->p = NULL;
			d->free_regions_size -= r->size;
			r->size = 0;
			STATS_SUB(d->malloc_used, rsz);
		}
	}
}

static void *
map(struct dir_info *d, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	struct region_info *r, *big = NULL;
	u_int i, offset;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt", NULL);
	if (sz != PAGEROUND(sz)) {
		wrterror("map round", NULL);
		return MAP_FAILED;
	}
	if (psz > d->free_regions_size) {
		p = MMAP(sz);
		if (p != MAP_FAILED)
			STATS_ADD(d->malloc_used, sz);
		/* zero fill not needed */
		return p;
	}
	offset = getrbyte(d);
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			if (r->size == psz) {
				p = r->p;
				if (mopts.malloc_freeunmap)
					mprotect(p, sz, PROT_READ | PROT_WRITE);
				if (mopts.malloc_hint)
					madvise(p, sz, MADV_NORMAL);
				r->p = NULL;
				r->size = 0;
				d->free_regions_size -= psz;
				if (zero_fill)
					memset(p, 0, sz);
				else if (mopts.malloc_junk == 2 &&
				    mopts.malloc_freeunmap)
					memset(p, SOME_FREEJUNK, sz);
				return p;
			} else if (r->size > psz)
				big = r;
		}
	}
	if (big != NULL) {
		r = big;
		p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
		if (mopts.malloc_freeunmap)
			mprotect(p, sz, PROT_READ | PROT_WRITE);
		if (mopts.malloc_hint)
			madvise(p, sz, MADV_NORMAL);
		r->size -= psz;
		d->free_regions_size -= psz;
		if (zero_fill)
			memset(p, 0, sz);
		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
			memset(p, SOME_FREEJUNK, sz);
		return p;
	}
	p = MMAP(sz);
	if (p != MAP_FAILED)
		STATS_ADD(d->malloc_used, sz);
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror("malloc cache", NULL);
	/* zero fill not needed */
	return p;
}

/*
 * Initialize a dir_info, which should have been cleared by caller
 */
static int
omalloc_init(struct dir_info **dp)
{
	char *p, b[64];
	int i, j;
	size_t d_avail, regioninfo_size;
	struct dir_info *d;

	/*
	 * Default options
	 */
	mopts.malloc_abort = 1;
	mopts.malloc_junk = 1;
	mopts.malloc_move = 1;
	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;

	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0:
			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
			if (j <= 0)
				continue;
			b[j] = '\0';
			p = b;
			break;
		case 1:
			if (issetugid() == 0)
				p = getenv("MALLOC_OPTIONS");
			else
				continue;
			break;
		case 2:
			p = malloc_options;
			break;
		default:
			p = NULL;
		}

		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case '>':
				mopts.malloc_cache <<= 1;
				if (mopts.malloc_cache > MALLOC_MAXCACHE)
					mopts.malloc_cache = MALLOC_MAXCACHE;
				break;
			case '<':
				mopts.malloc_cache >>= 1;
				break;
			case 'a':
				mopts.malloc_abort = 0;
				break;
			case 'A':
				mopts.malloc_abort = 1;
				break;
#ifdef MALLOC_STATS
			case 'd':
				mopts.malloc_stats = 0;
				break;
			case 'D':
				mopts.malloc_stats = 1;
				break;
#endif /* MALLOC_STATS */
			case 'f':
				mopts.malloc_freenow = 0;
				mopts.malloc_freeunmap = 0;
				break;
			case 'F':
				mopts.malloc_freenow = 1;
				mopts.malloc_freeunmap = 1;
				break;
			case 'g':
				mopts.malloc_guard = 0;
				break;
			case 'G':
				mopts.malloc_guard = MALLOC_PAGESIZE;
				break;
			case 'h':
				mopts.malloc_hint = 0;
				break;
			case 'H':
				mopts.malloc_hint = 1;
				break;
			case 'j':
				mopts.malloc_junk = 0;
				break;
			case 'J':
				mopts.malloc_junk = 2;
				break;
			case 'n':
			case 'N':
				break;
			case 'p':
				mopts.malloc_move = 0;
				break;
			case 'P':
				mopts.malloc_move = 1;
				break;
			case 'r':
				mopts.malloc_realloc = 0;
				break;
			case 'R':
				mopts.malloc_realloc = 1;
				break;
			case 's':
				mopts.malloc_freeunmap = mopts.malloc_junk = 0;
				mopts.malloc_guard = 0;
				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
				break;
			case 'S':
				mopts.malloc_freeunmap = 1;
				mopts.malloc_junk = 2;
				mopts.malloc_guard = MALLOC_PAGESIZE;
				mopts.malloc_cache = 0;
				break;
			case 'u':
				mopts.malloc_freeunmap = 0;
				break;
			case 'U':
				mopts.malloc_freeunmap = 1;
				break;
			case 'x':
				mopts.malloc_xmalloc = 0;
				break;
			case 'X':
				mopts.malloc_xmalloc = 1;
				break;
			default: {
				static const char q[] = "malloc() warning: "
				    "unknown char in MALLOC_OPTIONS\n";
				write(STDERR_FILENO, q, sizeof(q) - 1);
				break;
			}
			}
		}
	}

#ifdef MALLOC_STATS
	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
		static const char q[] = "malloc() warning: atexit(2) failed."
594 " Will not be able to dump stats on exit\n"; 595 write(STDERR_FILENO, q, sizeof(q) - 1); 596 } 597 #endif /* MALLOC_STATS */ 598 599 while ((mopts.malloc_canary = arc4random()) == 0) 600 ; 601 602 /* 603 * Allocate dir_info with a guard page on either side. Also 604 * randomise offset inside the page at which the dir_info 605 * lies (subject to alignment by 1 << MALLOC_MINSHIFT) 606 */ 607 if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED) 608 return -1; 609 mprotect(p, MALLOC_PAGESIZE, PROT_NONE); 610 mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ, 611 MALLOC_PAGESIZE, PROT_NONE); 612 d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT; 613 d = (struct dir_info *)(p + MALLOC_PAGESIZE + 614 (arc4random_uniform(d_avail) << MALLOC_MINSHIFT)); 615 616 rbytes_init(d); 617 d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS; 618 regioninfo_size = d->regions_total * sizeof(struct region_info); 619 d->r = MMAP(regioninfo_size); 620 if (d->r == MAP_FAILED) { 621 wrterror("malloc init mmap failed", NULL); 622 d->regions_total = 0; 623 return 1; 624 } 625 for (i = 0; i <= MALLOC_MAXSHIFT; i++) { 626 LIST_INIT(&d->chunk_info_list[i]); 627 for (j = 0; j < MALLOC_CHUNK_LISTS; j++) 628 LIST_INIT(&d->chunk_dir[i][j]); 629 } 630 STATS_ADD(d->malloc_used, regioninfo_size); 631 d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d; 632 d->canary2 = ~d->canary1; 633 634 *dp = d; 635 636 /* 637 * Options have been set and will never be reset. 638 * Prevent further tampering with them. 639 */ 640 if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) 641 mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ); 642 643 return 0; 644 } 645 646 static int 647 omalloc_grow(struct dir_info *d) 648 { 649 size_t newtotal; 650 size_t newsize; 651 size_t mask; 652 size_t i; 653 struct region_info *p; 654 655 if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2 ) 656 return 1; 657 658 newtotal = d->regions_total * 2; 659 newsize = newtotal * sizeof(struct region_info); 660 mask = newtotal - 1; 661 662 p = MMAP(newsize); 663 if (p == MAP_FAILED) 664 return 1; 665 666 STATS_ADD(d->malloc_used, newsize); 667 memset(p, 0, newsize); 668 STATS_ZERO(d->inserts); 669 STATS_ZERO(d->insert_collisions); 670 for (i = 0; i < d->regions_total; i++) { 671 void *q = d->r[i].p; 672 if (q != NULL) { 673 size_t index = hash(q) & mask; 674 STATS_INC(d->inserts); 675 while (p[index].p != NULL) { 676 index = (index - 1) & mask; 677 STATS_INC(d->insert_collisions); 678 } 679 p[index] = d->r[i]; 680 } 681 } 682 /* avoid pages containing meta info to end up in cache */ 683 if (munmap(d->r, d->regions_total * sizeof(struct region_info))) 684 wrterror("munmap", d->r); 685 else 686 STATS_SUB(d->malloc_used, 687 d->regions_total * sizeof(struct region_info)); 688 d->regions_free = d->regions_free + d->regions_total; 689 d->regions_total = newtotal; 690 d->r = p; 691 return 0; 692 } 693 694 static struct chunk_info * 695 alloc_chunk_info(struct dir_info *d, int bits) 696 { 697 struct chunk_info *p; 698 size_t size, count; 699 700 if (bits == 0) 701 count = MALLOC_PAGESIZE / MALLOC_MINSIZE; 702 else 703 count = MALLOC_PAGESIZE >> bits; 704 705 size = howmany(count, MALLOC_BITS); 706 size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short); 707 size = ALIGN(size); 708 709 if (LIST_EMPTY(&d->chunk_info_list[bits])) { 710 char *q; 711 int i; 712 713 q = MMAP(MALLOC_PAGESIZE); 714 if (q == MAP_FAILED) 715 return NULL; 716 STATS_ADD(d->malloc_used, MALLOC_PAGESIZE); 717 count = 
		for (i = 0; i < count; i++, q += size)
			LIST_INSERT_HEAD(&d->chunk_info_list[bits],
			    (struct chunk_info *)q, entries);
	}
	p = LIST_FIRST(&d->chunk_info_list[bits]);
	LIST_REMOVE(p, entries);
	memset(p, 0, size);
	p->canary = d->canary1;
	return p;
}


/*
 * The hashtable uses the assumption that p is never NULL. This holds since
 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
 */
static int
insert(struct dir_info *d, void *p, size_t sz, void *f)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	STATS_INC(d->inserts);
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
		STATS_INC(d->insert_collisions);
	}
	d->r[index].p = p;
	d->r[index].size = sz;
#ifdef MALLOC_STATS
	d->r[index].f = f;
#endif
	d->regions_free--;
	return 0;
}

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt", NULL);
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	STATS_INC(d->finds);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
		STATS_INC(d->find_collisions);
	}
	return (q == p && r != NULL) ? &d->r[index] : NULL;
}

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror("regions_total not 2^x", NULL);
	d->regions_free++;
	STATS_INC(getpool()->deletes);

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			STATS_INC(getpool()->delete_moves);
			break;
		}

	}
}

/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
{
	struct chunk_info *bp;
	void *pp;
	int i, k;

	/* Allocate a new bucket */
	pp = map(d, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;

	bp = alloc_chunk_info(d, bits);
	if (bp == NULL) {
		unmap(d, pp, MALLOC_PAGESIZE);
		return NULL;
	}

	/* memory protect the page allocated in the malloc(0) case */
	if (bits == 0) {
		bp->size = 0;
		bp->shift = 1;
		i = MALLOC_MINSIZE - 1;
		while (i >>= 1)
			bp->shift++;
		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
		bp->page = pp;

		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
		if (k < 0) {
			unmap(d, pp, MALLOC_PAGESIZE);
			LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
			return NULL;
		}
	} else {
		bp->size = 1U << bits;
		bp->shift = bits;
		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
		bp->page = pp;
	}

	/* set all valid bits in the bitmap */
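	/*
	 * e.g. for 16-byte chunks on a 4K page: total = 256, so the loop
	 * below sets the first 256 / MALLOC_BITS = 16 u_shorts to 0xffff
	 * and the tail loop has nothing left to do.
	 */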
	k = bp->total;
	i = 0;

	/* Do a bunch at a time */
	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
		bp->bits[i / MALLOC_BITS] = (u_short)~0U;

	for (; i < k; i++)
		bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);

	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);

	bits++;
	if ((uintptr_t)pp & bits)
		wrterror("pp & bits", pp);

	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
	return bp;
}


/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t size, void *f)
{
	int i, j, listnum;
	size_t k;
	u_short u, *lp;
	struct chunk_info *bp;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror("internal struct corrupt", NULL);
	/* Don't bother with anything less than this */
	/* unless we have a malloc(0) request */
	if (size != 0 && size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;

	/* Find the right bucket */
	if (size == 0)
		j = 0;
	else {
		j = MALLOC_MINSHIFT;
		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
		while (i >>= 1)
			j++;
	}

	listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
	/* If it's empty, make a page more of that size chunks */
	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
		bp = omalloc_make_chunks(d, j, listnum);
		if (bp == NULL)
			return NULL;
	}

	if (bp->canary != d->canary1)
		wrterror("chunk info corrupted", NULL);

	i = d->chunk_start;
	if (bp->free > 1)
		i += getrbyte(d);
	if (i >= bp->total)
		i &= bp->total - 1;
	for (;;) {
		for (;;) {
			lp = &bp->bits[i / MALLOC_BITS];
			if (!*lp) {
				i += MALLOC_BITS;
				i &= ~(MALLOC_BITS - 1);
				if (i >= bp->total)
					i = 0;
			} else
				break;
		}
		k = i % MALLOC_BITS;
		u = 1 << k;
		if (*lp & u)
			break;
		if (++i >= bp->total)
			i = 0;
	}
	d->chunk_start += i + 1;
#ifdef MALLOC_STATS
	if (i == 0) {
		struct region_info *r = find(d, bp->page);
		r->f = f;
	}
#endif

	*lp ^= u;

	/* If there are no more free, remove from free-list */
	if (!--bp->free)
		LIST_REMOVE(bp, entries);

	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;
	k <<= bp->shift;

	if (mopts.malloc_junk == 2 && bp->size > 0)
		memset((char *)bp->page + k, SOME_JUNK, bp->size);
	return ((char *)bp->page + k);
}

static uint32_t
find_chunknum(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_info *info;
	uint32_t chunknum;

	info = (struct chunk_info *)r->size;
	if (info->canary != d->canary1)
		wrterror("chunk info corrupted", NULL);

	/* Find the chunk number on the page */
	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;

	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1)) {
		wrterror("modified chunk-pointer", ptr);
		return -1;
	}
	if (info->bits[chunknum / MALLOC_BITS] &
	    (1U << (chunknum % MALLOC_BITS))) {
		wrterror("chunk is already free", ptr);
		return -1;
	}
	return chunknum;
}

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_head *mp;
	struct chunk_info *info;
	uint32_t chunknum;
	int listnum;

	info = (struct chunk_info *)r->size;
	if ((chunknum = find_chunknum(d, r, ptr)) == -1)
		return;

	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
	info->free++;

	if (info->free == 1) {
		/* Page became non-full */
		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
		if (info->size != 0)
			mp = &d->chunk_dir[info->shift][listnum];
		else
			mp = &d->chunk_dir[0][listnum];

		LIST_INSERT_HEAD(mp, info, entries);
		return;
	}

	if (info->free != info->total)
		return;

	LIST_REMOVE(info, entries);

	if (info->size == 0 && !mopts.malloc_freeunmap)
		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE);

	delete(d, r);
	if (info->size != 0)
		mp = &d->chunk_info_list[info->shift];
	else
		mp = &d->chunk_info_list[0];
	LIST_INSERT_HEAD(mp, info, entries);
}



static void *
omalloc(size_t sz, int zero_fill, void *f)
{
	struct dir_info *pool = getpool();
	void *p;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
			errno = ENOMEM;
			return NULL;
		}
		sz += mopts.malloc_guard;
		psz = PAGEROUND(sz);
		p = map(pool, psz, zero_fill);
		if (p == MAP_FAILED) {
			errno = ENOMEM;
			return NULL;
		}
		if (insert(pool, p, sz, f)) {
			unmap(pool, p, psz);
			errno = ENOMEM;
			return NULL;
		}
		if (mopts.malloc_guard) {
			if (mprotect((char *)p + psz - mopts.malloc_guard,
			    mopts.malloc_guard, PROT_NONE))
				wrterror("mprotect", NULL);
			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
		}

		if (mopts.malloc_move &&
		    sz - mopts.malloc_guard < MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			/* fill whole allocation */
			if (mopts.malloc_junk == 2)
				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
			/* shift towards the end */
			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
			/* fill zeros if needed and overwritten above */
			if (zero_fill && mopts.malloc_junk == 2)
				memset(p, 0, sz - mopts.malloc_guard);
		} else {
			if (mopts.malloc_junk == 2) {
				if (zero_fill)
					memset((char *)p + sz - mopts.malloc_guard,
					    SOME_JUNK, psz - sz);
				else
					memset(p, SOME_JUNK,
					    psz - mopts.malloc_guard);
			}
		}

	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(pool, sz, f);
		if (zero_fill && p != NULL && sz > 0)
			memset(p, 0, sz);
	}

	return p;
}

/*
 * Common function for handling recursion. Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
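/*
 * (A recursive call can arise, for example, when a signal handler calls
 * malloc() or free() while the interrupted code was already inside the
 * allocator; malloc_active is then non-zero on entry.)
 */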
static void
malloc_recurse(void)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror("recursive call", NULL);
	}
	malloc_active--;
	_MALLOC_UNLOCK();
	errno = EDEADLK;
}

static int
malloc_init(void)
{
	if (omalloc_init(&mopts.malloc_pool)) {
		_MALLOC_UNLOCK();
		if (mopts.malloc_xmalloc)
			wrterror("out of memory", NULL);
		errno = ENOMEM;
		return -1;
	}
	return 0;
}

void *
malloc(size_t size)
{
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	malloc_func = "malloc():";
	if (getpool() == NULL) {
		if (malloc_init() != 0)
			return NULL;
	}

	if (malloc_active++) {
		malloc_recurse();
		return NULL;
	}
	r = omalloc(size, 0, CALLER);
	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror("out of memory", NULL);
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}

static void
ofree(void *p)
{
	struct dir_info *pool = getpool();
	struct region_info *r;
	size_t sz;

	r = find(pool, p);
	if (r == NULL) {
		wrterror("bogus pointer (double free?)", p);
		return;
	}
	REALSIZE(sz, r);
	if (sz > MALLOC_MAXCHUNK) {
		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			if (r->p != p) {
				wrterror("bogus pointer", p);
				return;
			}
		} else {
#if notyetbecause_of_realloc
			/* shifted towards the end */
			if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
			    MALLOC_MINSIZE - sz - mopts.malloc_guard) &
			    ~(MALLOC_MINSIZE-1))) {
			}
#endif
			p = r->p;
		}
		if (mopts.malloc_guard) {
			if (sz < mopts.malloc_guard)
				wrterror("guard size", NULL);
			if (!mopts.malloc_freeunmap) {
				if (mprotect((char *)p + PAGEROUND(sz) -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror("mprotect", NULL);
			}
			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
		}
		if (mopts.malloc_junk && !mopts.malloc_freeunmap) {
			size_t amt = mopts.malloc_junk == 1 ? MALLOC_MAXCHUNK :
			    PAGEROUND(sz) - mopts.malloc_guard;
			memset(p, SOME_FREEJUNK, amt);
		}
		unmap(pool, p, PAGEROUND(sz));
		delete(pool, r);
	} else {
		void *tmp;
		int i;

		if (mopts.malloc_junk && sz > 0)
			memset(p, SOME_FREEJUNK, sz);
		if (!mopts.malloc_freenow) {
			if (find_chunknum(pool, r, p) == -1)
				return;
			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
			tmp = p;
			p = pool->delayed_chunks[i];
			if (tmp == p) {
				wrterror("double free", p);
				return;
			}
			pool->delayed_chunks[i] = tmp;
		}
		if (p != NULL) {
			r = find(pool, p);
			if (r == NULL) {
				wrterror("bogus pointer (double free?)", p);
				return;
			}
			free_bytes(pool, r, p);
		}
	}
}

void
free(void *ptr)
{
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	_MALLOC_LOCK();
	malloc_func = "free():";
	if (getpool() == NULL) {
		_MALLOC_UNLOCK();
		wrterror("free() called before allocation", NULL);
		return;
	}
	if (malloc_active++) {
		malloc_recurse();
		return;
	}
	ofree(ptr);
	malloc_active--;
	_MALLOC_UNLOCK();
	errno = saved_errno;
}


static void *
orealloc(void *p, size_t newsz, void *f)
{
	struct dir_info *pool = getpool();
	struct region_info *r;
	size_t oldsz, goldsz, gnewsz;
	void *q;

	if (p == NULL)
		return omalloc(newsz, 0, f);

	r = find(pool, p);
	if (r == NULL) {
		wrterror("bogus pointer (double free?)", p);
		return NULL;
	}
	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	REALSIZE(oldsz, r);
	goldsz = oldsz;
	if (oldsz > MALLOC_MAXCHUNK) {
		if (oldsz < mopts.malloc_guard)
			wrterror("guard size", NULL);
		oldsz -= mopts.malloc_guard;
	}

	gnewsz = newsz;
	if (gnewsz > MALLOC_MAXCHUNK)
		gnewsz += mopts.malloc_guard;

	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
	    !mopts.malloc_realloc) {
		size_t roldsz = PAGEROUND(goldsz);
		size_t rnewsz = PAGEROUND(gnewsz);

		if (rnewsz > roldsz) {
			if (!mopts.malloc_guard) {
				void *hint = (char *)p + roldsz;
				size_t needed = rnewsz - roldsz;

				STATS_INC(pool->cheap_realloc_tries);
				zapcacheregion(pool, hint, needed);
				q = MQUERY(hint, needed);
				if (q == hint)
					q = MMAPA(hint, needed);
				else
					q = MAP_FAILED;
				if (q == hint) {
					STATS_ADD(pool->malloc_used, needed);
					if (mopts.malloc_junk == 2)
						memset(q, SOME_JUNK, needed);
					r->size = newsz;
					STATS_SETF(r, f);
					STATS_INC(pool->cheap_reallocs);
					return p;
				} else if (q != MAP_FAILED) {
					if (munmap(q, needed))
						wrterror("munmap", q);
				}
			}
		} else if (rnewsz < roldsz) {
			if (mopts.malloc_guard) {
				if (mprotect((char *)p + roldsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror("mprotect", NULL);
				if (mprotect((char *)p + rnewsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_NONE))
					wrterror("mprotect", NULL);
			}
			unmap(pool, (char *)p + rnewsz, roldsz - rnewsz);
			r->size = gnewsz;
			STATS_SETF(r, f);
			return p;
		} else {
			if (newsz > oldsz && mopts.malloc_junk == 2)
				memset((char *)p + newsz, SOME_JUNK,
				    rnewsz - mopts.malloc_guard - newsz);
			r->size = gnewsz;
			STATS_SETF(r, f);
			return p;
		}
	}
	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
		if (mopts.malloc_junk == 2 && newsz > 0)
			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
		STATS_SETF(r, f);
		return p;
	} else if (newsz != oldsz || mopts.malloc_realloc) {
		q = omalloc(newsz, 0, f);
		if (q == NULL)
			return NULL;
		if (newsz != 0 && oldsz != 0)
			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
		ofree(p);
		return q;
	} else {
		STATS_SETF(r, f);
		return p;
	}
}

void *
realloc(void *ptr, size_t size)
{
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	malloc_func = "realloc():";
	if (getpool() == NULL) {
		if (malloc_init() != 0)
			return NULL;
	}
	if (malloc_active++) {
		malloc_recurse();
		return NULL;
	}
	r = orealloc(ptr, size, CALLER);

	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror("out of memory", NULL);
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}


/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))

void *
calloc(size_t nmemb, size_t size)
{
	void *r;
	int saved_errno = errno;

	_MALLOC_LOCK();
	malloc_func = "calloc():";
	if (getpool() == NULL) {
		if (malloc_init() != 0)
			return NULL;
	}
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		_MALLOC_UNLOCK();
		if (mopts.malloc_xmalloc)
			wrterror("out of memory", NULL);
		errno = ENOMEM;
		return NULL;
	}

	if (malloc_active++) {
		malloc_recurse();
		return NULL;
	}

	size *= nmemb;
	r = omalloc(size, 1, CALLER);

	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL && mopts.malloc_xmalloc) {
		wrterror("out of memory", NULL);
		errno = ENOMEM;
	}
	if (r != NULL)
		errno = saved_errno;
	return r;
}

static void *
mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
{
	char *p, *q;

	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0) {
		wrterror("mapalign bad alignment", NULL);
		return MAP_FAILED;
	}
	if (sz != PAGEROUND(sz)) {
		wrterror("mapalign round", NULL);
		return MAP_FAILED;
	}

	/* Allocate sz + alignment bytes of memory, which must include a
	 * subrange of size bytes that is properly aligned. Unmap the
	 * other bytes, and then return that subrange.
	 */

	/* We need sz + alignment to fit into a size_t. */
	if (alignment > SIZE_MAX - sz)
		return MAP_FAILED;

	p = map(d, sz + alignment, zero_fill);
	if (p == MAP_FAILED)
		return MAP_FAILED;
	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
	if (q != p) {
		if (munmap(p, q - p))
			wrterror("munmap", p);
	}
	if (munmap(q + sz, alignment - (q - p)))
		wrterror("munmap", q + sz);
	STATS_SUB(d->malloc_used, alignment);

	return q;
}

static void *
omemalign(size_t alignment, size_t sz, int zero_fill, void *f)
{
	struct dir_info *pool = getpool();
	size_t psz;
	void *p;

	if (alignment <= MALLOC_PAGESIZE) {
		/*
		 * max(size, alignment) is enough to assure the requested alignment,
		 * since the allocator always allocates power-of-two blocks.
		 */
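		/*
		 * e.g. a 24-byte request with 64-byte alignment is bumped
		 * to 64 bytes; the chunk allocator then hands out a slot
		 * from a page of 64-byte chunks, all of which start on
		 * 64-byte boundaries within a page-aligned page.
		 */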
		if (sz < alignment)
			sz = alignment;
		return omalloc(sz, zero_fill, f);
	}

	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	sz += mopts.malloc_guard;
	psz = PAGEROUND(sz);

	p = mapalign(pool, alignment, psz, zero_fill);
	if (p == NULL) {
		errno = ENOMEM;
		return NULL;
	}

	if (insert(pool, p, sz, f)) {
		unmap(pool, p, psz);
		errno = ENOMEM;
		return NULL;
	}

	if (mopts.malloc_guard) {
		if (mprotect((char *)p + psz - mopts.malloc_guard,
		    mopts.malloc_guard, PROT_NONE))
			wrterror("mprotect", NULL);
		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
	}

	if (mopts.malloc_junk == 2) {
		if (zero_fill)
			memset((char *)p + sz - mopts.malloc_guard,
			    SOME_JUNK, psz - sz);
		else
			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
	}

	return p;
}

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int res, saved_errno = errno;
	void *r;

	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
		return EINVAL;

	_MALLOC_LOCK();
	malloc_func = "posix_memalign():";
	if (getpool() == NULL) {
		if (malloc_init() != 0)
			goto err;
	}
	if (malloc_active++) {
		malloc_recurse();
		goto err;
	}
	r = omemalign(alignment, size, 0, CALLER);
	malloc_active--;
	_MALLOC_UNLOCK();
	if (r == NULL) {
		if (mopts.malloc_xmalloc) {
			wrterror("out of memory", NULL);
			errno = ENOMEM;
		}
		goto err;
	}
	errno = saved_errno;
	*memptr = r;
	return 0;

err:
	res = errno;
	errno = saved_errno;
	return res;
}

#ifdef MALLOC_STATS

struct malloc_leak {
	void (*f)();
	size_t total_size;
	int count;
};

struct leaknode {
	RB_ENTRY(leaknode) entry;
	struct malloc_leak d;
};

static int
leakcmp(struct leaknode *e1, struct leaknode *e2)
{
	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
}

static RB_HEAD(leaktree, leaknode) leakhead;
RB_GENERATE_STATIC(leaktree, leaknode, entry, leakcmp)

static void
putleakinfo(void *f, size_t sz, int cnt)
{
	struct leaknode key, *p;
	static struct leaknode *page;
	static int used;

	if (cnt == 0)
		return;

	key.d.f = f;
	p = RB_FIND(leaktree, &leakhead, &key);
	if (p == NULL) {
		if (page == NULL ||
		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
			page = MMAP(MALLOC_PAGESIZE);
			if (page == MAP_FAILED)
				return;
			used = 0;
		}
		p = &page[used++];
		p->d.f = f;
		p->d.total_size = sz * cnt;
		p->d.count = cnt;
		RB_INSERT(leaktree, &leakhead, p);
	} else {
		p->d.total_size += sz * cnt;
		p->d.count += cnt;
	}
}

static struct malloc_leak *malloc_leaks;

static void
dump_leaks(int fd)
{
	struct leaknode *p;
	char buf[64];
	int i = 0;

	snprintf(buf, sizeof(buf), "Leak report\n");
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), " f sum # avg\n");
	write(fd, buf, strlen(buf));
	/* XXX only one page of summary */
	if (malloc_leaks == NULL)
		malloc_leaks = MMAP(MALLOC_PAGESIZE);
	if (malloc_leaks != MAP_FAILED)
		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
	RB_FOREACH(p, leaktree, &leakhead) {
		snprintf(buf, sizeof(buf), "%18p %7zu %6u %6zu\n", p->d.f,
		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
		write(fd, buf, strlen(buf));
		if (malloc_leaks == MAP_FAILED ||
		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
			continue;
		malloc_leaks[i].f = p->d.f;
		malloc_leaks[i].total_size = p->d.total_size;
		malloc_leaks[i].count = p->d.count;
		i++;
	}
}

static void
dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
{
	char buf[64];

	while (p != NULL) {
		snprintf(buf, sizeof(buf), "chunk %18p %18p %4d %d/%d\n",
		    p->page, ((p->bits[0] & 1) ? NULL : f),
		    p->size, p->free, p->total);
		write(fd, buf, strlen(buf));
		if (!fromfreelist) {
			if (p->bits[0] & 1)
				putleakinfo(NULL, p->size, p->total - p->free);
			else {
				putleakinfo(f, p->size, 1);
				putleakinfo(NULL, p->size,
				    p->total - p->free - 1);
			}
			break;
		}
		p = LIST_NEXT(p, entries);
		if (p != NULL) {
			snprintf(buf, sizeof(buf), " ");
			write(fd, buf, strlen(buf));
		}
	}
}

static void
dump_free_chunk_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i, j, count;
	struct chunk_info *p;

	snprintf(buf, sizeof(buf), "Free chunk structs:\n");
	write(fd, buf, strlen(buf));
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		count = 0;
		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
			count++;
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
			p = LIST_FIRST(&d->chunk_dir[i][j]);
			if (p == NULL && count == 0)
				continue;
			snprintf(buf, sizeof(buf), "%2d) %3d ", i, count);
			write(fd, buf, strlen(buf));
			if (p != NULL)
				dump_chunk(fd, p, NULL, 1);
			else
				write(fd, "\n", 1);
		}
	}

}

static void
dump_free_page_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i;

	snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
	    d->free_regions_size);
	write(fd, buf, strlen(buf));
	for (i = 0; i < mopts.malloc_cache; i++) {
		if (d->free_regions[i].p != NULL) {
			snprintf(buf, sizeof(buf), "%2d) ", i);
			write(fd, buf, strlen(buf));
			snprintf(buf, sizeof(buf), "free at %p: %zu\n",
			    d->free_regions[i].p, d->free_regions[i].size);
			write(fd, buf, strlen(buf));
		}
	}
}

static void
malloc_dump1(int fd, struct dir_info *d)
{
	char buf[100];
	size_t i, realsize;

	snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d);
	write(fd, buf, strlen(buf));
	if (d == NULL)
		return;
	snprintf(buf, sizeof(buf), "Region slots free %zu/%zu\n",
	    d->regions_free, d->regions_total);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Finds %zu/%zu\n", d->finds,
	    d->find_collisions);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Inserts %zu/%zu\n", d->inserts,
	    d->insert_collisions);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
	    d->delete_moves);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
	    d->cheap_reallocs, d->cheap_realloc_tries);
	write(fd, buf, strlen(buf));
	dump_free_chunk_info(fd, d);
	dump_free_page_info(fd, d);
	snprintf(buf, sizeof(buf),
	    "slot) hash d type page f size [free/n]\n");
	write(fd, buf, strlen(buf));
	for (i = 0; i < d->regions_total; i++) {
		if (d->r[i].p != NULL) {
			size_t h = hash(d->r[i].p) &
			    (d->regions_total - 1);
			snprintf(buf, sizeof(buf), "%4zx) #%4zx %zd ",
			    i, h, h - i);
			write(fd, buf, strlen(buf));
			REALSIZE(realsize, &d->r[i]);
			if (realsize > MALLOC_MAXCHUNK) {
				putleakinfo(d->r[i].f, realsize, 1);
				snprintf(buf, sizeof(buf),
				    "pages %12p %12p %zu\n", d->r[i].p,
				    d->r[i].f, realsize);
				write(fd, buf, strlen(buf));
			} else
				dump_chunk(fd,
				    (struct chunk_info *)d->r[i].size,
				    d->r[i].f, 0);
		}
	}
	snprintf(buf, sizeof(buf), "In use %zu\n", d->malloc_used);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Guarded %zu\n", d->malloc_guarded);
	write(fd, buf, strlen(buf));
	dump_leaks(fd);
	write(fd, "\n", 1);
}

void
malloc_dump(int fd)
{
	struct dir_info *pool = getpool();
	int i;
	void *p;
	struct region_info *r;
	int saved_errno = errno;

	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
		p = pool->delayed_chunks[i];
		if (p == NULL)
			continue;
		r = find(pool, p);
		if (r == NULL)
			wrterror("bogus pointer in malloc_dump", p);
		free_bytes(pool, r, p);
		pool->delayed_chunks[i] = NULL;
	}
	/* XXX leak when run multiple times */
	RB_INIT(&leakhead);
	malloc_dump1(fd, pool);
	errno = saved_errno;
}

static void
malloc_exit(void)
{
	static const char q[] = "malloc() warning: Couldn't dump stats\n";
	int save_errno = errno, fd;

	fd = open("malloc.out", O_RDWR|O_APPEND);
	if (fd != -1) {
		malloc_dump(fd);
		close(fd);
	} else
		write(STDERR_FILENO, q, sizeof(q) - 1);
	errno = save_errno;
}

#endif /* MALLOC_STATS */