Lines Matching refs:d (references to the identifier d in OpenBSD libc's malloc.c; in most hits d is the per-pool struct dir_info pointer)

210 static void unmap(struct dir_info *d, void *p, size_t sz, size_t clear);
269 static __dead void wrterror(struct dir_info *d, char *msg, ...)
287 caller(struct dir_info *d)
308 return store_caller(d, &p);
312 static inline void* caller(struct dir_info *d)
320 return store_caller(d, &p);
359 wrterror(struct dir_info *d, char *msg, ...)
364 dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
365 getpid(), (d != NULL && d->func) ? d->func : "unknown");
382 rbytes_init(struct dir_info *d)
384 arc4random_buf(d->rbytes, sizeof(d->rbytes));
385 /* add 1 to account for using d->rbytes[0] */
386 d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
390 getrbyte(struct dir_info *d)
394 if (d->rbytesused >= sizeof(d->rbytes))
395 rbytes_init(d);
396 x = d->rbytes[d->rbytesused++];
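
A minimal standalone sketch of the random-byte pool used by rbytes_init()/getrbyte() above: a buffer is refilled from arc4random_buf() and a random prefix is skipped so successive pools do not start at a predictable index. The struct name rbpool and the buffer size are illustrative, not the real dir_info layout.

    #include <stdlib.h>         /* arc4random_buf() */

    /* illustrative pool, not the real struct dir_info fields */
    struct rbpool {
        unsigned char rbytes[32];
        size_t rbytesused;
    };

    static void
    rbpool_init(struct rbpool *p)
    {
        arc4random_buf(p->rbytes, sizeof(p->rbytes));
        /* skip a random prefix; +1 accounts for consuming rbytes[0] itself */
        p->rbytesused = 1 + p->rbytes[0] % (sizeof(p->rbytes) / 2);
    }

    static unsigned char
    rbpool_getbyte(struct rbpool *p)
    {
        if (p->rbytesused >= sizeof(p->rbytes))
            rbpool_init(p);
        return p->rbytes[p->rbytesused++];
    }
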
429 case 'd':
575 omalloc_poolinit(struct dir_info *d, int mmap_flag)
579 d->r = NULL;
580 d->rbytesused = sizeof(d->rbytes);
581 d->regions_free = d->regions_total = 0;
583 LIST_INIT(&d->chunk_info_list[i]);
585 LIST_INIT(&d->chunk_dir[i][j]);
587 d->mmap_flag = mmap_flag;
588 d->malloc_junk = mopts.def_malloc_junk;
590 RBT_INIT(btshead, &d->btraces);
592 d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
593 d->canary2 = ~d->canary1;
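
The two canaries set at the end of omalloc_poolinit() above are what find(), map() and malloc_bytes() later verify. A hedged sketch of the idea, with a stand-in global secret instead of mopts.malloc_canary and a trimmed-down pool struct; mixing the pool's own address into the secret means a struct that is copied, relocated or partially overwritten fails the check.

    #include <stdint.h>

    static uint32_t global_canary;      /* stand-in for mopts.malloc_canary,
                                           seeded once (e.g. via arc4random()) */

    struct pool {                       /* stand-in for struct dir_info */
        uint32_t canary1, canary2;
    };

    static void
    pool_seal(struct pool *d)
    {
        /* mix the pool's own address into the secret ... */
        d->canary1 = global_canary ^ (uint32_t)(uintptr_t)d;
        d->canary2 = ~d->canary1;
    }

    static int
    pool_ok(const struct pool *d)
    {
        /* ... so both tests fail if the struct moved or was overwritten */
        return global_canary == (d->canary1 ^ (uint32_t)(uintptr_t)d) &&
            d->canary1 == ~d->canary2;
    }
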
597 omalloc_grow(struct dir_info *d)
605 if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
608 newtotal = d->regions_total == 0 ? MALLOC_INITIAL_REGIONS :
609 d->regions_total * 2;
614 p = MMAP(newsize, d->mmap_flag);
618 STATS_ADD(d->malloc_used, newsize);
619 STATS_ZERO(d->inserts);
620 STATS_ZERO(d->insert_collisions);
621 for (i = 0; i < d->regions_total; i++) {
622 void *q = d->r[i].p;
625 STATS_INC(d->inserts);
628 STATS_INC(d->insert_collisions);
630 p[index] = d->r[i];
634 if (d->regions_total > 0) {
635 oldpsz = PAGEROUND(d->regions_total *
638 unmap(d, d->r, oldpsz, oldpsz);
640 d->regions_free += newtotal - d->regions_total;
641 d->regions_total = newtotal;
642 d->r = p;
651 insert(struct dir_info *d, void *p, size_t sz, void *f)
657 if (d->regions_free * 4 < d->regions_total || d->regions_total == 0) {
658 if (omalloc_grow(d))
661 mask = d->regions_total - 1;
663 q = d->r[index].p;
664 STATS_INC(d->inserts);
667 q = d->r[index].p;
668 STATS_INC(d->insert_collisions);
670 d->r[index].p = p;
671 d->r[index].size = sz;
672 STATS_SETF(&d->r[index], f);
673 d->regions_free--;
678 find(struct dir_info *d, void *p)
681 size_t mask = d->regions_total - 1;
684 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
685 d->canary1 != ~d->canary2)
686 wrterror(d, "internal struct corrupt");
687 if (d->r == NULL)
691 r = d->r[index].p;
695 r = d->r[index].p;
698 return (q == p && r != NULL) ? &d->r[index] : NULL;
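
insert() and find() above are an open-addressed hash table whose size is kept a power of two, so regions_total - 1 doubles as the probe mask and collisions are resolved by stepping downward one slot at a time. A compact sketch of that probing pattern over a hypothetical table of pointer/size pairs; the real region_info entries also encode chunk buckets in the low pointer bits, which is omitted here, and ptr_hash is a placeholder for the real hash().

    #include <stddef.h>
    #include <stdint.h>

    struct region { void *p; size_t size; };

    static size_t
    ptr_hash(const void *p)
    {
        return (uintptr_t)p >> 4;       /* placeholder hash */
    }

    /* table_size must be a power of two so (table_size - 1) is a mask;
     * the loops assume at least one empty slot, which the grow policy in
     * omalloc_grow() guarantees */
    static struct region *
    region_find(struct region *table, size_t table_size, void *p)
    {
        size_t mask = table_size - 1;
        size_t index = ptr_hash(p) & mask;

        /* probe downward until the pointer or an empty slot is found */
        while (table[index].p != NULL && table[index].p != p)
            index = (index - 1) & mask;
        return table[index].p == p ? &table[index] : NULL;
    }

    static void
    region_insert(struct region *table, size_t table_size, void *p, size_t sz)
    {
        size_t mask = table_size - 1;
        size_t index = ptr_hash(p) & mask;

        while (table[index].p != NULL)  /* grown before ~75% full */
            index = (index - 1) & mask;
        table[index].p = p;
        table[index].size = sz;
    }
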
702 delete(struct dir_info *d, struct region_info *ri)
705 size_t mask = d->regions_total - 1;
708 if (d->regions_total & (d->regions_total - 1))
709 wrterror(d, "regions_total not 2^x");
710 d->regions_free++;
711 STATS_INC(d->deletes);
713 i = ri - d->r;
715 d->r[i].p = NULL;
716 d->r[i].size = 0;
720 if (d->r[i].p == NULL)
722 r = hash(d->r[i].p) & mask;
726 d->r[j] = d->r[i];
727 STATS_INC(d->delete_moves);
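
delete() has the classic problem of removing an entry from a linear-probing table without breaking later entries' probe chains; the cyclic-range checks on hash(d->r[i].p) & mask above implement the standard deletion-with-relocation scheme for linear probing. A sketch that mirrors that loop, reusing the hypothetical struct region and ptr_hash from the previous sketch:

    #include <stddef.h>
    #include <stdint.h>

    struct region { void *p; size_t size; };    /* as in the sketch above */
    static size_t ptr_hash(const void *p) { return (uintptr_t)p >> 4; }

    static void
    region_delete(struct region *table, size_t table_size, struct region *ri)
    {
        size_t mask = table_size - 1;
        size_t i = ri - table, j, r;

        for (;;) {
            table[i].p = NULL;                  /* open the hole */
            table[i].size = 0;
            j = i;
            for (;;) {
                i = (i - 1) & mask;             /* walk down the chain */
                if (table[i].p == NULL)
                    return;                     /* chain ends: done */
                r = ptr_hash(table[i].p) & mask;
                /* still reachable from its home slot r? leave it alone */
                if ((i <= r && r < j) || (r < j && j < i) ||
                    (j < i && i <= r))
                    continue;
                table[j] = table[i];            /* move it into the hole */
                break;                          /* and repair from slot i */
            }
        }
    }
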
795 unmap(struct dir_info *d, void *p, size_t sz, size_t clear)
803 wrterror(d, "munmap round");
805 if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
807 u_short base = getrbyte(d);
811 for (j = 0; j < d->bigcache_size / 4; j++) {
812 i = (base + j) & (d->bigcache_size - 1);
813 if (d->bigcache_used <
814 BIGCACHE_FILL(d->bigcache_size)) {
815 if (d->bigcache[i].psize == 0)
818 if (d->bigcache[i].psize != 0)
823 if (d->bigcache[i].psize != 0) {
826 r = d->bigcache[i].page;
827 d->bigcache_used -= d->bigcache[i].psize;
828 tmp = d->bigcache[i].psize << MALLOC_PAGESHIFT;
830 validate_junk(d, r, tmp);
832 wrterror(d, "munmap %p", r);
833 STATS_SUB(d->malloc_used, tmp);
840 wrterror(d, "mprotect %p", r);
842 junk_free(d->malloc_junk, p, sz);
843 d->bigcache[i].page = p;
844 d->bigcache[i].psize = psz;
845 d->bigcache_used += psz;
848 if (psz > MAX_SMALLCACHEABLE_SIZE || d->smallcache[psz - 1].max == 0) {
850 wrterror(d, "munmap %p", p);
851 STATS_SUB(d->malloc_used, sz);
854 cache = &d->smallcache[psz - 1];
858 i = getrbyte(d) & (cache->max - 1);
863 validate_junk(d, r, sz);
865 wrterror(d, "munmap %p", r);
866 STATS_SUB(d->malloc_used, sz);
877 junk_free(d->malloc_junk, p, sz);
883 map(struct dir_info *d, size_t sz, int zero_fill)
890 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
891 d->canary1 != ~d->canary2)
892 wrterror(d, "internal struct corrupt");
894 wrterror(d, "map round");
897 if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
899 size_t base = getrbyte(d);
900 size_t cached = d->bigcache_used;
903 for (j = 0; j < d->bigcache_size && cached >= psz; j++) {
904 i = (j + base) & (d->bigcache_size - 1);
905 if (d->bigcache[i].psize == psz) {
906 p = d->bigcache[i].page;
907 d->bigcache_used -= psz;
908 d->bigcache[i].page = NULL;
909 d->bigcache[i].psize = 0;
912 validate_junk(d, p, sz);
918 junk_free(d->malloc_junk, p, sz);
921 cached -= d->bigcache[i].psize;
924 if (psz <= MAX_SMALLCACHEABLE_SIZE && d->smallcache[psz - 1].max > 0) {
925 cache = &d->smallcache[psz - 1];
931 i = getrbyte(d) % cache->length;
940 validate_junk(d, p, sz);
946 junk_free(d->malloc_junk, p, sz);
950 p = MMAP(cache->max * sz, d->mmap_flag);
952 STATS_ADD(d->malloc_used, cache->max * sz);
970 p = MMAP(sz, d->mmap_flag);
972 STATS_ADD(d->malloc_used, sz);
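
Both unmap() and map() above go through per-pool page caches before touching the kernel: unmap() stashes a freed run of pages at a random index (evicting whatever was there), and map() scans from a random starting index for a cached run of the right size, so both reuse and eviction order stay unpredictable. A simplified sketch of that stash/reuse pair with one tiny fixed cache standing in for the real smallcache/bigcache machinery; CACHE_SLOTS and struct page_cache are made up for the example.

    #include <stddef.h>
    #include <sys/mman.h>

    #define CACHE_SLOTS 4       /* power of two, illustrative only */

    struct page_cache {
        void *page[CACHE_SLOTS];
        size_t size[CACHE_SLOTS];
    };

    /* stash a freed mapping, evicting whatever random slot we land on */
    static void
    cache_page(struct page_cache *c, void *p, size_t sz, unsigned rnd)
    {
        unsigned i = rnd & (CACHE_SLOTS - 1);

        if (c->page[i] != NULL)
            munmap(c->page[i], c->size[i]);     /* evict old occupant */
        c->page[i] = p;
        c->size[i] = sz;
    }

    /* try to reuse a cached mapping of exactly this size */
    static void *
    cache_take(struct page_cache *c, size_t sz, unsigned rnd)
    {
        unsigned i, j;

        for (j = 0; j < CACHE_SLOTS; j++) {
            i = (rnd + j) & (CACHE_SLOTS - 1);
            if (c->page[i] != NULL && c->size[i] == sz) {
                void *p = c->page[i];
                c->page[i] = NULL;
                c->size[i] = 0;
                return p;
            }
        }
        return NULL;            /* caller falls back to mmap */
    }
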
978 init_chunk_info(struct dir_info *d, struct chunk_info *p, u_int bucket)
985 p->canary = (u_short)d->canary1;
994 alloc_chunk_info(struct dir_info *d, u_int bucket)
998 if (LIST_EMPTY(&d->chunk_info_list[bucket])) {
1018 if (d->chunk_pages_used == chunk_pages ||
1019 d->chunk_pages == NULL) {
1020 q = MMAP(MALLOC_PAGESIZE * chunk_pages, d->mmap_flag);
1023 d->chunk_pages = q;
1024 d->chunk_pages_used = 0;
1025 STATS_ADD(d->malloc_used, MALLOC_PAGESIZE *
1028 q = (char *)d->chunk_pages + d->chunk_pages_used *
1030 d->chunk_pages_used++;
1034 LIST_INSERT_HEAD(&d->chunk_info_list[bucket], p,
1038 p = LIST_FIRST(&d->chunk_info_list[bucket]);
1041 init_chunk_info(d, p, bucket);
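
alloc_chunk_info() above hands out chunk_info records bump-allocator style from pages obtained in bulk (d->chunk_pages / d->chunk_pages_used), refilling with a fresh MMAP() when the current run is exhausted. A minimal sketch of that pattern over one anonymous mapping; SLAB_BYTES and struct bump are invented for the example, and recsz is assumed to be no larger than a slab.

    #include <stddef.h>
    #include <sys/mman.h>

    #define SLAB_BYTES 4096     /* stand-in for MALLOC_PAGESIZE * chunk_pages */

    struct bump {
        char *base;
        size_t used;
    };

    /* hand out fixed-size records from an mmap'd slab, refilling on demand */
    static void *
    bump_alloc(struct bump *b, size_t recsz)
    {
        void *p;

        if (b->base == NULL || b->used + recsz > SLAB_BYTES) {
            p = mmap(NULL, SLAB_BYTES, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANON, -1, 0);
            if (p == MAP_FAILED)
                return NULL;
            b->base = p;
            b->used = 0;
        }
        p = b->base + b->used;
        b->used += recsz;
        return p;
    }
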
1049 omalloc_make_chunks(struct dir_info *d, u_int bucket, u_int listnum)
1056 pp = map(d, MALLOC_PAGESIZE, 0);
1060 ff = map(d, MALLOC_PAGESIZE, 0);
1071 bp = alloc_chunk_info(d, bucket);
1076 if (insert(d, (void *)((uintptr_t)pp | (bucket + 1)), (uintptr_t)bp,
1079 LIST_INSERT_HEAD(&d->chunk_dir[bucket][listnum], bp, entries);
1081 if (bucket > 0 && d->malloc_junk != 0)
1087 unmap(d, pp, MALLOC_PAGESIZE, 0);
1089 unmap(d, ff, MALLOC_PAGESIZE, 0);
1167 malloc_bytes(struct dir_info *d, size_t size)
1174 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
1175 d->canary1 != ~d->canary2)
1176 wrterror(d, "internal struct corrupt");
1180 r = getrbyte(d);
1184 if ((bp = LIST_FIRST(&d->chunk_dir[bucket][listnum])) == NULL) {
1185 bp = omalloc_make_chunks(d, bucket, listnum);
1190 if (bp->canary != (u_short)d->canary1 || bucket != bp->bucket)
1191 wrterror(d, "chunk info corrupted");
1196 r = r << 8 | getrbyte(d);
1231 struct region_info *r = find(d, bp->page);
1232 STATS_SETFN(r, k, d->caller);
1237 validate_junk(d, p, B2SIZE(bucket));
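
malloc_bytes() draws extra random bytes (r = r << 8 | getrbyte(d)) to decide which free chunk of a page gets handed out, so consecutive small allocations do not land at predictable offsets. A loosely hedged sketch of that "random start, then scan for a free slot" selection; the real code walks per-page bitmaps inside struct chunk_info rather than the byte map free_map used here.

    #include <stddef.h>
    #include <stdint.h>

    /* pick a free slot, starting the search at a random position */
    static int
    pick_random_free(const uint8_t *free_map, size_t nslots, uint16_t rnd)
    {
        size_t start = rnd % nslots, i, k;

        for (i = 0; i < nslots; i++) {
            k = (start + i) % nslots;
            if (free_map[k])            /* 1 == free in this sketch */
                return (int)k;
        }
        return -1;                      /* page is full */
    }
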
1245 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
1257 wrterror(d, "canary corrupted %p[%tu]@%zu/%zu%s",
1266 find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
1270 if (info->canary != (u_short)d->canary1)
1271 wrterror(d, "chunk info corrupted");
1277 wrterror(d, "modified chunk-pointer %p", ptr);
1279 wrterror(d, "double free %p", ptr);
1281 validate_canary(d, ptr, info->bits[info->offset + chunknum],
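
validate_canary() checks the fill bytes written between the size the caller requested and the end of the chunk, which is how small heap overflows trip the "canary corrupted" error above. A standalone sketch of the write/verify pair; CANARY_BYTE is a stand-in constant, not the fill value the real allocator uses.

    #include <stddef.h>

    #define CANARY_BYTE 0xdb    /* stand-in fill value */

    /* fill the slack between the requested size and the chunk size */
    static void
    fill_canary(unsigned char *ptr, size_t requested, size_t allocated)
    {
        size_t i;

        for (i = requested; i < allocated; i++)
            ptr[i] = CANARY_BYTE;
    }

    /* on free, any overwritten fill byte betrays an out-of-bounds write */
    static int
    check_canary(const unsigned char *ptr, size_t requested, size_t allocated)
    {
        size_t i;

        for (i = requested; i < allocated; i++)
            if (ptr[i] != CANARY_BYTE)
                return -1;      /* corrupted */
        return 0;
    }
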
1291 free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1299 chunknum = find_chunknum(d, info, ptr, 0);
1306 listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
1307 mp = &d->chunk_dir[info->bucket][listnum];
1319 unmap(d, info->page, MALLOC_PAGESIZE, 0);
1322 unmap(d, r->f, MALLOC_PAGESIZE, MALLOC_PAGESIZE);
1327 delete(d, r);
1328 mp = &d->chunk_info_list[info->bucket];
1405 malloc_recurse(struct dir_info *d)
1411 wrterror(d, "recursive call");
1413 d->active--;
1414 _MALLOC_UNLOCK(d->mutex);
1422 struct dir_info *d;
1439 sz = mopts.malloc_mutexes * sizeof(*d);
1450 d = (struct dir_info *)(p + MALLOC_PAGESIZE +
1452 STATS_ADD(d[1].malloc_used, roundup_sz + 2 * MALLOC_PAGESIZE);
1454 mopts.malloc_pool[i] = &d[i];
1470 d = mopts.malloc_pool[i];
1471 d->malloc_mt = from_rthreads;
1472 if (d->canary1 == ~d->canary2)
1475 omalloc_poolinit(d, MAP_CONCEAL);
1476 d->malloc_junk = 2;
1477 d->bigcache_size = 0;
1479 d->smallcache[j].max = 0;
1483 omalloc_poolinit(d, 0);
1484 d->malloc_junk = mopts.def_malloc_junk;
1485 d->bigcache_size = mopts.def_maxcache;
1487 d->smallcache[j].max =
1489 sz += d->smallcache[j].max * sizeof(void *);
1491 sz += d->bigcache_size * sizeof(struct bigcache);
1501 d->smallcache[j].pages = p;
1502 p = (char *)p + d->smallcache[j].max *
1505 d->bigcache = p;
1508 d->mutex = i;
1516 d = (p); \
1517 if (d == NULL) { \
1519 d = (p); \
1521 _MALLOC_LOCK(d->mutex); \
1522 d->func = fn; \
1523 if (d->active++) { \
1524 malloc_recurse(d); \
1529 d->active--; \
1530 _MALLOC_UNLOCK(d->mutex); \
1532 wrterror(d, "out of memory"); \
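
The macro body around source line 1516 is the shared prologue of the malloc family: pick the thread's pool, take its lock, and bump d->active as a re-entrancy guard so that re-entering the allocator is reported (the "recursive call" error in malloc_recurse()) instead of silently corrupting state. A simplified sketch of that guard with a pthread mutex standing in for _MALLOC_LOCK; in this sketch the recursion check only matters when the lock does not already block the second entry, for example a signal handler on the same thread or a lock that is a no-op in single-threaded programs.

    #include <pthread.h>
    #include <stdlib.h>

    struct pool {
        pthread_mutex_t mutex;
        int active;             /* re-entrancy guard, not a reference count */
    };

    static struct pool pool0 = { PTHREAD_MUTEX_INITIALIZER, 0 };

    static void
    pool_enter(struct pool *d)
    {
        pthread_mutex_lock(&d->mutex);
        if (d->active++)        /* already inside the allocator? */
            abort();            /* the real code calls malloc_recurse() */
    }

    static void
    pool_leave(struct pool *d)
    {
        d->active--;
        pthread_mutex_unlock(&d->mutex);
    }
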
1540 struct dir_info *d;
1544 SET_CALLER(d, caller(d));
1545 r = omalloc(d, size, 0);
1555 struct dir_info *d;
1559 SET_CALLER(d, caller(d));
1560 r = omalloc(d, size, 0);
1731 struct dir_info *d;
1738 d = getpool();
1739 if (d == NULL)
1740 wrterror(d, "free() called before allocation");
1741 _MALLOC_LOCK(d->mutex);
1742 d->func = "free";
1743 if (d->active++) {
1744 malloc_recurse(d);
1747 ofree(&d, ptr, 0, 0, 0);
1748 d->active--;
1749 _MALLOC_UNLOCK(d->mutex);
1764 struct dir_info *d;
1776 d = getpool();
1777 if (d == NULL)
1778 wrterror(d, "freezero() called before allocation");
1779 _MALLOC_LOCK(d->mutex);
1780 d->func = "freezero";
1781 if (d->active++) {
1782 malloc_recurse(d);
1785 ofree(&d, ptr, 1, 1, sz);
1786 d->active--;
1787 _MALLOC_UNLOCK(d->mutex);
1965 struct dir_info *d;
1970 SET_CALLER(d, caller(d));
1971 r = orealloc(&d, ptr, size);
1986 struct dir_info *d;
1991 SET_CALLER(d, caller(d));
1994 d->active--;
1995 _MALLOC_UNLOCK(d->mutex);
1997 wrterror(d, "out of memory");
2003 r = omalloc(d, size, 1);
2012 struct dir_info *d;
2017 SET_CALLER(d, caller(d));
2020 d->active--;
2021 _MALLOC_UNLOCK(d->mutex);
2023 wrterror(d, "out of memory");
2029 r = omalloc(d, size, 1);
2127 size_t d = oldsize - newsize;
2129 if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
2130 memset((char *)ptr + newsize, 0, d);
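
The fragment at source lines 2127-2130 is the shrink heuristic used when a reallocation gets smaller: if the reduction is modest (less than half the old size and less than a page), keep the existing allocation and just clear the now-unused tail. A sketch with the same thresholds; PAGE_SIZE stands in for MALLOC_PAGESIZE and the return convention is invented for the example.

    #include <stddef.h>
    #include <string.h>

    #define PAGE_SIZE 4096      /* stand-in for MALLOC_PAGESIZE */

    /* returns 1 if the shrink was absorbed in place, 0 if the caller
     * should fall back to allocate-copy-free */
    static int
    shrink_in_place(void *ptr, size_t oldsize, size_t newsize)
    {
        size_t d = oldsize - newsize;

        if (d < oldsize / 2 && d < PAGE_SIZE) {
            memset((char *)ptr + newsize, 0, d);    /* scrub the freed tail */
            return 1;
        }
        return 0;
    }
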
2154 struct dir_info *d;
2163 SET_CALLER(d, caller(d));
2167 d->active--;
2168 _MALLOC_UNLOCK(d->mutex);
2170 wrterror(d, "out of memory");
2179 d->active--;
2180 _MALLOC_UNLOCK(d->mutex);
2187 r = orecallocarray(&d, ptr, oldsize, newsize);
2194 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
2199 wrterror(d, "mapalign bad alignment");
2201 wrterror(d, "mapalign round");
2212 p = map(d, sz + alignment, zero_fill);
2218 wrterror(d, "munmap %p", p);
2221 wrterror(d, "munmap %p", q + sz);
2222 STATS_SUB(d->malloc_used, alignment);
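
mapalign() obtains an alignment-aligned region by mapping sz + alignment bytes and unmapping the slack before and after the first aligned address, which is why the excerpt shows two munmap error paths and a STATS_SUB of exactly alignment. A hedged sketch using plain mmap()/munmap(); the real function goes through map() so the page cache and statistics stay consistent, and it insists that alignment is a power of two of at least a page and that sz is page-rounded.

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static void *
    map_aligned(size_t alignment, size_t sz)
    {
        char *p, *q;

        if (alignment > SIZE_MAX - sz)          /* sz + alignment must fit */
            return MAP_FAILED;
        p = mmap(NULL, sz + alignment, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON, -1, 0);
        if (p == MAP_FAILED)
            return MAP_FAILED;
        /* first aligned address inside the oversized mapping */
        q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
        if (q != p)
            munmap(p, q - p);                   /* trim the head slack */
        munmap(q + sz, alignment - (q - p));    /* trim the tail slack */
        return q;
    }
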
2303 struct dir_info *d;
2311 d = getpool();
2312 if (d == NULL) {
2314 d = getpool();
2316 _MALLOC_LOCK(d->mutex);
2317 d->func = "posix_memalign";
2318 if (d->active++) {
2319 malloc_recurse(d);
2322 SET_CALLER(d, caller(d));
2323 r = omemalign(d, alignment, size, 0);
2324 d->active--;
2325 _MALLOC_UNLOCK(d->mutex);
2328 wrterror(d, "out of memory");
2345 struct dir_info *d;
2361 SET_CALLER(d, caller(d));
2362 r = omemalign(d, alignment, size, 0);
2379 store_caller(struct dir_info *d, struct btnode *f)
2383 if (DO_STATS == 0 || d->btnodes == MAP_FAILED)
2386 p = RBT_FIND(btshead, &d->btraces, f);
2389 if (d->btnodes == NULL ||
2390 d->btnodesused >= MALLOC_PAGESIZE / sizeof(struct btnode)) {
2391 d->btnodes = map(d, MALLOC_PAGESIZE, 0);
2392 if (d->btnodes == MAP_FAILED)
2394 d->btnodesused = 0;
2396 p = &d->btnodes[d->btnodesused++];
2398 RBT_INSERT(btshead, &d->btraces, p);
2477 struct malloc_leak d;
2483 return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
2496 dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
2516 key.d.f = f;
2529 p->d.f = f;
2530 p->d.total_size = sz * cnt;
2531 p->d.count = cnt;
2534 p->d.total_size += sz * cnt;
2535 p->d.count += cnt;
2560 if (p->d.f == NULL) {
2563 p->d.f, p->d.total_size, p->d.count,
2564 p->d.total_size / p->d.count, buf);
2571 abscaller = ((struct btnode*)p->d.f)->caller[i];
2577 abscaller, p->d.total_size, p->d.count,
2578 p->d.total_size / p->d.count, buf);
2603 ulog("chunk %18p %18p %4zu %d/%d\n",
2621 dump_free_chunk_info(struct dir_info *d, struct leaktree *leaks)
2631 LIST_FOREACH(p, &d->chunk_info_list[i], entries)
2634 p = LIST_FIRST(&d->chunk_dir[i][j]);
2638 ulog("%3d) %3d ", i, count);
2651 dump_free_page_info(struct dir_info *d)
2658 cache = &d->smallcache[i];
2665 ulog("Cached in big cache: %zu/%zu\n", d->bigcache_used,
2666 d->bigcache_size);
2667 for (i = 0; i < d->bigcache_size; i++) {
2668 if (d->bigcache[i].psize != 0)
2669 ulog("%zu: %zu\n", i, d->bigcache[i].psize);
2670 total += d->bigcache[i].psize;
2676 malloc_dump1(int poolno, struct dir_info *d, struct leaktree *leaks)
2681 ulog("Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
2682 ulog("MT=%d J=%d Fl=%#x\n", d->malloc_mt, d->malloc_junk,
2683 d->mmap_flag);
2685 d->regions_free, d->regions_total);
2686 ulog("Inserts %zu/%zu\n", d->inserts, d->insert_collisions);
2687 ulog("Deletes %zu/%zu\n", d->deletes, d->delete_moves);
2689 d->cheap_reallocs, d->cheap_realloc_tries);
2690 ulog("In use %zu\n", d->malloc_used);
2691 ulog("Guarded %zu\n", d->malloc_guarded);
2692 dump_free_chunk_info(d, leaks);
2693 dump_free_page_info(d);
2695 ulog("slot) hash d type page "
2698 for (i = 0; i < d->regions_total; i++) {
2699 if (d->r[i].p != NULL) {
2700 size_t h = hash(d->r[i].p) &
2701 (d->regions_total - 1);
2705 REALSIZE(realsize, &d->r[i]);
2707 putleakinfo(leaks, d->r[i].f, realsize, 1);
2709 ulog("pages %18p %18p %zu\n", d->r[i].p,
2710 d->r[i].f, realsize);
2713 (struct chunk_info *)d->r[i].size,
2714 d->r[i].f, 0);
2767 ulog("M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%#x cache=%u "