10Sstevel@tonic-gate /*
20Sstevel@tonic-gate * CDDL HEADER START
30Sstevel@tonic-gate *
40Sstevel@tonic-gate * The contents of this file are subject to the terms of the
5*1528Sjwadams * Common Development and Distribution License (the "License").
6*1528Sjwadams * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate *
80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate * See the License for the specific language governing permissions
110Sstevel@tonic-gate * and limitations under the License.
120Sstevel@tonic-gate *
130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate *
190Sstevel@tonic-gate * CDDL HEADER END
200Sstevel@tonic-gate */
210Sstevel@tonic-gate /*
22*1528Sjwadams * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
230Sstevel@tonic-gate * Use is subject to license terms.
240Sstevel@tonic-gate */
250Sstevel@tonic-gate
260Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI"
270Sstevel@tonic-gate
280Sstevel@tonic-gate /*
290Sstevel@tonic-gate * A generic memory leak detector. The target interface, defined in
300Sstevel@tonic-gate * <leaky_impl.h>, is implemented by the genunix and libumem dmods to fill
310Sstevel@tonic-gate * in the details of operation.
320Sstevel@tonic-gate */
330Sstevel@tonic-gate
340Sstevel@tonic-gate #include <mdb/mdb_modapi.h>
350Sstevel@tonic-gate
360Sstevel@tonic-gate #include "leaky.h"
370Sstevel@tonic-gate #include "leaky_impl.h"
380Sstevel@tonic-gate
#define	LK_BUFCTLHSIZE	127		/* buckets in the leak_bufctl hash */

/*
 * We re-use the low bit of the lkm_addr as the 'marked' bit.
 */
#define	LK_MARKED(b)	((uintptr_t)(b) & 1)
#define	LK_MARK(b)	((b) |= 1)
#define	LK_ADDR(b)	((uintptr_t)(b) & ~1UL)

/*
 * Possible values for lk_state.
 */
#define	LK_CLEAN	0	/* No outstanding mdb_alloc()'s */
#define	LK_SWEEPING	1	/* Potentially some outstanding mdb_alloc()'s */
#define	LK_DONE		2	/* All mdb_alloc()'s complete */
#define	LK_CLEANING	3	/* Currently cleaning prior mdb_alloc()'s */

/* volatile: leaky_cleanup() must see a sane value after an interrupt */
static volatile int lk_state;

#define	LK_STATE_SIZE	10000	/* completely arbitrary */

typedef int leak_ndx_t;		/* change if >2 billion buffers are needed */

/*
 * One chunk of the explicit depth-first scan stack used by leaky_grep();
 * chunks are chained through lks_next as the stack grows.
 */
typedef struct leak_state {
	struct leak_state *lks_next;		/* previous (older) chunk */
	leak_ndx_t lks_stack[LK_STATE_SIZE];	/* pending lk_mtab indexes */
} leak_state_t;

/*
 * Scan statistics ("bean counts") reported by the verbose output.
 */
typedef struct leak_beans {
	int lkb_dups;		/* buffers reached more than once */
	int lkb_follows;	/* new buffers marked and followed */
	int lkb_misses;		/* in-range values inside no buffer */
	int lkb_dismissals;	/* values outside [min, max) entirely */
	int lkb_pushes;		/* buffers deferred onto the scan stack */
	int lkb_deepest;	/* maximum scan stack depth observed */
} leak_beans_t;

/* per-type summary of coalesced leaks, sorted for reporting */
typedef struct leak_type {
	int lt_type;			/* LK_* type index */
	size_t lt_leaks;		/* number of coalesced leaks */
	leak_bufctl_t **lt_sorted;	/* leaks ordered by leaky_ctlcmp() */
} leak_type_t;

/* position state for the leak walkers (walker code not in this view) */
typedef struct leak_walk {
	int lkw_ndx;
	leak_bufctl_t *lkw_current;
	leak_bufctl_t *lkw_hash_next;
} leak_walk_t;

#define	LK_SCAN_BUFFER_SIZE	16384	/* bytes per mdb_vread() pass */
static uintptr_t *lk_scan_buffer;	/* scratch buffer for leaky_grep() */

static leak_mtab_t *lk_mtab;		/* sorted table of tracked buffers */
static leak_state_t *lk_free_state;	/* freelist of scan-stack chunks */
static leak_ndx_t lk_nbuffers;		/* number of entries in lk_mtab */
static leak_beans_t lk_beans;		/* accumulated scan statistics */
static leak_bufctl_t *lk_bufctl[LK_BUFCTLHSIZE];	/* leak hash table */
static leak_type_t lk_types[LK_NUM_TYPES];	/* per-type leak summaries */
static size_t lk_memusage;		/* bytes allocated by this module */
#ifndef _KMDB
static hrtime_t lk_begin;		/* wall-clock start of the run */
static hrtime_t lk_vbegin;		/* CPU-time start of the run */
#endif
static uint_t lk_verbose = FALSE;	/* report detailed statistics? */
1030Sstevel@tonic-gate
1040Sstevel@tonic-gate static void
leaky_verbose(char * str,uint64_t stat)1050Sstevel@tonic-gate leaky_verbose(char *str, uint64_t stat)
1060Sstevel@tonic-gate {
1070Sstevel@tonic-gate if (lk_verbose == FALSE)
1080Sstevel@tonic-gate return;
1090Sstevel@tonic-gate
1100Sstevel@tonic-gate mdb_printf("findleaks: ");
1110Sstevel@tonic-gate
1120Sstevel@tonic-gate if (str == NULL) {
1130Sstevel@tonic-gate mdb_printf("\n");
1140Sstevel@tonic-gate return;
1150Sstevel@tonic-gate }
1160Sstevel@tonic-gate
1170Sstevel@tonic-gate mdb_printf("%*s => %lld\n", 30, str, stat);
1180Sstevel@tonic-gate }
1190Sstevel@tonic-gate
1200Sstevel@tonic-gate static void
leaky_verbose_perc(char * str,uint64_t stat,uint64_t total)1210Sstevel@tonic-gate leaky_verbose_perc(char *str, uint64_t stat, uint64_t total)
1220Sstevel@tonic-gate {
1230Sstevel@tonic-gate uint_t perc = (stat * 100) / total;
1240Sstevel@tonic-gate uint_t tenths = ((stat * 1000) / total) % 10;
1250Sstevel@tonic-gate
1260Sstevel@tonic-gate if (lk_verbose == FALSE)
1270Sstevel@tonic-gate return;
1280Sstevel@tonic-gate
1290Sstevel@tonic-gate mdb_printf("findleaks: %*s => %-13lld (%2d.%1d%%)\n",
1300Sstevel@tonic-gate 30, str, stat, perc, tenths);
1310Sstevel@tonic-gate }
1320Sstevel@tonic-gate
/*
 * Record the starting wall-clock and CPU timestamps for this findleaks
 * run and reset the memory-usage counter; paired with
 * leaky_verbose_end().
 */
static void
leaky_verbose_begin(void)
{
	/* kmdb can't tell time */
#ifndef _KMDB
	extern hrtime_t gethrvtime(void);
	lk_begin = gethrtime();
	lk_vbegin = gethrvtime();
#endif
	lk_memusage = 0;
}
1440Sstevel@tonic-gate
/*
 * Report peak memory usage and (outside of kmdb) elapsed CPU and wall
 * time since leaky_verbose_begin(), when verbose output was requested.
 * Times are printed with a single fractional decimal digit.
 */
static void
leaky_verbose_end(void)
{
	/* kmdb can't tell time */
#ifndef _KMDB
	extern hrtime_t gethrvtime(void);

	hrtime_t ts = gethrtime() - lk_begin;
	hrtime_t sec = ts / (hrtime_t)NANOSEC;
	hrtime_t nsec = ts % (hrtime_t)NANOSEC;

	hrtime_t vts = gethrvtime() - lk_vbegin;
	hrtime_t vsec = vts / (hrtime_t)NANOSEC;
	hrtime_t vnsec = vts % (hrtime_t)NANOSEC;
#endif

	if (lk_verbose == FALSE)
		return;

	mdb_printf("findleaks: %*s => %lu kB\n",
	    30, "peak memory usage", (lk_memusage + 1023)/1024);
#ifndef _KMDB
	mdb_printf("findleaks: %*s => %lld.%lld seconds\n",
	    30, "elapsed CPU time", vsec, (vnsec * 10)/(hrtime_t)NANOSEC);
	mdb_printf("findleaks: %*s => %lld.%lld seconds\n",
	    30, "elapsed wall time", sec, (nsec * 10)/(hrtime_t)NANOSEC);
#endif
	leaky_verbose(NULL, 0);
}
1740Sstevel@tonic-gate
1750Sstevel@tonic-gate static void *
leaky_alloc(size_t sz,uint_t flags)1760Sstevel@tonic-gate leaky_alloc(size_t sz, uint_t flags)
1770Sstevel@tonic-gate {
1780Sstevel@tonic-gate void *buf = mdb_alloc(sz, flags);
1790Sstevel@tonic-gate
1800Sstevel@tonic-gate if (buf != NULL)
1810Sstevel@tonic-gate lk_memusage += sz;
1820Sstevel@tonic-gate
1830Sstevel@tonic-gate return (buf);
1840Sstevel@tonic-gate }
1850Sstevel@tonic-gate
1860Sstevel@tonic-gate static void *
leaky_zalloc(size_t sz,uint_t flags)1870Sstevel@tonic-gate leaky_zalloc(size_t sz, uint_t flags)
1880Sstevel@tonic-gate {
1890Sstevel@tonic-gate void *buf = mdb_zalloc(sz, flags);
1900Sstevel@tonic-gate
1910Sstevel@tonic-gate if (buf != NULL)
1920Sstevel@tonic-gate lk_memusage += sz;
1930Sstevel@tonic-gate
1940Sstevel@tonic-gate return (buf);
1950Sstevel@tonic-gate }
1960Sstevel@tonic-gate
1970Sstevel@tonic-gate static int
leaky_mtabcmp(const void * l,const void * r)1980Sstevel@tonic-gate leaky_mtabcmp(const void *l, const void *r)
1990Sstevel@tonic-gate {
2000Sstevel@tonic-gate const leak_mtab_t *lhs = (const leak_mtab_t *)l;
2010Sstevel@tonic-gate const leak_mtab_t *rhs = (const leak_mtab_t *)r;
2020Sstevel@tonic-gate
2030Sstevel@tonic-gate if (lhs->lkm_base < rhs->lkm_base)
2040Sstevel@tonic-gate return (-1);
2050Sstevel@tonic-gate if (lhs->lkm_base > rhs->lkm_base)
2060Sstevel@tonic-gate return (1);
2070Sstevel@tonic-gate
2080Sstevel@tonic-gate return (0);
2090Sstevel@tonic-gate }
2100Sstevel@tonic-gate
2110Sstevel@tonic-gate static leak_ndx_t
leaky_search(uintptr_t addr)2120Sstevel@tonic-gate leaky_search(uintptr_t addr)
2130Sstevel@tonic-gate {
2140Sstevel@tonic-gate leak_ndx_t left = 0, right = lk_nbuffers - 1, guess;
2150Sstevel@tonic-gate
2160Sstevel@tonic-gate while (right >= left) {
2170Sstevel@tonic-gate guess = (right + left) >> 1;
2180Sstevel@tonic-gate
2190Sstevel@tonic-gate if (addr < LK_ADDR(lk_mtab[guess].lkm_base)) {
2200Sstevel@tonic-gate right = guess - 1;
2210Sstevel@tonic-gate continue;
2220Sstevel@tonic-gate }
2230Sstevel@tonic-gate
2240Sstevel@tonic-gate if (addr >= lk_mtab[guess].lkm_limit) {
2250Sstevel@tonic-gate left = guess + 1;
2260Sstevel@tonic-gate continue;
2270Sstevel@tonic-gate }
2280Sstevel@tonic-gate
2290Sstevel@tonic-gate return (guess);
2300Sstevel@tonic-gate }
2310Sstevel@tonic-gate
2320Sstevel@tonic-gate return (-1);
2330Sstevel@tonic-gate }
2340Sstevel@tonic-gate
/*
 * Conservatively scan [addr, addr + size) for values that look like
 * pointers into tracked buffers.  Each newly-reached buffer is marked
 * and then scanned in turn, depth-first, using an explicit stack of
 * lk_mtab indexes (leak_state_t chunks).  Small buffers that fit behind
 * the current scan position are processed in-place without a push.
 * After all roots have been grepped, any buffer still unmarked is a
 * potential leak.
 */
void
leaky_grep(uintptr_t addr, size_t size)
{
	uintptr_t *buf, *cur, *end;
	size_t bytes, newsz, nptrs;
	leak_state_t *state = NULL, *new_state;
	uint_t state_idx;
	/* bounds of the tracked address range, for cheap dismissal */
	uintptr_t min = LK_ADDR(lk_mtab[0].lkm_base);
	uintptr_t max = lk_mtab[lk_nbuffers - 1].lkm_limit;
	/* stats accumulated locally; folded into lk_beans at the end */
	int dups = 0, misses = 0, depth = 0, deepest = 0;
	int follows = 0, dismissals = 0, pushes = 0;
	leak_ndx_t mtab_ndx;
	leak_mtab_t *lmp;
	uintptr_t nbase;
	uintptr_t base;
	size_t base_size;
	const uintptr_t mask = sizeof (uintptr_t) - 1;

	if (addr == NULL || size == 0)
		return;

	state_idx = 0;

	/*
	 * Our main loop, led by the 'pop' label:
	 * 1) read in a buffer piece by piece,
	 * 2) mark all unmarked mtab entries reachable from it, and
	 * either scan them in-line or push them onto our stack of
	 * unfinished work.
	 * 3) pop the top mtab entry off the stack, and loop.
	 */
pop:
	base = addr;
	base_size = size;

	/*
	 * If our address isn't pointer-aligned, we need to align it and
	 * whack the size appropriately.
	 */
	if (size < mask) {
		size = 0;
	} else if (addr & mask) {
		size -= (mask + 1) - (addr & mask);
		addr += (mask + 1) - (addr & mask);
	}
	size -= (size & mask);

	while (size > 0) {
		buf = lk_scan_buffer;
		end = &buf[LK_SCAN_BUFFER_SIZE / sizeof (uintptr_t)];

		/* read the next chunk into the TAIL of the scan buffer */
		bytes = MIN(size, LK_SCAN_BUFFER_SIZE);
		cur = end - (bytes / sizeof (uintptr_t));

		if (mdb_vread(cur, bytes, addr) == -1) {
			mdb_warn("[%p, %p): couldn't read %ld bytes at %p",
			    base, base + base_size, bytes, addr);
			break;
		}

		addr += bytes;
		size -= bytes;

		/*
		 * The buffer looks like: ('+'s are unscanned data)
		 *
		 * -----------------------------++++++++++++++++
		 * |                            |              |
		 * buf                          cur            end
		 *
		 * cur scans forward. When we encounter a new buffer, and
		 * it will fit behind "cur", we read it in and back up cur,
		 * processing it immediately.
		 */
		while (cur < end) {
			uintptr_t ptr = *cur++;

			/* cheap range test before the binary search */
			if (ptr < min || ptr > max) {
				dismissals++;
				continue;
			}

			if ((mtab_ndx = leaky_search(ptr)) == -1) {
				misses++;
				continue;
			}

			lmp = &lk_mtab[mtab_ndx];
			if (LK_MARKED(lmp->lkm_base)) {
				dups++; /* already seen */
				continue;
			}

			/*
			 * Found an unmarked buffer. Mark it, then either
			 * read it in, or add it to the stack of pending work.
			 */
			follows++;
			LK_MARK(lmp->lkm_base);

			nbase = LK_ADDR(lmp->lkm_base);
			newsz = lmp->lkm_limit - nbase;

			/* round down to a whole number of pointers */
			nptrs = newsz / sizeof (uintptr_t);
			newsz = nptrs * sizeof (uintptr_t);

			/*
			 * In-place processing: the new buffer is aligned
			 * and fits in the unscanned space behind cur.
			 */
			if ((nbase & mask) == 0 && nptrs <= (cur - buf) &&
			    mdb_vread(cur - nptrs, newsz, nbase) != -1) {
				cur -= nptrs;
				continue;
			}

			/*
			 * couldn't process it in-place -- add it to the
			 * stack.
			 */
			if (state == NULL || state_idx == LK_STATE_SIZE) {
				/* grow the stack, reusing freed chunks */
				if ((new_state = lk_free_state) != NULL)
					lk_free_state = new_state->lks_next;
				else
					new_state = leaky_zalloc(
					    sizeof (*state), UM_SLEEP | UM_GC);

				new_state->lks_next = state;
				state = new_state;
				state_idx = 0;
			}

			pushes++;
			state->lks_stack[state_idx++] = mtab_ndx;
			if (++depth > deepest)
				deepest = depth;
		}
	}

	/*
	 * Retrieve the next mtab index, extract its info, and loop around
	 * to process it.
	 */
	if (state_idx == 0 && state != NULL) {
		/* current chunk exhausted: return it to the freelist */
		new_state = state->lks_next;

		state->lks_next = lk_free_state;
		lk_free_state = state;

		state = new_state;
		state_idx = LK_STATE_SIZE;
	}

	if (depth > 0) {
		mtab_ndx = state->lks_stack[--state_idx];

		addr = LK_ADDR(lk_mtab[mtab_ndx].lkm_base);
		size = lk_mtab[mtab_ndx].lkm_limit - addr;
		depth--;

		goto pop;
	}

	/*
	 * update the beans
	 */
	lk_beans.lkb_dups += dups;
	lk_beans.lkb_dismissals += dismissals;
	lk_beans.lkb_misses += misses;
	lk_beans.lkb_follows += follows;
	lk_beans.lkb_pushes += pushes;

	if (deepest > lk_beans.lkb_deepest)
		lk_beans.lkb_deepest = deepest;
}
4060Sstevel@tonic-gate
/*
 * Common helper for leaky_grep_ptr() and leaky_mark_ptr(): find the
 * tracked buffer containing loc, mark it, and -- if process is nonzero
 * -- scan its contents for further pointers via leaky_grep().  The
 * relevant lk_beans statistic is bumped in every case.
 */
static void
leaky_do_grep_ptr(uintptr_t loc, int process)
{
	leak_ndx_t ndx;
	leak_mtab_t *lkmp;
	size_t sz;

	/* cheap dismissal for values outside the tracked address range */
	if (loc < LK_ADDR(lk_mtab[0].lkm_base) ||
	    loc > lk_mtab[lk_nbuffers - 1].lkm_limit) {
		lk_beans.lkb_dismissals++;
		return;
	}
	if ((ndx = leaky_search(loc)) == -1) {
		lk_beans.lkb_misses++;
		return;
	}

	lkmp = &lk_mtab[ndx];
	/* note: sz is computed before LK_MARK() can set lkm_base's low bit */
	sz = lkmp->lkm_limit - lkmp->lkm_base;

	if (LK_MARKED(lkmp->lkm_base)) {
		lk_beans.lkb_dups++;
	} else {
		LK_MARK(lkmp->lkm_base);
		lk_beans.lkb_follows++;
		if (process)
			leaky_grep(lkmp->lkm_base, sz);
	}
}
4360Sstevel@tonic-gate
/*
 * Mark the tracked buffer containing loc (if any) and recursively scan
 * its contents for further pointers.
 */
void
leaky_grep_ptr(uintptr_t loc)
{
	leaky_do_grep_ptr(loc, 1);
}
4420Sstevel@tonic-gate
/*
 * Mark the tracked buffer containing loc (if any) without scanning its
 * contents; the caller will process it manually (see
 * leaky_lookup_marked()).
 */
void
leaky_mark_ptr(uintptr_t loc)
{
	leaky_do_grep_ptr(loc, 0);
}
4480Sstevel@tonic-gate
4490Sstevel@tonic-gate /*
4500Sstevel@tonic-gate * This may be used to manually process a marked buffer.
4510Sstevel@tonic-gate */
4520Sstevel@tonic-gate int
leaky_lookup_marked(uintptr_t loc,uintptr_t * addr_out,size_t * size_out)4530Sstevel@tonic-gate leaky_lookup_marked(uintptr_t loc, uintptr_t *addr_out, size_t *size_out)
4540Sstevel@tonic-gate {
4550Sstevel@tonic-gate leak_ndx_t ndx;
4560Sstevel@tonic-gate leak_mtab_t *lkmp;
4570Sstevel@tonic-gate
4580Sstevel@tonic-gate if ((ndx = leaky_search(loc)) == -1)
4590Sstevel@tonic-gate return (0);
4600Sstevel@tonic-gate
4610Sstevel@tonic-gate lkmp = &lk_mtab[ndx];
4620Sstevel@tonic-gate *addr_out = LK_ADDR(lkmp->lkm_base);
4630Sstevel@tonic-gate *size_out = lkmp->lkm_limit - LK_ADDR(lkmp->lkm_base);
4640Sstevel@tonic-gate return (1);
4650Sstevel@tonic-gate }
4660Sstevel@tonic-gate
/*
 * Record a single leaked buffer, coalescing leaks with identical
 * (type, cid, stack trace).  Matching leaks are chained on lkb_next with
 * the oldest leak as representative; distinct leaks that collide in the
 * hash are chained on lkb_hash_next.
 *
 *   type        LK_* type index (must be < LK_NUM_TYPES)
 *   addr        address of the bufctl / control structure
 *   bufaddr     address of the leaked buffer itself
 *   timestamp   allocation time; the oldest becomes the representative
 *   stack/depth allocation stack trace; depth must fit in a uint8_t
 *   cid         cache identifier
 *   data        target-specific data carried along with the leak
 */
void
leaky_add_leak(int type, uintptr_t addr, uintptr_t bufaddr, hrtime_t timestamp,
    leak_pc_t *stack, uint_t depth, uintptr_t cid, uintptr_t data)
{
	leak_bufctl_t *nlkb, *lkb;
	uintptr_t total = 0;
	size_t ndx;
	int i;

	if (type < 0 || type >= LK_NUM_TYPES || depth != (uint8_t)depth) {
		mdb_warn("invalid arguments to leaky_add_leak()\n");
		return;
	}

	nlkb = leaky_zalloc(LEAK_BUFCTL_SIZE(depth), UM_SLEEP);
	nlkb->lkb_type = type;
	nlkb->lkb_addr = addr;
	nlkb->lkb_bufaddr = bufaddr;
	nlkb->lkb_cid = cid;
	nlkb->lkb_data = data;
	nlkb->lkb_depth = depth;
	nlkb->lkb_timestamp = timestamp;

	/* hash on the type plus the sum of the stack PCs */
	total = type;
	for (i = 0; i < depth; i++) {
		total += stack[i];
		nlkb->lkb_stack[i] = stack[i];
	}

	ndx = total % LK_BUFCTLHSIZE;

	if ((lkb = lk_bufctl[ndx]) == NULL) {
		/* empty bucket: nlkb starts a new coalesced leak */
		lk_types[type].lt_leaks++;
		lk_bufctl[ndx] = nlkb;
		return;
	}

	for (;;) {
		if (lkb->lkb_type != type || lkb->lkb_depth != depth ||
		    lkb->lkb_cid != cid)
			goto no_match;

		for (i = 0; i < depth; i++)
			if (lkb->lkb_stack[i] != stack[i])
				goto no_match;

		/*
		 * If we're here, we've found a matching stack; link it in.
		 * Note that the volatile cast assures that these stores
		 * will occur in program order (thus assuring that we can
		 * take an interrupt and still be in a sane enough state to
		 * throw away the data structure later, in leaky_cleanup()).
		 */
		((volatile leak_bufctl_t *)nlkb)->lkb_next = lkb->lkb_next;
		((volatile leak_bufctl_t *)lkb)->lkb_next = nlkb;
		lkb->lkb_dups++;

		/*
		 * If we're older, swap places so that we are the
		 * representative leak.
		 */
		if (timestamp < lkb->lkb_timestamp) {
			nlkb->lkb_addr = lkb->lkb_addr;
			nlkb->lkb_bufaddr = lkb->lkb_bufaddr;
			nlkb->lkb_data = lkb->lkb_data;
			nlkb->lkb_timestamp = lkb->lkb_timestamp;

			lkb->lkb_addr = addr;
			lkb->lkb_bufaddr = bufaddr;
			lkb->lkb_data = data;
			lkb->lkb_timestamp = timestamp;
		}
		break;

no_match:
		/* end of hash chain: nlkb is a distinct new leak */
		if (lkb->lkb_hash_next == NULL) {
			lkb->lkb_hash_next = nlkb;
			lk_types[type].lt_leaks++;
			break;
		}
		lkb = lkb->lkb_hash_next;
	}
}
5500Sstevel@tonic-gate
5510Sstevel@tonic-gate int
leaky_ctlcmp(const void * l,const void * r)5520Sstevel@tonic-gate leaky_ctlcmp(const void *l, const void *r)
5530Sstevel@tonic-gate {
5540Sstevel@tonic-gate const leak_bufctl_t *lhs = *((const leak_bufctl_t **)l);
5550Sstevel@tonic-gate const leak_bufctl_t *rhs = *((const leak_bufctl_t **)r);
5560Sstevel@tonic-gate
5570Sstevel@tonic-gate return (leaky_subr_bufctl_cmp(lhs, rhs));
5580Sstevel@tonic-gate }
5590Sstevel@tonic-gate
5600Sstevel@tonic-gate void
leaky_sort(void)5610Sstevel@tonic-gate leaky_sort(void)
5620Sstevel@tonic-gate {
5630Sstevel@tonic-gate int type, i, j;
5640Sstevel@tonic-gate leak_bufctl_t *lkb;
5650Sstevel@tonic-gate leak_type_t *ltp;
5660Sstevel@tonic-gate
5670Sstevel@tonic-gate for (type = 0; type < LK_NUM_TYPES; type++) {
5680Sstevel@tonic-gate ltp = &lk_types[type];
5690Sstevel@tonic-gate
5700Sstevel@tonic-gate if (ltp->lt_leaks == 0)
5710Sstevel@tonic-gate continue;
5720Sstevel@tonic-gate
5730Sstevel@tonic-gate ltp->lt_sorted = leaky_alloc(ltp->lt_leaks *
5740Sstevel@tonic-gate sizeof (leak_bufctl_t *), UM_SLEEP);
5750Sstevel@tonic-gate
5760Sstevel@tonic-gate j = 0;
5770Sstevel@tonic-gate for (i = 0; i < LK_BUFCTLHSIZE; i++) {
5780Sstevel@tonic-gate for (lkb = lk_bufctl[i]; lkb != NULL;
5790Sstevel@tonic-gate lkb = lkb->lkb_hash_next) {
5800Sstevel@tonic-gate if (lkb->lkb_type == type)
5810Sstevel@tonic-gate ltp->lt_sorted[j++] = lkb;
5820Sstevel@tonic-gate }
5830Sstevel@tonic-gate }
5840Sstevel@tonic-gate if (j != ltp->lt_leaks)
5850Sstevel@tonic-gate mdb_warn("expected %d leaks, got %d\n", ltp->lt_leaks,
5860Sstevel@tonic-gate j);
5870Sstevel@tonic-gate
5880Sstevel@tonic-gate qsort(ltp->lt_sorted, ltp->lt_leaks, sizeof (leak_bufctl_t *),
5890Sstevel@tonic-gate leaky_ctlcmp);
5900Sstevel@tonic-gate }
5910Sstevel@tonic-gate }
5920Sstevel@tonic-gate
/*
 * Free the memory held by a prior ::findleaks run.  A completed run
 * (LK_DONE) is cached and only discarded when force is nonzero; an
 * in-progress sweep (LK_SWEEPING) is always cleaned up.  The lk_state
 * machine lets us detect a cleanup that was itself interrupted
 * (LK_CLEANING) and safely abandon the half-freed structures.
 */
void
leaky_cleanup(int force)
{
	int i;
	leak_bufctl_t *lkb, *l, *next;

	/*
	 * State structures are allocated UM_GC, so we just need to nuke
	 * the freelist pointer.
	 */
	lk_free_state = NULL;

	switch (lk_state) {
	case LK_CLEAN:
		return;		/* nothing to do */

	case LK_CLEANING:
		/*
		 * A previous cleanup was interrupted mid-free: the chains
		 * can no longer be walked safely, so just forget them.
		 */
		mdb_warn("interrupted during ::findleaks cleanup; some mdb "
		    "memory will be leaked\n");

		for (i = 0; i < LK_BUFCTLHSIZE; i++)
			lk_bufctl[i] = NULL;

		for (i = 0; i < LK_NUM_TYPES; i++) {
			lk_types[i].lt_leaks = 0;
			lk_types[i].lt_sorted = NULL;
		}

		bzero(&lk_beans, sizeof (lk_beans));
		lk_state = LK_CLEAN;
		return;

	case LK_SWEEPING:
		break;		/* must clean up */

	case LK_DONE:
	default:
		if (!force)
			return;
		break;		/* only clean up if forced */
	}

	/* mark cleaning BEFORE freeing, so an interrupt here is detectable */
	lk_state = LK_CLEANING;

	/* free the sorted arrays built by leaky_sort() */
	for (i = 0; i < LK_NUM_TYPES; i++) {
		if (lk_types[i].lt_sorted != NULL) {
			mdb_free(lk_types[i].lt_sorted,
			    lk_types[i].lt_leaks * sizeof (leak_bufctl_t *));
			lk_types[i].lt_sorted = NULL;
		}
		lk_types[i].lt_leaks = 0;
	}

	/* free every bufctl: dup chains (lkb_next), then hash chains */
	for (i = 0; i < LK_BUFCTLHSIZE; i++) {
		for (lkb = lk_bufctl[i]; lkb != NULL; lkb = next) {
			for (l = lkb->lkb_next; l != NULL; l = next) {
				next = l->lkb_next;
				mdb_free(l, LEAK_BUFCTL_SIZE(l->lkb_depth));
			}
			next = lkb->lkb_hash_next;
			mdb_free(lkb, LEAK_BUFCTL_SIZE(lkb->lkb_depth));
		}
		lk_bufctl[i] = NULL;
	}

	bzero(&lk_beans, sizeof (lk_beans));
	lk_state = LK_CLEAN;
}
6610Sstevel@tonic-gate
6620Sstevel@tonic-gate int
leaky_filter(const leak_pc_t * stack,int depth,uintptr_t filter)6630Sstevel@tonic-gate leaky_filter(const leak_pc_t *stack, int depth, uintptr_t filter)
6640Sstevel@tonic-gate {
6650Sstevel@tonic-gate int i;
6660Sstevel@tonic-gate GElf_Sym sym;
6670Sstevel@tonic-gate char c;
6680Sstevel@tonic-gate
6690Sstevel@tonic-gate if (filter == NULL)
6700Sstevel@tonic-gate return (1);
6710Sstevel@tonic-gate
6720Sstevel@tonic-gate for (i = 0; i < depth; i++) {
6730Sstevel@tonic-gate if (stack[i] == filter)
6740Sstevel@tonic-gate return (1);
6750Sstevel@tonic-gate
6760Sstevel@tonic-gate if (mdb_lookup_by_addr(stack[i], MDB_SYM_FUZZY,
6770Sstevel@tonic-gate &c, sizeof (c), &sym) == -1)
6780Sstevel@tonic-gate continue;
6790Sstevel@tonic-gate
6800Sstevel@tonic-gate if ((uintptr_t)sym.st_value == filter)
6810Sstevel@tonic-gate return (1);
6820Sstevel@tonic-gate }
6830Sstevel@tonic-gate
6840Sstevel@tonic-gate return (0);
6850Sstevel@tonic-gate }
6860Sstevel@tonic-gate
6870Sstevel@tonic-gate void
leaky_dump(uintptr_t filter,uint_t dump_verbose)6880Sstevel@tonic-gate leaky_dump(uintptr_t filter, uint_t dump_verbose)
6890Sstevel@tonic-gate {
6900Sstevel@tonic-gate int i;
6910Sstevel@tonic-gate size_t leaks;
6920Sstevel@tonic-gate leak_bufctl_t **sorted;
6930Sstevel@tonic-gate leak_bufctl_t *lkb;
6940Sstevel@tonic-gate int seen = 0;
6950Sstevel@tonic-gate
6960Sstevel@tonic-gate for (i = 0; i < LK_NUM_TYPES; i++) {
6970Sstevel@tonic-gate leaks = lk_types[i].lt_leaks;
6980Sstevel@tonic-gate sorted = lk_types[i].lt_sorted;
6990Sstevel@tonic-gate
7000Sstevel@tonic-gate leaky_subr_dump_start(i);
7010Sstevel@tonic-gate while (leaks-- > 0) {
7020Sstevel@tonic-gate lkb = *sorted++;
7030Sstevel@tonic-gate
7040Sstevel@tonic-gate if (!leaky_filter(lkb->lkb_stack, lkb->lkb_depth,
7050Sstevel@tonic-gate filter))
7060Sstevel@tonic-gate continue;
7070Sstevel@tonic-gate
7080Sstevel@tonic-gate seen = 1;
7090Sstevel@tonic-gate leaky_subr_dump(lkb, 0);
7100Sstevel@tonic-gate }
7110Sstevel@tonic-gate leaky_subr_dump_end(i);
7120Sstevel@tonic-gate }
7130Sstevel@tonic-gate
7140Sstevel@tonic-gate if (!seen) {
7150Sstevel@tonic-gate if (filter != NULL)
7160Sstevel@tonic-gate mdb_printf(
7170Sstevel@tonic-gate "findleaks: no memory leaks matching %a found\n",
7180Sstevel@tonic-gate filter);
7190Sstevel@tonic-gate else
7200Sstevel@tonic-gate mdb_printf(
7210Sstevel@tonic-gate "findleaks: no memory leaks detected\n");
7220Sstevel@tonic-gate }
7230Sstevel@tonic-gate
7240Sstevel@tonic-gate if (!dump_verbose || !seen)
7250Sstevel@tonic-gate return;
7260Sstevel@tonic-gate
7270Sstevel@tonic-gate mdb_printf("\n");
7280Sstevel@tonic-gate
7290Sstevel@tonic-gate for (i = 0; i < LK_NUM_TYPES; i++) {
7300Sstevel@tonic-gate leaks = lk_types[i].lt_leaks;
7310Sstevel@tonic-gate sorted = lk_types[i].lt_sorted;
7320Sstevel@tonic-gate
7330Sstevel@tonic-gate while (leaks-- > 0) {
7340Sstevel@tonic-gate lkb = *sorted++;
7350Sstevel@tonic-gate
7360Sstevel@tonic-gate if (!leaky_filter(lkb->lkb_stack, lkb->lkb_depth,
7370Sstevel@tonic-gate filter))
7380Sstevel@tonic-gate continue;
7390Sstevel@tonic-gate
7400Sstevel@tonic-gate leaky_subr_dump(lkb, 1);
7410Sstevel@tonic-gate }
7420Sstevel@tonic-gate }
7430Sstevel@tonic-gate }
7440Sstevel@tonic-gate
/*
 * Long-form help text and option summary for ::findleaks, printed by
 * findleaks_help() below.
 */
static const char *const findleaks_desc =
	"Does a conservative garbage collection of the heap in order to find\n"
	"potentially leaked buffers.  Similar leaks are coalesced by stack\n"
	"trace, with the oldest leak picked as representative.  The leak\n"
	"table is cached between invocations.\n"
	"\n"
	"addr, if provided, should be a function or PC location.  Reported\n"
	"leaks will then be limited to those with that function or PC in\n"
	"their stack trace.\n"
	"\n"
	"The 'leak' and 'leakbuf' walkers can be used to retrieve coalesced\n"
	"leaks.\n";

static const char *const findleaks_args =
	"  -d    detail each representative leak (long)\n"
	"  -f    throw away cached state, and do a full run\n"
	"  -v    report verbose information about the findleaks run\n";
7620Sstevel@tonic-gate
/*
 * ::help findleaks handler:  print the long description, then the option
 * list under an OPTIONS header.  The indent is dropped two columns around
 * the header to match mdb's standard help formatting.
 */
void
findleaks_help(void)
{
	mdb_printf("%s\n", findleaks_desc);
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s", findleaks_args);
}
7720Sstevel@tonic-gate
/*
 * Report one scan-statistics counter ("bean") from lk_beans, both as a
 * count and as a percentage of `total', which must be in scope at the
 * expansion site (see findleaks() below).
 */
#define LK_REPORT_BEAN(x) leaky_verbose_perc(#x, lk_beans.lkb_##x, total);
7750Sstevel@tonic-gate /*ARGSUSED*/
7760Sstevel@tonic-gate int
findleaks(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)7770Sstevel@tonic-gate findleaks(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
7780Sstevel@tonic-gate {
7790Sstevel@tonic-gate size_t est = 0;
7800Sstevel@tonic-gate leak_ndx_t i;
7810Sstevel@tonic-gate leak_mtab_t *lmp;
7820Sstevel@tonic-gate ssize_t total;
7830Sstevel@tonic-gate uintptr_t filter = NULL;
7840Sstevel@tonic-gate uint_t dump = 0;
7850Sstevel@tonic-gate uint_t force = 0;
7860Sstevel@tonic-gate uint_t verbose = 0;
7870Sstevel@tonic-gate int ret;
7880Sstevel@tonic-gate
7890Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC)
7900Sstevel@tonic-gate filter = addr;
7910Sstevel@tonic-gate
7920Sstevel@tonic-gate if (mdb_getopts(argc, argv,
7930Sstevel@tonic-gate 'd', MDB_OPT_SETBITS, TRUE, &dump,
7940Sstevel@tonic-gate 'f', MDB_OPT_SETBITS, TRUE, &force,
7950Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &verbose, NULL) != argc)
7960Sstevel@tonic-gate return (DCMD_USAGE);
7970Sstevel@tonic-gate
7980Sstevel@tonic-gate if (verbose || force)
7990Sstevel@tonic-gate lk_verbose = verbose;
8000Sstevel@tonic-gate
8010Sstevel@tonic-gate /*
8020Sstevel@tonic-gate * Clean any previous ::findleaks.
8030Sstevel@tonic-gate */
8040Sstevel@tonic-gate leaky_cleanup(force);
8050Sstevel@tonic-gate
8060Sstevel@tonic-gate if (lk_state == LK_DONE) {
8070Sstevel@tonic-gate if (lk_verbose)
8080Sstevel@tonic-gate mdb_printf("findleaks: using cached results "
809*1528Sjwadams "(use '-f' to force a full run)\n");
8100Sstevel@tonic-gate goto dump;
8110Sstevel@tonic-gate }
8120Sstevel@tonic-gate
8130Sstevel@tonic-gate leaky_verbose_begin();
8140Sstevel@tonic-gate
8150Sstevel@tonic-gate if ((ret = leaky_subr_estimate(&est)) != DCMD_OK)
8160Sstevel@tonic-gate return (ret);
8170Sstevel@tonic-gate
8180Sstevel@tonic-gate leaky_verbose("maximum buffers", est);
8190Sstevel@tonic-gate
8200Sstevel@tonic-gate /*
8210Sstevel@tonic-gate * Now we have an upper bound on the number of buffers. Allocate
8220Sstevel@tonic-gate * our mtab array.
8230Sstevel@tonic-gate */
8240Sstevel@tonic-gate lk_mtab = leaky_zalloc(est * sizeof (leak_mtab_t), UM_SLEEP | UM_GC);
8250Sstevel@tonic-gate lmp = lk_mtab;
8260Sstevel@tonic-gate
8270Sstevel@tonic-gate if ((ret = leaky_subr_fill(&lmp)) != DCMD_OK)
8280Sstevel@tonic-gate return (ret);
8290Sstevel@tonic-gate
8300Sstevel@tonic-gate lk_nbuffers = lmp - lk_mtab;
8310Sstevel@tonic-gate
8320Sstevel@tonic-gate qsort(lk_mtab, lk_nbuffers, sizeof (leak_mtab_t), leaky_mtabcmp);
8330Sstevel@tonic-gate
8340Sstevel@tonic-gate /*
8350Sstevel@tonic-gate * validate the mtab table now that it is sorted
8360Sstevel@tonic-gate */
8370Sstevel@tonic-gate for (i = 0; i < lk_nbuffers; i++) {
8380Sstevel@tonic-gate if (lk_mtab[i].lkm_base >= lk_mtab[i].lkm_limit) {
8390Sstevel@tonic-gate mdb_warn("[%p, %p): invalid mtab\n",
8400Sstevel@tonic-gate lk_mtab[i].lkm_base, lk_mtab[i].lkm_limit);
8410Sstevel@tonic-gate return (DCMD_ERR);
8420Sstevel@tonic-gate }
8430Sstevel@tonic-gate
8440Sstevel@tonic-gate if (i < lk_nbuffers - 1 &&
8450Sstevel@tonic-gate lk_mtab[i].lkm_limit > lk_mtab[i + 1].lkm_base) {
8460Sstevel@tonic-gate mdb_warn("[%p, %p) and [%p, %p): overlapping mtabs\n",
8470Sstevel@tonic-gate lk_mtab[i].lkm_base, lk_mtab[i].lkm_limit,
8480Sstevel@tonic-gate lk_mtab[i + 1].lkm_base, lk_mtab[i + 1].lkm_limit);
8490Sstevel@tonic-gate return (DCMD_ERR);
8500Sstevel@tonic-gate }
8510Sstevel@tonic-gate }
8520Sstevel@tonic-gate
8530Sstevel@tonic-gate leaky_verbose("actual buffers", lk_nbuffers);
8540Sstevel@tonic-gate
8550Sstevel@tonic-gate lk_scan_buffer = leaky_zalloc(LK_SCAN_BUFFER_SIZE, UM_SLEEP | UM_GC);
8560Sstevel@tonic-gate
8570Sstevel@tonic-gate if ((ret = leaky_subr_run()) != DCMD_OK)
8580Sstevel@tonic-gate return (ret);
8590Sstevel@tonic-gate
8600Sstevel@tonic-gate lk_state = LK_SWEEPING;
8610Sstevel@tonic-gate
8620Sstevel@tonic-gate for (i = 0; i < lk_nbuffers; i++) {
8630Sstevel@tonic-gate if (LK_MARKED(lk_mtab[i].lkm_base))
8640Sstevel@tonic-gate continue;
8650Sstevel@tonic-gate leaky_subr_add_leak(&lk_mtab[i]);
8660Sstevel@tonic-gate }
8670Sstevel@tonic-gate
8680Sstevel@tonic-gate total = lk_beans.lkb_dismissals + lk_beans.lkb_misses +
8690Sstevel@tonic-gate lk_beans.lkb_dups + lk_beans.lkb_follows;
8700Sstevel@tonic-gate
8710Sstevel@tonic-gate leaky_verbose(NULL, 0);
8720Sstevel@tonic-gate leaky_verbose("potential pointers", total);
8730Sstevel@tonic-gate LK_REPORT_BEAN(dismissals);
8740Sstevel@tonic-gate LK_REPORT_BEAN(misses);
8750Sstevel@tonic-gate LK_REPORT_BEAN(dups);
8760Sstevel@tonic-gate LK_REPORT_BEAN(follows);
8770Sstevel@tonic-gate
8780Sstevel@tonic-gate leaky_verbose(NULL, 0);
8790Sstevel@tonic-gate leaky_verbose_end();
8800Sstevel@tonic-gate
8810Sstevel@tonic-gate leaky_sort();
8820Sstevel@tonic-gate lk_state = LK_DONE;
8830Sstevel@tonic-gate dump:
8840Sstevel@tonic-gate leaky_dump(filter, dump);
8850Sstevel@tonic-gate
8860Sstevel@tonic-gate return (DCMD_OK);
8870Sstevel@tonic-gate }
8880Sstevel@tonic-gate
/*
 * Walker init for the 'leak' and 'leakbuf' walkers.  With no address,
 * the walk visits every coalesced leak in the hash table; with an
 * address, it is restricted to the dup list of the matching bufctl.
 */
int
leaky_walk_init(mdb_walk_state_t *wsp)
{
	leak_walk_t *lw;
	leak_bufctl_t *lkb, *cur;

	uintptr_t addr;
	int i;

	/* the leak table must be fully built by ::findleaks first */
	if (lk_state != LK_DONE) {
		mdb_warn("::findleaks must be run %sbefore leaks can be"
		    " walked\n", lk_state != LK_CLEAN ? "to completion " : "");
		return (WALK_ERR);
	}

	/* global walk:  no starting bufctl; step scans the whole table */
	if (wsp->walk_addr == NULL) {
		lkb = NULL;
		goto found;
	}

	addr = wsp->walk_addr;

	/*
	 * Search the representative leaks first, since that's what we
	 * report in the table.  If that fails, search everything.
	 *
	 * Note that we goto found with lkb as the head of desired dup list.
	 */
	for (i = 0; i < LK_BUFCTLHSIZE; i++) {
		for (lkb = lk_bufctl[i]; lkb != NULL; lkb = lkb->lkb_hash_next)
			if (lkb->lkb_addr == addr)
				goto found;
	}

	/* fall back:  search each representative's dup list as well */
	for (i = 0; i < LK_BUFCTLHSIZE; i++) {
		for (lkb = lk_bufctl[i]; lkb != NULL; lkb = lkb->lkb_hash_next)
			for (cur = lkb; cur != NULL; cur = cur->lkb_next)
				if (cur->lkb_addr == addr)
					goto found;
	}

	mdb_warn("%p is not a leaked ctl address\n", addr);
	return (WALK_ERR);

found:
	/* lkb is the dup list to walk, or NULL for a full walk */
	wsp->walk_data = lw = mdb_zalloc(sizeof (*lw), UM_SLEEP);
	lw->lkw_ndx = 0;
	lw->lkw_current = lkb;
	lw->lkw_hash_next = NULL;

	return (WALK_NEXT);
}
9410Sstevel@tonic-gate
9420Sstevel@tonic-gate leak_bufctl_t *
leaky_walk_step_common(mdb_walk_state_t * wsp)9430Sstevel@tonic-gate leaky_walk_step_common(mdb_walk_state_t *wsp)
9440Sstevel@tonic-gate {
9450Sstevel@tonic-gate leak_walk_t *lw = wsp->walk_data;
9460Sstevel@tonic-gate leak_bufctl_t *lk;
9470Sstevel@tonic-gate
9480Sstevel@tonic-gate if ((lk = lw->lkw_current) == NULL) {
9490Sstevel@tonic-gate if ((lk = lw->lkw_hash_next) == NULL) {
9500Sstevel@tonic-gate if (wsp->walk_addr)
9510Sstevel@tonic-gate return (NULL);
9520Sstevel@tonic-gate
9530Sstevel@tonic-gate while (lk == NULL && lw->lkw_ndx < LK_BUFCTLHSIZE)
9540Sstevel@tonic-gate lk = lk_bufctl[lw->lkw_ndx++];
9550Sstevel@tonic-gate
9560Sstevel@tonic-gate if (lw->lkw_ndx == LK_BUFCTLHSIZE)
9570Sstevel@tonic-gate return (NULL);
9580Sstevel@tonic-gate }
9590Sstevel@tonic-gate
9600Sstevel@tonic-gate lw->lkw_hash_next = lk->lkb_hash_next;
9610Sstevel@tonic-gate }
9620Sstevel@tonic-gate
9630Sstevel@tonic-gate lw->lkw_current = lk->lkb_next;
9640Sstevel@tonic-gate return (lk);
9650Sstevel@tonic-gate }
9660Sstevel@tonic-gate
9670Sstevel@tonic-gate int
leaky_walk_step(mdb_walk_state_t * wsp)9680Sstevel@tonic-gate leaky_walk_step(mdb_walk_state_t *wsp)
9690Sstevel@tonic-gate {
9700Sstevel@tonic-gate leak_bufctl_t *lk;
9710Sstevel@tonic-gate
9720Sstevel@tonic-gate if ((lk = leaky_walk_step_common(wsp)) == NULL)
9730Sstevel@tonic-gate return (WALK_DONE);
9740Sstevel@tonic-gate
9750Sstevel@tonic-gate return (leaky_subr_invoke_callback(lk, wsp->walk_callback,
9760Sstevel@tonic-gate wsp->walk_cbdata));
9770Sstevel@tonic-gate }
9780Sstevel@tonic-gate
9790Sstevel@tonic-gate void
leaky_walk_fini(mdb_walk_state_t * wsp)9800Sstevel@tonic-gate leaky_walk_fini(mdb_walk_state_t *wsp)
9810Sstevel@tonic-gate {
9820Sstevel@tonic-gate leak_walk_t *lw = wsp->walk_data;
9830Sstevel@tonic-gate
9840Sstevel@tonic-gate mdb_free(lw, sizeof (leak_walk_t));
9850Sstevel@tonic-gate }
9860Sstevel@tonic-gate
9870Sstevel@tonic-gate int
leaky_buf_walk_step(mdb_walk_state_t * wsp)9880Sstevel@tonic-gate leaky_buf_walk_step(mdb_walk_state_t *wsp)
9890Sstevel@tonic-gate {
9900Sstevel@tonic-gate leak_bufctl_t *lk;
9910Sstevel@tonic-gate
9920Sstevel@tonic-gate if ((lk = leaky_walk_step_common(wsp)) == NULL)
9930Sstevel@tonic-gate return (WALK_DONE);
9940Sstevel@tonic-gate
9950Sstevel@tonic-gate return (wsp->walk_callback(lk->lkb_bufaddr, NULL, wsp->walk_cbdata));
9960Sstevel@tonic-gate }
997