/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <mdb/mdb_param.h>
#include <mdb/mdb_modapi.h>

#include <sys/fs/ufs_inode.h>
#include <sys/kmem_impl.h>
#include <sys/vmem_impl.h>
#include <sys/modctl.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <vm/seg_vn.h>
#include <vm/as.h>
#include <vm/seg_map.h>
#include <mdb/mdb_ctf.h>

#include "kmem.h"
#include "leaky_impl.h"

/*
 * This file defines the genunix target for leaky.c.  There are three types
 * of buffers in the kernel's heap:  TYPE_VMEM, for kmem_oversize allocations,
 * TYPE_KMEM, for kmem_cache_alloc() allocations with bufctl_audit_ts, and
 * TYPE_CACHE, for kmem_cache_alloc() allocations without bufctl_audit_ts.
 *
 * See "leaky_impl.h" for the target interface definition.
 */

#define	TYPE_VMEM	0		/* lkb_data is the vmem_seg's size */
#define	TYPE_CACHE	1		/* lkb_cid is the buffer's cache */
#define	TYPE_KMEM	2		/* lkb_cid is the bufctl's cache */

#define	LKM_CTL_BUFCTL	0	/* normal allocation, PTR is bufctl */
#define	LKM_CTL_VMSEG	1	/* oversize allocation, PTR is vmem_seg_t */
#define	LKM_CTL_CACHE	2	/* normal alloc, non-debug, PTR is cache */
#define	LKM_CTL_MASK	3L

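/*
 * The lkm_bufctl field of a leak_mtab_t packs a pointer and one of the
 * LKM_CTL_* type tags above into a single uintptr_t:  the tag lives in the
 * low two bits, which works because bufctls, vmem_segs, and kmem_caches are
 * all at least 4-byte aligned.  For example, LKM_CTL(bcp, LKM_CTL_BUFCTL)
 * tags a bufctl pointer; LKM_CTLPTR() and LKM_CTLTYPE() recover the two
 * halves.
 */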
#define	LKM_CTL(ptr, type)	(LKM_CTLPTR(ptr) | (type))
#define	LKM_CTLPTR(ctl)		((uintptr_t)(ctl) & ~(LKM_CTL_MASK))
#define	LKM_CTLTYPE(ctl)	((uintptr_t)(ctl) &  (LKM_CTL_MASK))

static int kmem_lite_count = 0;	/* cache of the kernel's version */

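/*
 * The next three walker callbacks each fill in the next slot of the
 * leak_mtab_t array that leaky.c preallocated:  leaky_mtab() handles
 * audited (KMF_AUDIT) bufctls, leaky_mtab_addr() handles raw buffer
 * addresses, and leaky_seg() handles vmem segments.  The buffer-based
 * entries get their lkm_limit filled in later by leaky_cache(), which
 * knows the cache's bufsize.
 */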
/*ARGSUSED*/
static int
leaky_mtab(uintptr_t addr, const kmem_bufctl_audit_t *bcp, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = (uintptr_t)bcp->bc_addr;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_BUFCTL);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_mtab_addr(uintptr_t addr, void *ignored, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = addr;

	return (WALK_NEXT);
}

static int
leaky_seg(uintptr_t addr, const vmem_seg_t *seg, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = seg->vs_start;
	lm->lkm_limit = seg->vs_end;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_VMSEG);

	return (WALK_NEXT);
}

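/*
 * We only track vmem segments from the kmem_oversize arena (which satisfies
 * kmem_alloc()s too large for any cache) and the static_alloc arena (used
 * for early boot-time allocations); other buffers are reached via the
 * kmem_cache walks below.
 */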
static int
leaky_vmem_interested(const vmem_t *vmem)
{
	if (strcmp(vmem->vm_name, "kmem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "static_alloc") != 0)
		return (0);
	return (1);
}

static int
leaky_vmem(uintptr_t addr, const vmem_t *vmem, leak_mtab_t **lmp)
{
	if (!leaky_vmem_interested(vmem))
		return (WALK_NEXT);

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_seg, lmp, addr) == -1)
		mdb_warn("can't walk vmem_alloc for %s (%p)",
		    vmem->vm_name, addr);

	return (WALK_NEXT);
}

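/*
 * The number of outstanding segments in an arena is just the difference
 * between its lifetime allocation and free counts, which we pull from the
 * arena's kstats.
 */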
/*ARGSUSED*/
static int
leaky_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
{
	if (!leaky_vmem_interested(vmem))
		return (WALK_NEXT);

	*est += (int)(vmem->vm_kstat.vk_alloc.value.ui64 -
	    vmem->vm_kstat.vk_free.value.ui64);

	return (WALK_NEXT);
}

static int
leaky_interested(const kmem_cache_t *c)
{
	vmem_t vmem;

	/*
	 * ignore HAT-related caches that happen to derive from kmem_default
	 */
	if (strcmp(c->cache_name, "sfmmu1_cache") == 0 ||
	    strcmp(c->cache_name, "sf_hment_cache") == 0 ||
	    strcmp(c->cache_name, "pa_hment_cache") == 0)
		return (0);

	if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
		mdb_warn("cannot read arena %p for cache '%s'",
		    (uintptr_t)c->cache_arena, c->cache_name);
		return (0);
	}

	/*
	 * If this cache isn't allocating from the kmem_default,
	 * kmem_firewall, or static vmem arenas, we're not interested.
	 */
	if (strcmp(vmem.vm_name, "kmem_default") != 0 &&
	    strcmp(vmem.vm_name, "kmem_firewall") != 0 &&
	    strcmp(vmem.vm_name, "static") != 0)
		return (0);

	return (1);
}

static int
leaky_estimate(uintptr_t addr, const kmem_cache_t *c, size_t *est)
{
	if (!leaky_interested(c))
		return (WALK_NEXT);

	*est += kmem_estimate_allocated(addr, c);

	return (WALK_NEXT);
}

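/*
 * Walk all allocated buffers in an interesting cache, creating an mtab entry
 * for each.  With KMF_AUDIT we walk bufctls, which carry stack traces;
 * otherwise we walk raw buffer addresses and tag each entry with the cache,
 * so the leak can at least be attributed to it.
 */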
/*ARGSUSED*/
static int
leaky_cache(uintptr_t addr, const kmem_cache_t *c, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = *lmp;
	mdb_walk_cb_t cb;
	const char *walk;
	int audit = (c->cache_flags & KMF_AUDIT);

	if (!leaky_interested(c))
		return (WALK_NEXT);

	if (audit) {
		walk = "bufctl";
		cb = (mdb_walk_cb_t)leaky_mtab;
	} else {
		walk = "kmem";
		cb = (mdb_walk_cb_t)leaky_mtab_addr;
	}
	if (mdb_pwalk(walk, cb, lmp, addr) == -1) {
		mdb_warn("can't walk kmem for cache %p (%s)", addr,
		    c->cache_name);
		return (WALK_DONE);
	}

	for (; lm < *lmp; lm++) {
		lm->lkm_limit = lm->lkm_base + c->cache_bufsize;
		if (!audit)
			lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_CACHE);
	}

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_scan_buffer(uintptr_t addr, const void *ignored, const kmem_cache_t *c)
{
	leaky_grep(addr, c->cache_bufsize);

	/*
	 * free, constructed KMF_LITE buffers keep their first uint64_t in
	 * their buftag's redzone.
	 */
	if (c->cache_flags & KMF_LITE) {
		/* LINTED alignment */
		kmem_buftag_t *btp = KMEM_BUFTAG(c, addr);
		leaky_grep((uintptr_t)&btp->bt_redzone,
		    sizeof (btp->bt_redzone));
	}

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_scan_cache(uintptr_t addr, const kmem_cache_t *c, void *ignored)
{
	if (!leaky_interested(c))
		return (WALK_NEXT);

	/*
	 * Scan all of the free, constructed buffers, since they may have
	 * pointers to allocated objects.
	 */
	if (mdb_pwalk("freemem_constructed",
	    (mdb_walk_cb_t)leaky_scan_buffer, (void *)c, addr) == -1) {
		mdb_warn("can't walk freemem_constructed for cache %p (%s)",
		    addr, c->cache_name);
		return (WALK_DONE);
	}

	return (WALK_NEXT);
}

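/*
 * Each loaded module's module structure, data segment, and bss can contain
 * pointers into the kmem heap, so all three are grepped as possible roots.
 */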
/*ARGSUSED*/
static int
leaky_modctl(uintptr_t addr, const struct modctl *m, int *ignored)
{
	struct module mod;
	char name[MODMAXNAMELEN];

	if (m->mod_mp == NULL)
		return (WALK_NEXT);

	if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
		mdb_warn("couldn't read modctl %p's module", addr);
		return (WALK_NEXT);
	}

	if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1)
		(void) mdb_snprintf(name, sizeof (name), "0x%p", addr);

	leaky_grep((uintptr_t)m->mod_mp, sizeof (struct module));
	leaky_grep((uintptr_t)mod.data, mod.data_size);
	leaky_grep((uintptr_t)mod.bss, mod.bss_size);

	return (WALK_NEXT);
}

static int
leaky_thread(uintptr_t addr, const kthread_t *t, unsigned long *pagesize)
{
	uintptr_t size, base = (uintptr_t)t->t_stkbase;
	uintptr_t stk = (uintptr_t)t->t_stk;

	/*
	 * If this thread isn't in memory, we can't look at its stack.  This
	 * may result in false positives, so we print a warning.
	 */
	if (!(t->t_schedflag & TS_LOAD)) {
		mdb_printf("findleaks: thread %p's stack swapped out; "
		    "false positives possible\n", addr);
		return (WALK_NEXT);
	}

	if (t->t_state != TS_FREE)
		leaky_grep(base, stk - base);

	/*
	 * There is always gunk hanging out between t_stk and the page
	 * boundary.  If this thread structure wasn't kmem allocated,
	 * this will include the thread structure itself.  If the thread
	 * _is_ kmem allocated, we'll be able to get to it via allthreads.
	 */
	size = *pagesize - (stk & (*pagesize - 1));

	leaky_grep(stk, size);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_kstat(uintptr_t addr, vmem_seg_t *seg, void *ignored)
{
	leaky_grep(seg->vs_start, seg->vs_end - seg->vs_start);

	return (WALK_NEXT);
}

static void
leaky_kludge(void)
{
	GElf_Sym sym;
	mdb_ctf_id_t id, rid;

	int max_mem_nodes;
	uintptr_t *counters;
	size_t ncounters;
	ssize_t hwpm_size;
	int idx;

	/*
	 * Because of DR, the page counters (which live in the kmem64 segment)
	 * can point into kmem_alloc()ed memory.  The "page_counters" array
	 * is multi-dimensional, and each entry points to an array of
	 * "hw_page_map_t"s which is "max_mem_nodes" in length.
	 *
	 * To keep this from having too much grotty knowledge of internals,
	 * we use CTF data to get the size of the structure.  For simplicity,
	 * we treat the page_counters array as a flat array of pointers, and
	 * use its size to determine how much to scan.  Unused entries will
	 * be NULL.
	 */
	if (mdb_lookup_by_name("page_counters", &sym) == -1) {
		mdb_warn("unable to lookup page_counters");
		return;
	}

	if (mdb_readvar(&max_mem_nodes, "max_mem_nodes") == -1) {
		mdb_warn("unable to read max_mem_nodes");
		return;
	}

	if (mdb_ctf_lookup_by_name("unix`hw_page_map_t", &id) == -1 ||
	    mdb_ctf_type_resolve(id, &rid) == -1 ||
	    (hwpm_size = mdb_ctf_type_size(rid)) < 0) {
		mdb_warn("unable to lookup unix`hw_page_map_t");
		return;
	}

	counters = mdb_alloc(sym.st_size, UM_SLEEP | UM_GC);

	if (mdb_vread(counters, sym.st_size, (uintptr_t)sym.st_value) == -1) {
		mdb_warn("unable to read page_counters");
		return;
	}

	ncounters = sym.st_size / sizeof (counters);

	for (idx = 0; idx < ncounters; idx++) {
		uintptr_t addr = counters[idx];
		if (addr != 0)
			leaky_grep(addr, hwpm_size * max_mem_nodes);
	}
}

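/*
 * The target interface proper begins here.  leaky_subr_estimate() computes
 * an estimate of the number of allocated buffers so that leaky.c can size
 * its leak_mtab_t array before leaky_subr_fill() populates it.
 */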
int
leaky_subr_estimate(size_t *estp)
{
	uintptr_t panicstr;
	int state;

	if ((state = mdb_get_state()) == MDB_STATE_RUNNING) {
		mdb_warn("findleaks: can only be run on a system "
		    "dump or under kmdb; see dumpadm(1M)\n");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&panicstr, "panicstr") == -1) {
		mdb_warn("can't read variable 'panicstr'");
		return (DCMD_ERR);
	}

	if (state != MDB_STATE_STOPPED && panicstr == NULL) {
		mdb_warn("findleaks: cannot be run on a live dump.\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_estimate, estp) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");
		return (DCMD_ERR);
	}

	if (*estp == 0) {
		mdb_warn("findleaks: no buffers found\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_estimate_vmem, estp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

int
leaky_subr_fill(leak_mtab_t **lmpp)
{
	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_vmem, lmpp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_cache, lmpp) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&kmem_lite_count, "kmem_lite_count") == -1) {
		mdb_warn("couldn't read 'kmem_lite_count'");
		kmem_lite_count = 0;
	} else if (kmem_lite_count > 16) {
		mdb_warn("kmem_lite_count nonsensical, ignored\n");
		kmem_lite_count = 0;
	}

	return (DCMD_OK);
}

int
leaky_subr_run(void)
{
	unsigned long ps = PAGESIZE;
	uintptr_t kstat_arena;
	uintptr_t dmods;

	leaky_kludge();

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_scan_cache,
	    NULL) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");
		return (DCMD_ERR);
	}

	if (mdb_walk("modctl", (mdb_walk_cb_t)leaky_modctl, NULL) == -1) {
		mdb_warn("couldn't walk 'modctl'");
		return (DCMD_ERR);
	}

	/*
	 * If kmdb is loaded, we need to walk its module list, since kmdb
	 * modctl structures can reference kmem allocations.
	 */
	if ((mdb_readvar(&dmods, "kdi_dmods") != -1) && (dmods != NULL))
		(void) mdb_pwalk("modctl", (mdb_walk_cb_t)leaky_modctl,
		    NULL, dmods);

	if (mdb_walk("thread", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
		mdb_warn("couldn't walk 'thread'");
		return (DCMD_ERR);
	}

	if (mdb_walk("deathrow", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
		mdb_warn("couldn't walk 'deathrow'");
		return (DCMD_ERR);
	}

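	/*
	 * kstat data can hold pointers to kmem allocations, so scan
	 * every allocated segment of the kstat arena as well.
	 */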
	if (mdb_readvar(&kstat_arena, "kstat_arena") == -1) {
		mdb_warn("couldn't read 'kstat_arena'");
		return (DCMD_ERR);
	}

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_kstat,
	    NULL, kstat_arena) == -1) {
		mdb_warn("couldn't walk kstat vmem arena");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

void
leaky_subr_add_leak(leak_mtab_t *lmp)
{
	uintptr_t addr = LKM_CTLPTR(lmp->lkm_bufctl);
	size_t depth;

	switch (LKM_CTLTYPE(lmp->lkm_bufctl)) {
	case LKM_CTL_VMSEG: {
		vmem_seg_t vs;

		if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
			mdb_warn("couldn't read leaked vmem_seg at addr %p",
			    addr);
			return;
		}
		depth = MIN(vs.vs_depth, VMEM_STACK_DEPTH);

		leaky_add_leak(TYPE_VMEM, addr, vs.vs_start, vs.vs_timestamp,
		    vs.vs_stack, depth, 0, (vs.vs_end - vs.vs_start));
		break;
	}
	case LKM_CTL_BUFCTL: {
		kmem_bufctl_audit_t bc;

		if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
			mdb_warn("couldn't read leaked bufctl at addr %p",
			    addr);
			return;
		}

		depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH);

		/*
		 * The top of the stack will be kmem_cache_alloc+offset.
		 * Since the offset in kmem_cache_alloc() isn't interesting
		 * we skip that frame for the purposes of uniquifying stacks.
		 *
		 * We also use the cache pointer as the leak's cid, to
		 * prevent the coalescing of leaks from different caches.
		 */
		if (depth > 0)
			depth--;
		leaky_add_leak(TYPE_KMEM, addr, (uintptr_t)bc.bc_addr,
		    bc.bc_timestamp, bc.bc_stack + 1, depth,
		    (uintptr_t)bc.bc_cache, 0);
		break;
	}
	case LKM_CTL_CACHE: {
		kmem_cache_t cache;
		kmem_buftag_lite_t bt;
		pc_t caller;
		int depth = 0;

		/*
		 * For KMF_LITE caches, we can get the allocation PC
		 * out of the buftag structure.
		 */
		if (mdb_vread(&cache, sizeof (cache), addr) != -1 &&
		    (cache.cache_flags & KMF_LITE) &&
		    kmem_lite_count > 0 &&
		    mdb_vread(&bt, sizeof (bt),
		    /* LINTED alignment */
		    (uintptr_t)KMEM_BUFTAG(&cache, lmp->lkm_base)) != -1) {
			caller = bt.bt_history[0];
			depth = 1;
		}
		leaky_add_leak(TYPE_CACHE, lmp->lkm_base, lmp->lkm_base, 0,
		    &caller, depth, addr, addr);
		break;
	}
	default:
		mdb_warn("internal error: invalid leak_bufctl_t\n");
		break;
	}
}

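/*
 * Given a leak's stack trace, find the first frame that isn't in kmem or
 * vmem itself; this "caller" is what we use to label the leak and (via
 * leaky_subr_bufctl_cmp() below) to coalesce duplicates.
 */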
static void
leaky_subr_caller(const pc_t *stack, uint_t depth, char *buf, uintptr_t *pcp)
{
	int i;
	GElf_Sym sym;
	uintptr_t pc = 0;

	buf[0] = 0;

	for (i = 0; i < depth; i++) {
		pc = stack[i];

		if (mdb_lookup_by_addr(pc,
		    MDB_SYM_FUZZY, buf, MDB_SYM_NAMLEN, &sym) == -1)
			continue;
		if (strncmp(buf, "kmem_", 5) == 0)
			continue;
		if (strncmp(buf, "vmem_", 5) == 0)
			continue;
		*pcp = pc;

		return;
	}

	/*
	 * We're only here if every frame in the call chain is in kmem or
	 * vmem code; this shouldn't happen, but we'll just use the last
	 * caller.
	 */
	*pcp = pc;
}

int
leaky_subr_bufctl_cmp(const leak_bufctl_t *lhs, const leak_bufctl_t *rhs)
{
	char lbuf[MDB_SYM_NAMLEN], rbuf[MDB_SYM_NAMLEN];
	uintptr_t lcaller, rcaller;
	int rval;

	leaky_subr_caller(lhs->lkb_stack, lhs->lkb_depth, lbuf, &lcaller);
	leaky_subr_caller(rhs->lkb_stack, rhs->lkb_depth, rbuf, &rcaller);

	if (rval = strcmp(lbuf, rbuf))
		return (rval);

	if (lcaller < rcaller)
		return (-1);

	if (lcaller > rcaller)
		return (1);

	if (lhs->lkb_data < rhs->lkb_data)
		return (-1);

	if (lhs->lkb_data > rhs->lkb_data)
		return (1);

	return (0);
}

/*
 * Global state variables used by the leaky_subr_dump_* routines.  Note that
 * they are carefully cleared before use.
 */
static int lk_vmem_seen;
static int lk_cache_seen;
static int lk_kmem_seen;
static size_t lk_ttl;
static size_t lk_bytes;

void
leaky_subr_dump_start(int type)
{
	switch (type) {
	case TYPE_VMEM:
		lk_vmem_seen = 0;
		break;
	case TYPE_CACHE:
		lk_cache_seen = 0;
		break;
	case TYPE_KMEM:
		lk_kmem_seen = 0;
		break;
	default:
		break;
	}

	lk_ttl = 0;
	lk_bytes = 0;
}

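/*
 * Dump a single coalesced leak.  In the default (one-line-per-leak) mode we
 * print a table row appropriate to the leak's type; in verbose mode we print
 * a summary and then, when a vmem_seg or bufctl is available, invoke
 * ::vmem_seg -v or ::bufctl -v for the details.
 */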
void
leaky_subr_dump(const leak_bufctl_t *lkb, int verbose)
{
	const leak_bufctl_t *cur;
	kmem_cache_t cache;
	size_t min, max, size;
	char sz[30];
	char c[MDB_SYM_NAMLEN];
	uintptr_t caller;

	if (verbose) {
		lk_ttl = 0;
		lk_bytes = 0;
	}

	switch (lkb->lkb_type) {
	case TYPE_VMEM:
		if (!verbose && !lk_vmem_seen) {
			lk_vmem_seen = 1;
			mdb_printf("%-16s %7s %?s %s\n",
			    "BYTES", "LEAKED", "VMEM_SEG", "CALLER");
		}

		min = max = lkb->lkb_data;

		for (cur = lkb; cur != NULL; cur = cur->lkb_next) {
			size = cur->lkb_data;

			if (size < min)
				min = size;
			if (size > max)
				max = size;

			lk_ttl++;
			lk_bytes += size;
		}

		if (min == max)
			(void) mdb_snprintf(sz, sizeof (sz), "%ld", min);
		else
			(void) mdb_snprintf(sz, sizeof (sz), "%ld-%ld",
			    min, max);

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			if (caller != 0) {
				(void) mdb_snprintf(c, sizeof (c),
				    "%a", caller);
			} else {
				(void) mdb_snprintf(c, sizeof (c),
				    "%s", "?");
			}
			mdb_printf("%-16s %7d %?p %s\n", sz, lkb->lkb_dups + 1,
			    lkb->lkb_addr, c);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("kmem_oversize leak: 1 vmem_seg, "
				    "%ld bytes\n", lk_bytes);
			else
				mdb_printf("kmem_oversize leak: %d vmem_segs, "
				    "%s bytes each, %ld bytes total\n",
				    lk_ttl, sz, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("vmem_seg", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::vmem_seg -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	case TYPE_CACHE:
		if (!verbose && !lk_cache_seen) {
			lk_cache_seen = 1;
			if (lk_vmem_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFFER", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "buffer %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		caller = (lkb->lkb_depth == 0) ? 0 : lkb->lkb_stack[0];
		if (caller != 0) {
			(void) mdb_snprintf(c, sizeof (c), "%a", caller);
		} else {
			(void) mdb_snprintf(c, sizeof (c),
			    "%s", (verbose) ? "" : "?");
		}

		if (!verbose) {
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes,\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total,\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			mdb_printf("    sample addr %p%s%s\n",
			    lkb->lkb_addr, (caller == 0) ? "" : ", caller ", c);
		}
		return;

	case TYPE_KMEM:
		if (!verbose && !lk_kmem_seen) {
			lk_kmem_seen = 1;
			if (lk_vmem_seen || lk_cache_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFCTL", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_cid) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "bufctl %p", lkb->lkb_cid, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			if (caller != 0) {
				(void) mdb_snprintf(c, sizeof (c),
				    "%a", caller);
			} else {
				(void) mdb_snprintf(c, sizeof (c),
				    "%s", "?");
			}
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("bufctl", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::bufctl -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	default:
		return;
	}
}

void
leaky_subr_dump_end(int type)
{
	int i;
	int width;
	const char *leaks;

	switch (type) {
	case TYPE_VMEM:
		if (!lk_vmem_seen)
			return;

		width = 16;
		leaks = "kmem_oversize leak";
		break;

	case TYPE_CACHE:
		if (!lk_cache_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leaks = "buffer";
		break;

	case TYPE_KMEM:
		if (!lk_kmem_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leaks = "buffer";
		break;

	default:
		return;
	}

	for (i = 0; i < 72; i++)
		mdb_printf("-");
	mdb_printf("\n%*s %7ld %s%s, %ld byte%s\n",
	    width, "Total", lk_ttl, leaks, (lk_ttl == 1) ? "" : "s",
	    lk_bytes, (lk_bytes == 1) ? "" : "s");
}

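/*
 * Hand a leak's underlying structure to a walker callback:  the vmem_seg
 * for oversize leaks, the bufctl for audited kmem leaks, and just the
 * buffer address (with a NULL pointer) for non-audit cache leaks.
 */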
int
leaky_subr_invoke_callback(const leak_bufctl_t *lkb, mdb_walk_cb_t cb,
    void *cbdata)
{
	kmem_bufctl_audit_t bc;
	vmem_seg_t vs;

	switch (lkb->lkb_type) {
	case TYPE_VMEM:
		if (mdb_vread(&vs, sizeof (vs), lkb->lkb_addr) == -1) {
			mdb_warn("unable to read vmem_seg at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, &vs, cbdata));

	case TYPE_CACHE:
		return (cb(lkb->lkb_addr, NULL, cbdata));

	case TYPE_KMEM:
		if (mdb_vread(&bc, sizeof (bc), lkb->lkb_addr) == -1) {
			mdb_warn("unable to read bufctl at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, &bc, cbdata));
	default:
		return (WALK_NEXT);
	}
}
9240Sstevel@tonic-gate }
925