10Sstevel@tonic-gate /*
20Sstevel@tonic-gate * CDDL HEADER START
30Sstevel@tonic-gate *
40Sstevel@tonic-gate * The contents of this file are subject to the terms of the
5*1528Sjwadams * Common Development and Distribution License (the "License").
6*1528Sjwadams * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate *
80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate * See the License for the specific language governing permissions
110Sstevel@tonic-gate * and limitations under the License.
120Sstevel@tonic-gate *
130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate *
190Sstevel@tonic-gate * CDDL HEADER END
200Sstevel@tonic-gate */
210Sstevel@tonic-gate /*
22*1528Sjwadams * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
230Sstevel@tonic-gate * Use is subject to license terms.
240Sstevel@tonic-gate */
250Sstevel@tonic-gate
260Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI"
270Sstevel@tonic-gate
280Sstevel@tonic-gate #include "umem.h"
290Sstevel@tonic-gate
300Sstevel@tonic-gate #include <sys/vmem_impl_user.h>
310Sstevel@tonic-gate #include <umem_impl.h>
320Sstevel@tonic-gate
330Sstevel@tonic-gate #include <alloca.h>
340Sstevel@tonic-gate #include <libproc.h>
350Sstevel@tonic-gate #include <stdio.h>
360Sstevel@tonic-gate #include <string.h>
370Sstevel@tonic-gate #include <sys/stack.h>
380Sstevel@tonic-gate
390Sstevel@tonic-gate #include "leaky_impl.h"
400Sstevel@tonic-gate #include "misc.h"
410Sstevel@tonic-gate #include "proc_kludges.h"
420Sstevel@tonic-gate
430Sstevel@tonic-gate #include "umem_pagesize.h"
440Sstevel@tonic-gate
450Sstevel@tonic-gate /*
460Sstevel@tonic-gate * This file defines the libumem target for ../genunix/leaky.c.
470Sstevel@tonic-gate *
480Sstevel@tonic-gate * See ../genunix/leaky_impl.h for the target interface definition.
490Sstevel@tonic-gate */
500Sstevel@tonic-gate
510Sstevel@tonic-gate /*
520Sstevel@tonic-gate * leaky_subr_dump_start()/_end() depend on the ordering of TYPE_VMEM,
530Sstevel@tonic-gate * TYPE_MMAP and TYPE_SBRK.
540Sstevel@tonic-gate */
#define	TYPE_MMAP	0	/* lkb_data is the size */
#define	TYPE_SBRK	1	/* lkb_data is the size */
#define	TYPE_VMEM	2	/* lkb_data is the vmem_seg's size */
#define	TYPE_CACHE	3	/* lkb_cid is the bufctl's cache */
#define	TYPE_UMEM	4	/* lkb_cid is the bufctl's cache */

#define	LKM_CTL_BUFCTL	0	/* normal allocation, PTR is bufctl */
#define	LKM_CTL_VMSEG	1	/* oversize allocation, PTR is vmem_seg_t */
#define	LKM_CTL_MEMORY	2	/* non-umem mmap or brk, PTR is region start */
#define	LKM_CTL_CACHE	3	/* normal alloc, non-debug, PTR is cache */
#define	LKM_CTL_MASK	3L

/*
 * create a lkm_bufctl from a pointer and a type
 *
 * The type tag lives in the low two bits of the pointer, so the
 * encoding requires the pointer's low two bits to be clear (i.e. at
 * least 4-byte alignment of bufctls, segs, caches and region bases).
 */
#define	LKM_CTL(ptr, type)	(LKM_CTLPTR(ptr) | (type))
#define	LKM_CTLPTR(ctl)		((uintptr_t)(ctl) & ~(LKM_CTL_MASK))
#define	LKM_CTLTYPE(ctl)	((uintptr_t)(ctl) & (LKM_CTL_MASK))

/* brk(2) range of the target, cached by leaky_handle_anon_mappings() */
static uintptr_t leak_brkbase;
static uintptr_t leak_brksize;

/*
 * True if ptr lies in [leak_brkbase, leak_brkbase + leak_brksize).
 * The single unsigned comparison covers both bounds at once.
 */
#define	LEAKY_INBRK(ptr) \
	(((uintptr_t)(ptr) - leak_brkbase) < leak_brksize)
790Sstevel@tonic-gate
/* one [start, end) span of the target's vmem heap arena */
typedef struct leaky_seg_info {
	uintptr_t ls_start;
	uintptr_t ls_end;
} leaky_seg_info_t;

/* shared state for the anonymous-mapping and sbrk processing passes */
typedef struct leaky_maps {
	leaky_seg_info_t *lm_segs;	/* heap spans, sorted by ls_start */
	uintptr_t lm_seg_count;		/* spans actually stored */
	uintptr_t lm_seg_max;		/* capacity of lm_segs */

	pstatus_t *lm_pstatus;		/* target pstatus (brk base/size) */

	leak_mtab_t **lm_lmp;		/* cursor into the leak mtab */
} leaky_maps_t;
940Sstevel@tonic-gate
950Sstevel@tonic-gate /*ARGSUSED*/
960Sstevel@tonic-gate static int
leaky_mtab(uintptr_t addr,const umem_bufctl_audit_t * bcp,leak_mtab_t ** lmp)970Sstevel@tonic-gate leaky_mtab(uintptr_t addr, const umem_bufctl_audit_t *bcp, leak_mtab_t **lmp)
980Sstevel@tonic-gate {
990Sstevel@tonic-gate leak_mtab_t *lm = (*lmp)++;
1000Sstevel@tonic-gate
1010Sstevel@tonic-gate lm->lkm_base = (uintptr_t)bcp->bc_addr;
1020Sstevel@tonic-gate lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_BUFCTL);
1030Sstevel@tonic-gate
1040Sstevel@tonic-gate return (WALK_NEXT);
1050Sstevel@tonic-gate }
1060Sstevel@tonic-gate
1070Sstevel@tonic-gate /*ARGSUSED*/
1080Sstevel@tonic-gate static int
leaky_mtab_addr(uintptr_t addr,void * ignored,leak_mtab_t ** lmp)1090Sstevel@tonic-gate leaky_mtab_addr(uintptr_t addr, void *ignored, leak_mtab_t **lmp)
1100Sstevel@tonic-gate {
1110Sstevel@tonic-gate leak_mtab_t *lm = (*lmp)++;
1120Sstevel@tonic-gate
1130Sstevel@tonic-gate lm->lkm_base = addr;
1140Sstevel@tonic-gate
1150Sstevel@tonic-gate return (WALK_NEXT);
1160Sstevel@tonic-gate }
1170Sstevel@tonic-gate
1180Sstevel@tonic-gate static int
leaky_seg(uintptr_t addr,const vmem_seg_t * seg,leak_mtab_t ** lmp)1190Sstevel@tonic-gate leaky_seg(uintptr_t addr, const vmem_seg_t *seg, leak_mtab_t **lmp)
1200Sstevel@tonic-gate {
1210Sstevel@tonic-gate leak_mtab_t *lm = (*lmp)++;
1220Sstevel@tonic-gate
1230Sstevel@tonic-gate lm->lkm_base = seg->vs_start;
1240Sstevel@tonic-gate lm->lkm_limit = seg->vs_end;
1250Sstevel@tonic-gate lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_VMSEG);
1260Sstevel@tonic-gate return (WALK_NEXT);
1270Sstevel@tonic-gate }
1280Sstevel@tonic-gate
1290Sstevel@tonic-gate static int
leaky_vmem(uintptr_t addr,const vmem_t * vmem,leak_mtab_t ** lmp)1300Sstevel@tonic-gate leaky_vmem(uintptr_t addr, const vmem_t *vmem, leak_mtab_t **lmp)
1310Sstevel@tonic-gate {
1320Sstevel@tonic-gate if (strcmp(vmem->vm_name, "umem_oversize") != 0 &&
1330Sstevel@tonic-gate strcmp(vmem->vm_name, "umem_memalign") != 0)
1340Sstevel@tonic-gate return (WALK_NEXT);
1350Sstevel@tonic-gate
1360Sstevel@tonic-gate if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_seg, lmp, addr) == -1)
1370Sstevel@tonic-gate mdb_warn("can't walk vmem_alloc for %s (%p)", vmem->vm_name,
1380Sstevel@tonic-gate addr);
1390Sstevel@tonic-gate
1400Sstevel@tonic-gate return (WALK_NEXT);
1410Sstevel@tonic-gate }
1420Sstevel@tonic-gate
1430Sstevel@tonic-gate /*ARGSUSED*/
1440Sstevel@tonic-gate static int
leaky_estimate_vmem(uintptr_t addr,const vmem_t * vmem,size_t * est)1450Sstevel@tonic-gate leaky_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
1460Sstevel@tonic-gate {
1470Sstevel@tonic-gate if (strcmp(vmem->vm_name, "umem_oversize") != 0 &&
1480Sstevel@tonic-gate strcmp(vmem->vm_name, "umem_memalign") != 0)
1490Sstevel@tonic-gate return (WALK_NEXT);
1500Sstevel@tonic-gate
1510Sstevel@tonic-gate *est += (int)(vmem->vm_kstat.vk_alloc - vmem->vm_kstat.vk_free);
1520Sstevel@tonic-gate
1530Sstevel@tonic-gate return (WALK_NEXT);
1540Sstevel@tonic-gate }
1550Sstevel@tonic-gate
1560Sstevel@tonic-gate static int
leaky_seg_cmp(const void * l,const void * r)1570Sstevel@tonic-gate leaky_seg_cmp(const void *l, const void *r)
1580Sstevel@tonic-gate {
1590Sstevel@tonic-gate const leaky_seg_info_t *lhs = (const leaky_seg_info_t *)l;
1600Sstevel@tonic-gate const leaky_seg_info_t *rhs = (const leaky_seg_info_t *)r;
1610Sstevel@tonic-gate
1620Sstevel@tonic-gate if (lhs->ls_start < rhs->ls_start)
1630Sstevel@tonic-gate return (-1);
1640Sstevel@tonic-gate if (lhs->ls_start > rhs->ls_start)
1650Sstevel@tonic-gate return (1);
1660Sstevel@tonic-gate
1670Sstevel@tonic-gate return (0);
1680Sstevel@tonic-gate }
1690Sstevel@tonic-gate
1700Sstevel@tonic-gate static ssize_t
leaky_seg_search(uintptr_t addr,leaky_seg_info_t * listp,unsigned count)1710Sstevel@tonic-gate leaky_seg_search(uintptr_t addr, leaky_seg_info_t *listp, unsigned count)
1720Sstevel@tonic-gate {
1730Sstevel@tonic-gate ssize_t left = 0, right = count - 1, guess;
1740Sstevel@tonic-gate
1750Sstevel@tonic-gate while (right >= left) {
1760Sstevel@tonic-gate guess = (right + left) >> 1;
1770Sstevel@tonic-gate
1780Sstevel@tonic-gate if (addr < listp[guess].ls_start) {
1790Sstevel@tonic-gate right = guess - 1;
1800Sstevel@tonic-gate continue;
1810Sstevel@tonic-gate }
1820Sstevel@tonic-gate
1830Sstevel@tonic-gate if (addr >= listp[guess].ls_end) {
1840Sstevel@tonic-gate left = guess + 1;
1850Sstevel@tonic-gate continue;
1860Sstevel@tonic-gate }
1870Sstevel@tonic-gate
1880Sstevel@tonic-gate return (guess);
1890Sstevel@tonic-gate }
1900Sstevel@tonic-gate
1910Sstevel@tonic-gate return (-1);
1920Sstevel@tonic-gate }
1930Sstevel@tonic-gate
1940Sstevel@tonic-gate /*ARGSUSED*/
1950Sstevel@tonic-gate static int
leaky_count(uintptr_t addr,void * unused,size_t * total)1960Sstevel@tonic-gate leaky_count(uintptr_t addr, void *unused, size_t *total)
1970Sstevel@tonic-gate {
1980Sstevel@tonic-gate ++*total;
1990Sstevel@tonic-gate
2000Sstevel@tonic-gate return (WALK_NEXT);
2010Sstevel@tonic-gate }
2020Sstevel@tonic-gate
2030Sstevel@tonic-gate /*ARGSUSED*/
2040Sstevel@tonic-gate static int
leaky_read_segs(uintptr_t addr,const vmem_seg_t * seg,leaky_maps_t * lmp)2050Sstevel@tonic-gate leaky_read_segs(uintptr_t addr, const vmem_seg_t *seg, leaky_maps_t *lmp)
2060Sstevel@tonic-gate {
2070Sstevel@tonic-gate leaky_seg_info_t *my_si = lmp->lm_segs + lmp->lm_seg_count;
2080Sstevel@tonic-gate
2090Sstevel@tonic-gate if (seg->vs_start == seg->vs_end && seg->vs_start == 0)
2100Sstevel@tonic-gate return (WALK_NEXT);
2110Sstevel@tonic-gate
2120Sstevel@tonic-gate if (lmp->lm_seg_count++ >= lmp->lm_seg_max)
2130Sstevel@tonic-gate return (WALK_ERR);
2140Sstevel@tonic-gate
2150Sstevel@tonic-gate my_si->ls_start = seg->vs_start;
2160Sstevel@tonic-gate my_si->ls_end = seg->vs_end;
2170Sstevel@tonic-gate
2180Sstevel@tonic-gate return (WALK_NEXT);
2190Sstevel@tonic-gate }
2200Sstevel@tonic-gate
2210Sstevel@tonic-gate /* ARGSUSED */
2220Sstevel@tonic-gate static int
leaky_process_anon_mappings(uintptr_t ignored,const prmap_t * pmp,leaky_maps_t * lmp)2230Sstevel@tonic-gate leaky_process_anon_mappings(uintptr_t ignored, const prmap_t *pmp,
2240Sstevel@tonic-gate leaky_maps_t *lmp)
2250Sstevel@tonic-gate {
2260Sstevel@tonic-gate uintptr_t start = pmp->pr_vaddr;
2270Sstevel@tonic-gate uintptr_t end = pmp->pr_vaddr + pmp->pr_size;
2280Sstevel@tonic-gate
2290Sstevel@tonic-gate leak_mtab_t *lm;
2300Sstevel@tonic-gate pstatus_t *Psp = lmp->lm_pstatus;
2310Sstevel@tonic-gate
2320Sstevel@tonic-gate uintptr_t brk_start = Psp->pr_brkbase;
2330Sstevel@tonic-gate uintptr_t brk_end = Psp->pr_brkbase + Psp->pr_brksize;
2340Sstevel@tonic-gate
2350Sstevel@tonic-gate int has_brk = 0;
2360Sstevel@tonic-gate int in_vmem = 0;
2370Sstevel@tonic-gate
2380Sstevel@tonic-gate /*
2390Sstevel@tonic-gate * This checks if there is any overlap between the segment and the brk.
2400Sstevel@tonic-gate */
2410Sstevel@tonic-gate if (end > brk_start && start < brk_end)
2420Sstevel@tonic-gate has_brk = 1;
2430Sstevel@tonic-gate
2440Sstevel@tonic-gate if (leaky_seg_search(start, lmp->lm_segs, lmp->lm_seg_count) != -1)
2450Sstevel@tonic-gate in_vmem = 1;
2460Sstevel@tonic-gate
2470Sstevel@tonic-gate /*
2480Sstevel@tonic-gate * We only want anonymous, mmaped memory. That means:
2490Sstevel@tonic-gate *
2500Sstevel@tonic-gate * 1. Must be read-write
2510Sstevel@tonic-gate * 2. Cannot be shared
2520Sstevel@tonic-gate * 3. Cannot have backing
2530Sstevel@tonic-gate * 4. Cannot be in the brk
2540Sstevel@tonic-gate * 5. Cannot be part of the vmem heap.
2550Sstevel@tonic-gate */
2560Sstevel@tonic-gate if ((pmp->pr_mflags & (MA_READ | MA_WRITE)) == (MA_READ | MA_WRITE) &&
2570Sstevel@tonic-gate (pmp->pr_mflags & MA_SHARED) == 0 &&
2580Sstevel@tonic-gate (pmp->pr_mapname[0] == 0) &&
2590Sstevel@tonic-gate !has_brk &&
2600Sstevel@tonic-gate !in_vmem) {
2610Sstevel@tonic-gate dprintf(("mmaped region: [%p, %p)\n", start, end));
2620Sstevel@tonic-gate lm = (*lmp->lm_lmp)++;
2630Sstevel@tonic-gate lm->lkm_base = start;
2640Sstevel@tonic-gate lm->lkm_limit = end;
2650Sstevel@tonic-gate lm->lkm_bufctl = LKM_CTL(pmp->pr_vaddr, LKM_CTL_MEMORY);
2660Sstevel@tonic-gate }
2670Sstevel@tonic-gate
2680Sstevel@tonic-gate return (WALK_NEXT);
2690Sstevel@tonic-gate }
2700Sstevel@tonic-gate
/*
 * Add mtab entries for the portions of the brk ([brkbase, brkend))
 * that are NOT covered by any heap-arena span, so that raw sbrk(2)
 * allocations made behind umem's back are still scanned for leaks.
 * lmp->lm_segs must already be sorted by ls_start (see qsort in
 * leaky_handle_anon_mappings()).
 */
static void
leaky_handle_sbrk(leaky_maps_t *lmp)
{
	uintptr_t brkbase = lmp->lm_pstatus->pr_brkbase;
	uintptr_t brkend = brkbase + lmp->lm_pstatus->pr_brksize;

	leak_mtab_t *lm;

	leaky_seg_info_t *segs = lmp->lm_segs;

	int x, first = -1, last = -1;

	dprintf(("brk: [%p, %p)\n", brkbase, brkend));

	/* find the first and last spans wholly contained in the brk */
	for (x = 0; x < lmp->lm_seg_count; x++) {
		if (segs[x].ls_start >= brkbase && segs[x].ls_end <= brkend) {
			if (first == -1)
				first = x;
			last = x;
		}
	}

	if (brkbase == brkend) {
		dprintf(("empty brk -- do nothing\n"));
	} else if (first == -1) {
		/* no heap spans in the brk: the whole range is foreign */
		dprintf(("adding [%p, %p) whole brk\n", brkbase, brkend));

		lm = (*lmp->lm_lmp)++;
		lm->lkm_base = brkbase;
		lm->lkm_limit = brkend;
		lm->lkm_bufctl = LKM_CTL(brkbase, LKM_CTL_MEMORY);
	} else {
		/* spans are page-aligned; start from the first page boundary */
		uintptr_t curbrk = P2ROUNDUP(brkbase, umem_pagesize);

		if (curbrk != segs[first].ls_start) {
			/* gap between brk base and the first heap span */
			dprintf(("adding [%p, %p) in brk, before first seg\n",
			    brkbase, segs[first].ls_start));

			lm = (*lmp->lm_lmp)++;
			lm->lkm_base = brkbase;
			lm->lkm_limit = segs[first].ls_start;
			lm->lkm_bufctl = LKM_CTL(brkbase, LKM_CTL_MEMORY);

			curbrk = segs[first].ls_start;

		} else if (curbrk != brkbase) {
			/* sub-page slop before the first span is ignored */
			dprintf(("ignore [%p, %p) -- realign\n", brkbase,
			    curbrk));
		}

		/* emit each gap between consecutive heap spans in the brk */
		for (x = first; x <= last; x++) {
			if (curbrk < segs[x].ls_start) {
				dprintf(("adding [%p, %p) in brk\n", curbrk,
				    segs[x].ls_start));

				lm = (*lmp->lm_lmp)++;
				lm->lkm_base = curbrk;
				lm->lkm_limit = segs[x].ls_start;
				lm->lkm_bufctl = LKM_CTL(curbrk,
				    LKM_CTL_MEMORY);
			}
			curbrk = segs[x].ls_end;
		}

		/* and the tail of the brk after the last heap span */
		if (curbrk < brkend) {
			dprintf(("adding [%p, %p) in brk, after last seg\n",
			    curbrk, brkend));

			lm = (*lmp->lm_lmp)++;
			lm->lkm_base = curbrk;
			lm->lkm_limit = brkend;
			lm->lkm_bufctl = LKM_CTL(curbrk, LKM_CTL_MEMORY);
		}
	}
}
3460Sstevel@tonic-gate
/*
 * Build mtab entries for memory that may hold non-umem allocations:
 * anonymous mmap(2) regions and uncovered parts of the brk.  Reads the
 * heap arena's spans so those regions can be excluded, then walks the
 * target's mappings.  Returns DCMD_OK or DCMD_ERR.
 */
static int
leaky_handle_anon_mappings(leak_mtab_t **lmp)
{
	leaky_maps_t lm;

	vmem_t *heap_arena;
	vmem_t *vm_next;
	vmem_t *heap_top;
	vmem_t vmem;

	pstatus_t Ps;

	if (mdb_get_xdata("pstatus", &Ps, sizeof (Ps)) == -1) {
		mdb_warn("couldn't read pstatus xdata");
		return (DCMD_ERR);
	}
	lm.lm_pstatus = &Ps;

	/* cache the brk range for the LEAKY_INBRK() macro */
	leak_brkbase = Ps.pr_brkbase;
	leak_brksize = Ps.pr_brksize;

	if (umem_readvar(&heap_arena, "heap_arena") == -1) {
		mdb_warn("couldn't read heap_arena");
		return (DCMD_ERR);
	}

	if (heap_arena == NULL) {
		mdb_warn("heap_arena is NULL.\n");
		return (DCMD_ERR);
	}

	/*
	 * Follow the vm_source chain up to the root arena; its spans
	 * cover everything the vmem heap has claimed.  heap_arena is
	 * known non-NULL here, so heap_top is always assigned.
	 */
	for (vm_next = heap_arena; vm_next != NULL; vm_next = vmem.vm_source) {
		if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)vm_next) == -1) {
			mdb_warn("couldn't read vmem at %p", vm_next);
			return (DCMD_ERR);
		}
		heap_top = vm_next;
	}

	/* first count the spans, then read them into a sized array */
	lm.lm_seg_count = 0;
	lm.lm_seg_max = 0;

	if (mdb_pwalk("vmem_span", (mdb_walk_cb_t)leaky_count,
	    &lm.lm_seg_max, (uintptr_t)heap_top) == -1) {
		mdb_warn("couldn't walk vmem_span for vmem %p", heap_top);
		return (DCMD_ERR);
	}
	lm.lm_segs = mdb_alloc(lm.lm_seg_max * sizeof (*lm.lm_segs),
	    UM_SLEEP | UM_GC);

	if (mdb_pwalk("vmem_span", (mdb_walk_cb_t)leaky_read_segs, &lm,
	    (uintptr_t)heap_top) == -1) {
		mdb_warn("couldn't walk vmem_span for vmem %p",
		    heap_top);
		return (DCMD_ERR);
	}

	if (lm.lm_seg_count > lm.lm_seg_max) {
		mdb_warn("segment list for vmem %p grew\n", heap_top);
		return (DCMD_ERR);
	}

	/* leaky_seg_search() requires the spans sorted by start address */
	qsort(lm.lm_segs, lm.lm_seg_count, sizeof (*lm.lm_segs), leaky_seg_cmp);

	lm.lm_lmp = lmp;

	prockludge_add_walkers();

	if (mdb_walk(KLUDGE_MAPWALK_NAME,
	    (mdb_walk_cb_t)leaky_process_anon_mappings, &lm) == -1) {
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();
		return (DCMD_ERR);
	}

	prockludge_remove_walkers();
	leaky_handle_sbrk(&lm);

	return (DCMD_OK);
}
4270Sstevel@tonic-gate
4280Sstevel@tonic-gate static int
leaky_interested(const umem_cache_t * c)4290Sstevel@tonic-gate leaky_interested(const umem_cache_t *c)
4300Sstevel@tonic-gate {
4310Sstevel@tonic-gate vmem_t vmem;
4320Sstevel@tonic-gate
4330Sstevel@tonic-gate if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
4340Sstevel@tonic-gate mdb_warn("cannot read arena %p for cache '%s'",
4350Sstevel@tonic-gate (uintptr_t)c->cache_arena, c->cache_name);
4360Sstevel@tonic-gate return (0);
4370Sstevel@tonic-gate }
4380Sstevel@tonic-gate
4390Sstevel@tonic-gate /*
4400Sstevel@tonic-gate * If this cache isn't allocating from either the umem_default or
4410Sstevel@tonic-gate * umem_firewall vmem arena, we're not interested.
4420Sstevel@tonic-gate */
4430Sstevel@tonic-gate if (strcmp(vmem.vm_name, "umem_default") != 0 &&
4440Sstevel@tonic-gate strcmp(vmem.vm_name, "umem_firewall") != 0) {
4450Sstevel@tonic-gate dprintf(("Skipping cache '%s' with arena '%s'\n",
4460Sstevel@tonic-gate c->cache_name, vmem.vm_name));
4470Sstevel@tonic-gate return (0);
4480Sstevel@tonic-gate }
4490Sstevel@tonic-gate
4500Sstevel@tonic-gate return (1);
4510Sstevel@tonic-gate }
4520Sstevel@tonic-gate
4530Sstevel@tonic-gate /*ARGSUSED*/
4540Sstevel@tonic-gate static int
leaky_estimate(uintptr_t addr,const umem_cache_t * c,size_t * est)4550Sstevel@tonic-gate leaky_estimate(uintptr_t addr, const umem_cache_t *c, size_t *est)
4560Sstevel@tonic-gate {
4570Sstevel@tonic-gate if (!leaky_interested(c))
4580Sstevel@tonic-gate return (WALK_NEXT);
4590Sstevel@tonic-gate
4600Sstevel@tonic-gate *est += umem_estimate_allocated(addr, c);
4610Sstevel@tonic-gate
4620Sstevel@tonic-gate return (WALK_NEXT);
4630Sstevel@tonic-gate }
4640Sstevel@tonic-gate
4650Sstevel@tonic-gate /*ARGSUSED*/
4660Sstevel@tonic-gate static int
leaky_cache(uintptr_t addr,const umem_cache_t * c,leak_mtab_t ** lmp)4670Sstevel@tonic-gate leaky_cache(uintptr_t addr, const umem_cache_t *c, leak_mtab_t **lmp)
4680Sstevel@tonic-gate {
4690Sstevel@tonic-gate leak_mtab_t *lm = *lmp;
4700Sstevel@tonic-gate mdb_walk_cb_t cb;
4710Sstevel@tonic-gate const char *walk;
4720Sstevel@tonic-gate int audit = (c->cache_flags & UMF_AUDIT);
4730Sstevel@tonic-gate
4740Sstevel@tonic-gate if (!leaky_interested(c))
4750Sstevel@tonic-gate return (WALK_NEXT);
4760Sstevel@tonic-gate
4770Sstevel@tonic-gate if (audit) {
4780Sstevel@tonic-gate walk = "bufctl";
4790Sstevel@tonic-gate cb = (mdb_walk_cb_t)leaky_mtab;
4800Sstevel@tonic-gate } else {
4810Sstevel@tonic-gate walk = "umem";
4820Sstevel@tonic-gate cb = (mdb_walk_cb_t)leaky_mtab_addr;
4830Sstevel@tonic-gate }
4840Sstevel@tonic-gate if (mdb_pwalk(walk, cb, lmp, addr) == -1) {
4850Sstevel@tonic-gate mdb_warn("can't walk umem for cache %p (%s)", addr,
4860Sstevel@tonic-gate c->cache_name);
4870Sstevel@tonic-gate return (WALK_DONE);
4880Sstevel@tonic-gate }
4890Sstevel@tonic-gate
4900Sstevel@tonic-gate for (; lm < *lmp; lm++) {
4910Sstevel@tonic-gate lm->lkm_limit = lm->lkm_base + c->cache_bufsize;
4920Sstevel@tonic-gate if (!audit)
4930Sstevel@tonic-gate lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_CACHE);
4940Sstevel@tonic-gate }
4950Sstevel@tonic-gate return (WALK_NEXT);
4960Sstevel@tonic-gate }
4970Sstevel@tonic-gate
/* header and row formats for the debug dump of mapping decisions */
static char *map_head = "%-?s %?s %-10s used reason\n";
static char *map_fmt = "[%?p,%?p) %-10s ";
#define	BACKING_LEN 10 /* must match the third field's width in map_fmt */
5010Sstevel@tonic-gate
/* print the column header for leaky_grep_mappings()'s debug output */
static void
leaky_mappings_header(void)
{
	dprintf((map_head, "mapping", "", "backing"));
}
5070Sstevel@tonic-gate
/*
 * Decide, per mapping, whether its contents should be scanned
 * (leaky_grep()) for references to potential leaks.  Writable data
 * segments (bss, a.out data, library data) are scanned; the stack,
 * the brk, libumem's own mappings and anonymous regions are not
 * (they are handled elsewhere or are irrelevant).  Note the if/else
 * chain is order-dependent -- e.g. the bss case must precede the
 * in-brk case.
 */
/* ARGSUSED */
static int
leaky_grep_mappings(uintptr_t ignored, const prmap_t *pmp,
    const pstatus_t *Psp)
{
	const char *map_libname_ptr;
	char db_mp_name[BACKING_LEN+1];

	/* basename of the backing object, truncated to the display width */
	map_libname_ptr = strrchr(pmp->pr_mapname, '/');
	if (map_libname_ptr != NULL)
		map_libname_ptr++;
	else
		map_libname_ptr = pmp->pr_mapname;

	strlcpy(db_mp_name, map_libname_ptr, sizeof (db_mp_name));

	dprintf((map_fmt, pmp->pr_vaddr, (char *)pmp->pr_vaddr + pmp->pr_size,
	    db_mp_name));

#define	USE(rsn)	dprintf_cont(("yes %s\n", (rsn)))
#define	IGNORE(rsn)	dprintf_cont(("no %s\n", (rsn)))

	if (!(pmp->pr_mflags & MA_WRITE) || !(pmp->pr_mflags & MA_READ)) {
		IGNORE("read-only");
	} else if (pmp->pr_vaddr <= Psp->pr_brkbase &&
	    pmp->pr_vaddr + pmp->pr_size > Psp->pr_brkbase) {
		USE("bss");			/* grab up to brkbase */
		leaky_grep(pmp->pr_vaddr, Psp->pr_brkbase - pmp->pr_vaddr);
	} else if (pmp->pr_vaddr >= Psp->pr_brkbase &&
	    pmp->pr_vaddr < Psp->pr_brkbase + Psp->pr_brksize) {
		IGNORE("in brk");
	} else if (pmp->pr_vaddr == Psp->pr_stkbase &&
	    pmp->pr_size == Psp->pr_stksize) {
		IGNORE("stack");
	} else if (0 == strcmp(map_libname_ptr, "a.out")) {
		USE("a.out data");
		leaky_grep(pmp->pr_vaddr, pmp->pr_size);
	} else if (0 == strncmp(map_libname_ptr, "libumem.so", 10)) {
		IGNORE("part of umem");
	} else if (pmp->pr_mapname[0] != 0) {
		USE("lib data");		/* library data/bss */
		leaky_grep(pmp->pr_vaddr, pmp->pr_size);
	} else if ((pmp->pr_mflags & MA_ANON) && pmp->pr_mapname[0] == 0) {
		IGNORE("anon");
	} else {
		IGNORE("");			/* default to ignoring */
	}

#undef	USE
#undef	IGNORE

	return (WALK_NEXT);
}
5610Sstevel@tonic-gate
/*
 * Mark each lwp's stack pointer so that a later pass (see
 * leaky_process_lwp()) can locate the containing region via
 * leaky_lookup_marked() and scan only the live part of the stack.
 */
/*ARGSUSED*/
static int
leaky_mark_lwp(void *ignored, const lwpstatus_t *lwp)
{
	leaky_mark_ptr(lwp->pr_reg[R_SP] + STACK_BIAS);
	return (0);
}
5690Sstevel@tonic-gate
/*
 * Scan one lwp's state for references: every general register value is
 * treated as a potential pointer, and the live portion of the stack
 * (from %sp to the end of its marked region) is grepped.
 */
/*ARGSUSED*/
static int
leaky_process_lwp(void *ignored, const lwpstatus_t *lwp)
{
	const uintptr_t *regs = (const uintptr_t *)&lwp->pr_reg;
	int i;
	uintptr_t sp;
	uintptr_t addr;
	size_t size;

	/* registers before the stack pointer */
	for (i = 0; i < R_SP; i++)
		leaky_grep_ptr(regs[i]);

	/* i == R_SP here; consume it and scan the live stack above it */
	sp = regs[i++] + STACK_BIAS;
	if (leaky_lookup_marked(sp, &addr, &size))
		leaky_grep(sp, size - (sp - addr));

	/* and the remaining registers */
	for (; i < NPRGREG; i++)
		leaky_grep_ptr(regs[i]);

	return (0);
}
5920Sstevel@tonic-gate
/*
 * Handles processing various proc-related things:
 * 1. calls leaky_process_lwp on each the LWP
 * 2. leaky_greps the bss/data of libraries and a.out, and the a.out stack.
 */
static int
leaky_process_proc(void)
{
	pstatus_t Ps;
	struct ps_prochandle *Pr;

	if (mdb_get_xdata("pstatus", &Ps, sizeof (Ps)) == -1) {
		mdb_warn("couldn't read pstatus xdata");
		return (DCMD_ERR);
	}

	dprintf(("pstatus says:\n"));
	dprintf(("\tbrk: base %p size %p\n",
	    Ps.pr_brkbase, Ps.pr_brksize));
	dprintf(("\tstk: base %p size %p\n",
	    Ps.pr_stkbase, Ps.pr_stksize));

	if (mdb_get_xdata("pshandle", &Pr, sizeof (Pr)) == -1) {
		mdb_warn("couldn't read pshandle xdata");
		return (DCMD_ERR);
	}

	/* first pass marks the stack pointers... */
	if (Plwp_iter(Pr, leaky_mark_lwp, NULL) != 0) {
		mdb_warn("findleaks: Failed to iterate lwps\n");
		return (DCMD_ERR);
	}

	/* ...second pass greps registers and the live stacks */
	if (Plwp_iter(Pr, leaky_process_lwp, NULL) != 0) {
		mdb_warn("findleaks: Failed to iterate lwps\n");
		return (DCMD_ERR);
	}

	prockludge_add_walkers();

	leaky_mappings_header();

	if (mdb_walk(KLUDGE_MAPWALK_NAME, (mdb_walk_cb_t)leaky_grep_mappings,
	    &Ps) == -1) {
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();
		/*
		 * NOTE(review): this path returns -1 while the others
		 * return DCMD_ERR; callers appear to treat any nonzero
		 * value as failure -- confirm before unifying.
		 */
		return (-1);
	}

	prockludge_remove_walkers();

	return (0);
}
6450Sstevel@tonic-gate
6460Sstevel@tonic-gate static void
leaky_subr_caller(const uintptr_t * stack,uint_t depth,char * buf,uintptr_t * pcp)6470Sstevel@tonic-gate leaky_subr_caller(const uintptr_t *stack, uint_t depth, char *buf,
6480Sstevel@tonic-gate uintptr_t *pcp)
6490Sstevel@tonic-gate {
6500Sstevel@tonic-gate int i;
6510Sstevel@tonic-gate GElf_Sym sym;
6520Sstevel@tonic-gate uintptr_t pc = 0;
6530Sstevel@tonic-gate
6540Sstevel@tonic-gate buf[0] = 0;
6550Sstevel@tonic-gate
6560Sstevel@tonic-gate for (i = 0; i < depth; i++) {
6570Sstevel@tonic-gate pc = stack[i];
6580Sstevel@tonic-gate
6590Sstevel@tonic-gate if (mdb_lookup_by_addr(pc,
6600Sstevel@tonic-gate MDB_SYM_FUZZY, buf, MDB_SYM_NAMLEN, &sym) == -1)
6610Sstevel@tonic-gate continue;
6620Sstevel@tonic-gate if (strncmp(buf, "libumem.so", 10) == 0)
6630Sstevel@tonic-gate continue;
6640Sstevel@tonic-gate
6650Sstevel@tonic-gate *pcp = pc;
6660Sstevel@tonic-gate return;
6670Sstevel@tonic-gate }
6680Sstevel@tonic-gate
6690Sstevel@tonic-gate /*
6700Sstevel@tonic-gate * We're only here if the entire call chain is in libumem.so;
6710Sstevel@tonic-gate * this shouldn't happen, but we'll just use the last caller.
6720Sstevel@tonic-gate */
6730Sstevel@tonic-gate *pcp = pc;
6740Sstevel@tonic-gate }
6750Sstevel@tonic-gate
6760Sstevel@tonic-gate int
leaky_subr_bufctl_cmp(const leak_bufctl_t * lhs,const leak_bufctl_t * rhs)6770Sstevel@tonic-gate leaky_subr_bufctl_cmp(const leak_bufctl_t *lhs, const leak_bufctl_t *rhs)
6780Sstevel@tonic-gate {
6790Sstevel@tonic-gate char lbuf[MDB_SYM_NAMLEN], rbuf[MDB_SYM_NAMLEN];
6800Sstevel@tonic-gate uintptr_t lcaller, rcaller;
6810Sstevel@tonic-gate int rval;
6820Sstevel@tonic-gate
6830Sstevel@tonic-gate leaky_subr_caller(lhs->lkb_stack, lhs->lkb_depth, lbuf, &lcaller);
6840Sstevel@tonic-gate leaky_subr_caller(rhs->lkb_stack, lhs->lkb_depth, rbuf, &rcaller);
6850Sstevel@tonic-gate
6860Sstevel@tonic-gate if (rval = strcmp(lbuf, rbuf))
6870Sstevel@tonic-gate return (rval);
6880Sstevel@tonic-gate
6890Sstevel@tonic-gate if (lcaller < rcaller)
6900Sstevel@tonic-gate return (-1);
6910Sstevel@tonic-gate
6920Sstevel@tonic-gate if (lcaller > rcaller)
6930Sstevel@tonic-gate return (1);
6940Sstevel@tonic-gate
6950Sstevel@tonic-gate if (lhs->lkb_data < rhs->lkb_data)
6960Sstevel@tonic-gate return (-1);
6970Sstevel@tonic-gate
6980Sstevel@tonic-gate if (lhs->lkb_data > rhs->lkb_data)
6990Sstevel@tonic-gate return (1);
7000Sstevel@tonic-gate
7010Sstevel@tonic-gate return (0);
7020Sstevel@tonic-gate }
7030Sstevel@tonic-gate
7040Sstevel@tonic-gate /*ARGSUSED*/
7050Sstevel@tonic-gate int
leaky_subr_estimate(size_t * estp)7060Sstevel@tonic-gate leaky_subr_estimate(size_t *estp)
7070Sstevel@tonic-gate {
708*1528Sjwadams if (umem_ready == 0) {
709*1528Sjwadams mdb_warn(
710*1528Sjwadams "findleaks: umem is not loaded in the address space\n");
711*1528Sjwadams return (DCMD_ERR);
712*1528Sjwadams }
7130Sstevel@tonic-gate
714*1528Sjwadams if (umem_ready == UMEM_READY_INIT_FAILED) {
715*1528Sjwadams mdb_warn("findleaks: umem initialization failed -- no "
716*1528Sjwadams "possible leaks.\n");
7170Sstevel@tonic-gate return (DCMD_ERR);
7180Sstevel@tonic-gate }
7190Sstevel@tonic-gate
7200Sstevel@tonic-gate if (umem_ready != UMEM_READY) {
7210Sstevel@tonic-gate mdb_warn("findleaks: No allocations have occured -- no "
7220Sstevel@tonic-gate "possible leaks.\n");
7230Sstevel@tonic-gate return (DCMD_ERR);
7240Sstevel@tonic-gate }
7250Sstevel@tonic-gate
7260Sstevel@tonic-gate if (mdb_walk("umem_cache", (mdb_walk_cb_t)leaky_estimate, estp) == -1) {
7270Sstevel@tonic-gate mdb_warn("couldn't walk 'umem_cache'");
7280Sstevel@tonic-gate return (DCMD_ERR);
7290Sstevel@tonic-gate }
7300Sstevel@tonic-gate
7310Sstevel@tonic-gate if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_estimate_vmem, estp) == -1) {
7320Sstevel@tonic-gate mdb_warn("couldn't walk 'vmem'");
7330Sstevel@tonic-gate return (DCMD_ERR);
7340Sstevel@tonic-gate }
7350Sstevel@tonic-gate
7360Sstevel@tonic-gate if (*estp == 0) {
7370Sstevel@tonic-gate mdb_warn("findleaks: No allocated buffers found.\n");
7380Sstevel@tonic-gate return (DCMD_ERR);
7390Sstevel@tonic-gate }
7400Sstevel@tonic-gate
7410Sstevel@tonic-gate prockludge_add_walkers();
7420Sstevel@tonic-gate
7430Sstevel@tonic-gate if (mdb_walk(KLUDGE_MAPWALK_NAME, (mdb_walk_cb_t)leaky_count,
7440Sstevel@tonic-gate estp) == -1) {
7450Sstevel@tonic-gate mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
7460Sstevel@tonic-gate prockludge_remove_walkers();
7470Sstevel@tonic-gate return (DCMD_ERR);
7480Sstevel@tonic-gate }
7490Sstevel@tonic-gate
7500Sstevel@tonic-gate prockludge_remove_walkers();
7510Sstevel@tonic-gate
7520Sstevel@tonic-gate return (DCMD_OK);
7530Sstevel@tonic-gate }
7540Sstevel@tonic-gate
7550Sstevel@tonic-gate int
leaky_subr_fill(leak_mtab_t ** lmpp)7560Sstevel@tonic-gate leaky_subr_fill(leak_mtab_t **lmpp)
7570Sstevel@tonic-gate {
7580Sstevel@tonic-gate if (leaky_handle_anon_mappings(lmpp) != DCMD_OK) {
7590Sstevel@tonic-gate mdb_warn("unable to process mappings\n");
7600Sstevel@tonic-gate return (DCMD_ERR);
7610Sstevel@tonic-gate }
7620Sstevel@tonic-gate
7630Sstevel@tonic-gate if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_vmem, lmpp) == -1) {
7640Sstevel@tonic-gate mdb_warn("couldn't walk 'vmem'");
7650Sstevel@tonic-gate return (DCMD_ERR);
7660Sstevel@tonic-gate }
7670Sstevel@tonic-gate
7680Sstevel@tonic-gate if (mdb_walk("umem_cache", (mdb_walk_cb_t)leaky_cache, lmpp) == -1) {
7690Sstevel@tonic-gate mdb_warn("couldn't walk 'umem_cache'");
7700Sstevel@tonic-gate return (DCMD_ERR);
7710Sstevel@tonic-gate }
7720Sstevel@tonic-gate
7730Sstevel@tonic-gate return (DCMD_OK);
7740Sstevel@tonic-gate }
7750Sstevel@tonic-gate
7760Sstevel@tonic-gate int
leaky_subr_run(void)7770Sstevel@tonic-gate leaky_subr_run(void)
7780Sstevel@tonic-gate {
7790Sstevel@tonic-gate if (leaky_process_proc() == DCMD_ERR) {
7800Sstevel@tonic-gate mdb_warn("failed to process proc");
7810Sstevel@tonic-gate return (DCMD_ERR);
7820Sstevel@tonic-gate }
7830Sstevel@tonic-gate return (DCMD_OK);
7840Sstevel@tonic-gate }
7850Sstevel@tonic-gate
/*
 * Convert one unreferenced mtab entry into a leak record, dispatching on
 * the control-pointer type encoded in lkm_bufctl.  Reads the underlying
 * bufctl/vmem_seg from the target and hands the details to leaky_add_leak().
 * Unreadable control structures are warned about and skipped.
 */
void
leaky_subr_add_leak(leak_mtab_t *lmp)
{
	uintptr_t addr = LKM_CTLPTR(lmp->lkm_bufctl);
	uint_t depth;

	vmem_seg_t vs;
	umem_bufctl_audit_t *bcp;
	/* NOTE(review): presumably stack-allocates bcp's audit buffer */
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	switch (LKM_CTLTYPE(lmp->lkm_bufctl)) {
	case LKM_CTL_BUFCTL:
		/* umem cache buffer: read its audit bufctl for the stack */
		if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
			mdb_warn("couldn't read leaked bufctl at addr %p",
			    addr);
			return;
		}

		depth = MIN(bcp->bc_depth, umem_stack_depth);

		/*
		 * The top of the stack will be in umem_cache_alloc().
		 * Since the offset in umem_cache_alloc() isn't interesting
		 * we skip that frame for the purposes of uniquifying stacks.
		 *
		 * Also, we use the cache pointer as the leaks's cid, to
		 * prevent the coalescing of leaks from different caches.
		 */
		if (depth > 0)
			depth--;
		leaky_add_leak(TYPE_UMEM, addr, (uintptr_t)bcp->bc_addr,
		    bcp->bc_timestamp, bcp->bc_stack + 1, depth,
		    (uintptr_t)bcp->bc_cache, (uintptr_t)bcp->bc_cache);
		break;
	case LKM_CTL_VMSEG:
		/* oversize allocation: read the vmem segment record */
		if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
			mdb_warn("couldn't read leaked vmem_seg at addr %p",
			    addr);
			return;
		}
		depth = MIN(vs.vs_depth, VMEM_STACK_DEPTH);

		/* last argument is the segment size in bytes */
		leaky_add_leak(TYPE_VMEM, addr, vs.vs_start, vs.vs_timestamp,
		    vs.vs_stack, depth, 0, (vs.vs_end - vs.vs_start));
		break;
	case LKM_CTL_MEMORY:
		/* raw mapping: classify as sbrk- or mmap-backed by address */
		if (LEAKY_INBRK(addr))
			leaky_add_leak(TYPE_SBRK, addr, addr, 0, NULL, 0, 0,
			    lmp->lkm_limit - addr);
		else
			leaky_add_leak(TYPE_MMAP, addr, addr, 0, NULL, 0, 0,
			    lmp->lkm_limit - addr);
		break;
	case LKM_CTL_CACHE:
		/* whole cache leaked; no stack is available */
		leaky_add_leak(TYPE_CACHE, lmp->lkm_base, lmp->lkm_base, 0,
		    NULL, 0, addr, addr);
		break;
	default:
		mdb_warn("internal error: invalid leak_bufctl_t\n");
		break;
	}
}
8480Sstevel@tonic-gate
/*
 * State shared by the dump routines below: the lk_*_seen flags record
 * whether a header line has been printed for each output section, and
 * lk_ttl/lk_bytes accumulate the leak count and byte total for the
 * section's trailing summary.
 */
static int lk_vmem_seen;
static int lk_cache_seen;
static int lk_umem_seen;
static size_t lk_ttl;
static size_t lk_bytes;
8540Sstevel@tonic-gate
8550Sstevel@tonic-gate void
leaky_subr_dump_start(int type)8560Sstevel@tonic-gate leaky_subr_dump_start(int type)
8570Sstevel@tonic-gate {
8580Sstevel@tonic-gate switch (type) {
8590Sstevel@tonic-gate case TYPE_MMAP:
8600Sstevel@tonic-gate lk_vmem_seen = 0;
8610Sstevel@tonic-gate break;
8620Sstevel@tonic-gate
8630Sstevel@tonic-gate case TYPE_SBRK:
8640Sstevel@tonic-gate case TYPE_VMEM:
8650Sstevel@tonic-gate return; /* don't zero counts */
8660Sstevel@tonic-gate
8670Sstevel@tonic-gate case TYPE_CACHE:
8680Sstevel@tonic-gate lk_cache_seen = 0;
8690Sstevel@tonic-gate break;
8700Sstevel@tonic-gate
8710Sstevel@tonic-gate case TYPE_UMEM:
8720Sstevel@tonic-gate lk_umem_seen = 0;
8730Sstevel@tonic-gate break;
8740Sstevel@tonic-gate
8750Sstevel@tonic-gate default:
8760Sstevel@tonic-gate break;
8770Sstevel@tonic-gate }
8780Sstevel@tonic-gate
8790Sstevel@tonic-gate lk_ttl = 0;
8800Sstevel@tonic-gate lk_bytes = 0;
8810Sstevel@tonic-gate }
8820Sstevel@tonic-gate
/*
 * Print one coalesced leak record.  In the terse (!verbose) form, a column
 * header is emitted the first time each section (vmem, cache, umem) is
 * seen; in the verbose form each record is expanded, possibly by invoking
 * the ::vmem_seg or ::bufctl dcmds.  Running totals are kept in
 * lk_ttl/lk_bytes for leaky_subr_dump_end().
 */
void
leaky_subr_dump(const leak_bufctl_t *lkb, int verbose)
{
	const leak_bufctl_t *cur;
	umem_cache_t cache;
	size_t min, max, size;
	char sz[30];
	char c[MDB_SYM_NAMLEN];
	uintptr_t caller;
	const char *nm, *nm_lc;
	uint8_t type = lkb->lkb_type;

	if (verbose) {
		/* verbose output prints per-record totals, so reset them */
		lk_ttl = 0;
		lk_bytes = 0;
	} else if (!lk_vmem_seen && (type == TYPE_VMEM || type == TYPE_MMAP ||
	    type == TYPE_SBRK)) {
		/* first vmem-section record: print the column header */
		lk_vmem_seen = 1;
		mdb_printf("%-16s %7s %?s %s\n",
		    "BYTES", "LEAKED", "VMEM_SEG", "CALLER");
	}

	switch (lkb->lkb_type) {
	case TYPE_MMAP:
	case TYPE_SBRK:
		nm = (lkb->lkb_type == TYPE_MMAP) ? "MMAP" : "SBRK";
		nm_lc = (lkb->lkb_type == TYPE_MMAP) ? "mmap(2)" : "sbrk(2)";

		/* walk the chain of coalesced duplicates */
		for (; lkb != NULL; lkb = lkb->lkb_next) {
			if (!verbose)
				mdb_printf("%-16d %7d %?p %s\n", lkb->lkb_data,
				    lkb->lkb_dups + 1, lkb->lkb_addr, nm);
			else
				mdb_printf("%s leak: [%p, %p), %ld bytes\n",
				    nm_lc, lkb->lkb_addr,
				    lkb->lkb_addr + lkb->lkb_data,
				    lkb->lkb_data);
			lk_ttl++;
			lk_bytes += lkb->lkb_data;
		}
		return;

	case TYPE_VMEM:
		/* compute the min-max size range across the duplicates */
		min = max = lkb->lkb_data;

		for (cur = lkb; cur != NULL; cur = cur->lkb_next) {
			size = cur->lkb_data;

			if (size < min)
				min = size;
			if (size > max)
				max = size;

			lk_ttl++;
			lk_bytes += size;
		}

		/* format the size (or size range) for display */
		if (min == max)
			(void) mdb_snprintf(sz, sizeof (sz), "%ld", min);
		else
			(void) mdb_snprintf(sz, sizeof (sz), "%ld-%ld",
			    min, max);

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			mdb_printf("%-16s %7d %?p %a\n", sz, lkb->lkb_dups + 1,
			    lkb->lkb_addr, caller);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("umem_oversize leak: 1 vmem_seg, "
				    "%ld bytes\n", lk_bytes);
			else
				mdb_printf("umem_oversize leak: %d vmem_segs, "
				    "%s bytes each, %ld bytes total\n",
				    lk_ttl, sz, lk_bytes);

			/* expand the representative segment via ::vmem_seg */
			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("vmem_seg", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::vmem_seg -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	case TYPE_CACHE:
		if (!lk_cache_seen) {
			/* first cache record: header (after a blank line) */
			lk_cache_seen = 1;
			if (lk_vmem_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFFER", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "buffer %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		/* only the top frame is recorded for cache-level leaks */
		caller = (lkb->lkb_depth == 0) ? 0 : lkb->lkb_stack[0];
		if (caller != 0) {
			(void) mdb_snprintf(c, sizeof (c), "%a", caller);
		} else {
			(void) mdb_snprintf(c, sizeof (c), "%s",
			    (verbose) ? "" : "?");
		}

		if (!verbose) {
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes,\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total,\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);
			mdb_printf("    %s%s%ssample addr %p\n",
			    (caller == 0) ? "" : "caller ", c,
			    (caller == 0) ? "" : ", ", lkb->lkb_addr);
		}
		return;

	case TYPE_UMEM:
		if (!lk_umem_seen) {
			/* first umem record: header (after a blank line) */
			lk_umem_seen = 1;
			if (lk_vmem_seen || lk_cache_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFCTL", "CALLER");
		}
		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "bufctl %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth, c,
			    &caller);

			mdb_printf("%0?p %7d %0?p %a\n", lkb->lkb_data,
			    lkb->lkb_dups + 1, lkb->lkb_addr, caller);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			/* expand the representative bufctl via ::bufctl */
			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("bufctl", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::bufctl -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	default:
		return;
	}
}
10780Sstevel@tonic-gate
10790Sstevel@tonic-gate void
leaky_subr_dump_end(int type)10800Sstevel@tonic-gate leaky_subr_dump_end(int type)
10810Sstevel@tonic-gate {
10820Sstevel@tonic-gate int i;
10830Sstevel@tonic-gate int width;
10840Sstevel@tonic-gate const char *leak;
10850Sstevel@tonic-gate
10860Sstevel@tonic-gate switch (type) {
10870Sstevel@tonic-gate case TYPE_VMEM:
10880Sstevel@tonic-gate if (!lk_vmem_seen)
10890Sstevel@tonic-gate return;
10900Sstevel@tonic-gate
10910Sstevel@tonic-gate width = 16;
10920Sstevel@tonic-gate leak = "oversized leak";
10930Sstevel@tonic-gate break;
10940Sstevel@tonic-gate
10950Sstevel@tonic-gate case TYPE_CACHE:
10960Sstevel@tonic-gate if (!lk_cache_seen)
10970Sstevel@tonic-gate return;
10980Sstevel@tonic-gate
10990Sstevel@tonic-gate width = sizeof (uintptr_t) * 2;
11000Sstevel@tonic-gate leak = "buffer";
11010Sstevel@tonic-gate break;
11020Sstevel@tonic-gate
11030Sstevel@tonic-gate case TYPE_UMEM:
11040Sstevel@tonic-gate if (!lk_umem_seen)
11050Sstevel@tonic-gate return;
11060Sstevel@tonic-gate
11070Sstevel@tonic-gate width = sizeof (uintptr_t) * 2;
11080Sstevel@tonic-gate leak = "buffer";
11090Sstevel@tonic-gate break;
11100Sstevel@tonic-gate
11110Sstevel@tonic-gate default:
11120Sstevel@tonic-gate return;
11130Sstevel@tonic-gate }
11140Sstevel@tonic-gate
11150Sstevel@tonic-gate for (i = 0; i < 72; i++)
11160Sstevel@tonic-gate mdb_printf("-");
11170Sstevel@tonic-gate mdb_printf("\n%*s %7ld %s%s, %ld byte%s\n",
11180Sstevel@tonic-gate width, "Total", lk_ttl, leak, (lk_ttl == 1) ? "" : "s",
11190Sstevel@tonic-gate lk_bytes, (lk_bytes == 1) ? "" : "s");
11200Sstevel@tonic-gate }
11210Sstevel@tonic-gate
11220Sstevel@tonic-gate int
leaky_subr_invoke_callback(const leak_bufctl_t * lkb,mdb_walk_cb_t cb,void * cbdata)11230Sstevel@tonic-gate leaky_subr_invoke_callback(const leak_bufctl_t *lkb, mdb_walk_cb_t cb,
11240Sstevel@tonic-gate void *cbdata)
11250Sstevel@tonic-gate {
11260Sstevel@tonic-gate vmem_seg_t vs;
11270Sstevel@tonic-gate umem_bufctl_audit_t *bcp;
11280Sstevel@tonic-gate UMEM_LOCAL_BUFCTL_AUDIT(&bcp);
11290Sstevel@tonic-gate
11300Sstevel@tonic-gate switch (lkb->lkb_type) {
11310Sstevel@tonic-gate case TYPE_VMEM:
11320Sstevel@tonic-gate if (mdb_vread(&vs, sizeof (vs), lkb->lkb_addr) == -1) {
11330Sstevel@tonic-gate mdb_warn("unable to read vmem_seg at %p",
11340Sstevel@tonic-gate lkb->lkb_addr);
11350Sstevel@tonic-gate return (WALK_NEXT);
11360Sstevel@tonic-gate }
11370Sstevel@tonic-gate return (cb(lkb->lkb_addr, &vs, cbdata));
11380Sstevel@tonic-gate
11390Sstevel@tonic-gate case TYPE_UMEM:
11400Sstevel@tonic-gate if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE,
11410Sstevel@tonic-gate lkb->lkb_addr) == -1) {
11420Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p",
11430Sstevel@tonic-gate lkb->lkb_addr);
11440Sstevel@tonic-gate return (WALK_NEXT);
11450Sstevel@tonic-gate }
11460Sstevel@tonic-gate return (cb(lkb->lkb_addr, bcp, cbdata));
11470Sstevel@tonic-gate
11480Sstevel@tonic-gate default:
11490Sstevel@tonic-gate return (cb(lkb->lkb_addr, NULL, cbdata));
11500Sstevel@tonic-gate }
11510Sstevel@tonic-gate }
1152