/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * A generic memory leak detector.  The target interface, defined in
 * <leaky_impl.h>, is implemented by the genunix and libumem dmods to fill
 * in the details of operation.
 */
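
/*
 * The target supplies the leaky_subr_*() entry points used below
 * (estimate, fill, run, add_leak, bufctl_cmp, dump_start/dump/dump_end,
 * and invoke_callback); this file provides the target-independent
 * marking, coalescing, and reporting machinery.
 */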

#include <mdb/mdb_modapi.h>

#include "leaky.h"
#include "leaky_impl.h"

#define	LK_BUFCTLHSIZE	127

/*
 * We re-use the low bit of lkm_base as the 'marked' bit.
 */
#define	LK_MARKED(b)	((uintptr_t)(b) & 1)
#define	LK_MARK(b)	((b) |= 1)
#define	LK_ADDR(b)	((uintptr_t)(b) & ~1UL)
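/*
 * For example, a base of 0x30001230 becomes 0x30001231 once LK_MARK()ed;
 * LK_MARKED() then reports true, and LK_ADDR() recovers 0x30001230.
 */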

/*
 * Possible values for lk_state.
 */
#define	LK_CLEAN	0	/* No outstanding mdb_alloc()'s */
#define	LK_SWEEPING	1	/* Potentially some outstanding mdb_alloc()'s */
#define	LK_DONE		2	/* All mdb_alloc()'s complete */
#define	LK_CLEANING	3	/* Currently cleaning prior mdb_alloc()'s */

static volatile int lk_state;

#define	LK_STATE_SIZE	10000	/* completely arbitrary */

typedef int leak_ndx_t;		/* change if >2 billion buffers are needed */

typedef struct leak_state {
	struct leak_state *lks_next;
	leak_ndx_t lks_stack[LK_STATE_SIZE];
} leak_state_t;

typedef struct leak_beans {
	int lkb_dups;
	int lkb_follows;
	int lkb_misses;
	int lkb_dismissals;
	int lkb_pushes;
	int lkb_deepest;
} leak_beans_t;

typedef struct leak_type {
	int		lt_type;
	size_t		lt_leaks;
	leak_bufctl_t	**lt_sorted;
} leak_type_t;

typedef struct leak_walk {
	int lkw_ndx;
	leak_bufctl_t *lkw_current;
	leak_bufctl_t *lkw_hash_next;
} leak_walk_t;

#define	LK_SCAN_BUFFER_SIZE	16384
static uintptr_t *lk_scan_buffer;

static leak_mtab_t *lk_mtab;
static leak_state_t *lk_free_state;
static leak_ndx_t lk_nbuffers;
static leak_beans_t lk_beans;
static leak_bufctl_t *lk_bufctl[LK_BUFCTLHSIZE];
static leak_type_t lk_types[LK_NUM_TYPES];
static size_t lk_memusage;
#ifndef _KMDB
static hrtime_t lk_begin;
static hrtime_t lk_vbegin;
#endif
static uint_t lk_verbose = FALSE;

static void
leaky_verbose(char *str, uint64_t stat)
{
	if (lk_verbose == FALSE)
		return;

	mdb_printf("findleaks: ");

	if (str == NULL) {
		mdb_printf("\n");
		return;
	}

	mdb_printf("%*s => %lld\n", 30, str, stat);
}

static void
leaky_verbose_perc(char *str, uint64_t stat, uint64_t total)
{
	uint_t perc = (stat * 100) / total;
	uint_t tenths = ((stat * 1000) / total) % 10;

	if (lk_verbose == FALSE)
		return;

	mdb_printf("findleaks: %*s => %-13lld (%2d.%1d%%)\n",
	    30, str, stat, perc, tenths);
}

static void
leaky_verbose_begin(void)
{
	/* kmdb can't tell time */
#ifndef _KMDB
	extern hrtime_t gethrvtime(void);
	lk_begin = gethrtime();
	lk_vbegin = gethrvtime();
#endif
	lk_memusage = 0;
}

static void
leaky_verbose_end(void)
{
	/* kmdb can't tell time */
#ifndef _KMDB
	extern hrtime_t gethrvtime(void);

	hrtime_t ts = gethrtime() - lk_begin;
	hrtime_t sec = ts / (hrtime_t)NANOSEC;
	hrtime_t nsec = ts % (hrtime_t)NANOSEC;

	hrtime_t vts = gethrvtime() - lk_vbegin;
	hrtime_t vsec = vts / (hrtime_t)NANOSEC;
	hrtime_t vnsec = vts % (hrtime_t)NANOSEC;
#endif

	if (lk_verbose == FALSE)
		return;

	mdb_printf("findleaks: %*s => %lu kB\n",
	    30, "peak memory usage", (lk_memusage + 1023)/1024);
#ifndef _KMDB
	mdb_printf("findleaks: %*s => %lld.%lld seconds\n",
	    30, "elapsed CPU time", vsec, (vnsec * 10)/(hrtime_t)NANOSEC);
	mdb_printf("findleaks: %*s => %lld.%lld seconds\n",
	    30, "elapsed wall time", sec, (nsec * 10)/(hrtime_t)NANOSEC);
#endif
	leaky_verbose(NULL, 0);
}

static void *
leaky_alloc(size_t sz, uint_t flags)
{
	void *buf = mdb_alloc(sz, flags);

	if (buf != NULL)
		lk_memusage += sz;

	return (buf);
}

static void *
leaky_zalloc(size_t sz, uint_t flags)
{
	void *buf = mdb_zalloc(sz, flags);

	if (buf != NULL)
		lk_memusage += sz;

	return (buf);
}

static int
leaky_mtabcmp(const void *l, const void *r)
{
	const leak_mtab_t *lhs = (const leak_mtab_t *)l;
	const leak_mtab_t *rhs = (const leak_mtab_t *)r;

	if (lhs->lkm_base < rhs->lkm_base)
		return (-1);
	if (lhs->lkm_base > rhs->lkm_base)
		return (1);

	return (0);
}

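/*
 * Binary search of the (sorted) mtab for the buffer whose [base, limit)
 * range contains addr; returns its index, or -1 if addr falls outside
 * every buffer.
 */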
static leak_ndx_t
leaky_search(uintptr_t addr)
{
	leak_ndx_t left = 0, right = lk_nbuffers - 1, guess;

	while (right >= left) {
		guess = (right + left) >> 1;

		if (addr < LK_ADDR(lk_mtab[guess].lkm_base)) {
			right = guess - 1;
			continue;
		}

		if (addr >= lk_mtab[guess].lkm_limit) {
			left = guess + 1;
			continue;
		}

		return (guess);
	}

	return (-1);
}

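/*
 * Conservatively scan the range [addr, addr + size) for values that look
 * like pointers into known buffers (entries in the sorted mtab).  Each
 * newly reached buffer is marked and then scanned in turn, either in-place
 * in the scan buffer or via an explicit stack of pending mtab indices, so
 * that deep reference chains do not require deep recursion.
 */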
void
leaky_grep(uintptr_t addr, size_t size)
{
	uintptr_t *buf, *cur, *end;
	size_t bytes, newsz, nptrs;
	leak_state_t *state = NULL, *new_state;
	uint_t state_idx;
	uintptr_t min = LK_ADDR(lk_mtab[0].lkm_base);
	uintptr_t max = lk_mtab[lk_nbuffers - 1].lkm_limit;
	int dups = 0, misses = 0, depth = 0, deepest = 0;
	int follows = 0, dismissals = 0, pushes = 0;
	leak_ndx_t mtab_ndx;
	leak_mtab_t *lmp;
	uintptr_t nbase;
	uintptr_t base;
	size_t base_size;
	const uintptr_t mask = sizeof (uintptr_t) - 1;

	if (addr == NULL || size == 0)
		return;

	state_idx = 0;

	/*
	 * Our main loop, led by the 'pop' label:
	 *	1)  read in a buffer piece by piece,
	 *	2)  mark all unmarked mtab entries reachable from it, and
	 *	    either scan them in-line or push them onto our stack of
	 *	    unfinished work.
	 *	3)  pop the top mtab entry off the stack, and loop.
	 */
pop:
	base = addr;
	base_size = size;

	/*
	 * If our address isn't pointer-aligned, we need to align it and
	 * whack the size appropriately.
	 */
	if (size < mask) {
		size = 0;
	} else if (addr & mask) {
		size -= (mask + 1) - (addr & mask);
		addr += (mask + 1) - (addr & mask);
	}
	size -= (size & mask);

	while (size > 0) {
		buf = lk_scan_buffer;
		end = &buf[LK_SCAN_BUFFER_SIZE / sizeof (uintptr_t)];

		bytes = MIN(size, LK_SCAN_BUFFER_SIZE);
		cur = end - (bytes / sizeof (uintptr_t));

		if (mdb_vread(cur, bytes, addr) == -1) {
			mdb_warn("[%p, %p): couldn't read %ld bytes at %p",
			    base, base + base_size, bytes, addr);
			break;
		}

		addr += bytes;
		size -= bytes;

		/*
		 * The buffer looks like:  ('+'s are unscanned data)
		 *
		 * -----------------------------++++++++++++++++
		 * |				|		|
		 * buf				cur		end
		 *
		 * cur scans forward.  When we encounter a new buffer that
		 * will fit behind "cur", we read it into the space just
		 * before cur, back cur up over it, and process it
		 * immediately.
		 */
		while (cur < end) {
			uintptr_t ptr = *cur++;

			if (ptr < min || ptr > max) {
				dismissals++;
				continue;
			}

			if ((mtab_ndx = leaky_search(ptr)) == -1) {
				misses++;
				continue;
			}

			lmp = &lk_mtab[mtab_ndx];
			if (LK_MARKED(lmp->lkm_base)) {
				dups++;			/* already seen */
				continue;
			}

			/*
			 * Found an unmarked buffer.  Mark it, then either
			 * read it in, or add it to the stack of pending work.
			 */
			follows++;
			LK_MARK(lmp->lkm_base);

			nbase = LK_ADDR(lmp->lkm_base);
			newsz = lmp->lkm_limit - nbase;

			nptrs = newsz / sizeof (uintptr_t);
			newsz = nptrs * sizeof (uintptr_t);

			if ((nbase & mask) == 0 && nptrs <= (cur - buf) &&
			    mdb_vread(cur - nptrs, newsz, nbase) != -1) {
				cur -= nptrs;
				continue;
			}

			/*
			 * couldn't process it in-place -- add it to the
			 * stack.
			 */
			if (state == NULL || state_idx == LK_STATE_SIZE) {
				if ((new_state = lk_free_state) != NULL)
					lk_free_state = new_state->lks_next;
				else
					new_state = leaky_zalloc(
					    sizeof (*state), UM_SLEEP | UM_GC);

				new_state->lks_next = state;
				state = new_state;
				state_idx = 0;
			}

			pushes++;
			state->lks_stack[state_idx++] = mtab_ndx;
			if (++depth > deepest)
				deepest = depth;
		}
	}

	/*
	 * Retrieve the next mtab index, extract its info, and loop around
	 * to process it.
	 */
	if (state_idx == 0 && state != NULL) {
		new_state = state->lks_next;

		state->lks_next = lk_free_state;
		lk_free_state = state;

		state = new_state;
		state_idx = LK_STATE_SIZE;
	}

	if (depth > 0) {
		mtab_ndx = state->lks_stack[--state_idx];

		addr = LK_ADDR(lk_mtab[mtab_ndx].lkm_base);
		size = lk_mtab[mtab_ndx].lkm_limit - addr;
		depth--;

		goto pop;
	}

	/*
	 * update the beans
	 */
	lk_beans.lkb_dups += dups;
	lk_beans.lkb_dismissals += dismissals;
	lk_beans.lkb_misses += misses;
	lk_beans.lkb_follows += follows;
	lk_beans.lkb_pushes += pushes;

	if (deepest > lk_beans.lkb_deepest)
		lk_beans.lkb_deepest = deepest;
}

static void
leaky_do_grep_ptr(uintptr_t loc, int process)
{
	leak_ndx_t ndx;
	leak_mtab_t *lkmp;
	size_t sz;

	if (loc < LK_ADDR(lk_mtab[0].lkm_base) ||
	    loc > lk_mtab[lk_nbuffers - 1].lkm_limit) {
		lk_beans.lkb_dismissals++;
		return;
	}
	if ((ndx = leaky_search(loc)) == -1) {
		lk_beans.lkb_misses++;
		return;
	}

	lkmp = &lk_mtab[ndx];
	sz = lkmp->lkm_limit - lkmp->lkm_base;

	if (LK_MARKED(lkmp->lkm_base)) {
		lk_beans.lkb_dups++;
	} else {
		LK_MARK(lkmp->lkm_base);
		lk_beans.lkb_follows++;
		if (process)
			leaky_grep(lkmp->lkm_base, sz);
	}
}

void
leaky_grep_ptr(uintptr_t loc)
{
	leaky_do_grep_ptr(loc, 1);
}

void
leaky_mark_ptr(uintptr_t loc)
{
	leaky_do_grep_ptr(loc, 0);
}

/*
 * This may be used to manually process a marked buffer.
 */
int
leaky_lookup_marked(uintptr_t loc, uintptr_t *addr_out, size_t *size_out)
{
	leak_ndx_t ndx;
	leak_mtab_t *lkmp;

	if ((ndx = leaky_search(loc)) == -1)
		return (0);

	lkmp = &lk_mtab[ndx];
	*addr_out = LK_ADDR(lkmp->lkm_base);
	*size_out = lkmp->lkm_limit - LK_ADDR(lkmp->lkm_base);
	return (1);
}

void
leaky_add_leak(int type, uintptr_t addr, uintptr_t bufaddr, hrtime_t timestamp,
    leak_pc_t *stack, uint_t depth, uintptr_t cid, uintptr_t data)
{
	leak_bufctl_t *nlkb, *lkb;
	uintptr_t total = 0;
	size_t ndx;
	int i;

	if (type < 0 || type >= LK_NUM_TYPES || depth != (uint8_t)depth) {
		mdb_warn("invalid arguments to leaky_add_leak()\n");
		return;
	}

	nlkb = leaky_zalloc(LEAK_BUFCTL_SIZE(depth), UM_SLEEP);
	nlkb->lkb_type = type;
	nlkb->lkb_addr = addr;
	nlkb->lkb_bufaddr = bufaddr;
	nlkb->lkb_cid = cid;
	nlkb->lkb_data = data;
	nlkb->lkb_depth = depth;
	nlkb->lkb_timestamp = timestamp;

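	/*
	 * Hash on the leak's type and full stack trace, so that leaks with
	 * identical stacks land on the same chain and can be coalesced.
	 */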
	total = type;
	for (i = 0; i < depth; i++) {
		total += stack[i];
		nlkb->lkb_stack[i] = stack[i];
	}

	ndx = total % LK_BUFCTLHSIZE;

	if ((lkb = lk_bufctl[ndx]) == NULL) {
		lk_types[type].lt_leaks++;
		lk_bufctl[ndx] = nlkb;
		return;
	}

	for (;;) {
		if (lkb->lkb_type != type || lkb->lkb_depth != depth ||
		    lkb->lkb_cid != cid)
			goto no_match;

		for (i = 0; i < depth; i++)
			if (lkb->lkb_stack[i] != stack[i])
				goto no_match;

		/*
		 * If we're here, we've found a matching stack; link it in.
		 * Note that the volatile cast ensures that these stores
		 * occur in program order, so that if we take an interrupt,
		 * the data structure is still in a sane enough state to be
		 * thrown away later, in leaky_cleanup().
		 */
		((volatile leak_bufctl_t *)nlkb)->lkb_next = lkb->lkb_next;
		((volatile leak_bufctl_t *)lkb)->lkb_next = nlkb;
		lkb->lkb_dups++;

		/*
		 * If we're older, swap places so that we are the
		 * representative leak.
		 */
		if (timestamp < lkb->lkb_timestamp) {
			nlkb->lkb_addr = lkb->lkb_addr;
			nlkb->lkb_bufaddr = lkb->lkb_bufaddr;
			nlkb->lkb_data = lkb->lkb_data;
			nlkb->lkb_timestamp = lkb->lkb_timestamp;

			lkb->lkb_addr = addr;
			lkb->lkb_bufaddr = bufaddr;
			lkb->lkb_data = data;
			lkb->lkb_timestamp = timestamp;
		}
		break;

no_match:
		if (lkb->lkb_hash_next == NULL) {
			lkb->lkb_hash_next = nlkb;
			lk_types[type].lt_leaks++;
			break;
		}
		lkb = lkb->lkb_hash_next;
	}
}

int
leaky_ctlcmp(const void *l, const void *r)
{
	const leak_bufctl_t *lhs = *((const leak_bufctl_t **)l);
	const leak_bufctl_t *rhs = *((const leak_bufctl_t **)r);

	return (leaky_subr_bufctl_cmp(lhs, rhs));
}

void
leaky_sort(void)
{
	int type, i, j;
	leak_bufctl_t *lkb;
	leak_type_t *ltp;

	for (type = 0; type < LK_NUM_TYPES; type++) {
		ltp = &lk_types[type];

		if (ltp->lt_leaks == 0)
			continue;

		ltp->lt_sorted = leaky_alloc(ltp->lt_leaks *
		    sizeof (leak_bufctl_t *), UM_SLEEP);

		j = 0;
		for (i = 0; i < LK_BUFCTLHSIZE; i++) {
			for (lkb = lk_bufctl[i]; lkb != NULL;
			    lkb = lkb->lkb_hash_next) {
				if (lkb->lkb_type == type)
					ltp->lt_sorted[j++] = lkb;
			}
		}
		if (j != ltp->lt_leaks)
			mdb_warn("expected %d leaks, got %d\n", ltp->lt_leaks,
			    j);

		qsort(ltp->lt_sorted, ltp->lt_leaks, sizeof (leak_bufctl_t *),
		    leaky_ctlcmp);
	}
}

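/*
 * Free everything left over from a prior ::findleaks run.  Unless force
 * is set, this only does real work when lk_state is LK_SWEEPING (i.e., a
 * previous run never completed); the results of a completed run are kept
 * cached for subsequent invocations.
 */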
void
leaky_cleanup(int force)
{
	int i;
	leak_bufctl_t *lkb, *l, *next;

	/*
	 * State structures are allocated UM_GC, so we just need to nuke
	 * the freelist pointer.
	 */
	lk_free_state = NULL;

	if (lk_state == LK_CLEANING) {
		mdb_warn("interrupted during ::findleaks cleanup; some mdb "
		    "memory will be leaked\n");

		for (i = 0; i < LK_BUFCTLHSIZE; i++)
			lk_bufctl[i] = NULL;

		for (i = 0; i < LK_NUM_TYPES; i++) {
			lk_types[i].lt_leaks = 0;
			lk_types[i].lt_sorted = NULL;
		}

		bzero(&lk_beans, sizeof (lk_beans));
		lk_state = LK_CLEAN;
		return;
	}

	if (!force && lk_state != LK_SWEEPING)
		return;

	lk_state = LK_CLEANING;

	for (i = 0; i < LK_NUM_TYPES; i++) {
		if (lk_types[i].lt_sorted != NULL) {
			mdb_free(lk_types[i].lt_sorted,
			    lk_types[i].lt_leaks * sizeof (leak_bufctl_t *));
			lk_types[i].lt_sorted = NULL;
		}
		lk_types[i].lt_leaks = 0;
	}

	for (i = 0; i < LK_BUFCTLHSIZE; i++) {
		for (lkb = lk_bufctl[i]; lkb != NULL; lkb = next) {
			for (l = lkb->lkb_next; l != NULL; l = next) {
				next = l->lkb_next;
				mdb_free(l, LEAK_BUFCTL_SIZE(l->lkb_depth));
			}
			next = lkb->lkb_hash_next;
			mdb_free(lkb, LEAK_BUFCTL_SIZE(lkb->lkb_depth));
		}
		lk_bufctl[i] = NULL;
	}

	bzero(&lk_beans, sizeof (lk_beans));
	lk_state = LK_CLEAN;
}

int
leaky_filter(const leak_pc_t *stack, int depth, uintptr_t filter)
{
	int i;
	GElf_Sym sym;
	char c;

	if (filter == NULL)
		return (1);

	for (i = 0; i < depth; i++) {
		if (stack[i] == filter)
			return (1);

		if (mdb_lookup_by_addr(stack[i], MDB_SYM_FUZZY,
		    &c, sizeof (c), &sym) == -1)
			continue;

		if ((uintptr_t)sym.st_value == filter)
			return (1);
	}

	return (0);
}

void
leaky_dump(uintptr_t filter, uint_t dump_verbose)
{
	int i;
	size_t leaks;
	leak_bufctl_t **sorted;
	leak_bufctl_t *lkb;
	int seen = 0;

	for (i = 0; i < LK_NUM_TYPES; i++) {
		leaks = lk_types[i].lt_leaks;
		sorted = lk_types[i].lt_sorted;

		leaky_subr_dump_start(i);
		while (leaks-- > 0) {
			lkb = *sorted++;

			if (!leaky_filter(lkb->lkb_stack, lkb->lkb_depth,
			    filter))
				continue;

			seen = 1;
			leaky_subr_dump(lkb, 0);
		}
		leaky_subr_dump_end(i);
	}

	if (!seen) {
		if (filter != NULL)
			mdb_printf(
			    "findleaks: no memory leaks matching %a found\n",
			    filter);
		else
			mdb_printf(
			    "findleaks: no memory leaks detected\n");
	}

	if (!dump_verbose || !seen)
		return;

	mdb_printf("\n");

	for (i = 0; i < LK_NUM_TYPES; i++) {
		leaks = lk_types[i].lt_leaks;
		sorted = lk_types[i].lt_sorted;

		while (leaks-- > 0) {
			lkb = *sorted++;

			if (!leaky_filter(lkb->lkb_stack, lkb->lkb_depth,
			    filter))
				continue;

			leaky_subr_dump(lkb, 1);
		}
	}
}

static const char *const findleaks_desc =
	"Does a conservative garbage collection of the heap in order to find\n"
	"potentially leaked buffers.  Similar leaks are coalesced by stack\n"
	"trace, with the oldest leak picked as representative.  The leak\n"
	"table is cached between invocations.\n"
	"\n"
	"addr, if provided, should be a function or PC location.  Reported\n"
	"leaks will then be limited to those with that function or PC in\n"
	"their stack trace.\n"
	"\n"
	"The 'leak' and 'leakbuf' walkers can be used to retrieve coalesced\n"
	"leaks.\n";

static const char *const findleaks_args =
	"  -d    detail each representative leak (long)\n"
	"  -f    throw away cached state, and do a full run\n"
	"  -v    report verbose information about the findleaks run\n";
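
/*
 * Typical usage, as a sketch: '::findleaks -v' runs the scan and prints
 * summary statistics, while 'myfunc::findleaks -d' (myfunc being a
 * hypothetical function used as the filter address) would limit the
 * report to leaks with myfunc in their stack trace and detail each
 * representative leak.
 */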

void
findleaks_help(void)
{
	mdb_printf("%s\n", findleaks_desc);
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s", findleaks_args);
}

#define	LK_REPORT_BEAN(x) leaky_verbose_perc(#x, lk_beans.lkb_##x, total);

/*ARGSUSED*/
int
findleaks(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	size_t est = 0;
	leak_ndx_t i;
	leak_mtab_t *lmp;
	ssize_t total;
	uintptr_t filter = NULL;
	uint_t dump = 0;
	uint_t force = 0;
	uint_t verbose = 0;
	int ret;

	if (flags & DCMD_ADDRSPEC)
		filter = addr;

	if (mdb_getopts(argc, argv,
	    'd', MDB_OPT_SETBITS, TRUE, &dump,
	    'f', MDB_OPT_SETBITS, TRUE, &force,
	    'v', MDB_OPT_SETBITS, TRUE, &verbose, NULL) != argc)
		return (DCMD_USAGE);

	if (verbose || force)
		lk_verbose = verbose;

	/*
	 * Clean any previous ::findleaks.
	 */
	leaky_cleanup(force);

	if (lk_state == LK_DONE) {
		if (lk_verbose)
			mdb_printf("findleaks: using cached results "
			    "(-f will force a full run)\n");
		goto dump;
	}

	leaky_verbose_begin();

	if ((ret = leaky_subr_estimate(&est)) != DCMD_OK)
		return (ret);

	leaky_verbose("maximum buffers", est);

	/*
	 * Now we have an upper bound on the number of buffers.  Allocate
	 * our mtab array.
	 */
	lk_mtab = leaky_zalloc(est * sizeof (leak_mtab_t), UM_SLEEP | UM_GC);
	lmp = lk_mtab;

	if ((ret = leaky_subr_fill(&lmp)) != DCMD_OK)
		return (ret);

	lk_nbuffers = lmp - lk_mtab;

	qsort(lk_mtab, lk_nbuffers, sizeof (leak_mtab_t), leaky_mtabcmp);

	/*
	 * validate the mtab table now that it is sorted
	 */
	for (i = 0; i < lk_nbuffers; i++) {
		if (lk_mtab[i].lkm_base >= lk_mtab[i].lkm_limit) {
			mdb_warn("[%p, %p): invalid mtab\n",
			    lk_mtab[i].lkm_base, lk_mtab[i].lkm_limit);
			return (DCMD_ERR);
		}

		if (i < lk_nbuffers - 1 &&
		    lk_mtab[i].lkm_limit > lk_mtab[i + 1].lkm_base) {
			mdb_warn("[%p, %p) and [%p, %p): overlapping mtabs\n",
			    lk_mtab[i].lkm_base, lk_mtab[i].lkm_limit,
			    lk_mtab[i + 1].lkm_base, lk_mtab[i + 1].lkm_limit);
			return (DCMD_ERR);
		}
	}

	leaky_verbose("actual buffers", lk_nbuffers);

	lk_scan_buffer = leaky_zalloc(LK_SCAN_BUFFER_SIZE, UM_SLEEP | UM_GC);

	if ((ret = leaky_subr_run()) != DCMD_OK)
		return (ret);

	lk_state = LK_SWEEPING;

	for (i = 0; i < lk_nbuffers; i++) {
		if (LK_MARKED(lk_mtab[i].lkm_base))
			continue;
		leaky_subr_add_leak(&lk_mtab[i]);
	}

	total = lk_beans.lkb_dismissals + lk_beans.lkb_misses +
	    lk_beans.lkb_dups + lk_beans.lkb_follows;

	leaky_verbose(NULL, 0);
	leaky_verbose("potential pointers", total);
	LK_REPORT_BEAN(dismissals);
	LK_REPORT_BEAN(misses);
	LK_REPORT_BEAN(dups);
	LK_REPORT_BEAN(follows);

	leaky_verbose(NULL, 0);
	leaky_verbose_end();

	leaky_sort();
	lk_state = LK_DONE;
dump:
	leaky_dump(filter, dump);

	return (DCMD_OK);
}

int
leaky_walk_init(mdb_walk_state_t *wsp)
{
	leak_walk_t *lw;
	leak_bufctl_t *lkb, *cur;

	uintptr_t addr;
	int i;

	if (lk_state != LK_DONE) {
		mdb_warn("::findleaks must be run %sbefore leaks can be"
		    " walked\n", lk_state != LK_CLEAN ? "to completion " : "");
		return (WALK_ERR);
	}

	if (wsp->walk_addr == NULL) {
		lkb = NULL;
		goto found;
	}

	addr = wsp->walk_addr;

	/*
	 * Search the representative leaks first, since that's what we
	 * report in the table.  If that fails, search everything.
	 *
	 * Note that we goto found with lkb as the head of the desired
	 * dup list.
	 */
	for (i = 0; i < LK_BUFCTLHSIZE; i++) {
		for (lkb = lk_bufctl[i]; lkb != NULL; lkb = lkb->lkb_hash_next)
			if (lkb->lkb_addr == addr)
				goto found;
	}

	for (i = 0; i < LK_BUFCTLHSIZE; i++) {
		for (lkb = lk_bufctl[i]; lkb != NULL; lkb = lkb->lkb_hash_next)
			for (cur = lkb; cur != NULL; cur = cur->lkb_next)
				if (cur->lkb_addr == addr)
					goto found;
	}

	mdb_warn("%p is not a leaked ctl address\n", addr);
	return (WALK_ERR);

found:
	wsp->walk_data = lw = mdb_zalloc(sizeof (*lw), UM_SLEEP);
	lw->lkw_ndx = 0;
	lw->lkw_current = lkb;
	lw->lkw_hash_next = NULL;

	return (WALK_NEXT);
}

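/*
 * Common iterator behind the 'leak' and 'leakbuf' walk steps: it yields
 * each leak_bufctl_t in turn, following the dup chain (lkb_next) of the
 * current bufctl before moving on to the next hash chain entry or hash
 * bucket.  If the walk was seeded with an address, only the dup list
 * selected by leaky_walk_init() is visited.
 */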
leak_bufctl_t *
leaky_walk_step_common(mdb_walk_state_t *wsp)
{
	leak_walk_t *lw = wsp->walk_data;
	leak_bufctl_t *lk;

	if ((lk = lw->lkw_current) == NULL) {
		if ((lk = lw->lkw_hash_next) == NULL) {
			if (wsp->walk_addr)
				return (NULL);

			while (lk == NULL && lw->lkw_ndx < LK_BUFCTLHSIZE)
				lk = lk_bufctl[lw->lkw_ndx++];

			if (lk == NULL)
				return (NULL);
		}

		lw->lkw_hash_next = lk->lkb_hash_next;
	}

	lw->lkw_current = lk->lkb_next;
	return (lk);
}

int
leaky_walk_step(mdb_walk_state_t *wsp)
{
	leak_bufctl_t *lk;

	if ((lk = leaky_walk_step_common(wsp)) == NULL)
		return (WALK_DONE);

	return (leaky_subr_invoke_callback(lk, wsp->walk_callback,
	    wsp->walk_cbdata));
}

void
leaky_walk_fini(mdb_walk_state_t *wsp)
{
	leak_walk_t *lw = wsp->walk_data;

	mdb_free(lw, sizeof (leak_walk_t));
}

int
leaky_buf_walk_step(mdb_walk_state_t *wsp)
{
	leak_bufctl_t *lk;

	if ((lk = leaky_walk_step_common(wsp)) == NULL)
		return (WALK_DONE);

	return (wsp->walk_callback(lk->lkb_bufaddr, NULL, wsp->walk_cbdata));
}