xref: /onnv-gate/usr/src/cmd/mdb/common/modules/libumem/umem.c (revision 10610:218c21980cfd)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51528Sjwadams  * Common Development and Distribution License (the "License").
61528Sjwadams  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
2210388SJonathan.Adams@Sun.COM  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate #include "umem.h"
270Sstevel@tonic-gate 
280Sstevel@tonic-gate #include <sys/vmem_impl_user.h>
290Sstevel@tonic-gate #include <umem_impl.h>
300Sstevel@tonic-gate 
310Sstevel@tonic-gate #include <alloca.h>
321528Sjwadams #include <limits.h>
33*10610SJonathan.Adams@Sun.COM #include <mdb/mdb_whatis.h>
340Sstevel@tonic-gate 
350Sstevel@tonic-gate #include "misc.h"
361528Sjwadams #include "leaky.h"
374798Stomee #include "dist.h"
380Sstevel@tonic-gate 
390Sstevel@tonic-gate #include "umem_pagesize.h"
400Sstevel@tonic-gate 
410Sstevel@tonic-gate #define	UM_ALLOCATED		0x1
420Sstevel@tonic-gate #define	UM_FREE			0x2
430Sstevel@tonic-gate #define	UM_BUFCTL		0x4
440Sstevel@tonic-gate #define	UM_HASH			0x8
450Sstevel@tonic-gate 
461528Sjwadams int umem_ready;
471528Sjwadams 
481528Sjwadams static int umem_stack_depth_warned;
491528Sjwadams static uint32_t umem_max_ncpus;
500Sstevel@tonic-gate uint32_t umem_stack_depth;
511528Sjwadams 
520Sstevel@tonic-gate size_t umem_pagesize;
530Sstevel@tonic-gate 
/*
 * Read the target's variable named `var' into our identically-named local
 * copy.  Evaluates to nonzero (after warning) when the read fails.
 */
#define	UMEM_READVAR(var)				\
	(umem_readvar(&(var), #var) == -1 &&		\
	    (mdb_warn("failed to read "#var), 1))
570Sstevel@tonic-gate 
580Sstevel@tonic-gate int
umem_update_variables(void)591528Sjwadams umem_update_variables(void)
600Sstevel@tonic-gate {
610Sstevel@tonic-gate 	size_t pagesize;
620Sstevel@tonic-gate 
630Sstevel@tonic-gate 	/*
641528Sjwadams 	 * Figure out which type of umem is being used; if it's not there
651528Sjwadams 	 * yet, succeed quietly.
660Sstevel@tonic-gate 	 */
671528Sjwadams 	if (umem_set_standalone() == -1) {
681528Sjwadams 		umem_ready = 0;
691528Sjwadams 		return (0);		/* umem not there yet */
701528Sjwadams 	}
711528Sjwadams 
721528Sjwadams 	/*
731528Sjwadams 	 * Solaris 9 used a different name for umem_max_ncpus.  It's
741528Sjwadams 	 * cheap backwards compatibility to check for both names.
751528Sjwadams 	 */
761528Sjwadams 	if (umem_readvar(&umem_max_ncpus, "umem_max_ncpus") == -1 &&
771528Sjwadams 	    umem_readvar(&umem_max_ncpus, "max_ncpus") == -1) {
781528Sjwadams 		mdb_warn("unable to read umem_max_ncpus or max_ncpus");
791528Sjwadams 		return (-1);
801528Sjwadams 	}
811528Sjwadams 	if (UMEM_READVAR(umem_ready))
820Sstevel@tonic-gate 		return (-1);
830Sstevel@tonic-gate 	if (UMEM_READVAR(umem_stack_depth))
840Sstevel@tonic-gate 		return (-1);
850Sstevel@tonic-gate 	if (UMEM_READVAR(pagesize))
860Sstevel@tonic-gate 		return (-1);
870Sstevel@tonic-gate 
880Sstevel@tonic-gate 	if (umem_stack_depth > UMEM_MAX_STACK_DEPTH) {
891528Sjwadams 		if (umem_stack_depth_warned == 0) {
901528Sjwadams 			mdb_warn("umem_stack_depth corrupted (%d > %d)\n",
911528Sjwadams 			    umem_stack_depth, UMEM_MAX_STACK_DEPTH);
921528Sjwadams 			umem_stack_depth_warned = 1;
931528Sjwadams 		}
940Sstevel@tonic-gate 		umem_stack_depth = 0;
950Sstevel@tonic-gate 	}
961528Sjwadams 
971528Sjwadams 	umem_pagesize = pagesize;
981528Sjwadams 
990Sstevel@tonic-gate 	return (0);
1000Sstevel@tonic-gate }
1010Sstevel@tonic-gate 
1020Sstevel@tonic-gate /*ARGSUSED*/
1031528Sjwadams static int
umem_init_walkers(uintptr_t addr,const umem_cache_t * c,void * ignored)1040Sstevel@tonic-gate umem_init_walkers(uintptr_t addr, const umem_cache_t *c, void *ignored)
1050Sstevel@tonic-gate {
1060Sstevel@tonic-gate 	mdb_walker_t w;
1070Sstevel@tonic-gate 	char descr[64];
1080Sstevel@tonic-gate 
1090Sstevel@tonic-gate 	(void) mdb_snprintf(descr, sizeof (descr),
1100Sstevel@tonic-gate 	    "walk the %s cache", c->cache_name);
1110Sstevel@tonic-gate 
1120Sstevel@tonic-gate 	w.walk_name = c->cache_name;
1130Sstevel@tonic-gate 	w.walk_descr = descr;
1140Sstevel@tonic-gate 	w.walk_init = umem_walk_init;
1150Sstevel@tonic-gate 	w.walk_step = umem_walk_step;
1160Sstevel@tonic-gate 	w.walk_fini = umem_walk_fini;
1170Sstevel@tonic-gate 	w.walk_init_arg = (void *)addr;
1180Sstevel@tonic-gate 
1190Sstevel@tonic-gate 	if (mdb_add_walker(&w) == -1)
1200Sstevel@tonic-gate 		mdb_warn("failed to add %s walker", c->cache_name);
1210Sstevel@tonic-gate 
1220Sstevel@tonic-gate 	return (WALK_NEXT);
1230Sstevel@tonic-gate }
1240Sstevel@tonic-gate 
1251528Sjwadams /*ARGSUSED*/
1261528Sjwadams static void
umem_statechange_cb(void * arg)1271528Sjwadams umem_statechange_cb(void *arg)
1281528Sjwadams {
1291528Sjwadams 	static int been_ready = 0;
1301528Sjwadams 
1311528Sjwadams #ifndef _KMDB
1321528Sjwadams 	leaky_cleanup(1);	/* state changes invalidate leaky state */
1331528Sjwadams #endif
1341528Sjwadams 
1351528Sjwadams 	if (umem_update_variables() == -1)
1361528Sjwadams 		return;
1371528Sjwadams 
1381528Sjwadams 	if (been_ready)
1391528Sjwadams 		return;
1401528Sjwadams 
1411528Sjwadams 	if (umem_ready != UMEM_READY)
1421528Sjwadams 		return;
1431528Sjwadams 
1441528Sjwadams 	been_ready = 1;
1451528Sjwadams 	(void) mdb_walk("umem_cache", (mdb_walk_cb_t)umem_init_walkers, NULL);
1461528Sjwadams }
1471528Sjwadams 
1481528Sjwadams int
umem_abort_messages(void)1490Sstevel@tonic-gate umem_abort_messages(void)
1500Sstevel@tonic-gate {
1510Sstevel@tonic-gate 	char *umem_error_buffer;
1520Sstevel@tonic-gate 	uint_t umem_error_begin;
1530Sstevel@tonic-gate 	GElf_Sym sym;
1540Sstevel@tonic-gate 	size_t bufsize;
1550Sstevel@tonic-gate 
1560Sstevel@tonic-gate 	if (UMEM_READVAR(umem_error_begin))
1570Sstevel@tonic-gate 		return (DCMD_ERR);
1580Sstevel@tonic-gate 
1590Sstevel@tonic-gate 	if (umem_lookup_by_name("umem_error_buffer", &sym) == -1) {
1600Sstevel@tonic-gate 		mdb_warn("unable to look up umem_error_buffer");
1610Sstevel@tonic-gate 		return (DCMD_ERR);
1620Sstevel@tonic-gate 	}
1630Sstevel@tonic-gate 
1640Sstevel@tonic-gate 	bufsize = (size_t)sym.st_size;
1650Sstevel@tonic-gate 
1660Sstevel@tonic-gate 	umem_error_buffer = mdb_alloc(bufsize+1, UM_SLEEP | UM_GC);
1670Sstevel@tonic-gate 
1680Sstevel@tonic-gate 	if (mdb_vread(umem_error_buffer, bufsize, (uintptr_t)sym.st_value)
1690Sstevel@tonic-gate 	    != bufsize) {
1700Sstevel@tonic-gate 		mdb_warn("unable to read umem_error_buffer");
1710Sstevel@tonic-gate 		return (DCMD_ERR);
1720Sstevel@tonic-gate 	}
1730Sstevel@tonic-gate 	/* put a zero after the end of the buffer to simplify printing */
1740Sstevel@tonic-gate 	umem_error_buffer[bufsize] = 0;
1750Sstevel@tonic-gate 
1760Sstevel@tonic-gate 	if ((umem_error_begin % bufsize) == 0)
1770Sstevel@tonic-gate 		mdb_printf("%s\n", umem_error_buffer);
1780Sstevel@tonic-gate 	else {
1790Sstevel@tonic-gate 		umem_error_buffer[(umem_error_begin % bufsize) - 1] = 0;
1800Sstevel@tonic-gate 		mdb_printf("%s%s\n",
1810Sstevel@tonic-gate 		    &umem_error_buffer[umem_error_begin % bufsize],
1820Sstevel@tonic-gate 		    umem_error_buffer);
1830Sstevel@tonic-gate 	}
1840Sstevel@tonic-gate 
1850Sstevel@tonic-gate 	return (DCMD_OK);
1860Sstevel@tonic-gate }
1870Sstevel@tonic-gate 
1880Sstevel@tonic-gate static void
umem_log_status(const char * name,umem_log_header_t * val)1890Sstevel@tonic-gate umem_log_status(const char *name, umem_log_header_t *val)
1900Sstevel@tonic-gate {
1910Sstevel@tonic-gate 	umem_log_header_t my_lh;
1920Sstevel@tonic-gate 	uintptr_t pos = (uintptr_t)val;
1930Sstevel@tonic-gate 	size_t size;
1940Sstevel@tonic-gate 
1950Sstevel@tonic-gate 	if (pos == NULL)
1960Sstevel@tonic-gate 		return;
1970Sstevel@tonic-gate 
1980Sstevel@tonic-gate 	if (mdb_vread(&my_lh, sizeof (umem_log_header_t), pos) == -1) {
1990Sstevel@tonic-gate 		mdb_warn("\nunable to read umem_%s_log pointer %p",
2000Sstevel@tonic-gate 		    name, pos);
2010Sstevel@tonic-gate 		return;
2020Sstevel@tonic-gate 	}
2030Sstevel@tonic-gate 
2040Sstevel@tonic-gate 	size = my_lh.lh_chunksize * my_lh.lh_nchunks;
2050Sstevel@tonic-gate 
2060Sstevel@tonic-gate 	if (size % (1024 * 1024) == 0)
2070Sstevel@tonic-gate 		mdb_printf("%s=%dm ", name, size / (1024 * 1024));
2080Sstevel@tonic-gate 	else if (size % 1024 == 0)
2090Sstevel@tonic-gate 		mdb_printf("%s=%dk ", name, size / 1024);
2100Sstevel@tonic-gate 	else
2110Sstevel@tonic-gate 		mdb_printf("%s=%d ", name, size);
2120Sstevel@tonic-gate }
2130Sstevel@tonic-gate 
2140Sstevel@tonic-gate typedef struct umem_debug_flags {
2150Sstevel@tonic-gate 	const char	*udf_name;
2160Sstevel@tonic-gate 	uint_t		udf_flags;
2170Sstevel@tonic-gate 	uint_t		udf_clear;	/* if 0, uses udf_flags */
2180Sstevel@tonic-gate } umem_debug_flags_t;
2190Sstevel@tonic-gate 
2200Sstevel@tonic-gate umem_debug_flags_t umem_status_flags[] = {
2210Sstevel@tonic-gate 	{ "random",	UMF_RANDOMIZE,	UMF_RANDOM },
2220Sstevel@tonic-gate 	{ "default",	UMF_AUDIT | UMF_DEADBEEF | UMF_REDZONE | UMF_CONTENTS },
2230Sstevel@tonic-gate 	{ "audit",	UMF_AUDIT },
2240Sstevel@tonic-gate 	{ "guards",	UMF_DEADBEEF | UMF_REDZONE },
2250Sstevel@tonic-gate 	{ "nosignal",	UMF_CHECKSIGNAL },
2260Sstevel@tonic-gate 	{ "firewall",	UMF_FIREWALL },
2270Sstevel@tonic-gate 	{ "lite",	UMF_LITE },
2280Sstevel@tonic-gate 	{ NULL }
2290Sstevel@tonic-gate };
2300Sstevel@tonic-gate 
2310Sstevel@tonic-gate /*ARGSUSED*/
2320Sstevel@tonic-gate int
umem_status(uintptr_t addr,uint_t flags,int ac,const mdb_arg_t * argv)2330Sstevel@tonic-gate umem_status(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv)
2340Sstevel@tonic-gate {
2350Sstevel@tonic-gate 	int umem_logging;
2360Sstevel@tonic-gate 
2370Sstevel@tonic-gate 	umem_log_header_t *umem_transaction_log;
2380Sstevel@tonic-gate 	umem_log_header_t *umem_content_log;
2390Sstevel@tonic-gate 	umem_log_header_t *umem_failure_log;
2400Sstevel@tonic-gate 	umem_log_header_t *umem_slab_log;
2410Sstevel@tonic-gate 
2420Sstevel@tonic-gate 	mdb_printf("Status:\t\t%s\n",
2430Sstevel@tonic-gate 	    umem_ready == UMEM_READY_INIT_FAILED ? "initialization failed" :
2440Sstevel@tonic-gate 	    umem_ready == UMEM_READY_STARTUP ? "uninitialized" :
2450Sstevel@tonic-gate 	    umem_ready == UMEM_READY_INITING ? "initialization in process" :
2460Sstevel@tonic-gate 	    umem_ready == UMEM_READY ? "ready and active" :
2471528Sjwadams 	    umem_ready == 0 ? "not loaded into address space" :
2480Sstevel@tonic-gate 	    "unknown (umem_ready invalid)");
2490Sstevel@tonic-gate 
2501528Sjwadams 	if (umem_ready == 0)
2511528Sjwadams 		return (DCMD_OK);
2521528Sjwadams 
2530Sstevel@tonic-gate 	mdb_printf("Concurrency:\t%d\n", umem_max_ncpus);
2540Sstevel@tonic-gate 
2550Sstevel@tonic-gate 	if (UMEM_READVAR(umem_logging))
2560Sstevel@tonic-gate 		goto err;
2570Sstevel@tonic-gate 	if (UMEM_READVAR(umem_transaction_log))
2580Sstevel@tonic-gate 		goto err;
2590Sstevel@tonic-gate 	if (UMEM_READVAR(umem_content_log))
2600Sstevel@tonic-gate 		goto err;
2610Sstevel@tonic-gate 	if (UMEM_READVAR(umem_failure_log))
2620Sstevel@tonic-gate 		goto err;
2630Sstevel@tonic-gate 	if (UMEM_READVAR(umem_slab_log))
2640Sstevel@tonic-gate 		goto err;
2650Sstevel@tonic-gate 
2660Sstevel@tonic-gate 	mdb_printf("Logs:\t\t");
2670Sstevel@tonic-gate 	umem_log_status("transaction", umem_transaction_log);
2680Sstevel@tonic-gate 	umem_log_status("content", umem_content_log);
2690Sstevel@tonic-gate 	umem_log_status("fail", umem_failure_log);
2700Sstevel@tonic-gate 	umem_log_status("slab", umem_slab_log);
2710Sstevel@tonic-gate 	if (!umem_logging)
2720Sstevel@tonic-gate 		mdb_printf("(inactive)");
2730Sstevel@tonic-gate 	mdb_printf("\n");
2740Sstevel@tonic-gate 
2750Sstevel@tonic-gate 	mdb_printf("Message buffer:\n");
2760Sstevel@tonic-gate 	return (umem_abort_messages());
2770Sstevel@tonic-gate 
2780Sstevel@tonic-gate err:
2790Sstevel@tonic-gate 	mdb_printf("Message buffer:\n");
2800Sstevel@tonic-gate 	(void) umem_abort_messages();
2810Sstevel@tonic-gate 	return (DCMD_ERR);
2820Sstevel@tonic-gate }
2830Sstevel@tonic-gate 
2840Sstevel@tonic-gate typedef struct {
2850Sstevel@tonic-gate 	uintptr_t ucw_first;
2860Sstevel@tonic-gate 	uintptr_t ucw_current;
2870Sstevel@tonic-gate } umem_cache_walk_t;
2880Sstevel@tonic-gate 
2890Sstevel@tonic-gate int
umem_cache_walk_init(mdb_walk_state_t * wsp)2900Sstevel@tonic-gate umem_cache_walk_init(mdb_walk_state_t *wsp)
2910Sstevel@tonic-gate {
2920Sstevel@tonic-gate 	umem_cache_walk_t *ucw;
2930Sstevel@tonic-gate 	umem_cache_t c;
2940Sstevel@tonic-gate 	uintptr_t cp;
2950Sstevel@tonic-gate 	GElf_Sym sym;
2960Sstevel@tonic-gate 
2970Sstevel@tonic-gate 	if (umem_lookup_by_name("umem_null_cache", &sym) == -1) {
2980Sstevel@tonic-gate 		mdb_warn("couldn't find umem_null_cache");
2990Sstevel@tonic-gate 		return (WALK_ERR);
3000Sstevel@tonic-gate 	}
3010Sstevel@tonic-gate 
3020Sstevel@tonic-gate 	cp = (uintptr_t)sym.st_value;
3030Sstevel@tonic-gate 
3040Sstevel@tonic-gate 	if (mdb_vread(&c, sizeof (umem_cache_t), cp) == -1) {
3050Sstevel@tonic-gate 		mdb_warn("couldn't read cache at %p", cp);
3060Sstevel@tonic-gate 		return (WALK_ERR);
3070Sstevel@tonic-gate 	}
3080Sstevel@tonic-gate 
3090Sstevel@tonic-gate 	ucw = mdb_alloc(sizeof (umem_cache_walk_t), UM_SLEEP);
3100Sstevel@tonic-gate 
3110Sstevel@tonic-gate 	ucw->ucw_first = cp;
3120Sstevel@tonic-gate 	ucw->ucw_current = (uintptr_t)c.cache_next;
3130Sstevel@tonic-gate 	wsp->walk_data = ucw;
3140Sstevel@tonic-gate 
3150Sstevel@tonic-gate 	return (WALK_NEXT);
3160Sstevel@tonic-gate }
3170Sstevel@tonic-gate 
3180Sstevel@tonic-gate int
umem_cache_walk_step(mdb_walk_state_t * wsp)3190Sstevel@tonic-gate umem_cache_walk_step(mdb_walk_state_t *wsp)
3200Sstevel@tonic-gate {
3210Sstevel@tonic-gate 	umem_cache_walk_t *ucw = wsp->walk_data;
3220Sstevel@tonic-gate 	umem_cache_t c;
3230Sstevel@tonic-gate 	int status;
3240Sstevel@tonic-gate 
3250Sstevel@tonic-gate 	if (mdb_vread(&c, sizeof (umem_cache_t), ucw->ucw_current) == -1) {
3260Sstevel@tonic-gate 		mdb_warn("couldn't read cache at %p", ucw->ucw_current);
3270Sstevel@tonic-gate 		return (WALK_DONE);
3280Sstevel@tonic-gate 	}
3290Sstevel@tonic-gate 
3300Sstevel@tonic-gate 	status = wsp->walk_callback(ucw->ucw_current, &c, wsp->walk_cbdata);
3310Sstevel@tonic-gate 
3320Sstevel@tonic-gate 	if ((ucw->ucw_current = (uintptr_t)c.cache_next) == ucw->ucw_first)
3330Sstevel@tonic-gate 		return (WALK_DONE);
3340Sstevel@tonic-gate 
3350Sstevel@tonic-gate 	return (status);
3360Sstevel@tonic-gate }
3370Sstevel@tonic-gate 
3380Sstevel@tonic-gate void
umem_cache_walk_fini(mdb_walk_state_t * wsp)3390Sstevel@tonic-gate umem_cache_walk_fini(mdb_walk_state_t *wsp)
3400Sstevel@tonic-gate {
3410Sstevel@tonic-gate 	umem_cache_walk_t *ucw = wsp->walk_data;
3420Sstevel@tonic-gate 	mdb_free(ucw, sizeof (umem_cache_walk_t));
3430Sstevel@tonic-gate }
3440Sstevel@tonic-gate 
3450Sstevel@tonic-gate typedef struct {
3460Sstevel@tonic-gate 	umem_cpu_t *ucw_cpus;
3470Sstevel@tonic-gate 	uint32_t ucw_current;
3480Sstevel@tonic-gate 	uint32_t ucw_max;
3490Sstevel@tonic-gate } umem_cpu_walk_state_t;
3500Sstevel@tonic-gate 
3510Sstevel@tonic-gate int
umem_cpu_walk_init(mdb_walk_state_t * wsp)3520Sstevel@tonic-gate umem_cpu_walk_init(mdb_walk_state_t *wsp)
3530Sstevel@tonic-gate {
3540Sstevel@tonic-gate 	umem_cpu_t *umem_cpus;
3550Sstevel@tonic-gate 
3560Sstevel@tonic-gate 	umem_cpu_walk_state_t *ucw;
3570Sstevel@tonic-gate 
3580Sstevel@tonic-gate 	if (umem_readvar(&umem_cpus, "umem_cpus") == -1) {
3590Sstevel@tonic-gate 		mdb_warn("failed to read 'umem_cpus'");
3600Sstevel@tonic-gate 		return (WALK_ERR);
3610Sstevel@tonic-gate 	}
3620Sstevel@tonic-gate 
3630Sstevel@tonic-gate 	ucw = mdb_alloc(sizeof (*ucw), UM_SLEEP);
3640Sstevel@tonic-gate 
3650Sstevel@tonic-gate 	ucw->ucw_cpus = umem_cpus;
3660Sstevel@tonic-gate 	ucw->ucw_current = 0;
3670Sstevel@tonic-gate 	ucw->ucw_max = umem_max_ncpus;
3680Sstevel@tonic-gate 
3690Sstevel@tonic-gate 	wsp->walk_data = ucw;
3700Sstevel@tonic-gate 	return (WALK_NEXT);
3710Sstevel@tonic-gate }
3720Sstevel@tonic-gate 
3730Sstevel@tonic-gate int
umem_cpu_walk_step(mdb_walk_state_t * wsp)3740Sstevel@tonic-gate umem_cpu_walk_step(mdb_walk_state_t *wsp)
3750Sstevel@tonic-gate {
3760Sstevel@tonic-gate 	umem_cpu_t cpu;
3770Sstevel@tonic-gate 	umem_cpu_walk_state_t *ucw = wsp->walk_data;
3780Sstevel@tonic-gate 
3790Sstevel@tonic-gate 	uintptr_t caddr;
3800Sstevel@tonic-gate 
3810Sstevel@tonic-gate 	if (ucw->ucw_current >= ucw->ucw_max)
3820Sstevel@tonic-gate 		return (WALK_DONE);
3830Sstevel@tonic-gate 
3840Sstevel@tonic-gate 	caddr = (uintptr_t)&(ucw->ucw_cpus[ucw->ucw_current]);
3850Sstevel@tonic-gate 
3860Sstevel@tonic-gate 	if (mdb_vread(&cpu, sizeof (umem_cpu_t), caddr) == -1) {
3870Sstevel@tonic-gate 		mdb_warn("failed to read cpu %d", ucw->ucw_current);
3880Sstevel@tonic-gate 		return (WALK_ERR);
3890Sstevel@tonic-gate 	}
3900Sstevel@tonic-gate 
3910Sstevel@tonic-gate 	ucw->ucw_current++;
3920Sstevel@tonic-gate 
3930Sstevel@tonic-gate 	return (wsp->walk_callback(caddr, &cpu, wsp->walk_cbdata));
3940Sstevel@tonic-gate }
3950Sstevel@tonic-gate 
3960Sstevel@tonic-gate void
umem_cpu_walk_fini(mdb_walk_state_t * wsp)3970Sstevel@tonic-gate umem_cpu_walk_fini(mdb_walk_state_t *wsp)
3980Sstevel@tonic-gate {
3990Sstevel@tonic-gate 	umem_cpu_walk_state_t *ucw = wsp->walk_data;
4000Sstevel@tonic-gate 
4010Sstevel@tonic-gate 	mdb_free(ucw, sizeof (*ucw));
4020Sstevel@tonic-gate }
4030Sstevel@tonic-gate 
4040Sstevel@tonic-gate int
umem_cpu_cache_walk_init(mdb_walk_state_t * wsp)4050Sstevel@tonic-gate umem_cpu_cache_walk_init(mdb_walk_state_t *wsp)
4060Sstevel@tonic-gate {
4070Sstevel@tonic-gate 	if (wsp->walk_addr == NULL) {
4080Sstevel@tonic-gate 		mdb_warn("umem_cpu_cache doesn't support global walks");
4090Sstevel@tonic-gate 		return (WALK_ERR);
4100Sstevel@tonic-gate 	}
4110Sstevel@tonic-gate 
4120Sstevel@tonic-gate 	if (mdb_layered_walk("umem_cpu", wsp) == -1) {
4130Sstevel@tonic-gate 		mdb_warn("couldn't walk 'umem_cpu'");
4140Sstevel@tonic-gate 		return (WALK_ERR);
4150Sstevel@tonic-gate 	}
4160Sstevel@tonic-gate 
4170Sstevel@tonic-gate 	wsp->walk_data = (void *)wsp->walk_addr;
4180Sstevel@tonic-gate 
4190Sstevel@tonic-gate 	return (WALK_NEXT);
4200Sstevel@tonic-gate }
4210Sstevel@tonic-gate 
4220Sstevel@tonic-gate int
umem_cpu_cache_walk_step(mdb_walk_state_t * wsp)4230Sstevel@tonic-gate umem_cpu_cache_walk_step(mdb_walk_state_t *wsp)
4240Sstevel@tonic-gate {
4250Sstevel@tonic-gate 	uintptr_t caddr = (uintptr_t)wsp->walk_data;
4260Sstevel@tonic-gate 	const umem_cpu_t *cpu = wsp->walk_layer;
4270Sstevel@tonic-gate 	umem_cpu_cache_t cc;
4280Sstevel@tonic-gate 
4290Sstevel@tonic-gate 	caddr += cpu->cpu_cache_offset;
4300Sstevel@tonic-gate 
4310Sstevel@tonic-gate 	if (mdb_vread(&cc, sizeof (umem_cpu_cache_t), caddr) == -1) {
4320Sstevel@tonic-gate 		mdb_warn("couldn't read umem_cpu_cache at %p", caddr);
4330Sstevel@tonic-gate 		return (WALK_ERR);
4340Sstevel@tonic-gate 	}
4350Sstevel@tonic-gate 
4360Sstevel@tonic-gate 	return (wsp->walk_callback(caddr, &cc, wsp->walk_cbdata));
4370Sstevel@tonic-gate }
4380Sstevel@tonic-gate 
4390Sstevel@tonic-gate int
umem_slab_walk_init(mdb_walk_state_t * wsp)4400Sstevel@tonic-gate umem_slab_walk_init(mdb_walk_state_t *wsp)
4410Sstevel@tonic-gate {
4420Sstevel@tonic-gate 	uintptr_t caddr = wsp->walk_addr;
4430Sstevel@tonic-gate 	umem_cache_t c;
4440Sstevel@tonic-gate 
4450Sstevel@tonic-gate 	if (caddr == NULL) {
4460Sstevel@tonic-gate 		mdb_warn("umem_slab doesn't support global walks\n");
4470Sstevel@tonic-gate 		return (WALK_ERR);
4480Sstevel@tonic-gate 	}
4490Sstevel@tonic-gate 
4500Sstevel@tonic-gate 	if (mdb_vread(&c, sizeof (c), caddr) == -1) {
4510Sstevel@tonic-gate 		mdb_warn("couldn't read umem_cache at %p", caddr);
4520Sstevel@tonic-gate 		return (WALK_ERR);
4530Sstevel@tonic-gate 	}
4540Sstevel@tonic-gate 
4550Sstevel@tonic-gate 	wsp->walk_data =
4560Sstevel@tonic-gate 	    (void *)(caddr + offsetof(umem_cache_t, cache_nullslab));
4570Sstevel@tonic-gate 	wsp->walk_addr = (uintptr_t)c.cache_nullslab.slab_next;
4580Sstevel@tonic-gate 
4590Sstevel@tonic-gate 	return (WALK_NEXT);
4600Sstevel@tonic-gate }
4610Sstevel@tonic-gate 
4620Sstevel@tonic-gate int
umem_slab_walk_partial_init(mdb_walk_state_t * wsp)4630Sstevel@tonic-gate umem_slab_walk_partial_init(mdb_walk_state_t *wsp)
4640Sstevel@tonic-gate {
4650Sstevel@tonic-gate 	uintptr_t caddr = wsp->walk_addr;
4660Sstevel@tonic-gate 	umem_cache_t c;
4670Sstevel@tonic-gate 
4680Sstevel@tonic-gate 	if (caddr == NULL) {
4690Sstevel@tonic-gate 		mdb_warn("umem_slab_partial doesn't support global walks\n");
4700Sstevel@tonic-gate 		return (WALK_ERR);
4710Sstevel@tonic-gate 	}
4720Sstevel@tonic-gate 
4730Sstevel@tonic-gate 	if (mdb_vread(&c, sizeof (c), caddr) == -1) {
4740Sstevel@tonic-gate 		mdb_warn("couldn't read umem_cache at %p", caddr);
4750Sstevel@tonic-gate 		return (WALK_ERR);
4760Sstevel@tonic-gate 	}
4770Sstevel@tonic-gate 
4780Sstevel@tonic-gate 	wsp->walk_data =
4790Sstevel@tonic-gate 	    (void *)(caddr + offsetof(umem_cache_t, cache_nullslab));
4800Sstevel@tonic-gate 	wsp->walk_addr = (uintptr_t)c.cache_freelist;
4810Sstevel@tonic-gate 
4820Sstevel@tonic-gate 	/*
4830Sstevel@tonic-gate 	 * Some consumers (umem_walk_step(), in particular) require at
4840Sstevel@tonic-gate 	 * least one callback if there are any buffers in the cache.  So
4850Sstevel@tonic-gate 	 * if there are *no* partial slabs, report the last full slab, if
4860Sstevel@tonic-gate 	 * any.
4870Sstevel@tonic-gate 	 *
4880Sstevel@tonic-gate 	 * Yes, this is ugly, but it's cleaner than the other possibilities.
4890Sstevel@tonic-gate 	 */
4900Sstevel@tonic-gate 	if ((uintptr_t)wsp->walk_data == wsp->walk_addr)
4910Sstevel@tonic-gate 		wsp->walk_addr = (uintptr_t)c.cache_nullslab.slab_prev;
4920Sstevel@tonic-gate 
4930Sstevel@tonic-gate 	return (WALK_NEXT);
4940Sstevel@tonic-gate }
4950Sstevel@tonic-gate 
4960Sstevel@tonic-gate int
umem_slab_walk_step(mdb_walk_state_t * wsp)4970Sstevel@tonic-gate umem_slab_walk_step(mdb_walk_state_t *wsp)
4980Sstevel@tonic-gate {
4990Sstevel@tonic-gate 	umem_slab_t s;
5000Sstevel@tonic-gate 	uintptr_t addr = wsp->walk_addr;
5010Sstevel@tonic-gate 	uintptr_t saddr = (uintptr_t)wsp->walk_data;
5020Sstevel@tonic-gate 	uintptr_t caddr = saddr - offsetof(umem_cache_t, cache_nullslab);
5030Sstevel@tonic-gate 
5040Sstevel@tonic-gate 	if (addr == saddr)
5050Sstevel@tonic-gate 		return (WALK_DONE);
5060Sstevel@tonic-gate 
5070Sstevel@tonic-gate 	if (mdb_vread(&s, sizeof (s), addr) == -1) {
5080Sstevel@tonic-gate 		mdb_warn("failed to read slab at %p", wsp->walk_addr);
5090Sstevel@tonic-gate 		return (WALK_ERR);
5100Sstevel@tonic-gate 	}
5110Sstevel@tonic-gate 
5120Sstevel@tonic-gate 	if ((uintptr_t)s.slab_cache != caddr) {
5130Sstevel@tonic-gate 		mdb_warn("slab %p isn't in cache %p (in cache %p)\n",
5140Sstevel@tonic-gate 		    addr, caddr, s.slab_cache);
5150Sstevel@tonic-gate 		return (WALK_ERR);
5160Sstevel@tonic-gate 	}
5170Sstevel@tonic-gate 
5180Sstevel@tonic-gate 	wsp->walk_addr = (uintptr_t)s.slab_next;
5190Sstevel@tonic-gate 
5200Sstevel@tonic-gate 	return (wsp->walk_callback(addr, &s, wsp->walk_cbdata));
5210Sstevel@tonic-gate }
5220Sstevel@tonic-gate 
5230Sstevel@tonic-gate int
umem_cache(uintptr_t addr,uint_t flags,int ac,const mdb_arg_t * argv)5240Sstevel@tonic-gate umem_cache(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv)
5250Sstevel@tonic-gate {
5260Sstevel@tonic-gate 	umem_cache_t c;
5270Sstevel@tonic-gate 
5280Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC)) {
5290Sstevel@tonic-gate 		if (mdb_walk_dcmd("umem_cache", "umem_cache", ac, argv) == -1) {
5300Sstevel@tonic-gate 			mdb_warn("can't walk umem_cache");
5310Sstevel@tonic-gate 			return (DCMD_ERR);
5320Sstevel@tonic-gate 		}
5330Sstevel@tonic-gate 		return (DCMD_OK);
5340Sstevel@tonic-gate 	}
5350Sstevel@tonic-gate 
5360Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags))
5370Sstevel@tonic-gate 		mdb_printf("%-?s %-25s %4s %8s %8s %8s\n", "ADDR", "NAME",
5380Sstevel@tonic-gate 		    "FLAG", "CFLAG", "BUFSIZE", "BUFTOTL");
5390Sstevel@tonic-gate 
5400Sstevel@tonic-gate 	if (mdb_vread(&c, sizeof (c), addr) == -1) {
5410Sstevel@tonic-gate 		mdb_warn("couldn't read umem_cache at %p", addr);
5420Sstevel@tonic-gate 		return (DCMD_ERR);
5430Sstevel@tonic-gate 	}
5440Sstevel@tonic-gate 
5450Sstevel@tonic-gate 	mdb_printf("%0?p %-25s %04x %08x %8ld %8lld\n", addr, c.cache_name,
5460Sstevel@tonic-gate 	    c.cache_flags, c.cache_cflags, c.cache_bufsize, c.cache_buftotal);
5470Sstevel@tonic-gate 
5480Sstevel@tonic-gate 	return (DCMD_OK);
5490Sstevel@tonic-gate }
5500Sstevel@tonic-gate 
/* qsort comparator: order uintptr_t values ascending */
static int
addrcmp(const void *lhs, const void *rhs)
{
	uintptr_t l = *((const uintptr_t *)lhs);
	uintptr_t r = *((const uintptr_t *)rhs);

	if (l == r)
		return (0);
	return (l < r ? -1 : 1);
}
5630Sstevel@tonic-gate 
5640Sstevel@tonic-gate static int
bufctlcmp(const umem_bufctl_audit_t ** lhs,const umem_bufctl_audit_t ** rhs)5650Sstevel@tonic-gate bufctlcmp(const umem_bufctl_audit_t **lhs, const umem_bufctl_audit_t **rhs)
5660Sstevel@tonic-gate {
5670Sstevel@tonic-gate 	const umem_bufctl_audit_t *bcp1 = *lhs;
5680Sstevel@tonic-gate 	const umem_bufctl_audit_t *bcp2 = *rhs;
5690Sstevel@tonic-gate 
5700Sstevel@tonic-gate 	if (bcp1->bc_timestamp > bcp2->bc_timestamp)
5710Sstevel@tonic-gate 		return (-1);
5720Sstevel@tonic-gate 
5730Sstevel@tonic-gate 	if (bcp1->bc_timestamp < bcp2->bc_timestamp)
5740Sstevel@tonic-gate 		return (1);
5750Sstevel@tonic-gate 
5760Sstevel@tonic-gate 	return (0);
5770Sstevel@tonic-gate }
5780Sstevel@tonic-gate 
5790Sstevel@tonic-gate typedef struct umem_hash_walk {
5800Sstevel@tonic-gate 	uintptr_t *umhw_table;
5810Sstevel@tonic-gate 	size_t umhw_nelems;
5820Sstevel@tonic-gate 	size_t umhw_pos;
5830Sstevel@tonic-gate 	umem_bufctl_t umhw_cur;
5840Sstevel@tonic-gate } umem_hash_walk_t;
5850Sstevel@tonic-gate 
5860Sstevel@tonic-gate int
umem_hash_walk_init(mdb_walk_state_t * wsp)5870Sstevel@tonic-gate umem_hash_walk_init(mdb_walk_state_t *wsp)
5880Sstevel@tonic-gate {
5890Sstevel@tonic-gate 	umem_hash_walk_t *umhw;
5900Sstevel@tonic-gate 	uintptr_t *hash;
5910Sstevel@tonic-gate 	umem_cache_t c;
5920Sstevel@tonic-gate 	uintptr_t haddr, addr = wsp->walk_addr;
5930Sstevel@tonic-gate 	size_t nelems;
5940Sstevel@tonic-gate 	size_t hsize;
5950Sstevel@tonic-gate 
5960Sstevel@tonic-gate 	if (addr == NULL) {
5970Sstevel@tonic-gate 		mdb_warn("umem_hash doesn't support global walks\n");
5980Sstevel@tonic-gate 		return (WALK_ERR);
5990Sstevel@tonic-gate 	}
6000Sstevel@tonic-gate 
6010Sstevel@tonic-gate 	if (mdb_vread(&c, sizeof (c), addr) == -1) {
6020Sstevel@tonic-gate 		mdb_warn("couldn't read cache at addr %p", addr);
6030Sstevel@tonic-gate 		return (WALK_ERR);
6040Sstevel@tonic-gate 	}
6050Sstevel@tonic-gate 
6060Sstevel@tonic-gate 	if (!(c.cache_flags & UMF_HASH)) {
6070Sstevel@tonic-gate 		mdb_warn("cache %p doesn't have a hash table\n", addr);
6080Sstevel@tonic-gate 		return (WALK_DONE);		/* nothing to do */
6090Sstevel@tonic-gate 	}
6100Sstevel@tonic-gate 
6110Sstevel@tonic-gate 	umhw = mdb_zalloc(sizeof (umem_hash_walk_t), UM_SLEEP);
6120Sstevel@tonic-gate 	umhw->umhw_cur.bc_next = NULL;
6130Sstevel@tonic-gate 	umhw->umhw_pos = 0;
6140Sstevel@tonic-gate 
6150Sstevel@tonic-gate 	umhw->umhw_nelems = nelems = c.cache_hash_mask + 1;
6160Sstevel@tonic-gate 	hsize = nelems * sizeof (uintptr_t);
6170Sstevel@tonic-gate 	haddr = (uintptr_t)c.cache_hash_table;
6180Sstevel@tonic-gate 
6190Sstevel@tonic-gate 	umhw->umhw_table = hash = mdb_alloc(hsize, UM_SLEEP);
6200Sstevel@tonic-gate 	if (mdb_vread(hash, hsize, haddr) == -1) {
6210Sstevel@tonic-gate 		mdb_warn("failed to read hash table at %p", haddr);
6220Sstevel@tonic-gate 		mdb_free(hash, hsize);
6230Sstevel@tonic-gate 		mdb_free(umhw, sizeof (umem_hash_walk_t));
6240Sstevel@tonic-gate 		return (WALK_ERR);
6250Sstevel@tonic-gate 	}
6260Sstevel@tonic-gate 
6270Sstevel@tonic-gate 	wsp->walk_data = umhw;
6280Sstevel@tonic-gate 
6290Sstevel@tonic-gate 	return (WALK_NEXT);
6300Sstevel@tonic-gate }
6310Sstevel@tonic-gate 
6320Sstevel@tonic-gate int
umem_hash_walk_step(mdb_walk_state_t * wsp)6330Sstevel@tonic-gate umem_hash_walk_step(mdb_walk_state_t *wsp)
6340Sstevel@tonic-gate {
6350Sstevel@tonic-gate 	umem_hash_walk_t *umhw = wsp->walk_data;
6360Sstevel@tonic-gate 	uintptr_t addr = NULL;
6370Sstevel@tonic-gate 
6380Sstevel@tonic-gate 	if ((addr = (uintptr_t)umhw->umhw_cur.bc_next) == NULL) {
6390Sstevel@tonic-gate 		while (umhw->umhw_pos < umhw->umhw_nelems) {
6400Sstevel@tonic-gate 			if ((addr = umhw->umhw_table[umhw->umhw_pos++]) != NULL)
6410Sstevel@tonic-gate 				break;
6420Sstevel@tonic-gate 		}
6430Sstevel@tonic-gate 	}
6440Sstevel@tonic-gate 	if (addr == NULL)
6450Sstevel@tonic-gate 		return (WALK_DONE);
6460Sstevel@tonic-gate 
6470Sstevel@tonic-gate 	if (mdb_vread(&umhw->umhw_cur, sizeof (umem_bufctl_t), addr) == -1) {
6480Sstevel@tonic-gate 		mdb_warn("couldn't read umem_bufctl_t at addr %p", addr);
6490Sstevel@tonic-gate 		return (WALK_ERR);
6500Sstevel@tonic-gate 	}
6510Sstevel@tonic-gate 
6520Sstevel@tonic-gate 	return (wsp->walk_callback(addr, &umhw->umhw_cur, wsp->walk_cbdata));
6530Sstevel@tonic-gate }
6540Sstevel@tonic-gate 
6550Sstevel@tonic-gate void
umem_hash_walk_fini(mdb_walk_state_t * wsp)6560Sstevel@tonic-gate umem_hash_walk_fini(mdb_walk_state_t *wsp)
6570Sstevel@tonic-gate {
6580Sstevel@tonic-gate 	umem_hash_walk_t *umhw = wsp->walk_data;
6590Sstevel@tonic-gate 
6600Sstevel@tonic-gate 	if (umhw == NULL)
6610Sstevel@tonic-gate 		return;
6620Sstevel@tonic-gate 
6630Sstevel@tonic-gate 	mdb_free(umhw->umhw_table, umhw->umhw_nelems * sizeof (uintptr_t));
6640Sstevel@tonic-gate 	mdb_free(umhw, sizeof (umem_hash_walk_t));
6650Sstevel@tonic-gate }
6660Sstevel@tonic-gate 
6670Sstevel@tonic-gate /*
6680Sstevel@tonic-gate  * Find the address of the bufctl structure for the address 'buf' in cache
6690Sstevel@tonic-gate  * 'cp', which is at address caddr, and place it in *out.
6700Sstevel@tonic-gate  */
6710Sstevel@tonic-gate static int
umem_hash_lookup(umem_cache_t * cp,uintptr_t caddr,void * buf,uintptr_t * out)6720Sstevel@tonic-gate umem_hash_lookup(umem_cache_t *cp, uintptr_t caddr, void *buf, uintptr_t *out)
6730Sstevel@tonic-gate {
6740Sstevel@tonic-gate 	uintptr_t bucket = (uintptr_t)UMEM_HASH(cp, buf);
6750Sstevel@tonic-gate 	umem_bufctl_t *bcp;
6760Sstevel@tonic-gate 	umem_bufctl_t bc;
6770Sstevel@tonic-gate 
6780Sstevel@tonic-gate 	if (mdb_vread(&bcp, sizeof (umem_bufctl_t *), bucket) == -1) {
6790Sstevel@tonic-gate 		mdb_warn("unable to read hash bucket for %p in cache %p",
6800Sstevel@tonic-gate 		    buf, caddr);
6810Sstevel@tonic-gate 		return (-1);
6820Sstevel@tonic-gate 	}
6830Sstevel@tonic-gate 
6840Sstevel@tonic-gate 	while (bcp != NULL) {
6850Sstevel@tonic-gate 		if (mdb_vread(&bc, sizeof (umem_bufctl_t),
6860Sstevel@tonic-gate 		    (uintptr_t)bcp) == -1) {
6870Sstevel@tonic-gate 			mdb_warn("unable to read bufctl at %p", bcp);
6880Sstevel@tonic-gate 			return (-1);
6890Sstevel@tonic-gate 		}
6900Sstevel@tonic-gate 		if (bc.bc_addr == buf) {
6910Sstevel@tonic-gate 			*out = (uintptr_t)bcp;
6920Sstevel@tonic-gate 			return (0);
6930Sstevel@tonic-gate 		}
6940Sstevel@tonic-gate 		bcp = bc.bc_next;
6950Sstevel@tonic-gate 	}
6960Sstevel@tonic-gate 
6970Sstevel@tonic-gate 	mdb_warn("unable to find bufctl for %p in cache %p\n", buf, caddr);
6980Sstevel@tonic-gate 	return (-1);
6990Sstevel@tonic-gate }
7000Sstevel@tonic-gate 
/*
 * Determine the magazine size (rounds per magazine) for the cache 'cp'
 * (a local copy).  Returns 0 for caches whose magazine layer is disabled
 * or whose magtype pointer cannot be validated or read.
 */
int
umem_get_magsize(const umem_cache_t *cp)
{
	uintptr_t addr = (uintptr_t)cp->cache_magtype;
	GElf_Sym mt_sym;
	umem_magtype_t mt;
	int res;

	/*
	 * if cpu 0 has a non-zero magsize, it must be correct.  caches
	 * with UMF_NOMAGAZINE have disabled their magazine layers, so
	 * it is okay to return 0 for them.
	 */
	if ((res = cp->cache_cpu[0].cc_magsize) != 0 ||
	    (cp->cache_flags & UMF_NOMAGAZINE))
		return (res);

	/*
	 * Validate that cache_magtype points into the target's
	 * umem_magtype[] array -- in bounds and aligned to an array
	 * element -- before dereferencing it.  If the symbol can't be
	 * found we warn but still fall through and attempt the read.
	 */
	if (umem_lookup_by_name("umem_magtype", &mt_sym) == -1) {
		mdb_warn("unable to read 'umem_magtype'");
	} else if (addr < mt_sym.st_value ||
	    addr + sizeof (mt) - 1 > mt_sym.st_value + mt_sym.st_size - 1 ||
	    ((addr - mt_sym.st_value) % sizeof (mt)) != 0) {
		mdb_warn("cache '%s' has invalid magtype pointer (%p)\n",
		    cp->cache_name, addr);
		return (0);
	}
	if (mdb_vread(&mt, sizeof (mt), addr) == -1) {
		mdb_warn("unable to read magtype at %a", addr);
		return (0);
	}
	return (mt.mt_magsize);
}
7330Sstevel@tonic-gate 
7340Sstevel@tonic-gate /*ARGSUSED*/
7350Sstevel@tonic-gate static int
umem_estimate_slab(uintptr_t addr,const umem_slab_t * sp,size_t * est)7360Sstevel@tonic-gate umem_estimate_slab(uintptr_t addr, const umem_slab_t *sp, size_t *est)
7370Sstevel@tonic-gate {
7380Sstevel@tonic-gate 	*est -= (sp->slab_chunks - sp->slab_refcnt);
7390Sstevel@tonic-gate 
7400Sstevel@tonic-gate 	return (WALK_NEXT);
7410Sstevel@tonic-gate }
7420Sstevel@tonic-gate 
7430Sstevel@tonic-gate /*
7440Sstevel@tonic-gate  * Returns an upper bound on the number of allocated buffers in a given
7450Sstevel@tonic-gate  * cache.
7460Sstevel@tonic-gate  */
7470Sstevel@tonic-gate size_t
umem_estimate_allocated(uintptr_t addr,const umem_cache_t * cp)7480Sstevel@tonic-gate umem_estimate_allocated(uintptr_t addr, const umem_cache_t *cp)
7490Sstevel@tonic-gate {
7500Sstevel@tonic-gate 	int magsize;
7510Sstevel@tonic-gate 	size_t cache_est;
7520Sstevel@tonic-gate 
7530Sstevel@tonic-gate 	cache_est = cp->cache_buftotal;
7540Sstevel@tonic-gate 
7550Sstevel@tonic-gate 	(void) mdb_pwalk("umem_slab_partial",
7560Sstevel@tonic-gate 	    (mdb_walk_cb_t)umem_estimate_slab, &cache_est, addr);
7570Sstevel@tonic-gate 
7580Sstevel@tonic-gate 	if ((magsize = umem_get_magsize(cp)) != 0) {
7590Sstevel@tonic-gate 		size_t mag_est = cp->cache_full.ml_total * magsize;
7600Sstevel@tonic-gate 
7610Sstevel@tonic-gate 		if (cache_est >= mag_est) {
7620Sstevel@tonic-gate 			cache_est -= mag_est;
7630Sstevel@tonic-gate 		} else {
7640Sstevel@tonic-gate 			mdb_warn("cache %p's magazine layer holds more buffers "
7650Sstevel@tonic-gate 			    "than the slab layer.\n", addr);
7660Sstevel@tonic-gate 		}
7670Sstevel@tonic-gate 	}
7680Sstevel@tonic-gate 	return (cache_est);
7690Sstevel@tonic-gate }
7700Sstevel@tonic-gate 
/*
 * READMAG_ROUNDS: read the magazine at target address 'ump' into the
 * scratch buffer 'mp' and append its first 'rounds' buffer pointers to
 * maglist[].  This macro deliberately relies on the local variables of
 * umem_read_magazines() (ump, mp, magbsize, maglist, magcnt, magmax, i)
 * and jumps to its 'fail' label on a read error or if the fudge-factor
 * bound 'magmax' is exceeded.
 */
#define	READMAG_ROUNDS(rounds) { \
	if (mdb_vread(mp, magbsize, (uintptr_t)ump) == -1) { \
		mdb_warn("couldn't read magazine at %p", ump); \
		goto fail; \
	} \
	for (i = 0; i < rounds; i++) { \
		maglist[magcnt++] = mp->mag_round[i]; \
		if (magcnt == magmax) { \
			mdb_warn("%d magazines exceeds fudge factor\n", \
			    magcnt); \
			goto fail; \
		} \
	} \
}
7850Sstevel@tonic-gate 
/*
 * Snapshot every buffer pointer cached in the magazine layer of 'cp'
 * (whose target address is 'addr') into a newly allocated array.
 *
 * On success, returns WALK_NEXT with *maglistp pointing at an array of
 * *magcntp entries (sized to hold *magmaxp entries); the caller owns the
 * array unless UM_GC was passed in alloc_flags, in which case mdb's
 * garbage collection reclaims it.  Returns WALK_ERR on failure.  A cache
 * with no magazine layer yields WALK_NEXT with an empty result.
 */
int
umem_read_magazines(umem_cache_t *cp, uintptr_t addr,
    void ***maglistp, size_t *magcntp, size_t *magmaxp, int alloc_flags)
{
	umem_magazine_t *ump, *mp;
	void **maglist = NULL;
	int i, cpu;
	size_t magsize, magmax, magbsize;
	size_t magcnt = 0;

	/*
	 * Read the magtype out of the cache, after verifying the pointer's
	 * correctness.
	 */
	magsize = umem_get_magsize(cp);
	if (magsize == 0) {
		*maglistp = NULL;
		*magcntp = 0;
		*magmaxp = 0;
		return (WALK_NEXT);
	}

	/*
	 * There are several places where we need to go buffer hunting:
	 * the per-CPU loaded magazine, the per-CPU spare full magazine,
	 * and the full magazine list in the depot.
	 *
	 * For an upper bound on the number of buffers in the magazine
	 * layer, we have the number of magazines on the cache_full
	 * list plus at most two magazines per CPU (the loaded and the
	 * spare).  Toss in 100 magazines as a fudge factor in case this
	 * is live (the number "100" comes from the same fudge factor in
	 * crash(1M)).
	 */
	magmax = (cp->cache_full.ml_total + 2 * umem_max_ncpus + 100) * magsize;
	magbsize = offsetof(umem_magazine_t, mag_round[magsize]);

	/* sanity-check the computed per-magazine size before allocating */
	if (magbsize >= PAGESIZE / 2) {
		mdb_warn("magazine size for cache %p unreasonable (%x)\n",
		    addr, magbsize);
		return (WALK_ERR);
	}

	maglist = mdb_alloc(magmax * sizeof (void *), alloc_flags);
	mp = mdb_alloc(magbsize, alloc_flags);
	if (mp == NULL || maglist == NULL)
		goto fail;

	/*
	 * First up: the magazines in the depot (i.e. on the cache_full list).
	 */
	for (ump = cp->cache_full.ml_list; ump != NULL; ) {
		READMAG_ROUNDS(magsize);
		ump = mp->mag_next;

		if (ump == cp->cache_full.ml_list)
			break; /* cache_full list loop detected */
	}

	dprintf(("cache_full list done\n"));

	/*
	 * Now whip through the CPUs, snagging the loaded magazines
	 * and full spares.
	 */
	for (cpu = 0; cpu < umem_max_ncpus; cpu++) {
		umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu];

		dprintf(("reading cpu cache %p\n",
		    (uintptr_t)ccp - (uintptr_t)cp + addr));

		if (ccp->cc_rounds > 0 &&
		    (ump = ccp->cc_loaded) != NULL) {
			dprintf(("reading %d loaded rounds\n", ccp->cc_rounds));
			READMAG_ROUNDS(ccp->cc_rounds);
		}

		if (ccp->cc_prounds > 0 &&
		    (ump = ccp->cc_ploaded) != NULL) {
			dprintf(("reading %d previously loaded rounds\n",
			    ccp->cc_prounds));
			READMAG_ROUNDS(ccp->cc_prounds);
		}
	}

	dprintf(("magazine layer: %d buffers\n", magcnt));

	/* UM_GC allocations are reclaimed by mdb itself; don't free them */
	if (!(alloc_flags & UM_GC))
		mdb_free(mp, magbsize);

	*maglistp = maglist;
	*magcntp = magcnt;
	*magmaxp = magmax;

	return (WALK_NEXT);

fail:
	if (!(alloc_flags & UM_GC)) {
		if (mp)
			mdb_free(mp, magbsize);
		if (maglist)
			mdb_free(maglist, magmax * sizeof (void *));
	}
	return (WALK_ERR);
}
8910Sstevel@tonic-gate 
8920Sstevel@tonic-gate static int
umem_walk_callback(mdb_walk_state_t * wsp,uintptr_t buf)8930Sstevel@tonic-gate umem_walk_callback(mdb_walk_state_t *wsp, uintptr_t buf)
8940Sstevel@tonic-gate {
8950Sstevel@tonic-gate 	return (wsp->walk_callback(buf, NULL, wsp->walk_cbdata));
8960Sstevel@tonic-gate }
8970Sstevel@tonic-gate 
/*
 * Report bufctl address 'buf' to the walk callback, passing a local copy
 * of its contents.  For UMF_AUDIT caches we try to read the full audit
 * bufctl; otherwise (or if that larger read fails) we fall back to
 * reading a plain umem_bufctl_t into zero-filled audit-sized storage.
 */
static int
bufctl_walk_callback(umem_cache_t *cp, mdb_walk_state_t *wsp, uintptr_t buf)
{
	umem_bufctl_audit_t *b;
	UMEM_LOCAL_BUFCTL_AUDIT(&b);	/* stack-allocates storage for b */

	/*
	 * if UMF_AUDIT is not set, we know that we're looking at a
	 * umem_bufctl_t.
	 */
	if (!(cp->cache_flags & UMF_AUDIT) ||
	    mdb_vread(b, UMEM_BUFCTL_AUDIT_SIZE, buf) == -1) {
		(void) memset(b, 0, UMEM_BUFCTL_AUDIT_SIZE);
		if (mdb_vread(b, sizeof (umem_bufctl_t), buf) == -1) {
			mdb_warn("unable to read bufctl at %p", buf);
			return (WALK_ERR);
		}
	}

	return (wsp->walk_callback(buf, b, wsp->walk_cbdata));
}
9190Sstevel@tonic-gate 
9200Sstevel@tonic-gate typedef struct umem_walk {
9210Sstevel@tonic-gate 	int umw_type;
9220Sstevel@tonic-gate 
9230Sstevel@tonic-gate 	int umw_addr;			/* cache address */
9240Sstevel@tonic-gate 	umem_cache_t *umw_cp;
9250Sstevel@tonic-gate 	size_t umw_csize;
9260Sstevel@tonic-gate 
9270Sstevel@tonic-gate 	/*
9280Sstevel@tonic-gate 	 * magazine layer
9290Sstevel@tonic-gate 	 */
9300Sstevel@tonic-gate 	void **umw_maglist;
9310Sstevel@tonic-gate 	size_t umw_max;
9320Sstevel@tonic-gate 	size_t umw_count;
9330Sstevel@tonic-gate 	size_t umw_pos;
9340Sstevel@tonic-gate 
9350Sstevel@tonic-gate 	/*
9360Sstevel@tonic-gate 	 * slab layer
9370Sstevel@tonic-gate 	 */
9380Sstevel@tonic-gate 	char *umw_valid;	/* to keep track of freed buffers */
9390Sstevel@tonic-gate 	char *umw_ubase;	/* buffer for slab data */
9400Sstevel@tonic-gate } umem_walk_t;
9410Sstevel@tonic-gate 
/*
 * Common initialization for the umem/bufctl walkers.  'type' is a set of
 * UM_* flags selecting allocated vs. freed buffers and buffer addresses
 * vs. bufctls.  Reads a local copy of the cache at wsp->walk_addr,
 * validates it, snapshots the magazine layer, and then starts the
 * appropriate layered walk ("umem_hash", "umem_slab", or
 * "umem_slab_partial"); umem_walk_step() does the per-object work.
 */
static int
umem_walk_init_common(mdb_walk_state_t *wsp, int type)
{
	umem_walk_t *umw;
	int csize;
	umem_cache_t *cp;
	size_t vm_quantum;

	size_t magmax, magcnt;
	void **maglist = NULL;
	uint_t chunksize, slabsize;
	int status = WALK_ERR;
	uintptr_t addr = wsp->walk_addr;
	const char *layered;

	/* UM_HASH is an internal flag; it is set below, never by callers */
	type &= ~UM_HASH;

	if (addr == NULL) {
		mdb_warn("umem walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	dprintf(("walking %p\n", addr));

	/*
	 * The number of "cpus" determines how large the cache is.
	 */
	csize = UMEM_CACHE_SIZE(umem_max_ncpus);
	cp = mdb_alloc(csize, UM_SLEEP);

	if (mdb_vread(cp, csize, addr) == -1) {
		mdb_warn("couldn't read cache at addr %p", addr);
		goto out2;
	}

	/*
	 * It's easy for someone to hand us an invalid cache address.
	 * Unfortunately, it is hard for this walker to survive an
	 * invalid cache cleanly.  So we make sure that:
	 *
	 *	1. the vmem arena for the cache is readable,
	 *	2. the vmem arena's quantum is a power of 2,
	 *	3. our slabsize is a multiple of the quantum, and
	 *	4. our chunksize is >0 and less than our slabsize.
	 */
	if (mdb_vread(&vm_quantum, sizeof (vm_quantum),
	    (uintptr_t)&cp->cache_arena->vm_quantum) == -1 ||
	    vm_quantum == 0 ||
	    (vm_quantum & (vm_quantum - 1)) != 0 ||
	    cp->cache_slabsize < vm_quantum ||
	    P2PHASE(cp->cache_slabsize, vm_quantum) != 0 ||
	    cp->cache_chunksize == 0 ||
	    cp->cache_chunksize > cp->cache_slabsize) {
		mdb_warn("%p is not a valid umem_cache_t\n", addr);
		goto out2;
	}

	dprintf(("buf total is %d\n", cp->cache_buftotal));

	/* an empty cache has nothing to walk */
	if (cp->cache_buftotal == 0) {
		mdb_free(cp, csize);
		return (WALK_DONE);
	}

	/*
	 * If they ask for bufctls, but it's a small-slab cache,
	 * there is nothing to report.
	 */
	if ((type & UM_BUFCTL) && !(cp->cache_flags & UMF_HASH)) {
		dprintf(("bufctl requested, not UMF_HASH (flags: %p)\n",
		    cp->cache_flags));
		mdb_free(cp, csize);
		return (WALK_DONE);
	}

	/*
	 * Read in the contents of the magazine layer
	 */
	if (umem_read_magazines(cp, addr, &maglist, &magcnt, &magmax,
	    UM_SLEEP) == WALK_ERR)
		goto out2;

	/*
	 * We have all of the buffers from the magazines;  if we are walking
	 * allocated buffers, sort them so we can bsearch them later.
	 */
	if (type & UM_ALLOCATED)
		qsort(maglist, magcnt, sizeof (void *), addrcmp);

	wsp->walk_data = umw = mdb_zalloc(sizeof (umem_walk_t), UM_SLEEP);

	umw->umw_type = type;
	umw->umw_addr = addr;
	umw->umw_cp = cp;
	umw->umw_csize = csize;
	umw->umw_maglist = maglist;
	umw->umw_max = magmax;
	umw->umw_count = magcnt;
	umw->umw_pos = 0;

	/*
	 * When walking allocated buffers in a UMF_HASH cache, we walk the
	 * hash table instead of the slab layer.
	 */
	if ((cp->cache_flags & UMF_HASH) && (type & UM_ALLOCATED)) {
		layered = "umem_hash";

		umw->umw_type |= UM_HASH;
	} else {
		/*
		 * If we are walking freed buffers, we only need the
		 * magazine layer plus the partially allocated slabs.
		 * To walk allocated buffers, we need all of the slabs.
		 */
		if (type & UM_ALLOCATED)
			layered = "umem_slab";
		else
			layered = "umem_slab_partial";

		/*
		 * for small-slab caches, we read in the entire slab.  For
		 * freed buffers, we can just walk the freelist.  For
		 * allocated buffers, we use a 'valid' array to track
		 * the freed buffers.
		 */
		if (!(cp->cache_flags & UMF_HASH)) {
			chunksize = cp->cache_chunksize;
			slabsize = cp->cache_slabsize;

			umw->umw_ubase = mdb_alloc(slabsize +
			    sizeof (umem_bufctl_t), UM_SLEEP);

			if (type & UM_ALLOCATED)
				umw->umw_valid =
				    mdb_alloc(slabsize / chunksize, UM_SLEEP);
		}
	}

	status = WALK_NEXT;

	if (mdb_layered_walk(layered, wsp) == -1) {
		mdb_warn("unable to start layered '%s' walk", layered);
		status = WALK_ERR;
	}

	/*
	 * Error cleanup.  Both labels are reached only by falling through
	 * or by the gotos above; note that umw_valid is only non-NULL when
	 * the branch that set chunksize/slabsize was taken, so those
	 * locals are initialized whenever they are used here.
	 */
out1:
	if (status == WALK_ERR) {
		if (umw->umw_valid)
			mdb_free(umw->umw_valid, slabsize / chunksize);

		if (umw->umw_ubase)
			mdb_free(umw->umw_ubase, slabsize +
			    sizeof (umem_bufctl_t));

		if (umw->umw_maglist)
			mdb_free(umw->umw_maglist, umw->umw_max *
			    sizeof (uintptr_t));

		mdb_free(umw, sizeof (umem_walk_t));
		wsp->walk_data = NULL;
	}

out2:
	if (status == WALK_ERR)
		mdb_free(cp, csize);

	return (status);
}
11100Sstevel@tonic-gate 
/*
 * Step function for the umem/bufctl walkers.  Invoked once per object
 * delivered by the layered walk started in umem_walk_init_common():
 * either a hash-table bufctl (UM_HASH walks) or a slab.  Reports buffers
 * or bufctls to the walk callback, taking care not to report as
 * "allocated" any buffer that is actually sitting in the magazine layer.
 */
int
umem_walk_step(mdb_walk_state_t *wsp)
{
	umem_walk_t *umw = wsp->walk_data;
	int type = umw->umw_type;
	umem_cache_t *cp = umw->umw_cp;

	void **maglist = umw->umw_maglist;
	int magcnt = umw->umw_count;

	uintptr_t chunksize, slabsize;
	uintptr_t addr;
	const umem_slab_t *sp;
	const umem_bufctl_t *bcp;
	umem_bufctl_t bc;

	int chunks;
	char *kbase;
	void *buf;
	int i, ret;

	char *valid, *ubase;

	/*
	 * first, handle the 'umem_hash' layered walk case
	 */
	if (type & UM_HASH) {
		/*
		 * We have a buffer which has been allocated out of the
		 * global layer. We need to make sure that it's not
		 * actually sitting in a magazine before we report it as
		 * an allocated buffer.
		 */
		buf = ((const umem_bufctl_t *)wsp->walk_layer)->bc_addr;

		if (magcnt > 0 &&
		    bsearch(&buf, maglist, magcnt, sizeof (void *),
		    addrcmp) != NULL)
			return (WALK_NEXT);

		if (type & UM_BUFCTL)
			return (bufctl_walk_callback(cp, wsp, wsp->walk_addr));

		return (umem_walk_callback(wsp, (uintptr_t)buf));
	}

	ret = WALK_NEXT;

	addr = umw->umw_addr;

	/*
	 * If we're walking freed buffers, report everything in the
	 * magazine layer before processing the first slab.
	 */
	if ((type & UM_FREE) && magcnt != 0) {
		umw->umw_count = 0;		/* only do this once */
		for (i = 0; i < magcnt; i++) {
			buf = maglist[i];

			if (type & UM_BUFCTL) {
				uintptr_t out;

				/*
				 * For UMF_BUFTAG caches, the bufctl address
				 * is stored in the buffer's tag; otherwise
				 * we must search the cache's hash chains.
				 */
				if (cp->cache_flags & UMF_BUFTAG) {
					umem_buftag_t *btp;
					umem_buftag_t tag;

					/* LINTED - alignment */
					btp = UMEM_BUFTAG(cp, buf);
					if (mdb_vread(&tag, sizeof (tag),
					    (uintptr_t)btp) == -1) {
						mdb_warn("reading buftag for "
						    "%p at %p", buf, btp);
						continue;
					}
					out = (uintptr_t)tag.bt_bufctl;
				} else {
					if (umem_hash_lookup(cp, addr, buf,
					    &out) == -1)
						continue;
				}
				ret = bufctl_walk_callback(cp, wsp, out);
			} else {
				ret = umem_walk_callback(wsp, (uintptr_t)buf);
			}

			if (ret != WALK_NEXT)
				return (ret);
		}
	}

	/*
	 * Handle the buffers in the current slab
	 */
	chunksize = cp->cache_chunksize;
	slabsize = cp->cache_slabsize;

	sp = wsp->walk_layer;
	chunks = sp->slab_chunks;
	kbase = sp->slab_base;

	dprintf(("kbase is %p\n", kbase));

	/*
	 * For small-slab caches, read the whole slab into umw_ubase so the
	 * freelist can be chased without a target read per bufctl.
	 */
	if (!(cp->cache_flags & UMF_HASH)) {
		valid = umw->umw_valid;
		ubase = umw->umw_ubase;

		if (mdb_vread(ubase, chunks * chunksize,
		    (uintptr_t)kbase) == -1) {
			mdb_warn("failed to read slab contents at %p", kbase);
			return (WALK_ERR);
		}

		/*
		 * Set up the valid map as fully allocated -- we'll punch
		 * out the freelist.
		 */
		if (type & UM_ALLOCATED)
			(void) memset(valid, 1, chunks);
	} else {
		valid = NULL;
		ubase = NULL;
	}

	/*
	 * walk the slab's freelist
	 */
	bcp = sp->slab_head;

	dprintf(("refcnt is %d; chunks is %d\n", sp->slab_refcnt, chunks));

	/*
	 * since we could be in the middle of allocating a buffer,
	 * our refcnt could be one higher than it aught.  So we
	 * check one further on the freelist than the count allows.
	 */
	for (i = sp->slab_refcnt; i <= chunks; i++) {
		uint_t ndx;

		dprintf(("bcp is %p\n", bcp));

		if (bcp == NULL) {
			if (i == chunks)
				break;
			mdb_warn(
			    "slab %p in cache %p freelist too short by %d\n",
			    sp, addr, chunks - i);
			break;
		}

		if (cp->cache_flags & UMF_HASH) {
			if (mdb_vread(&bc, sizeof (bc), (uintptr_t)bcp) == -1) {
				mdb_warn("failed to read bufctl ptr at %p",
				    bcp);
				break;
			}
			buf = bc.bc_addr;
		} else {
			/*
			 * Otherwise the buffer is in the slab which
			 * we've read in;  we just need to determine
			 * its offset in the slab to find the
			 * umem_bufctl_t.
			 */
			bc = *((umem_bufctl_t *)
			    ((uintptr_t)bcp - (uintptr_t)kbase +
			    (uintptr_t)ubase));

			buf = UMEM_BUF(cp, bcp);
		}

		ndx = ((uintptr_t)buf - (uintptr_t)kbase) / chunksize;

		if (ndx > slabsize / cp->cache_bufsize) {
			/*
			 * This is very wrong; we have managed to find
			 * a buffer in the slab which shouldn't
			 * actually be here.  Emit a warning, and
			 * try to continue.
			 */
			mdb_warn("buf %p is out of range for "
			    "slab %p, cache %p\n", buf, sp, addr);
		} else if (type & UM_ALLOCATED) {
			/*
			 * we have found a buffer on the slab's freelist;
			 * clear its entry
			 */
			valid[ndx] = 0;
		} else {
			/*
			 * Report this freed buffer
			 */
			if (type & UM_BUFCTL) {
				ret = bufctl_walk_callback(cp, wsp,
				    (uintptr_t)bcp);
			} else {
				ret = umem_walk_callback(wsp, (uintptr_t)buf);
			}
			if (ret != WALK_NEXT)
				return (ret);
		}

		bcp = bc.bc_next;
	}

	if (bcp != NULL) {
		dprintf(("slab %p in cache %p freelist too long (%p)\n",
		    sp, addr, bcp));
	}

	/*
	 * If we are walking freed buffers, the loop above handled reporting
	 * them.
	 */
	if (type & UM_FREE)
		return (WALK_NEXT);

	if (type & UM_BUFCTL) {
		mdb_warn("impossible situation: small-slab UM_BUFCTL walk for "
		    "cache %p\n", addr);
		return (WALK_ERR);
	}

	/*
	 * Report allocated buffers, skipping buffers in the magazine layer.
	 * We only get this far for small-slab caches.
	 */
	for (i = 0; ret == WALK_NEXT && i < chunks; i++) {
		buf = (char *)kbase + i * chunksize;

		if (!valid[i])
			continue;		/* on slab freelist */

		if (magcnt > 0 &&
		    bsearch(&buf, maglist, magcnt, sizeof (void *),
		    addrcmp) != NULL)
			continue;		/* in magazine layer */

		ret = umem_walk_callback(wsp, (uintptr_t)buf);
	}
	return (ret);
}
13520Sstevel@tonic-gate 
13530Sstevel@tonic-gate void
umem_walk_fini(mdb_walk_state_t * wsp)13540Sstevel@tonic-gate umem_walk_fini(mdb_walk_state_t *wsp)
13550Sstevel@tonic-gate {
13560Sstevel@tonic-gate 	umem_walk_t *umw = wsp->walk_data;
13570Sstevel@tonic-gate 	uintptr_t chunksize;
13580Sstevel@tonic-gate 	uintptr_t slabsize;
13590Sstevel@tonic-gate 
13600Sstevel@tonic-gate 	if (umw == NULL)
13610Sstevel@tonic-gate 		return;
13620Sstevel@tonic-gate 
13630Sstevel@tonic-gate 	if (umw->umw_maglist != NULL)
13640Sstevel@tonic-gate 		mdb_free(umw->umw_maglist, umw->umw_max * sizeof (void *));
13650Sstevel@tonic-gate 
13660Sstevel@tonic-gate 	chunksize = umw->umw_cp->cache_chunksize;
13670Sstevel@tonic-gate 	slabsize = umw->umw_cp->cache_slabsize;
13680Sstevel@tonic-gate 
13690Sstevel@tonic-gate 	if (umw->umw_valid != NULL)
13700Sstevel@tonic-gate 		mdb_free(umw->umw_valid, slabsize / chunksize);
13710Sstevel@tonic-gate 	if (umw->umw_ubase != NULL)
13720Sstevel@tonic-gate 		mdb_free(umw->umw_ubase, slabsize + sizeof (umem_bufctl_t));
13730Sstevel@tonic-gate 
13740Sstevel@tonic-gate 	mdb_free(umw->umw_cp, umw->umw_csize);
13750Sstevel@tonic-gate 	mdb_free(umw, sizeof (umem_walk_t));
13760Sstevel@tonic-gate }
13770Sstevel@tonic-gate 
13780Sstevel@tonic-gate /*ARGSUSED*/
13790Sstevel@tonic-gate static int
umem_walk_all(uintptr_t addr,const umem_cache_t * c,mdb_walk_state_t * wsp)13800Sstevel@tonic-gate umem_walk_all(uintptr_t addr, const umem_cache_t *c, mdb_walk_state_t *wsp)
13810Sstevel@tonic-gate {
13820Sstevel@tonic-gate 	/*
13830Sstevel@tonic-gate 	 * Buffers allocated from NOTOUCH caches can also show up as freed
13840Sstevel@tonic-gate 	 * memory in other caches.  This can be a little confusing, so we
13850Sstevel@tonic-gate 	 * don't walk NOTOUCH caches when walking all caches (thereby assuring
13860Sstevel@tonic-gate 	 * that "::walk umem" and "::walk freemem" yield disjoint output).
13870Sstevel@tonic-gate 	 */
13880Sstevel@tonic-gate 	if (c->cache_cflags & UMC_NOTOUCH)
13890Sstevel@tonic-gate 		return (WALK_NEXT);
13900Sstevel@tonic-gate 
13910Sstevel@tonic-gate 	if (mdb_pwalk(wsp->walk_data, wsp->walk_callback,
13920Sstevel@tonic-gate 	    wsp->walk_cbdata, addr) == -1)
13930Sstevel@tonic-gate 		return (WALK_DONE);
13940Sstevel@tonic-gate 
13950Sstevel@tonic-gate 	return (WALK_NEXT);
13960Sstevel@tonic-gate }
13970Sstevel@tonic-gate 
/*
 * Convenience for the *_walk_init functions below: when no cache address
 * was supplied, apply the named walk to every umem cache (via
 * umem_walk_all()).  Note that this macro expands to return statements,
 * so it ends the enclosing init function either way.
 */
#define	UMEM_WALK_ALL(name, wsp) { \
	wsp->walk_data = (name); \
	if (mdb_walk("umem_cache", (mdb_walk_cb_t)umem_walk_all, wsp) == -1) \
		return (WALK_ERR); \
	return (WALK_DONE); \
}
14040Sstevel@tonic-gate 
/*
 * Walk the allocated buffers of the cache at walk_addr, or of all
 * (non-NOTOUCH) caches when no address is given.  walk_arg, when set,
 * overrides walk_addr (used by per-cache registered walks).
 */
int
umem_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_arg != NULL)
		wsp->walk_addr = (uintptr_t)wsp->walk_arg;

	/* global walk: expand to a walk over every cache (returns) */
	if (wsp->walk_addr == NULL)
		UMEM_WALK_ALL("umem", wsp);
	return (umem_walk_init_common(wsp, UM_ALLOCATED));
}
14150Sstevel@tonic-gate 
/*
 * Walk the bufctls of allocated buffers (all caches if no address given).
 */
int
bufctl_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL)
		UMEM_WALK_ALL("bufctl", wsp);
	return (umem_walk_init_common(wsp, UM_ALLOCATED | UM_BUFCTL));
}
14230Sstevel@tonic-gate 
/*
 * Walk the freed buffers of a cache (all caches if no address given).
 */
int
freemem_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL)
		UMEM_WALK_ALL("freemem", wsp);
	return (umem_walk_init_common(wsp, UM_FREE));
}
14310Sstevel@tonic-gate 
/*
 * Walk the bufctls of freed buffers (all caches if no address given).
 */
int
freectl_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL)
		UMEM_WALK_ALL("freectl", wsp);
	return (umem_walk_init_common(wsp, UM_FREE | UM_BUFCTL));
}
14390Sstevel@tonic-gate 
/*
 * State for walking a single bufctl's audit-log history backwards in
 * time along its bc_lastlog chain.
 */
typedef struct bufctl_history_walk {
	void		*bhw_next;	/* next log entry to visit */
	umem_cache_t	*bhw_cache;	/* cache of the base bufctl */
	umem_slab_t	*bhw_slab;	/* slab of the base bufctl */
	hrtime_t	bhw_timestamp;	/* timestamp of last entry reported */
} bufctl_history_walk_t;
14460Sstevel@tonic-gate 
/*
 * Begin a history walk at the bufctl whose address is walk_addr: record
 * the identifying cache/slab so each log entry can be validated, and pick
 * the starting point in the log chain.  Global walks are not supported.
 */
int
bufctl_history_walk_init(mdb_walk_state_t *wsp)
{
	bufctl_history_walk_t *bhw;
	umem_bufctl_audit_t bc;
	umem_bufctl_audit_t bcn;

	if (wsp->walk_addr == NULL) {
		mdb_warn("bufctl_history walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	if (mdb_vread(&bc, sizeof (bc), wsp->walk_addr) == -1) {
		mdb_warn("unable to read bufctl at %p", wsp->walk_addr);
		return (WALK_ERR);
	}

	bhw = mdb_zalloc(sizeof (*bhw), UM_SLEEP);
	bhw->bhw_timestamp = 0;
	bhw->bhw_cache = bc.bc_cache;
	bhw->bhw_slab = bc.bc_slab;

	/*
	 * sometimes the first log entry matches the base bufctl;  in that
	 * case, skip the base bufctl.
	 */
	if (bc.bc_lastlog != NULL &&
	    mdb_vread(&bcn, sizeof (bcn), (uintptr_t)bc.bc_lastlog) != -1 &&
	    bc.bc_addr == bcn.bc_addr &&
	    bc.bc_cache == bcn.bc_cache &&
	    bc.bc_slab == bcn.bc_slab &&
	    bc.bc_timestamp == bcn.bc_timestamp &&
	    bc.bc_thread == bcn.bc_thread)
		bhw->bhw_next = bc.bc_lastlog;
	else
		bhw->bhw_next = (void *)wsp->walk_addr;

	/* walk_addr becomes the buffer address; step() validates against it */
	wsp->walk_addr = (uintptr_t)bc.bc_addr;
	wsp->walk_data = bhw;

	return (WALK_NEXT);
}
14890Sstevel@tonic-gate 
/*
 * Step backwards through the log chain, handing each entry to the
 * callback.  Entries are validated against the base buffer's identity,
 * and timestamps must strictly decrease, which guards against following
 * a corrupt or recycled chain forever.
 */
int
bufctl_history_walk_step(mdb_walk_state_t *wsp)
{
	bufctl_history_walk_t *bhw = wsp->walk_data;
	uintptr_t addr = (uintptr_t)bhw->bhw_next;
	uintptr_t baseaddr = wsp->walk_addr;
	umem_bufctl_audit_t *b;
	UMEM_LOCAL_BUFCTL_AUDIT(&b);

	if (addr == NULL)
		return (WALK_DONE);

	if (mdb_vread(b, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
		mdb_warn("unable to read bufctl at %p", bhw->bhw_next);
		return (WALK_ERR);
	}

	/*
	 * The bufctl is only valid if the address, cache, and slab are
	 * correct.  We also check that the timestamp is decreasing, to
	 * prevent infinite loops.
	 */
	if ((uintptr_t)b->bc_addr != baseaddr ||
	    b->bc_cache != bhw->bhw_cache ||
	    b->bc_slab != bhw->bhw_slab ||
	    (bhw->bhw_timestamp != 0 && b->bc_timestamp >= bhw->bhw_timestamp))
		return (WALK_DONE);

	bhw->bhw_next = b->bc_lastlog;
	bhw->bhw_timestamp = b->bc_timestamp;

	return (wsp->walk_callback(addr, b, wsp->walk_cbdata));
}
15230Sstevel@tonic-gate 
15240Sstevel@tonic-gate void
bufctl_history_walk_fini(mdb_walk_state_t * wsp)15250Sstevel@tonic-gate bufctl_history_walk_fini(mdb_walk_state_t *wsp)
15260Sstevel@tonic-gate {
15270Sstevel@tonic-gate 	bufctl_history_walk_t *bhw = wsp->walk_data;
15280Sstevel@tonic-gate 
15290Sstevel@tonic-gate 	mdb_free(bhw, sizeof (*bhw));
15300Sstevel@tonic-gate }
15310Sstevel@tonic-gate 
/*
 * State for walking a umem log: a local copy of the whole log, plus an
 * array of pointers into that copy sorted by timestamp.
 */
typedef struct umem_log_walk {
	umem_bufctl_audit_t *ulw_base;		/* local copy of the log */
	umem_bufctl_audit_t **ulw_sorted;	/* entries sorted by time */
	umem_log_header_t ulw_lh;		/* copy of the log header */
	size_t ulw_size;		/* size of ulw_base, in bytes */
	size_t ulw_maxndx;		/* number of entries in ulw_sorted */
	size_t ulw_ndx;			/* current position in ulw_sorted */
} umem_log_walk_t;
15400Sstevel@tonic-gate 
/*
 * Copy an entire umem log into local memory, collect a pointer to each
 * audit record, and sort the records by timestamp so the walk visits
 * them in time order.
 */
int
umem_log_walk_init(mdb_walk_state_t *wsp)
{
	uintptr_t lp = wsp->walk_addr;
	umem_log_walk_t *ulw;
	umem_log_header_t *lhp;
	int maxndx, i, j, k;

	/*
	 * By default (global walk), walk the umem_transaction_log.  Otherwise
	 * read the log whose umem_log_header_t is stored at walk_addr.
	 */
	if (lp == NULL && umem_readvar(&lp, "umem_transaction_log") == -1) {
		mdb_warn("failed to read 'umem_transaction_log'");
		return (WALK_ERR);
	}

	if (lp == NULL) {
		mdb_warn("log is disabled\n");
		return (WALK_ERR);
	}

	ulw = mdb_zalloc(sizeof (umem_log_walk_t), UM_SLEEP);
	lhp = &ulw->ulw_lh;

	if (mdb_vread(lhp, sizeof (umem_log_header_t), lp) == -1) {
		mdb_warn("failed to read log header at %p", lp);
		mdb_free(ulw, sizeof (umem_log_walk_t));
		return (WALK_ERR);
	}

	/* the last record slot in each chunk is unused, hence the -1 */
	ulw->ulw_size = lhp->lh_chunksize * lhp->lh_nchunks;
	ulw->ulw_base = mdb_alloc(ulw->ulw_size, UM_SLEEP);
	maxndx = lhp->lh_chunksize / UMEM_BUFCTL_AUDIT_SIZE - 1;

	if (mdb_vread(ulw->ulw_base, ulw->ulw_size,
	    (uintptr_t)lhp->lh_base) == -1) {
		mdb_warn("failed to read log at base %p", lhp->lh_base);
		mdb_free(ulw->ulw_base, ulw->ulw_size);
		mdb_free(ulw, sizeof (umem_log_walk_t));
		return (WALK_ERR);
	}

	ulw->ulw_sorted = mdb_alloc(maxndx * lhp->lh_nchunks *
	    sizeof (umem_bufctl_audit_t *), UM_SLEEP);

	/* gather a pointer to every record in every chunk */
	for (i = 0, k = 0; i < lhp->lh_nchunks; i++) {
		caddr_t chunk = (caddr_t)
		    ((uintptr_t)ulw->ulw_base + i * lhp->lh_chunksize);

		for (j = 0; j < maxndx; j++) {
			/* LINTED align */
			ulw->ulw_sorted[k++] = (umem_bufctl_audit_t *)chunk;
			chunk += UMEM_BUFCTL_AUDIT_SIZE;
		}
	}

	qsort(ulw->ulw_sorted, k, sizeof (umem_bufctl_audit_t *),
	    (int(*)(const void *, const void *))bufctlcmp);

	ulw->ulw_maxndx = k;
	wsp->walk_data = ulw;

	return (WALK_NEXT);
}
16060Sstevel@tonic-gate 
16070Sstevel@tonic-gate int
umem_log_walk_step(mdb_walk_state_t * wsp)16080Sstevel@tonic-gate umem_log_walk_step(mdb_walk_state_t *wsp)
16090Sstevel@tonic-gate {
16100Sstevel@tonic-gate 	umem_log_walk_t *ulw = wsp->walk_data;
16110Sstevel@tonic-gate 	umem_bufctl_audit_t *bcp;
16120Sstevel@tonic-gate 
16130Sstevel@tonic-gate 	if (ulw->ulw_ndx == ulw->ulw_maxndx)
16140Sstevel@tonic-gate 		return (WALK_DONE);
16150Sstevel@tonic-gate 
16160Sstevel@tonic-gate 	bcp = ulw->ulw_sorted[ulw->ulw_ndx++];
16170Sstevel@tonic-gate 
16180Sstevel@tonic-gate 	return (wsp->walk_callback((uintptr_t)bcp - (uintptr_t)ulw->ulw_base +
16190Sstevel@tonic-gate 	    (uintptr_t)ulw->ulw_lh.lh_base, bcp, wsp->walk_cbdata));
16200Sstevel@tonic-gate }
16210Sstevel@tonic-gate 
16220Sstevel@tonic-gate void
umem_log_walk_fini(mdb_walk_state_t * wsp)16230Sstevel@tonic-gate umem_log_walk_fini(mdb_walk_state_t *wsp)
16240Sstevel@tonic-gate {
16250Sstevel@tonic-gate 	umem_log_walk_t *ulw = wsp->walk_data;
16260Sstevel@tonic-gate 
16270Sstevel@tonic-gate 	mdb_free(ulw->ulw_base, ulw->ulw_size);
16280Sstevel@tonic-gate 	mdb_free(ulw->ulw_sorted, ulw->ulw_maxndx *
16290Sstevel@tonic-gate 	    sizeof (umem_bufctl_audit_t *));
16300Sstevel@tonic-gate 	mdb_free(ulw, sizeof (umem_log_walk_t));
16310Sstevel@tonic-gate }
16320Sstevel@tonic-gate 
/*
 * One recorded transaction for the allocdby/freedby walks.
 */
typedef struct allocdby_bufctl {
	uintptr_t abb_addr;	/* bufctl address in the target */
	hrtime_t abb_ts;	/* its timestamp, used for sorting */
} allocdby_bufctl_t;

/*
 * State for the allocdby/freedby walks: a growable array of bufctls
 * belonging to the thread of interest.
 */
typedef struct allocdby_walk {
	const char *abw_walk;	/* per-cache walk to apply ("bufctl"/"freectl") */
	uintptr_t abw_thread;	/* thread whose transactions we collect */
	size_t abw_nbufs;	/* number of entries collected */
	size_t abw_size;	/* capacity of abw_buf */
	allocdby_bufctl_t *abw_buf;	/* collected entries */
	size_t abw_ndx;		/* step()'s current position */
} allocdby_walk_t;
16460Sstevel@tonic-gate 
16470Sstevel@tonic-gate int
allocdby_walk_bufctl(uintptr_t addr,const umem_bufctl_audit_t * bcp,allocdby_walk_t * abw)16480Sstevel@tonic-gate allocdby_walk_bufctl(uintptr_t addr, const umem_bufctl_audit_t *bcp,
16490Sstevel@tonic-gate     allocdby_walk_t *abw)
16500Sstevel@tonic-gate {
16510Sstevel@tonic-gate 	if ((uintptr_t)bcp->bc_thread != abw->abw_thread)
16520Sstevel@tonic-gate 		return (WALK_NEXT);
16530Sstevel@tonic-gate 
16540Sstevel@tonic-gate 	if (abw->abw_nbufs == abw->abw_size) {
16550Sstevel@tonic-gate 		allocdby_bufctl_t *buf;
16560Sstevel@tonic-gate 		size_t oldsize = sizeof (allocdby_bufctl_t) * abw->abw_size;
16570Sstevel@tonic-gate 
16580Sstevel@tonic-gate 		buf = mdb_zalloc(oldsize << 1, UM_SLEEP);
16590Sstevel@tonic-gate 
16600Sstevel@tonic-gate 		bcopy(abw->abw_buf, buf, oldsize);
16610Sstevel@tonic-gate 		mdb_free(abw->abw_buf, oldsize);
16620Sstevel@tonic-gate 
16630Sstevel@tonic-gate 		abw->abw_size <<= 1;
16640Sstevel@tonic-gate 		abw->abw_buf = buf;
16650Sstevel@tonic-gate 	}
16660Sstevel@tonic-gate 
16670Sstevel@tonic-gate 	abw->abw_buf[abw->abw_nbufs].abb_addr = addr;
16680Sstevel@tonic-gate 	abw->abw_buf[abw->abw_nbufs].abb_ts = bcp->bc_timestamp;
16690Sstevel@tonic-gate 	abw->abw_nbufs++;
16700Sstevel@tonic-gate 
16710Sstevel@tonic-gate 	return (WALK_NEXT);
16720Sstevel@tonic-gate }
16730Sstevel@tonic-gate 
16740Sstevel@tonic-gate /*ARGSUSED*/
16750Sstevel@tonic-gate int
allocdby_walk_cache(uintptr_t addr,const umem_cache_t * c,allocdby_walk_t * abw)16760Sstevel@tonic-gate allocdby_walk_cache(uintptr_t addr, const umem_cache_t *c, allocdby_walk_t *abw)
16770Sstevel@tonic-gate {
16780Sstevel@tonic-gate 	if (mdb_pwalk(abw->abw_walk, (mdb_walk_cb_t)allocdby_walk_bufctl,
16790Sstevel@tonic-gate 	    abw, addr) == -1) {
16800Sstevel@tonic-gate 		mdb_warn("couldn't walk bufctl for cache %p", addr);
16810Sstevel@tonic-gate 		return (WALK_DONE);
16820Sstevel@tonic-gate 	}
16830Sstevel@tonic-gate 
16840Sstevel@tonic-gate 	return (WALK_NEXT);
16850Sstevel@tonic-gate }
16860Sstevel@tonic-gate 
16870Sstevel@tonic-gate static int
allocdby_cmp(const allocdby_bufctl_t * lhs,const allocdby_bufctl_t * rhs)16880Sstevel@tonic-gate allocdby_cmp(const allocdby_bufctl_t *lhs, const allocdby_bufctl_t *rhs)
16890Sstevel@tonic-gate {
16900Sstevel@tonic-gate 	if (lhs->abb_ts < rhs->abb_ts)
16910Sstevel@tonic-gate 		return (1);
16920Sstevel@tonic-gate 	if (lhs->abb_ts > rhs->abb_ts)
16930Sstevel@tonic-gate 		return (-1);
16940Sstevel@tonic-gate 	return (0);
16950Sstevel@tonic-gate }
16960Sstevel@tonic-gate 
/*
 * Shared init for allocdby/freedby: collect, from every cache, the
 * bufctls whose transaction was performed by the thread at walk_addr,
 * then sort them most-recent-first.  'walk' selects "bufctl" (allocs)
 * or "freectl" (frees).
 */
static int
allocdby_walk_init_common(mdb_walk_state_t *wsp, const char *walk)
{
	allocdby_walk_t *abw;

	if (wsp->walk_addr == NULL) {
		mdb_warn("allocdby walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	abw = mdb_zalloc(sizeof (allocdby_walk_t), UM_SLEEP);

	abw->abw_thread = wsp->walk_addr;
	abw->abw_walk = walk;
	abw->abw_size = 128;	/* something reasonable */
	abw->abw_buf =
	    mdb_zalloc(abw->abw_size * sizeof (allocdby_bufctl_t), UM_SLEEP);

	wsp->walk_data = abw;

	if (mdb_walk("umem_cache",
	    (mdb_walk_cb_t)allocdby_walk_cache, abw) == -1) {
		mdb_warn("couldn't walk umem_cache");
		allocdby_walk_fini(wsp);
		return (WALK_ERR);
	}

	/* most recent transactions first (see allocdby_cmp) */
	qsort(abw->abw_buf, abw->abw_nbufs, sizeof (allocdby_bufctl_t),
	    (int(*)(const void *, const void *))allocdby_cmp);

	return (WALK_NEXT);
}
17290Sstevel@tonic-gate 
/*
 * "allocdby" walk: allocations performed by the thread at walk_addr.
 */
int
allocdby_walk_init(mdb_walk_state_t *wsp)
{
	return (allocdby_walk_init_common(wsp, "bufctl"));
}
17350Sstevel@tonic-gate 
/*
 * "freedby" walk: frees performed by the thread at walk_addr.
 */
int
freedby_walk_init(mdb_walk_state_t *wsp)
{
	return (allocdby_walk_init_common(wsp, "freectl"));
}
17410Sstevel@tonic-gate 
/*
 * Step: read the next-most-recent bufctl from the target and hand it to
 * the callback.  A read failure ends the walk rather than erroring out.
 */
int
allocdby_walk_step(mdb_walk_state_t *wsp)
{
	allocdby_walk_t *abw = wsp->walk_data;
	uintptr_t addr;
	umem_bufctl_audit_t *bcp;
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	if (abw->abw_ndx == abw->abw_nbufs)
		return (WALK_DONE);

	addr = abw->abw_buf[abw->abw_ndx++].abb_addr;

	if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
		mdb_warn("couldn't read bufctl at %p", addr);
		return (WALK_DONE);
	}

	return (wsp->walk_callback(addr, bcp, wsp->walk_cbdata));
}
17620Sstevel@tonic-gate 
17630Sstevel@tonic-gate void
allocdby_walk_fini(mdb_walk_state_t * wsp)17640Sstevel@tonic-gate allocdby_walk_fini(mdb_walk_state_t *wsp)
17650Sstevel@tonic-gate {
17660Sstevel@tonic-gate 	allocdby_walk_t *abw = wsp->walk_data;
17670Sstevel@tonic-gate 
17680Sstevel@tonic-gate 	mdb_free(abw->abw_buf, sizeof (allocdby_bufctl_t) * abw->abw_size);
17690Sstevel@tonic-gate 	mdb_free(abw, sizeof (allocdby_walk_t));
17700Sstevel@tonic-gate }
17710Sstevel@tonic-gate 
/*
 * Print one line per transaction: bufctl address, timestamp, and the
 * first stack frame that is not internal to umem (the presumed caller).
 */
/*ARGSUSED*/
int
allocdby_walk(uintptr_t addr, const umem_bufctl_audit_t *bcp, void *ignored)
{
	char c[MDB_SYM_NAMLEN];
	GElf_Sym sym;
	int i;

	mdb_printf("%0?p %12llx ", addr, bcp->bc_timestamp);
	for (i = 0; i < bcp->bc_depth; i++) {
		if (mdb_lookup_by_addr(bcp->bc_stack[i],
		    MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1)
			continue;
		/* skip umem's own frames to get to the real caller */
		if (is_umem_sym(c, "umem_"))
			continue;
		mdb_printf("%s+0x%lx",
		    c, bcp->bc_stack[i] - (uintptr_t)sym.st_value);
		break;
	}
	mdb_printf("\n");

	return (WALK_NEXT);
}
17950Sstevel@tonic-gate 
17960Sstevel@tonic-gate static int
allocdby_common(uintptr_t addr,uint_t flags,const char * w)17970Sstevel@tonic-gate allocdby_common(uintptr_t addr, uint_t flags, const char *w)
17980Sstevel@tonic-gate {
17990Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
18000Sstevel@tonic-gate 		return (DCMD_USAGE);
18010Sstevel@tonic-gate 
18020Sstevel@tonic-gate 	mdb_printf("%-?s %12s %s\n", "BUFCTL", "TIMESTAMP", "CALLER");
18030Sstevel@tonic-gate 
18040Sstevel@tonic-gate 	if (mdb_pwalk(w, (mdb_walk_cb_t)allocdby_walk, NULL, addr) == -1) {
18050Sstevel@tonic-gate 		mdb_warn("can't walk '%s' for %p", w, addr);
18060Sstevel@tonic-gate 		return (DCMD_ERR);
18070Sstevel@tonic-gate 	}
18080Sstevel@tonic-gate 
18090Sstevel@tonic-gate 	return (DCMD_OK);
18100Sstevel@tonic-gate }
18110Sstevel@tonic-gate 
/*
 * ::allocdby dcmd — show allocations performed by the thread at addr.
 */
/*ARGSUSED*/
int
allocdby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	return (allocdby_common(addr, flags, "allocdby"));
}
18180Sstevel@tonic-gate 
/*
 * ::freedby dcmd — show frees performed by the thread at addr.
 */
/*ARGSUSED*/
int
freedby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	return (allocdby_common(addr, flags, "freedby"));
}
18250Sstevel@tonic-gate 
/*
 * Shared state for a ::whatis search across umem caches and vmem arenas.
 */
typedef struct whatis_info {
	mdb_whatis_t *wi_w;		/* whatis engine handle */
	const umem_cache_t *wi_cache;	/* cache currently being searched */
	const vmem_t *wi_vmem;		/* arena currently being searched */
	vmem_t *wi_msb_arena;	/* NOTE(review): set/used outside this chunk */
	size_t wi_slab_size;		/* span used for slab overlap checks */
	int wi_slab_found;		/* set when a slab overlaps a target */
	uint_t wi_freemem;		/* nonzero when walking freed buffers */
} whatis_info_t;
183510388SJonathan.Adams@Sun.COM 
183610388SJonathan.Adams@Sun.COM /* call one of our dcmd functions with "-v" and the provided address */
183710388SJonathan.Adams@Sun.COM static void
whatis_call_printer(mdb_dcmd_f * dcmd,uintptr_t addr)183810388SJonathan.Adams@Sun.COM whatis_call_printer(mdb_dcmd_f *dcmd, uintptr_t addr)
183910388SJonathan.Adams@Sun.COM {
184010388SJonathan.Adams@Sun.COM 	mdb_arg_t a;
184110388SJonathan.Adams@Sun.COM 	a.a_type = MDB_TYPE_STRING;
184210388SJonathan.Adams@Sun.COM 	a.a_un.a_str = "-v";
184310388SJonathan.Adams@Sun.COM 
1844*10610SJonathan.Adams@Sun.COM 	mdb_printf(":\n");
184510388SJonathan.Adams@Sun.COM 	(void) (*dcmd)(addr, DCMD_ADDRSPEC, 1, &a);
184610388SJonathan.Adams@Sun.COM }
184710388SJonathan.Adams@Sun.COM 
/*
 * Report one umem buffer match.  maddr is the matched target address,
 * addr the start of the containing buffer, and baddr its bufctl (0 when
 * there is none).  For UMF_AUDIT caches (unless quiet), defer to
 * "::bufctl -v" for the full transaction detail instead of printing the
 * bufctl address inline.
 */
static void
whatis_print_umem(whatis_info_t *wi, uintptr_t maddr, uintptr_t addr,
    uintptr_t baddr)
{
	mdb_whatis_t *w = wi->wi_w;
	const umem_cache_t *cp = wi->wi_cache;
	int quiet = (mdb_whatis_flags(w) & WHATIS_QUIET);

	int call_printer = (!quiet && (cp->cache_flags & UMF_AUDIT));

	mdb_whatis_report_object(w, maddr, addr, "");

	if (baddr != 0 && !call_printer)
		mdb_printf("bufctl %p ", baddr);

	mdb_printf("%s from %s",
	    (wi->wi_freemem == FALSE) ? "allocated" : "freed", cp->cache_name);

	/* whatis_call_printer() supplies the trailing newline */
	if (call_printer && baddr != 0) {
		whatis_call_printer(bufctl, baddr);
		return;
	}
	mdb_printf("\n");
}
1872*10610SJonathan.Adams@Sun.COM 
1873*10610SJonathan.Adams@Sun.COM /*ARGSUSED*/
1874*10610SJonathan.Adams@Sun.COM static int
whatis_walk_umem(uintptr_t addr,void * ignored,whatis_info_t * wi)1875*10610SJonathan.Adams@Sun.COM whatis_walk_umem(uintptr_t addr, void *ignored, whatis_info_t *wi)
1876*10610SJonathan.Adams@Sun.COM {
1877*10610SJonathan.Adams@Sun.COM 	mdb_whatis_t *w = wi->wi_w;
1878*10610SJonathan.Adams@Sun.COM 
1879*10610SJonathan.Adams@Sun.COM 	uintptr_t cur;
1880*10610SJonathan.Adams@Sun.COM 	size_t size = wi->wi_cache->cache_bufsize;
1881*10610SJonathan.Adams@Sun.COM 
1882*10610SJonathan.Adams@Sun.COM 	while (mdb_whatis_match(w, addr, size, &cur))
1883*10610SJonathan.Adams@Sun.COM 		whatis_print_umem(wi, cur, addr, NULL);
1884*10610SJonathan.Adams@Sun.COM 
1885*10610SJonathan.Adams@Sun.COM 	return (WHATIS_WALKRET(w));
18860Sstevel@tonic-gate }
18870Sstevel@tonic-gate 
18880Sstevel@tonic-gate /*ARGSUSED*/
18890Sstevel@tonic-gate static int
whatis_walk_bufctl(uintptr_t baddr,const umem_bufctl_t * bcp,whatis_info_t * wi)1890*10610SJonathan.Adams@Sun.COM whatis_walk_bufctl(uintptr_t baddr, const umem_bufctl_t *bcp, whatis_info_t *wi)
18910Sstevel@tonic-gate {
1892*10610SJonathan.Adams@Sun.COM 	mdb_whatis_t *w = wi->wi_w;
1893*10610SJonathan.Adams@Sun.COM 
1894*10610SJonathan.Adams@Sun.COM 	uintptr_t cur;
1895*10610SJonathan.Adams@Sun.COM 	uintptr_t addr = (uintptr_t)bcp->bc_addr;
1896*10610SJonathan.Adams@Sun.COM 	size_t size = wi->wi_cache->cache_bufsize;
1897*10610SJonathan.Adams@Sun.COM 
1898*10610SJonathan.Adams@Sun.COM 	while (mdb_whatis_match(w, addr, size, &cur))
1899*10610SJonathan.Adams@Sun.COM 		whatis_print_umem(wi, cur, addr, baddr);
1900*10610SJonathan.Adams@Sun.COM 
1901*10610SJonathan.Adams@Sun.COM 	return (WHATIS_WALKRET(w));
1902*10610SJonathan.Adams@Sun.COM }
1903*10610SJonathan.Adams@Sun.COM 
1904*10610SJonathan.Adams@Sun.COM 
1905*10610SJonathan.Adams@Sun.COM static int
whatis_walk_seg(uintptr_t addr,const vmem_seg_t * vs,whatis_info_t * wi)1906*10610SJonathan.Adams@Sun.COM whatis_walk_seg(uintptr_t addr, const vmem_seg_t *vs, whatis_info_t *wi)
1907*10610SJonathan.Adams@Sun.COM {
1908*10610SJonathan.Adams@Sun.COM 	mdb_whatis_t *w = wi->wi_w;
1909*10610SJonathan.Adams@Sun.COM 
1910*10610SJonathan.Adams@Sun.COM 	size_t size = vs->vs_end - vs->vs_start;
1911*10610SJonathan.Adams@Sun.COM 	uintptr_t cur;
1912*10610SJonathan.Adams@Sun.COM 
1913*10610SJonathan.Adams@Sun.COM 	/* We're not interested in anything but alloc and free segments */
1914*10610SJonathan.Adams@Sun.COM 	if (vs->vs_type != VMEM_ALLOC && vs->vs_type != VMEM_FREE)
19150Sstevel@tonic-gate 		return (WALK_NEXT);
19160Sstevel@tonic-gate 
1917*10610SJonathan.Adams@Sun.COM 	while (mdb_whatis_match(w, vs->vs_start, size, &cur)) {
1918*10610SJonathan.Adams@Sun.COM 		mdb_whatis_report_object(w, cur, vs->vs_start, "");
1919*10610SJonathan.Adams@Sun.COM 
1920*10610SJonathan.Adams@Sun.COM 		/*
1921*10610SJonathan.Adams@Sun.COM 		 * If we're not printing it seperately, provide the vmem_seg
1922*10610SJonathan.Adams@Sun.COM 		 * pointer if it has a stack trace.
1923*10610SJonathan.Adams@Sun.COM 		 */
1924*10610SJonathan.Adams@Sun.COM 		if ((mdb_whatis_flags(w) & WHATIS_QUIET) &&
1925*10610SJonathan.Adams@Sun.COM 		    ((mdb_whatis_flags(w) & WHATIS_BUFCTL) != 0 ||
1926*10610SJonathan.Adams@Sun.COM 		    (vs->vs_type == VMEM_ALLOC && vs->vs_depth != 0))) {
1927*10610SJonathan.Adams@Sun.COM 			mdb_printf("vmem_seg %p ", addr);
1928*10610SJonathan.Adams@Sun.COM 		}
1929*10610SJonathan.Adams@Sun.COM 
1930*10610SJonathan.Adams@Sun.COM 		mdb_printf("%s from %s vmem arena",
1931*10610SJonathan.Adams@Sun.COM 		    (vs->vs_type == VMEM_ALLOC) ? "allocated" : "freed",
1932*10610SJonathan.Adams@Sun.COM 		    wi->wi_vmem->vm_name);
1933*10610SJonathan.Adams@Sun.COM 
1934*10610SJonathan.Adams@Sun.COM 		if (!mdb_whatis_flags(w) & WHATIS_QUIET)
1935*10610SJonathan.Adams@Sun.COM 			whatis_call_printer(vmem_seg, addr);
1936*10610SJonathan.Adams@Sun.COM 		else
1937*10610SJonathan.Adams@Sun.COM 			mdb_printf("\n");
1938*10610SJonathan.Adams@Sun.COM 	}
1939*10610SJonathan.Adams@Sun.COM 
1940*10610SJonathan.Adams@Sun.COM 	return (WHATIS_WALKRET(w));
19410Sstevel@tonic-gate }
19420Sstevel@tonic-gate 
19430Sstevel@tonic-gate static int
whatis_walk_vmem(uintptr_t addr,const vmem_t * vmem,whatis_info_t * wi)1944*10610SJonathan.Adams@Sun.COM whatis_walk_vmem(uintptr_t addr, const vmem_t *vmem, whatis_info_t *wi)
19450Sstevel@tonic-gate {
1946*10610SJonathan.Adams@Sun.COM 	mdb_whatis_t *w = wi->wi_w;
19470Sstevel@tonic-gate 	const char *nm = vmem->vm_name;
1948*10610SJonathan.Adams@Sun.COM 	wi->wi_vmem = vmem;
1949*10610SJonathan.Adams@Sun.COM 
1950*10610SJonathan.Adams@Sun.COM 	if (mdb_whatis_flags(w) & WHATIS_VERBOSE)
19510Sstevel@tonic-gate 		mdb_printf("Searching vmem arena %s...\n", nm);
19520Sstevel@tonic-gate 
1953*10610SJonathan.Adams@Sun.COM 	if (mdb_pwalk("vmem_seg",
1954*10610SJonathan.Adams@Sun.COM 	    (mdb_walk_cb_t)whatis_walk_seg, wi, addr) == -1) {
19550Sstevel@tonic-gate 		mdb_warn("can't walk vmem seg for %p", addr);
19560Sstevel@tonic-gate 		return (WALK_NEXT);
19570Sstevel@tonic-gate 	}
19580Sstevel@tonic-gate 
1959*10610SJonathan.Adams@Sun.COM 	return (WHATIS_WALKRET(w));
19600Sstevel@tonic-gate }
19610Sstevel@tonic-gate 
19620Sstevel@tonic-gate /*ARGSUSED*/
19630Sstevel@tonic-gate static int
whatis_walk_slab(uintptr_t saddr,const umem_slab_t * sp,whatis_info_t * wi)1964*10610SJonathan.Adams@Sun.COM whatis_walk_slab(uintptr_t saddr, const umem_slab_t *sp, whatis_info_t *wi)
19650Sstevel@tonic-gate {
1966*10610SJonathan.Adams@Sun.COM 	mdb_whatis_t *w = wi->wi_w;
1967*10610SJonathan.Adams@Sun.COM 
1968*10610SJonathan.Adams@Sun.COM 	/* It must overlap with the slab data, or it's not interesting */
1969*10610SJonathan.Adams@Sun.COM 	if (mdb_whatis_overlaps(w,
1970*10610SJonathan.Adams@Sun.COM 	    (uintptr_t)sp->slab_base, wi->wi_slab_size)) {
1971*10610SJonathan.Adams@Sun.COM 		wi->wi_slab_found++;
1972*10610SJonathan.Adams@Sun.COM 		return (WALK_DONE);
1973*10610SJonathan.Adams@Sun.COM 	}
1974*10610SJonathan.Adams@Sun.COM 	return (WALK_NEXT);
19750Sstevel@tonic-gate }
19760Sstevel@tonic-gate 
19770Sstevel@tonic-gate static int
whatis_walk_cache(uintptr_t addr,const umem_cache_t * c,whatis_info_t * wi)1978*10610SJonathan.Adams@Sun.COM whatis_walk_cache(uintptr_t addr, const umem_cache_t *c, whatis_info_t *wi)
19790Sstevel@tonic-gate {
1980*10610SJonathan.Adams@Sun.COM 	mdb_whatis_t *w = wi->wi_w;
19810Sstevel@tonic-gate 	char *walk, *freewalk;
19820Sstevel@tonic-gate 	mdb_walk_cb_t func;
1983*10610SJonathan.Adams@Sun.COM 	int do_bufctl;
1984*10610SJonathan.Adams@Sun.COM 
1985*10610SJonathan.Adams@Sun.COM 	/* Override the '-b' flag as necessary */
1986*10610SJonathan.Adams@Sun.COM 	if (!(c->cache_flags & UMF_HASH))
1987*10610SJonathan.Adams@Sun.COM 		do_bufctl = FALSE;	/* no bufctls to walk */
1988*10610SJonathan.Adams@Sun.COM 	else if (c->cache_flags & UMF_AUDIT)
1989*10610SJonathan.Adams@Sun.COM 		do_bufctl = TRUE;	/* we always want debugging info */
1990*10610SJonathan.Adams@Sun.COM 	else
1991*10610SJonathan.Adams@Sun.COM 		do_bufctl = ((mdb_whatis_flags(w) & WHATIS_BUFCTL) != 0);
1992*10610SJonathan.Adams@Sun.COM 
1993*10610SJonathan.Adams@Sun.COM 	if (do_bufctl) {
199410388SJonathan.Adams@Sun.COM 		walk = "bufctl";
199510388SJonathan.Adams@Sun.COM 		freewalk = "freectl";
199610388SJonathan.Adams@Sun.COM 		func = (mdb_walk_cb_t)whatis_walk_bufctl;
199710388SJonathan.Adams@Sun.COM 	} else {
19980Sstevel@tonic-gate 		walk = "umem";
19990Sstevel@tonic-gate 		freewalk = "freemem";
20000Sstevel@tonic-gate 		func = (mdb_walk_cb_t)whatis_walk_umem;
20010Sstevel@tonic-gate 	}
20020Sstevel@tonic-gate 
2003*10610SJonathan.Adams@Sun.COM 	wi->wi_cache = c;
2004*10610SJonathan.Adams@Sun.COM 
2005*10610SJonathan.Adams@Sun.COM 	if (mdb_whatis_flags(w) & WHATIS_VERBOSE)
20060Sstevel@tonic-gate 		mdb_printf("Searching %s...\n", c->cache_name);
20070Sstevel@tonic-gate 
2008*10610SJonathan.Adams@Sun.COM 	/*
2009*10610SJonathan.Adams@Sun.COM 	 * If more then two buffers live on each slab, figure out if we're
2010*10610SJonathan.Adams@Sun.COM 	 * interested in anything in any slab before doing the more expensive
2011*10610SJonathan.Adams@Sun.COM 	 * umem/freemem (bufctl/freectl) walkers.
2012*10610SJonathan.Adams@Sun.COM 	 */
2013*10610SJonathan.Adams@Sun.COM 	wi->wi_slab_size = c->cache_slabsize - c->cache_maxcolor;
2014*10610SJonathan.Adams@Sun.COM 	if (!(c->cache_flags & UMF_HASH))
2015*10610SJonathan.Adams@Sun.COM 		wi->wi_slab_size -= sizeof (umem_slab_t);
2016*10610SJonathan.Adams@Sun.COM 
2017*10610SJonathan.Adams@Sun.COM 	if ((wi->wi_slab_size / c->cache_chunksize) > 2) {
2018*10610SJonathan.Adams@Sun.COM 		wi->wi_slab_found = 0;
2019*10610SJonathan.Adams@Sun.COM 		if (mdb_pwalk("umem_slab", (mdb_walk_cb_t)whatis_walk_slab, wi,
2020*10610SJonathan.Adams@Sun.COM 		    addr) == -1) {
2021*10610SJonathan.Adams@Sun.COM 			mdb_warn("can't find umem_slab walker");
2022*10610SJonathan.Adams@Sun.COM 			return (WALK_DONE);
2023*10610SJonathan.Adams@Sun.COM 		}
2024*10610SJonathan.Adams@Sun.COM 		if (wi->wi_slab_found == 0)
2025*10610SJonathan.Adams@Sun.COM 			return (WALK_NEXT);
2026*10610SJonathan.Adams@Sun.COM 	}
2027*10610SJonathan.Adams@Sun.COM 
2028*10610SJonathan.Adams@Sun.COM 	wi->wi_freemem = FALSE;
2029*10610SJonathan.Adams@Sun.COM 	if (mdb_pwalk(walk, func, wi, addr) == -1) {
20300Sstevel@tonic-gate 		mdb_warn("can't find %s walker", walk);
20310Sstevel@tonic-gate 		return (WALK_DONE);
20320Sstevel@tonic-gate 	}
20330Sstevel@tonic-gate 
2034*10610SJonathan.Adams@Sun.COM 	if (mdb_whatis_done(w))
20350Sstevel@tonic-gate 		return (WALK_DONE);
20360Sstevel@tonic-gate 
20370Sstevel@tonic-gate 	/*
20380Sstevel@tonic-gate 	 * We have searched for allocated memory; now search for freed memory.
20390Sstevel@tonic-gate 	 */
2040*10610SJonathan.Adams@Sun.COM 	if (mdb_whatis_flags(w) & WHATIS_VERBOSE)
20410Sstevel@tonic-gate 		mdb_printf("Searching %s for free memory...\n", c->cache_name);
20420Sstevel@tonic-gate 
2043*10610SJonathan.Adams@Sun.COM 	wi->wi_freemem = TRUE;
2044*10610SJonathan.Adams@Sun.COM 
2045*10610SJonathan.Adams@Sun.COM 	if (mdb_pwalk(freewalk, func, wi, addr) == -1) {
20460Sstevel@tonic-gate 		mdb_warn("can't find %s walker", freewalk);
20470Sstevel@tonic-gate 		return (WALK_DONE);
20480Sstevel@tonic-gate 	}
20490Sstevel@tonic-gate 
2050*10610SJonathan.Adams@Sun.COM 	return (WHATIS_WALKRET(w));
2051*10610SJonathan.Adams@Sun.COM }
2052*10610SJonathan.Adams@Sun.COM 
2053*10610SJonathan.Adams@Sun.COM static int
whatis_walk_touch(uintptr_t addr,const umem_cache_t * c,whatis_info_t * wi)2054*10610SJonathan.Adams@Sun.COM whatis_walk_touch(uintptr_t addr, const umem_cache_t *c, whatis_info_t *wi)
2055*10610SJonathan.Adams@Sun.COM {
2056*10610SJonathan.Adams@Sun.COM 	if (c->cache_arena == wi->wi_msb_arena ||
2057*10610SJonathan.Adams@Sun.COM 	    (c->cache_cflags & UMC_NOTOUCH))
2058*10610SJonathan.Adams@Sun.COM 		return (WALK_NEXT);
2059*10610SJonathan.Adams@Sun.COM 
2060*10610SJonathan.Adams@Sun.COM 	return (whatis_walk_cache(addr, c, wi));
2061*10610SJonathan.Adams@Sun.COM }
2062*10610SJonathan.Adams@Sun.COM 
2063*10610SJonathan.Adams@Sun.COM static int
whatis_walk_metadata(uintptr_t addr,const umem_cache_t * c,whatis_info_t * wi)2064*10610SJonathan.Adams@Sun.COM whatis_walk_metadata(uintptr_t addr, const umem_cache_t *c, whatis_info_t *wi)
2065*10610SJonathan.Adams@Sun.COM {
2066*10610SJonathan.Adams@Sun.COM 	if (c->cache_arena != wi->wi_msb_arena)
2067*10610SJonathan.Adams@Sun.COM 		return (WALK_NEXT);
2068*10610SJonathan.Adams@Sun.COM 
2069*10610SJonathan.Adams@Sun.COM 	return (whatis_walk_cache(addr, c, wi));
20700Sstevel@tonic-gate }
20710Sstevel@tonic-gate 
20720Sstevel@tonic-gate static int
whatis_walk_notouch(uintptr_t addr,const umem_cache_t * c,whatis_info_t * wi)2073*10610SJonathan.Adams@Sun.COM whatis_walk_notouch(uintptr_t addr, const umem_cache_t *c, whatis_info_t *wi)
20740Sstevel@tonic-gate {
2075*10610SJonathan.Adams@Sun.COM 	if (c->cache_arena == wi->wi_msb_arena ||
2076*10610SJonathan.Adams@Sun.COM 	    !(c->cache_cflags & UMC_NOTOUCH))
20770Sstevel@tonic-gate 		return (WALK_NEXT);
20780Sstevel@tonic-gate 
2079*10610SJonathan.Adams@Sun.COM 	return (whatis_walk_cache(addr, c, wi));
20800Sstevel@tonic-gate }
20810Sstevel@tonic-gate 
2082*10610SJonathan.Adams@Sun.COM /*ARGSUSED*/
20830Sstevel@tonic-gate static int
whatis_run_umem(mdb_whatis_t * w,void * ignored)2084*10610SJonathan.Adams@Sun.COM whatis_run_umem(mdb_whatis_t *w, void *ignored)
20850Sstevel@tonic-gate {
2086*10610SJonathan.Adams@Sun.COM 	whatis_info_t wi;
2087*10610SJonathan.Adams@Sun.COM 
2088*10610SJonathan.Adams@Sun.COM 	bzero(&wi, sizeof (wi));
2089*10610SJonathan.Adams@Sun.COM 	wi.wi_w = w;
2090*10610SJonathan.Adams@Sun.COM 
2091*10610SJonathan.Adams@Sun.COM 	/* umem's metadata is allocated from the umem_internal_arena */
2092*10610SJonathan.Adams@Sun.COM 	if (mdb_readvar(&wi.wi_msb_arena, "umem_internal_arena") == -1)
2093*10610SJonathan.Adams@Sun.COM 		mdb_warn("unable to readvar \"umem_internal_arena\"");
2094*10610SJonathan.Adams@Sun.COM 
2095*10610SJonathan.Adams@Sun.COM 	/*
2096*10610SJonathan.Adams@Sun.COM 	 * We process umem caches in the following order:
2097*10610SJonathan.Adams@Sun.COM 	 *
2098*10610SJonathan.Adams@Sun.COM 	 *	non-UMC_NOTOUCH, non-metadata	(typically the most interesting)
2099*10610SJonathan.Adams@Sun.COM 	 *	metadata			(can be huge with UMF_AUDIT)
2100*10610SJonathan.Adams@Sun.COM 	 *	UMC_NOTOUCH, non-metadata	(see umem_walk_all())
2101*10610SJonathan.Adams@Sun.COM 	 */
2102*10610SJonathan.Adams@Sun.COM 	if (mdb_walk("umem_cache", (mdb_walk_cb_t)whatis_walk_touch,
2103*10610SJonathan.Adams@Sun.COM 	    &wi) == -1 ||
2104*10610SJonathan.Adams@Sun.COM 	    mdb_walk("umem_cache", (mdb_walk_cb_t)whatis_walk_metadata,
2105*10610SJonathan.Adams@Sun.COM 	    &wi) == -1 ||
2106*10610SJonathan.Adams@Sun.COM 	    mdb_walk("umem_cache", (mdb_walk_cb_t)whatis_walk_notouch,
2107*10610SJonathan.Adams@Sun.COM 	    &wi) == -1) {
2108*10610SJonathan.Adams@Sun.COM 		mdb_warn("couldn't find umem_cache walker");
2109*10610SJonathan.Adams@Sun.COM 		return (1);
2110*10610SJonathan.Adams@Sun.COM 	}
2111*10610SJonathan.Adams@Sun.COM 	return (0);
2112*10610SJonathan.Adams@Sun.COM }
2113*10610SJonathan.Adams@Sun.COM 
2114*10610SJonathan.Adams@Sun.COM /*ARGSUSED*/
2115*10610SJonathan.Adams@Sun.COM static int
whatis_run_vmem(mdb_whatis_t * w,void * ignored)2116*10610SJonathan.Adams@Sun.COM whatis_run_vmem(mdb_whatis_t *w, void *ignored)
2117*10610SJonathan.Adams@Sun.COM {
2118*10610SJonathan.Adams@Sun.COM 	whatis_info_t wi;
2119*10610SJonathan.Adams@Sun.COM 
2120*10610SJonathan.Adams@Sun.COM 	bzero(&wi, sizeof (wi));
2121*10610SJonathan.Adams@Sun.COM 	wi.wi_w = w;
2122*10610SJonathan.Adams@Sun.COM 
2123*10610SJonathan.Adams@Sun.COM 	if (mdb_walk("vmem_postfix",
2124*10610SJonathan.Adams@Sun.COM 	    (mdb_walk_cb_t)whatis_walk_vmem, &wi) == -1) {
2125*10610SJonathan.Adams@Sun.COM 		mdb_warn("couldn't find vmem_postfix walker");
2126*10610SJonathan.Adams@Sun.COM 		return (1);
2127*10610SJonathan.Adams@Sun.COM 	}
2128*10610SJonathan.Adams@Sun.COM 	return (0);
21290Sstevel@tonic-gate }
21300Sstevel@tonic-gate 
21310Sstevel@tonic-gate int
umem_init(void)2132*10610SJonathan.Adams@Sun.COM umem_init(void)
21330Sstevel@tonic-gate {
2134*10610SJonathan.Adams@Sun.COM 	mdb_walker_t w = {
2135*10610SJonathan.Adams@Sun.COM 		"umem_cache", "walk list of umem caches", umem_cache_walk_init,
2136*10610SJonathan.Adams@Sun.COM 		umem_cache_walk_step, umem_cache_walk_fini
2137*10610SJonathan.Adams@Sun.COM 	};
2138*10610SJonathan.Adams@Sun.COM 
2139*10610SJonathan.Adams@Sun.COM 	if (mdb_add_walker(&w) == -1) {
2140*10610SJonathan.Adams@Sun.COM 		mdb_warn("failed to add umem_cache walker");
2141*10610SJonathan.Adams@Sun.COM 		return (-1);
2142*10610SJonathan.Adams@Sun.COM 	}
2143*10610SJonathan.Adams@Sun.COM 
2144*10610SJonathan.Adams@Sun.COM 	if (umem_update_variables() == -1)
2145*10610SJonathan.Adams@Sun.COM 		return (-1);
2146*10610SJonathan.Adams@Sun.COM 
2147*10610SJonathan.Adams@Sun.COM 	/* install a callback so that our variables are always up-to-date */
2148*10610SJonathan.Adams@Sun.COM 	(void) mdb_callback_add(MDB_CALLBACK_STCHG, umem_statechange_cb, NULL);
2149*10610SJonathan.Adams@Sun.COM 	umem_statechange_cb(NULL);
21500Sstevel@tonic-gate 
21510Sstevel@tonic-gate 	/*
2152*10610SJonathan.Adams@Sun.COM 	 * Register our ::whatis callbacks.
21530Sstevel@tonic-gate 	 */
2154*10610SJonathan.Adams@Sun.COM 	mdb_whatis_register("umem", whatis_run_umem, NULL,
2155*10610SJonathan.Adams@Sun.COM 	    WHATIS_PRIO_ALLOCATOR, WHATIS_REG_NO_ID);
2156*10610SJonathan.Adams@Sun.COM 	mdb_whatis_register("vmem", whatis_run_vmem, NULL,
2157*10610SJonathan.Adams@Sun.COM 	    WHATIS_PRIO_ALLOCATOR, WHATIS_REG_NO_ID);
2158*10610SJonathan.Adams@Sun.COM 
2159*10610SJonathan.Adams@Sun.COM 	return (0);
21600Sstevel@tonic-gate }
21610Sstevel@tonic-gate 
/*
 * Address range of one CPU's chunk of the umem transaction log, used by
 * umem_log_walk() to map a log entry address back to its CPU.
 */
typedef struct umem_log_cpu {
	uintptr_t umc_low;	/* lowest address of this CPU's chunk */
	uintptr_t umc_high;	/* upper bound (exclusive) of recorded entries */
} umem_log_cpu_t;
21660Sstevel@tonic-gate 
21670Sstevel@tonic-gate int
umem_log_walk(uintptr_t addr,const umem_bufctl_audit_t * b,umem_log_cpu_t * umc)21680Sstevel@tonic-gate umem_log_walk(uintptr_t addr, const umem_bufctl_audit_t *b, umem_log_cpu_t *umc)
21690Sstevel@tonic-gate {
21700Sstevel@tonic-gate 	int i;
21710Sstevel@tonic-gate 
21720Sstevel@tonic-gate 	for (i = 0; i < umem_max_ncpus; i++) {
21730Sstevel@tonic-gate 		if (addr >= umc[i].umc_low && addr < umc[i].umc_high)
21740Sstevel@tonic-gate 			break;
21750Sstevel@tonic-gate 	}
21760Sstevel@tonic-gate 
21770Sstevel@tonic-gate 	if (i == umem_max_ncpus)
21780Sstevel@tonic-gate 		mdb_printf("   ");
21790Sstevel@tonic-gate 	else
21800Sstevel@tonic-gate 		mdb_printf("%3d", i);
21810Sstevel@tonic-gate 
21820Sstevel@tonic-gate 	mdb_printf(" %0?p %0?p %16llx %0?p\n", addr, b->bc_addr,
21830Sstevel@tonic-gate 	    b->bc_timestamp, b->bc_thread);
21840Sstevel@tonic-gate 
21850Sstevel@tonic-gate 	return (WALK_NEXT);
21860Sstevel@tonic-gate }
21870Sstevel@tonic-gate 
21880Sstevel@tonic-gate /*ARGSUSED*/
21890Sstevel@tonic-gate int
umem_log(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)21900Sstevel@tonic-gate umem_log(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
21910Sstevel@tonic-gate {
21920Sstevel@tonic-gate 	umem_log_header_t lh;
21930Sstevel@tonic-gate 	umem_cpu_log_header_t clh;
21940Sstevel@tonic-gate 	uintptr_t lhp, clhp;
21950Sstevel@tonic-gate 	umem_log_cpu_t *umc;
21960Sstevel@tonic-gate 	int i;
21970Sstevel@tonic-gate 
21980Sstevel@tonic-gate 	if (umem_readvar(&lhp, "umem_transaction_log") == -1) {
21990Sstevel@tonic-gate 		mdb_warn("failed to read 'umem_transaction_log'");
22000Sstevel@tonic-gate 		return (DCMD_ERR);
22010Sstevel@tonic-gate 	}
22020Sstevel@tonic-gate 
22030Sstevel@tonic-gate 	if (lhp == NULL) {
22040Sstevel@tonic-gate 		mdb_warn("no umem transaction log\n");
22050Sstevel@tonic-gate 		return (DCMD_ERR);
22060Sstevel@tonic-gate 	}
22070Sstevel@tonic-gate 
22080Sstevel@tonic-gate 	if (mdb_vread(&lh, sizeof (umem_log_header_t), lhp) == -1) {
22090Sstevel@tonic-gate 		mdb_warn("failed to read log header at %p", lhp);
22100Sstevel@tonic-gate 		return (DCMD_ERR);
22110Sstevel@tonic-gate 	}
22120Sstevel@tonic-gate 
22130Sstevel@tonic-gate 	clhp = lhp + ((uintptr_t)&lh.lh_cpu[0] - (uintptr_t)&lh);
22140Sstevel@tonic-gate 
22150Sstevel@tonic-gate 	umc = mdb_zalloc(sizeof (umem_log_cpu_t) * umem_max_ncpus,
22160Sstevel@tonic-gate 	    UM_SLEEP | UM_GC);
22170Sstevel@tonic-gate 
22180Sstevel@tonic-gate 	for (i = 0; i < umem_max_ncpus; i++) {
22190Sstevel@tonic-gate 		if (mdb_vread(&clh, sizeof (clh), clhp) == -1) {
22200Sstevel@tonic-gate 			mdb_warn("cannot read cpu %d's log header at %p",
22210Sstevel@tonic-gate 			    i, clhp);
22220Sstevel@tonic-gate 			return (DCMD_ERR);
22230Sstevel@tonic-gate 		}
22240Sstevel@tonic-gate 
22250Sstevel@tonic-gate 		umc[i].umc_low = clh.clh_chunk * lh.lh_chunksize +
22260Sstevel@tonic-gate 		    (uintptr_t)lh.lh_base;
22270Sstevel@tonic-gate 		umc[i].umc_high = (uintptr_t)clh.clh_current;
22280Sstevel@tonic-gate 
22290Sstevel@tonic-gate 		clhp += sizeof (umem_cpu_log_header_t);
22300Sstevel@tonic-gate 	}
22310Sstevel@tonic-gate 
22320Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags)) {
22330Sstevel@tonic-gate 		mdb_printf("%3s %-?s %-?s %16s %-?s\n", "CPU", "ADDR",
22340Sstevel@tonic-gate 		    "BUFADDR", "TIMESTAMP", "THREAD");
22350Sstevel@tonic-gate 	}
22360Sstevel@tonic-gate 
22370Sstevel@tonic-gate 	/*
22380Sstevel@tonic-gate 	 * If we have been passed an address, we'll just print out that
22390Sstevel@tonic-gate 	 * log entry.
22400Sstevel@tonic-gate 	 */
22410Sstevel@tonic-gate 	if (flags & DCMD_ADDRSPEC) {
22420Sstevel@tonic-gate 		umem_bufctl_audit_t *bp;
22430Sstevel@tonic-gate 		UMEM_LOCAL_BUFCTL_AUDIT(&bp);
22440Sstevel@tonic-gate 
22450Sstevel@tonic-gate 		if (mdb_vread(bp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
22460Sstevel@tonic-gate 			mdb_warn("failed to read bufctl at %p", addr);
22470Sstevel@tonic-gate 			return (DCMD_ERR);
22480Sstevel@tonic-gate 		}
22490Sstevel@tonic-gate 
22500Sstevel@tonic-gate 		(void) umem_log_walk(addr, bp, umc);
22510Sstevel@tonic-gate 
22520Sstevel@tonic-gate 		return (DCMD_OK);
22530Sstevel@tonic-gate 	}
22540Sstevel@tonic-gate 
22550Sstevel@tonic-gate 	if (mdb_walk("umem_log", (mdb_walk_cb_t)umem_log_walk, umc) == -1) {
22560Sstevel@tonic-gate 		mdb_warn("can't find umem log walker");
22570Sstevel@tonic-gate 		return (DCMD_ERR);
22580Sstevel@tonic-gate 	}
22590Sstevel@tonic-gate 
22600Sstevel@tonic-gate 	return (DCMD_OK);
22610Sstevel@tonic-gate }
22620Sstevel@tonic-gate 
/*
 * Arguments carried through the bufctl_history walk so that each bufctl
 * in the chain is re-dispatched to the bufctl() dcmd.
 */
typedef struct bufctl_history_cb {
	int		bhc_flags;	/* dcmd flags passed to bufctl() */
	int		bhc_argc;	/* argument count for bufctl() */
	const mdb_arg_t	*bhc_argv;	/* argument vector for bufctl() */
	int		bhc_ret;	/* most recent bufctl() return value */
} bufctl_history_cb_t;
22690Sstevel@tonic-gate 
22700Sstevel@tonic-gate /*ARGSUSED*/
22710Sstevel@tonic-gate static int
bufctl_history_callback(uintptr_t addr,const void * ign,void * arg)22720Sstevel@tonic-gate bufctl_history_callback(uintptr_t addr, const void *ign, void *arg)
22730Sstevel@tonic-gate {
22740Sstevel@tonic-gate 	bufctl_history_cb_t *bhc = arg;
22750Sstevel@tonic-gate 
22760Sstevel@tonic-gate 	bhc->bhc_ret =
22770Sstevel@tonic-gate 	    bufctl(addr, bhc->bhc_flags, bhc->bhc_argc, bhc->bhc_argv);
22780Sstevel@tonic-gate 
22790Sstevel@tonic-gate 	bhc->bhc_flags &= ~DCMD_LOOPFIRST;
22800Sstevel@tonic-gate 
22810Sstevel@tonic-gate 	return ((bhc->bhc_ret == DCMD_OK)? WALK_NEXT : WALK_DONE);
22820Sstevel@tonic-gate }
22830Sstevel@tonic-gate 
/*
 * Print the help text (description and option summary) for the ::bufctl
 * dcmd.
 */
void
bufctl_help(void)
{
	mdb_printf("%s\n",
"Display the contents of umem_bufctl_audit_ts, with optional filtering.\n");
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s",
"  -v    Display the full content of the bufctl, including its stack trace\n"
"  -h    retrieve the bufctl's transaction history, if available\n"
"  -a addr\n"
"        filter out bufctls not involving the buffer at addr\n"
"  -c caller\n"
"        filter out bufctls without the function/PC in their stack trace\n"
"  -e earliest\n"
"        filter out bufctls timestamped before earliest\n"
"  -l latest\n"
"        filter out bufctls timestamped after latest\n"
"  -t thread\n"
"        filter out bufctls not involving thread\n");
}
23060Sstevel@tonic-gate 
/*
 * ::bufctl dcmd: display a umem_bufctl_audit_t, with optional filtering
 * by buffer address (-a), caller (-c), thread (-t), and timestamp range
 * (-e/-l).  With -h, the bufctl's transaction history is walked and this
 * dcmd is re-invoked (with -H as a recursion guard) on each entry.
 */
int
bufctl(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uint_t verbose = FALSE;
	uint_t history = FALSE;
	uint_t in_history = FALSE;
	uintptr_t caller = NULL, thread = NULL;
	uintptr_t laddr, haddr, baddr = NULL;
	hrtime_t earliest = 0, latest = 0;
	int i, depth;
	char c[MDB_SYM_NAMLEN];
	GElf_Sym sym;
	umem_bufctl_audit_t *bcp;
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	if (mdb_getopts(argc, argv,
	    'v', MDB_OPT_SETBITS, TRUE, &verbose,
	    'h', MDB_OPT_SETBITS, TRUE, &history,
	    'H', MDB_OPT_SETBITS, TRUE, &in_history,		/* internal */
	    'c', MDB_OPT_UINTPTR, &caller,
	    't', MDB_OPT_UINTPTR, &thread,
	    'e', MDB_OPT_UINT64, &earliest,
	    'l', MDB_OPT_UINT64, &latest,
	    'a', MDB_OPT_UINTPTR, &baddr, NULL) != argc)
		return (DCMD_USAGE);

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	/* -H is only valid internally, as part of a -h invocation */
	if (in_history && !history)
		return (DCMD_USAGE);

	if (history && !in_history) {
		mdb_arg_t *nargv = mdb_zalloc(sizeof (*nargv) * (argc + 1),
		    UM_SLEEP | UM_GC);
		bufctl_history_cb_t bhc;

		nargv[0].a_type = MDB_TYPE_STRING;
		nargv[0].a_un.a_str = "-H";		/* prevent recursion */

		for (i = 0; i < argc; i++)
			nargv[i + 1] = argv[i];

		/*
		 * When in history mode, we treat each element as if it
		 * were in a separate loop, so that the headers group
		 * bufctls with similar histories.
		 */
		bhc.bhc_flags = flags | DCMD_LOOP | DCMD_LOOPFIRST;
		bhc.bhc_argc = argc + 1;
		bhc.bhc_argv = nargv;
		bhc.bhc_ret = DCMD_OK;

		if (mdb_pwalk("bufctl_history", bufctl_history_callback, &bhc,
		    addr) == -1) {
			mdb_warn("unable to walk bufctl_history");
			return (DCMD_ERR);
		}

		if (bhc.bhc_ret == DCMD_OK && !(flags & DCMD_PIPE_OUT))
			mdb_printf("\n");

		return (bhc.bhc_ret);
	}

	if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
		if (verbose) {
			mdb_printf("%16s %16s %16s %16s\n"
			    "%<u>%16s %16s %16s %16s%</u>\n",
			    "ADDR", "BUFADDR", "TIMESTAMP", "THREAD",
			    "", "CACHE", "LASTLOG", "CONTENTS");
		} else {
			mdb_printf("%<u>%-?s %-?s %-12s %5s %s%</u>\n",
			    "ADDR", "BUFADDR", "TIMESTAMP", "THRD", "CALLER");
		}
	}

	if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
		mdb_warn("couldn't read bufctl at %p", addr);
		return (DCMD_ERR);
	}

	/*
	 * Guard against bogus bc_depth in case the bufctl is corrupt or
	 * the address does not really refer to a bufctl.
	 */
	depth = MIN(bcp->bc_depth, umem_stack_depth);

	if (caller != NULL) {
		laddr = caller;
		haddr = caller + sizeof (caller);

		if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, sizeof (c),
		    &sym) != -1 && caller == (uintptr_t)sym.st_value) {
			/*
			 * We were provided an exact symbol value; any
			 * address in the function is valid.
			 */
			laddr = (uintptr_t)sym.st_value;
			haddr = (uintptr_t)sym.st_value + sym.st_size;
		}

		for (i = 0; i < depth; i++)
			if (bcp->bc_stack[i] >= laddr &&
			    bcp->bc_stack[i] < haddr)
				break;

		/* no frame of the stack trace matched the caller filter */
		if (i == depth)
			return (DCMD_OK);
	}

	if (thread != NULL && (uintptr_t)bcp->bc_thread != thread)
		return (DCMD_OK);

	if (earliest != 0 && bcp->bc_timestamp < earliest)
		return (DCMD_OK);

	if (latest != 0 && bcp->bc_timestamp > latest)
		return (DCMD_OK);

	if (baddr != 0 && (uintptr_t)bcp->bc_addr != baddr)
		return (DCMD_OK);

	/* in a pipeline, emit only the matching bufctl address */
	if (flags & DCMD_PIPE_OUT) {
		mdb_printf("%#r\n", addr);
		return (DCMD_OK);
	}

	if (verbose) {
		mdb_printf(
		    "%<b>%16p%</b> %16p %16llx %16d\n"
		    "%16s %16p %16p %16p\n",
		    addr, bcp->bc_addr, bcp->bc_timestamp, bcp->bc_thread,
		    "", bcp->bc_cache, bcp->bc_lastlog, bcp->bc_contents);

		mdb_inc_indent(17);
		for (i = 0; i < depth; i++)
			mdb_printf("%a\n", bcp->bc_stack[i]);
		mdb_dec_indent(17);
		mdb_printf("\n");
	} else {
		mdb_printf("%0?p %0?p %12llx %5d", addr, bcp->bc_addr,
		    bcp->bc_timestamp, bcp->bc_thread);

		/* print the first frame that isn't internal to umem */
		for (i = 0; i < depth; i++) {
			if (mdb_lookup_by_addr(bcp->bc_stack[i],
			    MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1)
				continue;
			if (is_umem_sym(c, "umem_"))
				continue;
			mdb_printf(" %a\n", bcp->bc_stack[i]);
			break;
		}

		if (i >= depth)
			mdb_printf("\n");
	}

	return (DCMD_OK);
}
24670Sstevel@tonic-gate 
24680Sstevel@tonic-gate /*ARGSUSED*/
24690Sstevel@tonic-gate int
bufctl_audit(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)24700Sstevel@tonic-gate bufctl_audit(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
24710Sstevel@tonic-gate {
24720Sstevel@tonic-gate 	mdb_arg_t a;
24730Sstevel@tonic-gate 
24740Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
24750Sstevel@tonic-gate 		return (DCMD_USAGE);
24760Sstevel@tonic-gate 
24770Sstevel@tonic-gate 	if (argc != 0)
24780Sstevel@tonic-gate 		return (DCMD_USAGE);
24790Sstevel@tonic-gate 
24800Sstevel@tonic-gate 	a.a_type = MDB_TYPE_STRING;
24810Sstevel@tonic-gate 	a.a_un.a_str = "-v";
24820Sstevel@tonic-gate 
24830Sstevel@tonic-gate 	return (bufctl(addr, flags, 1, &a));
24840Sstevel@tonic-gate }
24850Sstevel@tonic-gate 
/*
 * State shared by the cache-verification walker callbacks (verify_free
 * and verify_alloc).
 */
typedef struct umem_verify {
	uint64_t *umv_buf;		/* buffer to read cache contents into */
	size_t umv_size;		/* number of bytes in umv_buf */
	int umv_corruption;		/* > 0 if corruption found. */
	int umv_besilent;		/* if set, suppress per-buffer reports */
	struct umem_cache umv_cache;	/* the cache we're operating on */
} umem_verify_t;
24930Sstevel@tonic-gate 
/*
 * verify_pattern()
 *	verify that buf is filled with the pattern pat.  Returns the byte
 *	offset of the first mismatching 64-bit word, or -1 if the entire
 *	range matches.
 */
static int64_t
verify_pattern(uint64_t *buf_arg, size_t size, uint64_t pat)
{
	/*LINTED*/
	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
	uint64_t *cur = buf_arg;

	while (cur < bufend) {
		if (*cur != pat)
			return ((uintptr_t)cur - (uintptr_t)buf_arg);
		cur++;
	}

	return (-1);
}
25100Sstevel@tonic-gate 
25110Sstevel@tonic-gate /*
25120Sstevel@tonic-gate  * verify_buftag()
25130Sstevel@tonic-gate  *	verify that btp->bt_bxstat == (bcp ^ pat)
25140Sstevel@tonic-gate  */
25150Sstevel@tonic-gate static int
verify_buftag(umem_buftag_t * btp,uintptr_t pat)25160Sstevel@tonic-gate verify_buftag(umem_buftag_t *btp, uintptr_t pat)
25170Sstevel@tonic-gate {
25180Sstevel@tonic-gate 	return (btp->bt_bxstat == ((intptr_t)btp->bt_bufctl ^ pat) ? 0 : -1);
25190Sstevel@tonic-gate }
25200Sstevel@tonic-gate 
25210Sstevel@tonic-gate /*
25220Sstevel@tonic-gate  * verify_free()
25230Sstevel@tonic-gate  *	verify the integrity of a free block of memory by checking
25240Sstevel@tonic-gate  *	that it is filled with 0xdeadbeef and that its buftag is sane.
25250Sstevel@tonic-gate  */
25260Sstevel@tonic-gate /*ARGSUSED1*/
25270Sstevel@tonic-gate static int
verify_free(uintptr_t addr,const void * data,void * private)25280Sstevel@tonic-gate verify_free(uintptr_t addr, const void *data, void *private)
25290Sstevel@tonic-gate {
25300Sstevel@tonic-gate 	umem_verify_t *umv = (umem_verify_t *)private;
25310Sstevel@tonic-gate 	uint64_t *buf = umv->umv_buf;	/* buf to validate */
25320Sstevel@tonic-gate 	int64_t corrupt;		/* corruption offset */
25330Sstevel@tonic-gate 	umem_buftag_t *buftagp;		/* ptr to buftag */
25340Sstevel@tonic-gate 	umem_cache_t *cp = &umv->umv_cache;
25350Sstevel@tonic-gate 	int besilent = umv->umv_besilent;
25360Sstevel@tonic-gate 
25370Sstevel@tonic-gate 	/*LINTED*/
25380Sstevel@tonic-gate 	buftagp = UMEM_BUFTAG(cp, buf);
25390Sstevel@tonic-gate 
25400Sstevel@tonic-gate 	/*
25410Sstevel@tonic-gate 	 * Read the buffer to check.
25420Sstevel@tonic-gate 	 */
25430Sstevel@tonic-gate 	if (mdb_vread(buf, umv->umv_size, addr) == -1) {
25440Sstevel@tonic-gate 		if (!besilent)
25450Sstevel@tonic-gate 			mdb_warn("couldn't read %p", addr);
25460Sstevel@tonic-gate 		return (WALK_NEXT);
25470Sstevel@tonic-gate 	}
25480Sstevel@tonic-gate 
25490Sstevel@tonic-gate 	if ((corrupt = verify_pattern(buf, cp->cache_verify,
25500Sstevel@tonic-gate 	    UMEM_FREE_PATTERN)) >= 0) {
25510Sstevel@tonic-gate 		if (!besilent)
25520Sstevel@tonic-gate 			mdb_printf("buffer %p (free) seems corrupted, at %p\n",
25530Sstevel@tonic-gate 			    addr, (uintptr_t)addr + corrupt);
25540Sstevel@tonic-gate 		goto corrupt;
25550Sstevel@tonic-gate 	}
25560Sstevel@tonic-gate 
25570Sstevel@tonic-gate 	if ((cp->cache_flags & UMF_HASH) &&
25580Sstevel@tonic-gate 	    buftagp->bt_redzone != UMEM_REDZONE_PATTERN) {
25590Sstevel@tonic-gate 		if (!besilent)
25600Sstevel@tonic-gate 			mdb_printf("buffer %p (free) seems to "
25610Sstevel@tonic-gate 			    "have a corrupt redzone pattern\n", addr);
25620Sstevel@tonic-gate 		goto corrupt;
25630Sstevel@tonic-gate 	}
25640Sstevel@tonic-gate 
25650Sstevel@tonic-gate 	/*
25660Sstevel@tonic-gate 	 * confirm bufctl pointer integrity.
25670Sstevel@tonic-gate 	 */
25680Sstevel@tonic-gate 	if (verify_buftag(buftagp, UMEM_BUFTAG_FREE) == -1) {
25690Sstevel@tonic-gate 		if (!besilent)
25700Sstevel@tonic-gate 			mdb_printf("buffer %p (free) has a corrupt "
25710Sstevel@tonic-gate 			    "buftag\n", addr);
25720Sstevel@tonic-gate 		goto corrupt;
25730Sstevel@tonic-gate 	}
25740Sstevel@tonic-gate 
25750Sstevel@tonic-gate 	return (WALK_NEXT);
25760Sstevel@tonic-gate corrupt:
25770Sstevel@tonic-gate 	umv->umv_corruption++;
25780Sstevel@tonic-gate 	return (WALK_NEXT);
25790Sstevel@tonic-gate }
25800Sstevel@tonic-gate 
/*
 * verify_alloc()
 *	Verify that the buftag of an allocated buffer makes sense with respect
 *	to the buffer.  Called as a walker callback for each allocated buffer
 *	in a cache; increments umv->umv_corruption for each bad buffer found.
 */
/*ARGSUSED1*/
static int
verify_alloc(uintptr_t addr, const void *data, void *private)
{
	umem_verify_t *umv = (umem_verify_t *)private;
	umem_cache_t *cp = &umv->umv_cache;
	uint64_t *buf = umv->umv_buf;	/* buf to validate */
	/*LINTED*/
	umem_buftag_t *buftagp = UMEM_BUFTAG(cp, buf);
	uint32_t *ip = (uint32_t *)buftagp;	/* 32-bit view of the buftag */
	uint8_t *bp = (uint8_t *)buf;		/* byte view of the buffer */
	int looks_ok = 0, size_ok = 1;	/* flags for finding corruption */
	int besilent = umv->umv_besilent;

	/*
	 * Read the buffer to check.
	 */
	if (mdb_vread(buf, umv->umv_size, addr) == -1) {
		if (!besilent)
			mdb_warn("couldn't read %p", addr);
		return (WALK_NEXT);
	}

	/*
	 * There are two cases to handle:
	 * 1. If the buf was alloc'd using umem_cache_alloc, it will have
	 *    0xfeedfacefeedface at the end of it
	 * 2. If the buf was alloc'd using umem_alloc, it will have
	 *    0xbb just past the end of the region in use.  At the buftag,
	 *    it will have 0xfeedface (or, if the whole buffer is in use,
	 *    0xfeedface & bb000000 or 0xfeedfacf & 000000bb depending on
	 *    endianness), followed by 32 bits containing the offset of the
	 *    0xbb byte in the buffer.
	 *
	 * Finally, the two 32-bit words that comprise the second half of the
	 * buftag should xor to UMEM_BUFTAG_ALLOC
	 */

	/* case 1: full redzone pattern; case 2: decode offset, check 0xbb */
	if (buftagp->bt_redzone == UMEM_REDZONE_PATTERN)
		looks_ok = 1;
	else if (!UMEM_SIZE_VALID(ip[1]))
		size_ok = 0;
	else if (bp[UMEM_SIZE_DECODE(ip[1])] == UMEM_REDZONE_BYTE)
		looks_ok = 1;
	else
		size_ok = 0;

	if (!size_ok) {
		if (!besilent)
			mdb_printf("buffer %p (allocated) has a corrupt "
			    "redzone size encoding\n", addr);
		goto corrupt;
	}

	if (!looks_ok) {
		if (!besilent)
			mdb_printf("buffer %p (allocated) has a corrupt "
			    "redzone signature\n", addr);
		goto corrupt;
	}

	/* second half of the buftag must xor to UMEM_BUFTAG_ALLOC */
	if (verify_buftag(buftagp, UMEM_BUFTAG_ALLOC) == -1) {
		if (!besilent)
			mdb_printf("buffer %p (allocated) has a "
			    "corrupt buftag\n", addr);
		goto corrupt;
	}

	return (WALK_NEXT);
corrupt:
	umv->umv_corruption++;
	return (WALK_NEXT);
}
26590Sstevel@tonic-gate 
26600Sstevel@tonic-gate /*ARGSUSED2*/
26610Sstevel@tonic-gate int
umem_verify(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)26620Sstevel@tonic-gate umem_verify(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
26630Sstevel@tonic-gate {
26640Sstevel@tonic-gate 	if (flags & DCMD_ADDRSPEC) {
26650Sstevel@tonic-gate 		int check_alloc = 0, check_free = 0;
26660Sstevel@tonic-gate 		umem_verify_t umv;
26670Sstevel@tonic-gate 
26680Sstevel@tonic-gate 		if (mdb_vread(&umv.umv_cache, sizeof (umv.umv_cache),
26690Sstevel@tonic-gate 		    addr) == -1) {
26700Sstevel@tonic-gate 			mdb_warn("couldn't read umem_cache %p", addr);
26710Sstevel@tonic-gate 			return (DCMD_ERR);
26720Sstevel@tonic-gate 		}
26730Sstevel@tonic-gate 
26740Sstevel@tonic-gate 		umv.umv_size = umv.umv_cache.cache_buftag +
26750Sstevel@tonic-gate 		    sizeof (umem_buftag_t);
26760Sstevel@tonic-gate 		umv.umv_buf = mdb_alloc(umv.umv_size, UM_SLEEP | UM_GC);
26770Sstevel@tonic-gate 		umv.umv_corruption = 0;
26780Sstevel@tonic-gate 
26790Sstevel@tonic-gate 		if ((umv.umv_cache.cache_flags & UMF_REDZONE)) {
26800Sstevel@tonic-gate 			check_alloc = 1;
26810Sstevel@tonic-gate 			if (umv.umv_cache.cache_flags & UMF_DEADBEEF)
26820Sstevel@tonic-gate 				check_free = 1;
26830Sstevel@tonic-gate 		} else {
26840Sstevel@tonic-gate 			if (!(flags & DCMD_LOOP)) {
26850Sstevel@tonic-gate 				mdb_warn("cache %p (%s) does not have "
26860Sstevel@tonic-gate 				    "redzone checking enabled\n", addr,
26870Sstevel@tonic-gate 				    umv.umv_cache.cache_name);
26880Sstevel@tonic-gate 			}
26890Sstevel@tonic-gate 			return (DCMD_ERR);
26900Sstevel@tonic-gate 		}
26910Sstevel@tonic-gate 
26920Sstevel@tonic-gate 		if (flags & DCMD_LOOP) {
26930Sstevel@tonic-gate 			/*
26940Sstevel@tonic-gate 			 * table mode, don't print out every corrupt buffer
26950Sstevel@tonic-gate 			 */
26960Sstevel@tonic-gate 			umv.umv_besilent = 1;
26970Sstevel@tonic-gate 		} else {
26980Sstevel@tonic-gate 			mdb_printf("Summary for cache '%s'\n",
26990Sstevel@tonic-gate 			    umv.umv_cache.cache_name);
27000Sstevel@tonic-gate 			mdb_inc_indent(2);
27010Sstevel@tonic-gate 			umv.umv_besilent = 0;
27020Sstevel@tonic-gate 		}
27030Sstevel@tonic-gate 
27040Sstevel@tonic-gate 		if (check_alloc)
27050Sstevel@tonic-gate 			(void) mdb_pwalk("umem", verify_alloc, &umv, addr);
27060Sstevel@tonic-gate 		if (check_free)
27070Sstevel@tonic-gate 			(void) mdb_pwalk("freemem", verify_free, &umv, addr);
27080Sstevel@tonic-gate 
27090Sstevel@tonic-gate 		if (flags & DCMD_LOOP) {
27100Sstevel@tonic-gate 			if (umv.umv_corruption == 0) {
27110Sstevel@tonic-gate 				mdb_printf("%-*s %?p clean\n",
27120Sstevel@tonic-gate 				    UMEM_CACHE_NAMELEN,
27130Sstevel@tonic-gate 				    umv.umv_cache.cache_name, addr);
27140Sstevel@tonic-gate 			} else {
27150Sstevel@tonic-gate 				char *s = "";	/* optional s in "buffer[s]" */
27160Sstevel@tonic-gate 				if (umv.umv_corruption > 1)
27170Sstevel@tonic-gate 					s = "s";
27180Sstevel@tonic-gate 
27190Sstevel@tonic-gate 				mdb_printf("%-*s %?p %d corrupt buffer%s\n",
27200Sstevel@tonic-gate 				    UMEM_CACHE_NAMELEN,
27210Sstevel@tonic-gate 				    umv.umv_cache.cache_name, addr,
27220Sstevel@tonic-gate 				    umv.umv_corruption, s);
27230Sstevel@tonic-gate 			}
27240Sstevel@tonic-gate 		} else {
27250Sstevel@tonic-gate 			/*
27260Sstevel@tonic-gate 			 * This is the more verbose mode, when the user has
27270Sstevel@tonic-gate 			 * type addr::umem_verify.  If the cache was clean,
27280Sstevel@tonic-gate 			 * nothing will have yet been printed. So say something.
27290Sstevel@tonic-gate 			 */
27300Sstevel@tonic-gate 			if (umv.umv_corruption == 0)
27310Sstevel@tonic-gate 				mdb_printf("clean\n");
27320Sstevel@tonic-gate 
27330Sstevel@tonic-gate 			mdb_dec_indent(2);
27340Sstevel@tonic-gate 		}
27350Sstevel@tonic-gate 	} else {
27360Sstevel@tonic-gate 		/*
27370Sstevel@tonic-gate 		 * If the user didn't specify a cache to verify, we'll walk all
27380Sstevel@tonic-gate 		 * umem_cache's, specifying ourself as a callback for each...
27390Sstevel@tonic-gate 		 * this is the equivalent of '::walk umem_cache .::umem_verify'
27400Sstevel@tonic-gate 		 */
27410Sstevel@tonic-gate 		mdb_printf("%<u>%-*s %-?s %-20s%</b>\n", UMEM_CACHE_NAMELEN,
27420Sstevel@tonic-gate 		    "Cache Name", "Addr", "Cache Integrity");
27430Sstevel@tonic-gate 		(void) (mdb_walk_dcmd("umem_cache", "umem_verify", 0, NULL));
27440Sstevel@tonic-gate 	}
27450Sstevel@tonic-gate 
27460Sstevel@tonic-gate 	return (DCMD_OK);
27470Sstevel@tonic-gate }
27480Sstevel@tonic-gate 
/*
 * Local snapshot of one vmem arena, linked both into a flat list (vn_next)
 * and into a parent/child forest reflecting each arena's vm_source.
 */
typedef struct vmem_node {
	struct vmem_node *vn_next;	/* next node on the flat snapshot list */
	struct vmem_node *vn_parent;	/* source arena's node, if any */
	struct vmem_node *vn_sibling;	/* next arena sharing our parent */
	struct vmem_node *vn_children;	/* head of our child arena list */
	uintptr_t vn_addr;		/* address of the vmem_t in the target */
	int vn_marked;			/* postfix walk: node already visited */
	vmem_t vn_vmem;			/* local copy of the arena */
} vmem_node_t;
27580Sstevel@tonic-gate 
/* State shared by the "vmem" and "vmem_postfix" walkers. */
typedef struct vmem_walk {
	vmem_node_t *vw_root;		/* list of root (sourceless) arenas */
	vmem_node_t *vw_current;	/* next node to visit */
} vmem_walk_t;
27630Sstevel@tonic-gate 
/*
 * Initialize the vmem walker: snapshot all arenas on the target's
 * 'vmem_list' into vmem_node_ts, then link them into a forest using each
 * arena's vm_source.  On error, all nodes allocated so far are freed.
 */
int
vmem_walk_init(mdb_walk_state_t *wsp)
{
	uintptr_t vaddr, paddr;
	vmem_node_t *head = NULL, *root = NULL, *current = NULL, *parent, *vp;
	vmem_walk_t *vw;

	if (umem_readvar(&vaddr, "vmem_list") == -1) {
		mdb_warn("couldn't read 'vmem_list'");
		return (WALK_ERR);
	}

	/*
	 * First pass: copy every arena into a local node, remembering which
	 * node (if any) matches the walk's starting address.
	 */
	while (vaddr != NULL) {
		vp = mdb_zalloc(sizeof (vmem_node_t), UM_SLEEP);
		vp->vn_addr = vaddr;
		vp->vn_next = head;
		head = vp;

		if (vaddr == wsp->walk_addr)
			current = vp;

		if (mdb_vread(&vp->vn_vmem, sizeof (vmem_t), vaddr) == -1) {
			mdb_warn("couldn't read vmem_t at %p", vaddr);
			goto err;
		}

		vaddr = (uintptr_t)vp->vn_vmem.vm_next;
	}

	/*
	 * Second pass: build the forest.  Arenas without a source become
	 * roots; every other arena is attached to its parent's child list.
	 */
	for (vp = head; vp != NULL; vp = vp->vn_next) {

		if ((paddr = (uintptr_t)vp->vn_vmem.vm_source) == NULL) {
			vp->vn_sibling = root;
			root = vp;
			continue;
		}

		for (parent = head; parent != NULL; parent = parent->vn_next) {
			if (parent->vn_addr != paddr)
				continue;
			vp->vn_sibling = parent->vn_children;
			parent->vn_children = vp;
			vp->vn_parent = parent;
			break;
		}

		if (parent == NULL) {
			mdb_warn("couldn't find %p's parent (%p)\n",
			    vp->vn_addr, paddr);
			goto err;
		}
	}

	vw = mdb_zalloc(sizeof (vmem_walk_t), UM_SLEEP);
	vw->vw_root = root;

	/* begin at the requested arena, or at the roots for a global walk */
	if (current != NULL)
		vw->vw_current = current;
	else
		vw->vw_current = root;

	wsp->walk_data = vw;
	return (WALK_NEXT);
err:
	/* free the partially-built snapshot list */
	for (vp = head; head != NULL; vp = head) {
		head = vp->vn_next;
		mdb_free(vp, sizeof (vmem_node_t));
	}

	return (WALK_ERR);
}
28350Sstevel@tonic-gate 
28360Sstevel@tonic-gate int
vmem_walk_step(mdb_walk_state_t * wsp)28370Sstevel@tonic-gate vmem_walk_step(mdb_walk_state_t *wsp)
28380Sstevel@tonic-gate {
28390Sstevel@tonic-gate 	vmem_walk_t *vw = wsp->walk_data;
28400Sstevel@tonic-gate 	vmem_node_t *vp;
28410Sstevel@tonic-gate 	int rval;
28420Sstevel@tonic-gate 
28430Sstevel@tonic-gate 	if ((vp = vw->vw_current) == NULL)
28440Sstevel@tonic-gate 		return (WALK_DONE);
28450Sstevel@tonic-gate 
28460Sstevel@tonic-gate 	rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata);
28470Sstevel@tonic-gate 
28480Sstevel@tonic-gate 	if (vp->vn_children != NULL) {
28490Sstevel@tonic-gate 		vw->vw_current = vp->vn_children;
28500Sstevel@tonic-gate 		return (rval);
28510Sstevel@tonic-gate 	}
28520Sstevel@tonic-gate 
28530Sstevel@tonic-gate 	do {
28540Sstevel@tonic-gate 		vw->vw_current = vp->vn_sibling;
28550Sstevel@tonic-gate 		vp = vp->vn_parent;
28560Sstevel@tonic-gate 	} while (vw->vw_current == NULL && vp != NULL);
28570Sstevel@tonic-gate 
28580Sstevel@tonic-gate 	return (rval);
28590Sstevel@tonic-gate }
28600Sstevel@tonic-gate 
28610Sstevel@tonic-gate /*
28620Sstevel@tonic-gate  * The "vmem_postfix" walk walks the vmem arenas in post-fix order; all
28630Sstevel@tonic-gate  * children are visited before their parent.  We perform the postfix walk
28640Sstevel@tonic-gate  * iteratively (rather than recursively) to allow mdb to regain control
28650Sstevel@tonic-gate  * after each callback.
28660Sstevel@tonic-gate  */
int
vmem_postfix_walk_step(mdb_walk_state_t *wsp)
{
	vmem_walk_t *vw = wsp->walk_data;
	vmem_node_t *vp = vw->vw_current;
	int rval;

	/*
	 * If this node is marked, then we know that we have already visited
	 * all of its children.  If the node has any siblings, they need to
	 * be visited next; otherwise, we need to visit the parent.  Note
	 * that vp->vn_marked will only be zero on the first invocation of
	 * the step function.
	 */
	if (vp->vn_marked) {
		if (vp->vn_sibling != NULL)
			vp = vp->vn_sibling;
		else if (vp->vn_parent != NULL)
			vp = vp->vn_parent;
		else {
			/*
			 * We have neither a parent, nor a sibling, and we
			 * have already been visited; we're done.
			 */
			return (WALK_DONE);
		}
	}

	/*
	 * Before we visit this node, visit its children.  Descend to the
	 * deepest leftmost node with no unvisited children.
	 */
	while (vp->vn_children != NULL && !vp->vn_children->vn_marked)
		vp = vp->vn_children;

	/* mark the node so the next step resumes from here */
	vp->vn_marked = 1;
	vw->vw_current = vp;
	rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata);

	return (rval);
}
29070Sstevel@tonic-gate 
/*
 * Tear down the vmem walker's node forest recursively: free each root's
 * children first, then the root itself, then its siblings.  The walk state
 * itself is freed when the final node - one with neither a sibling nor a
 * parent - has been released.
 */
void
vmem_walk_fini(mdb_walk_state_t *wsp)
{
	vmem_walk_t *vw = wsp->walk_data;
	vmem_node_t *root = vw->vw_root;
	int done;

	if (root == NULL)
		return;

	/* recurse into the children subtree first */
	if ((vw->vw_root = root->vn_children) != NULL)
		vmem_walk_fini(wsp);

	vw->vw_root = root->vn_sibling;
	done = (root->vn_sibling == NULL && root->vn_parent == NULL);
	mdb_free(root, sizeof (vmem_node_t));

	if (done) {
		mdb_free(vw, sizeof (vmem_walk_t));
	} else {
		/* continue with the siblings */
		vmem_walk_fini(wsp);
	}
}
29310Sstevel@tonic-gate 
/* State for the vmem segment walkers (vmem_seg/alloc/free/span). */
typedef struct vmem_seg_walk {
	uint8_t vsw_type;	/* segment type filter; VMEM_NONE matches all */
	uintptr_t vsw_start;	/* address of the arena's vm_seg0 anchor */
	uintptr_t vsw_current;	/* next segment to read */
} vmem_seg_walk_t;
29370Sstevel@tonic-gate 
29380Sstevel@tonic-gate /*ARGSUSED*/
29390Sstevel@tonic-gate int
vmem_seg_walk_common_init(mdb_walk_state_t * wsp,uint8_t type,char * name)29400Sstevel@tonic-gate vmem_seg_walk_common_init(mdb_walk_state_t *wsp, uint8_t type, char *name)
29410Sstevel@tonic-gate {
29420Sstevel@tonic-gate 	vmem_seg_walk_t *vsw;
29430Sstevel@tonic-gate 
29440Sstevel@tonic-gate 	if (wsp->walk_addr == NULL) {
29450Sstevel@tonic-gate 		mdb_warn("vmem_%s does not support global walks\n", name);
29460Sstevel@tonic-gate 		return (WALK_ERR);
29470Sstevel@tonic-gate 	}
29480Sstevel@tonic-gate 
29490Sstevel@tonic-gate 	wsp->walk_data = vsw = mdb_alloc(sizeof (vmem_seg_walk_t), UM_SLEEP);
29500Sstevel@tonic-gate 
29510Sstevel@tonic-gate 	vsw->vsw_type = type;
29520Sstevel@tonic-gate 	vsw->vsw_start = wsp->walk_addr + OFFSETOF(vmem_t, vm_seg0);
29530Sstevel@tonic-gate 	vsw->vsw_current = vsw->vsw_start;
29540Sstevel@tonic-gate 
29550Sstevel@tonic-gate 	return (WALK_NEXT);
29560Sstevel@tonic-gate }
29570Sstevel@tonic-gate 
29580Sstevel@tonic-gate /*
29590Sstevel@tonic-gate  * vmem segments can't have type 0 (this should be added to vmem_impl.h).
29600Sstevel@tonic-gate  */
29610Sstevel@tonic-gate #define	VMEM_NONE	0
29620Sstevel@tonic-gate 
/* Walk only the VMEM_ALLOC segments of the given arena. */
int
vmem_alloc_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_ALLOC, "alloc"));
}
29680Sstevel@tonic-gate 
/* Walk only the VMEM_FREE segments of the given arena. */
int
vmem_free_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_FREE, "free"));
}
29740Sstevel@tonic-gate 
/* Walk only the VMEM_SPAN segments of the given arena. */
int
vmem_span_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_SPAN, "span"));
}
29800Sstevel@tonic-gate 
/* Walk every segment of the given arena, regardless of type. */
int
vmem_seg_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_NONE, "seg"));
}
29860Sstevel@tonic-gate 
29870Sstevel@tonic-gate int
vmem_seg_walk_step(mdb_walk_state_t * wsp)29880Sstevel@tonic-gate vmem_seg_walk_step(mdb_walk_state_t *wsp)
29890Sstevel@tonic-gate {
29900Sstevel@tonic-gate 	vmem_seg_t seg;
29910Sstevel@tonic-gate 	vmem_seg_walk_t *vsw = wsp->walk_data;
29920Sstevel@tonic-gate 	uintptr_t addr = vsw->vsw_current;
29930Sstevel@tonic-gate 	static size_t seg_size = 0;
29940Sstevel@tonic-gate 	int rval;
29950Sstevel@tonic-gate 
29960Sstevel@tonic-gate 	if (!seg_size) {
29970Sstevel@tonic-gate 		if (umem_readvar(&seg_size, "vmem_seg_size") == -1) {
29980Sstevel@tonic-gate 			mdb_warn("failed to read 'vmem_seg_size'");
29990Sstevel@tonic-gate 			seg_size = sizeof (vmem_seg_t);
30000Sstevel@tonic-gate 		}
30010Sstevel@tonic-gate 	}
30020Sstevel@tonic-gate 
30030Sstevel@tonic-gate 	if (seg_size < sizeof (seg))
30040Sstevel@tonic-gate 		bzero((caddr_t)&seg + seg_size, sizeof (seg) - seg_size);
30050Sstevel@tonic-gate 
30060Sstevel@tonic-gate 	if (mdb_vread(&seg, seg_size, addr) == -1) {
30070Sstevel@tonic-gate 		mdb_warn("couldn't read vmem_seg at %p", addr);
30080Sstevel@tonic-gate 		return (WALK_ERR);
30090Sstevel@tonic-gate 	}
30100Sstevel@tonic-gate 
30110Sstevel@tonic-gate 	vsw->vsw_current = (uintptr_t)seg.vs_anext;
30120Sstevel@tonic-gate 	if (vsw->vsw_type != VMEM_NONE && seg.vs_type != vsw->vsw_type) {
30130Sstevel@tonic-gate 		rval = WALK_NEXT;
30140Sstevel@tonic-gate 	} else {
30150Sstevel@tonic-gate 		rval = wsp->walk_callback(addr, &seg, wsp->walk_cbdata);
30160Sstevel@tonic-gate 	}
30170Sstevel@tonic-gate 
30180Sstevel@tonic-gate 	if (vsw->vsw_current == vsw->vsw_start)
30190Sstevel@tonic-gate 		return (WALK_DONE);
30200Sstevel@tonic-gate 
30210Sstevel@tonic-gate 	return (rval);
30220Sstevel@tonic-gate }
30230Sstevel@tonic-gate 
30240Sstevel@tonic-gate void
vmem_seg_walk_fini(mdb_walk_state_t * wsp)30250Sstevel@tonic-gate vmem_seg_walk_fini(mdb_walk_state_t *wsp)
30260Sstevel@tonic-gate {
30270Sstevel@tonic-gate 	vmem_seg_walk_t *vsw = wsp->walk_data;
30280Sstevel@tonic-gate 
30290Sstevel@tonic-gate 	mdb_free(vsw, sizeof (vmem_seg_walk_t));
30300Sstevel@tonic-gate }
30310Sstevel@tonic-gate 
30320Sstevel@tonic-gate #define	VMEM_NAMEWIDTH	22
30330Sstevel@tonic-gate 
/*
 * ::vmem dcmd - print a one-line summary of a vmem arena (name, memory in
 * use, total, allocation successes/failures).  Without an address, applies
 * itself to every arena via the "vmem" walker.  The name is indented two
 * spaces per level of vm_source ancestry to show the arena hierarchy.
 */
int
vmem(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	vmem_t v, parent;
	uintptr_t paddr;
	int ident = 0;			/* name indent: 2 per ancestor */
	char c[VMEM_NAMEWIDTH];

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_walk_dcmd("vmem", "vmem", argc, argv) == -1) {
			mdb_warn("can't walk vmem");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags))
		mdb_printf("%-?s %-*s %10s %12s %9s %5s\n",
		    "ADDR", VMEM_NAMEWIDTH, "NAME", "INUSE",
		    "TOTAL", "SUCCEED", "FAIL");

	if (mdb_vread(&v, sizeof (v), addr) == -1) {
		mdb_warn("couldn't read vmem at %p", addr);
		return (DCMD_ERR);
	}

	/* count ancestors through vm_source to compute the indentation */
	for (paddr = (uintptr_t)v.vm_source; paddr != NULL; ident += 2) {
		if (mdb_vread(&parent, sizeof (parent), paddr) == -1) {
			mdb_warn("couldn't trace %p's ancestry", addr);
			ident = 0;
			break;
		}
		paddr = (uintptr_t)parent.vm_source;
	}

	(void) mdb_snprintf(c, VMEM_NAMEWIDTH, "%*s%s", ident, "", v.vm_name);

	mdb_printf("%0?p %-*s %10llu %12llu %9llu %5llu\n",
	    addr, VMEM_NAMEWIDTH, c,
	    v.vm_kstat.vk_mem_inuse, v.vm_kstat.vk_mem_total,
	    v.vm_kstat.vk_alloc, v.vm_kstat.vk_fail);

	return (DCMD_OK);
}
30780Sstevel@tonic-gate 
/*
 * Extended help for the ::vmem_seg dcmd.  Fixes the "filer out" typos in
 * the -m/-M option descriptions ("filter out").
 */
void
vmem_seg_help(void)
{
	mdb_printf("%s\n",
"Display the contents of vmem_seg_ts, with optional filtering.\n"
"\n"
"A vmem_seg_t represents a range of addresses (or arbitrary numbers),\n"
"representing a single chunk of data.  Only ALLOC segments have debugging\n"
"information.\n");
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s",
"  -v    Display the full content of the vmem_seg, including its stack trace\n"
"  -s    report the size of the segment, instead of the end address\n"
"  -c caller\n"
"        filter out segments without the function/PC in their stack trace\n"
"  -e earliest\n"
"        filter out segments timestamped before earliest\n"
"  -l latest\n"
"        filter out segments timestamped after latest\n"
"  -m minsize\n"
"        filter out segments smaller than minsize\n"
"  -M maxsize\n"
"        filter out segments larger than maxsize\n"
"  -t thread\n"
"        filter out segments not involving thread\n"
"  -T type\n"
"        filter out segments not of type 'type'\n"
"        type is one of: ALLOC/FREE/SPAN/ROTOR/WALKER\n");
}
31100Sstevel@tonic-gate 
31110Sstevel@tonic-gate 
31120Sstevel@tonic-gate /*ARGSUSED*/
31130Sstevel@tonic-gate int
vmem_seg(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)31140Sstevel@tonic-gate vmem_seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
31150Sstevel@tonic-gate {
31160Sstevel@tonic-gate 	vmem_seg_t vs;
31170Sstevel@tonic-gate 	uintptr_t *stk = vs.vs_stack;
31180Sstevel@tonic-gate 	uintptr_t sz;
31190Sstevel@tonic-gate 	uint8_t t;
31200Sstevel@tonic-gate 	const char *type = NULL;
31210Sstevel@tonic-gate 	GElf_Sym sym;
31220Sstevel@tonic-gate 	char c[MDB_SYM_NAMLEN];
31230Sstevel@tonic-gate 	int no_debug;
31240Sstevel@tonic-gate 	int i;
31250Sstevel@tonic-gate 	int depth;
31260Sstevel@tonic-gate 	uintptr_t laddr, haddr;
31270Sstevel@tonic-gate 
31280Sstevel@tonic-gate 	uintptr_t caller = NULL, thread = NULL;
31290Sstevel@tonic-gate 	uintptr_t minsize = 0, maxsize = 0;
31300Sstevel@tonic-gate 
31310Sstevel@tonic-gate 	hrtime_t earliest = 0, latest = 0;
31320Sstevel@tonic-gate 
31330Sstevel@tonic-gate 	uint_t size = 0;
31340Sstevel@tonic-gate 	uint_t verbose = 0;
31350Sstevel@tonic-gate 
31360Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
31370Sstevel@tonic-gate 		return (DCMD_USAGE);
31380Sstevel@tonic-gate 
31390Sstevel@tonic-gate 	if (mdb_getopts(argc, argv,
31400Sstevel@tonic-gate 	    'c', MDB_OPT_UINTPTR, &caller,
31410Sstevel@tonic-gate 	    'e', MDB_OPT_UINT64, &earliest,
31420Sstevel@tonic-gate 	    'l', MDB_OPT_UINT64, &latest,
31430Sstevel@tonic-gate 	    's', MDB_OPT_SETBITS, TRUE, &size,
31440Sstevel@tonic-gate 	    'm', MDB_OPT_UINTPTR, &minsize,
31450Sstevel@tonic-gate 	    'M', MDB_OPT_UINTPTR, &maxsize,
31460Sstevel@tonic-gate 	    't', MDB_OPT_UINTPTR, &thread,
31470Sstevel@tonic-gate 	    'T', MDB_OPT_STR, &type,
31480Sstevel@tonic-gate 	    'v', MDB_OPT_SETBITS, TRUE, &verbose,
31490Sstevel@tonic-gate 	    NULL) != argc)
31500Sstevel@tonic-gate 		return (DCMD_USAGE);
31510Sstevel@tonic-gate 
31520Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
31530Sstevel@tonic-gate 		if (verbose) {
31540Sstevel@tonic-gate 			mdb_printf("%16s %4s %16s %16s %16s\n"
31550Sstevel@tonic-gate 			    "%<u>%16s %4s %16s %16s %16s%</u>\n",
31560Sstevel@tonic-gate 			    "ADDR", "TYPE", "START", "END", "SIZE",
31570Sstevel@tonic-gate 			    "", "", "THREAD", "TIMESTAMP", "");
31580Sstevel@tonic-gate 		} else {
31590Sstevel@tonic-gate 			mdb_printf("%?s %4s %?s %?s %s\n", "ADDR", "TYPE",
31600Sstevel@tonic-gate 			    "START", size? "SIZE" : "END", "WHO");
31610Sstevel@tonic-gate 		}
31620Sstevel@tonic-gate 	}
31630Sstevel@tonic-gate 
31640Sstevel@tonic-gate 	if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
31650Sstevel@tonic-gate 		mdb_warn("couldn't read vmem_seg at %p", addr);
31660Sstevel@tonic-gate 		return (DCMD_ERR);
31670Sstevel@tonic-gate 	}
31680Sstevel@tonic-gate 
31690Sstevel@tonic-gate 	if (type != NULL) {
31700Sstevel@tonic-gate 		if (strcmp(type, "ALLC") == 0 || strcmp(type, "ALLOC") == 0)
31710Sstevel@tonic-gate 			t = VMEM_ALLOC;
31720Sstevel@tonic-gate 		else if (strcmp(type, "FREE") == 0)
31730Sstevel@tonic-gate 			t = VMEM_FREE;
31740Sstevel@tonic-gate 		else if (strcmp(type, "SPAN") == 0)
31750Sstevel@tonic-gate 			t = VMEM_SPAN;
31760Sstevel@tonic-gate 		else if (strcmp(type, "ROTR") == 0 ||
31770Sstevel@tonic-gate 		    strcmp(type, "ROTOR") == 0)
31780Sstevel@tonic-gate 			t = VMEM_ROTOR;
31790Sstevel@tonic-gate 		else if (strcmp(type, "WLKR") == 0 ||
31800Sstevel@tonic-gate 		    strcmp(type, "WALKER") == 0)
31810Sstevel@tonic-gate 			t = VMEM_WALKER;
31820Sstevel@tonic-gate 		else {
31830Sstevel@tonic-gate 			mdb_warn("\"%s\" is not a recognized vmem_seg type\n",
31840Sstevel@tonic-gate 			    type);
31850Sstevel@tonic-gate 			return (DCMD_ERR);
31860Sstevel@tonic-gate 		}
31870Sstevel@tonic-gate 
31880Sstevel@tonic-gate 		if (vs.vs_type != t)
31890Sstevel@tonic-gate 			return (DCMD_OK);
31900Sstevel@tonic-gate 	}
31910Sstevel@tonic-gate 
31920Sstevel@tonic-gate 	sz = vs.vs_end - vs.vs_start;
31930Sstevel@tonic-gate 
31940Sstevel@tonic-gate 	if (minsize != 0 && sz < minsize)
31950Sstevel@tonic-gate 		return (DCMD_OK);
31960Sstevel@tonic-gate 
31970Sstevel@tonic-gate 	if (maxsize != 0 && sz > maxsize)
31980Sstevel@tonic-gate 		return (DCMD_OK);
31990Sstevel@tonic-gate 
32000Sstevel@tonic-gate 	t = vs.vs_type;
32010Sstevel@tonic-gate 	depth = vs.vs_depth;
32020Sstevel@tonic-gate 
32030Sstevel@tonic-gate 	/*
32040Sstevel@tonic-gate 	 * debug info, when present, is only accurate for VMEM_ALLOC segments
32050Sstevel@tonic-gate 	 */
32060Sstevel@tonic-gate 	no_debug = (t != VMEM_ALLOC) ||
32070Sstevel@tonic-gate 	    (depth == 0 || depth > VMEM_STACK_DEPTH);
32080Sstevel@tonic-gate 
32090Sstevel@tonic-gate 	if (no_debug) {
32100Sstevel@tonic-gate 		if (caller != NULL || thread != NULL || earliest != 0 ||
32110Sstevel@tonic-gate 		    latest != 0)
32120Sstevel@tonic-gate 			return (DCMD_OK);		/* not enough info */
32130Sstevel@tonic-gate 	} else {
32140Sstevel@tonic-gate 		if (caller != NULL) {
32150Sstevel@tonic-gate 			laddr = caller;
32160Sstevel@tonic-gate 			haddr = caller + sizeof (caller);
32170Sstevel@tonic-gate 
32180Sstevel@tonic-gate 			if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c,
32190Sstevel@tonic-gate 			    sizeof (c), &sym) != -1 &&
32200Sstevel@tonic-gate 			    caller == (uintptr_t)sym.st_value) {
32210Sstevel@tonic-gate 				/*
32220Sstevel@tonic-gate 				 * We were provided an exact symbol value; any
32230Sstevel@tonic-gate 				 * address in the function is valid.
32240Sstevel@tonic-gate 				 */
32250Sstevel@tonic-gate 				laddr = (uintptr_t)sym.st_value;
32260Sstevel@tonic-gate 				haddr = (uintptr_t)sym.st_value + sym.st_size;
32270Sstevel@tonic-gate 			}
32280Sstevel@tonic-gate 
32290Sstevel@tonic-gate 			for (i = 0; i < depth; i++)
32300Sstevel@tonic-gate 				if (vs.vs_stack[i] >= laddr &&
32310Sstevel@tonic-gate 				    vs.vs_stack[i] < haddr)
32320Sstevel@tonic-gate 					break;
32330Sstevel@tonic-gate 
32340Sstevel@tonic-gate 			if (i == depth)
32350Sstevel@tonic-gate 				return (DCMD_OK);
32360Sstevel@tonic-gate 		}
32370Sstevel@tonic-gate 
32380Sstevel@tonic-gate 		if (thread != NULL && (uintptr_t)vs.vs_thread != thread)
32390Sstevel@tonic-gate 			return (DCMD_OK);
32400Sstevel@tonic-gate 
32410Sstevel@tonic-gate 		if (earliest != 0 && vs.vs_timestamp < earliest)
32420Sstevel@tonic-gate 			return (DCMD_OK);
32430Sstevel@tonic-gate 
32440Sstevel@tonic-gate 		if (latest != 0 && vs.vs_timestamp > latest)
32450Sstevel@tonic-gate 			return (DCMD_OK);
32460Sstevel@tonic-gate 	}
32470Sstevel@tonic-gate 
32480Sstevel@tonic-gate 	type = (t == VMEM_ALLOC ? "ALLC" :
32490Sstevel@tonic-gate 	    t == VMEM_FREE ? "FREE" :
32500Sstevel@tonic-gate 	    t == VMEM_SPAN ? "SPAN" :
32510Sstevel@tonic-gate 	    t == VMEM_ROTOR ? "ROTR" :
32520Sstevel@tonic-gate 	    t == VMEM_WALKER ? "WLKR" :
32530Sstevel@tonic-gate 	    "????");
32540Sstevel@tonic-gate 
32550Sstevel@tonic-gate 	if (flags & DCMD_PIPE_OUT) {
32560Sstevel@tonic-gate 		mdb_printf("%#r\n", addr);
32570Sstevel@tonic-gate 		return (DCMD_OK);
32580Sstevel@tonic-gate 	}
32590Sstevel@tonic-gate 
32600Sstevel@tonic-gate 	if (verbose) {
32610Sstevel@tonic-gate 		mdb_printf("%<b>%16p%</b> %4s %16p %16p %16d\n",
32620Sstevel@tonic-gate 		    addr, type, vs.vs_start, vs.vs_end, sz);
32630Sstevel@tonic-gate 
32640Sstevel@tonic-gate 		if (no_debug)
32650Sstevel@tonic-gate 			return (DCMD_OK);
32660Sstevel@tonic-gate 
32670Sstevel@tonic-gate 		mdb_printf("%16s %4s %16d %16llx\n",
32680Sstevel@tonic-gate 		    "", "", vs.vs_thread, vs.vs_timestamp);
32690Sstevel@tonic-gate 
32700Sstevel@tonic-gate 		mdb_inc_indent(17);
32710Sstevel@tonic-gate 		for (i = 0; i < depth; i++) {
32720Sstevel@tonic-gate 			mdb_printf("%a\n", stk[i]);
32730Sstevel@tonic-gate 		}
32740Sstevel@tonic-gate 		mdb_dec_indent(17);
32750Sstevel@tonic-gate 		mdb_printf("\n");
32760Sstevel@tonic-gate 	} else {
32770Sstevel@tonic-gate 		mdb_printf("%0?p %4s %0?p %0?p", addr, type,
32780Sstevel@tonic-gate 		    vs.vs_start, size? sz : vs.vs_end);
32790Sstevel@tonic-gate 
32800Sstevel@tonic-gate 		if (no_debug) {
32810Sstevel@tonic-gate 			mdb_printf("\n");
32820Sstevel@tonic-gate 			return (DCMD_OK);
32830Sstevel@tonic-gate 		}
32840Sstevel@tonic-gate 
32850Sstevel@tonic-gate 		for (i = 0; i < depth; i++) {
32860Sstevel@tonic-gate 			if (mdb_lookup_by_addr(stk[i], MDB_SYM_FUZZY,
32870Sstevel@tonic-gate 			    c, sizeof (c), &sym) == -1)
32880Sstevel@tonic-gate 				continue;
32890Sstevel@tonic-gate 			if (is_umem_sym(c, "vmem_"))
32900Sstevel@tonic-gate 				continue;
32910Sstevel@tonic-gate 			break;
32920Sstevel@tonic-gate 		}
32930Sstevel@tonic-gate 		mdb_printf(" %a\n", stk[i]);
32940Sstevel@tonic-gate 	}
32950Sstevel@tonic-gate 	return (DCMD_OK);
32960Sstevel@tonic-gate }
32970Sstevel@tonic-gate 
32980Sstevel@tonic-gate /*ARGSUSED*/
32990Sstevel@tonic-gate static int
showbc(uintptr_t addr,const umem_bufctl_audit_t * bcp,hrtime_t * newest)33000Sstevel@tonic-gate showbc(uintptr_t addr, const umem_bufctl_audit_t *bcp, hrtime_t *newest)
33010Sstevel@tonic-gate {
33020Sstevel@tonic-gate 	char name[UMEM_CACHE_NAMELEN + 1];
33030Sstevel@tonic-gate 	hrtime_t delta;
33040Sstevel@tonic-gate 	int i, depth;
33050Sstevel@tonic-gate 
33060Sstevel@tonic-gate 	if (bcp->bc_timestamp == 0)
33070Sstevel@tonic-gate 		return (WALK_DONE);
33080Sstevel@tonic-gate 
33090Sstevel@tonic-gate 	if (*newest == 0)
33100Sstevel@tonic-gate 		*newest = bcp->bc_timestamp;
33110Sstevel@tonic-gate 
33120Sstevel@tonic-gate 	delta = *newest - bcp->bc_timestamp;
33130Sstevel@tonic-gate 	depth = MIN(bcp->bc_depth, umem_stack_depth);
33140Sstevel@tonic-gate 
33150Sstevel@tonic-gate 	if (mdb_readstr(name, sizeof (name), (uintptr_t)
33160Sstevel@tonic-gate 	    &bcp->bc_cache->cache_name) <= 0)
33170Sstevel@tonic-gate 		(void) mdb_snprintf(name, sizeof (name), "%a", bcp->bc_cache);
33180Sstevel@tonic-gate 
33190Sstevel@tonic-gate 	mdb_printf("\nT-%lld.%09lld  addr=%p  %s\n",
33200Sstevel@tonic-gate 	    delta / NANOSEC, delta % NANOSEC, bcp->bc_addr, name);
33210Sstevel@tonic-gate 
33220Sstevel@tonic-gate 	for (i = 0; i < depth; i++)
33230Sstevel@tonic-gate 		mdb_printf("\t %a\n", bcp->bc_stack[i]);
33240Sstevel@tonic-gate 
33250Sstevel@tonic-gate 	return (WALK_NEXT);
33260Sstevel@tonic-gate }
33270Sstevel@tonic-gate 
33280Sstevel@tonic-gate int
umalog(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)33290Sstevel@tonic-gate umalog(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
33300Sstevel@tonic-gate {
33310Sstevel@tonic-gate 	const char *logname = "umem_transaction_log";
33320Sstevel@tonic-gate 	hrtime_t newest = 0;
33330Sstevel@tonic-gate 
33340Sstevel@tonic-gate 	if ((flags & DCMD_ADDRSPEC) || argc > 1)
33350Sstevel@tonic-gate 		return (DCMD_USAGE);
33360Sstevel@tonic-gate 
33370Sstevel@tonic-gate 	if (argc > 0) {
33380Sstevel@tonic-gate 		if (argv->a_type != MDB_TYPE_STRING)
33390Sstevel@tonic-gate 			return (DCMD_USAGE);
33400Sstevel@tonic-gate 		if (strcmp(argv->a_un.a_str, "fail") == 0)
33410Sstevel@tonic-gate 			logname = "umem_failure_log";
33420Sstevel@tonic-gate 		else if (strcmp(argv->a_un.a_str, "slab") == 0)
33430Sstevel@tonic-gate 			logname = "umem_slab_log";
33440Sstevel@tonic-gate 		else
33450Sstevel@tonic-gate 			return (DCMD_USAGE);
33460Sstevel@tonic-gate 	}
33470Sstevel@tonic-gate 
33480Sstevel@tonic-gate 	if (umem_readvar(&addr, logname) == -1) {
33490Sstevel@tonic-gate 		mdb_warn("failed to read %s log header pointer");
33500Sstevel@tonic-gate 		return (DCMD_ERR);
33510Sstevel@tonic-gate 	}
33520Sstevel@tonic-gate 
33530Sstevel@tonic-gate 	if (mdb_pwalk("umem_log", (mdb_walk_cb_t)showbc, &newest, addr) == -1) {
33540Sstevel@tonic-gate 		mdb_warn("failed to walk umem log");
33550Sstevel@tonic-gate 		return (DCMD_ERR);
33560Sstevel@tonic-gate 	}
33570Sstevel@tonic-gate 
33580Sstevel@tonic-gate 	return (DCMD_OK);
33590Sstevel@tonic-gate }
33600Sstevel@tonic-gate 
33610Sstevel@tonic-gate /*
33620Sstevel@tonic-gate  * As the final lure for die-hard crash(1M) users, we provide ::umausers here.
33630Sstevel@tonic-gate  * The first piece is a structure which we use to accumulate umem_cache_t
33640Sstevel@tonic-gate  * addresses of interest.  The umc_add is used as a callback for the umem_cache
33650Sstevel@tonic-gate  * walker; we either add all caches, or ones named explicitly as arguments.
33660Sstevel@tonic-gate  */
33670Sstevel@tonic-gate 
/*
 * Accumulator for umem_cache_t addresses of interest, filled in by the
 * umc_add() callback as the "umem_cache" walker runs.  The array is
 * grown on demand (doubling, UM_GC-allocated) by umc_add().
 */
typedef struct umclist {
	const char *umc_name;			/* Name to match (or NULL) */
	uintptr_t *umc_caches;			/* List of umem_cache_t addrs */
	int umc_nelems;				/* Num entries in umc_caches */
	int umc_size;				/* Size of umc_caches array */
} umclist_t;
33740Sstevel@tonic-gate 
33750Sstevel@tonic-gate static int
umc_add(uintptr_t addr,const umem_cache_t * cp,umclist_t * umc)33760Sstevel@tonic-gate umc_add(uintptr_t addr, const umem_cache_t *cp, umclist_t *umc)
33770Sstevel@tonic-gate {
33780Sstevel@tonic-gate 	void *p;
33790Sstevel@tonic-gate 	int s;
33800Sstevel@tonic-gate 
33810Sstevel@tonic-gate 	if (umc->umc_name == NULL ||
33820Sstevel@tonic-gate 	    strcmp(cp->cache_name, umc->umc_name) == 0) {
33830Sstevel@tonic-gate 		/*
33840Sstevel@tonic-gate 		 * If we have a match, grow our array (if necessary), and then
33850Sstevel@tonic-gate 		 * add the virtual address of the matching cache to our list.
33860Sstevel@tonic-gate 		 */
33870Sstevel@tonic-gate 		if (umc->umc_nelems >= umc->umc_size) {
33880Sstevel@tonic-gate 			s = umc->umc_size ? umc->umc_size * 2 : 256;
33890Sstevel@tonic-gate 			p = mdb_alloc(sizeof (uintptr_t) * s, UM_SLEEP | UM_GC);
33900Sstevel@tonic-gate 
33910Sstevel@tonic-gate 			bcopy(umc->umc_caches, p,
33920Sstevel@tonic-gate 			    sizeof (uintptr_t) * umc->umc_size);
33930Sstevel@tonic-gate 
33940Sstevel@tonic-gate 			umc->umc_caches = p;
33950Sstevel@tonic-gate 			umc->umc_size = s;
33960Sstevel@tonic-gate 		}
33970Sstevel@tonic-gate 
33980Sstevel@tonic-gate 		umc->umc_caches[umc->umc_nelems++] = addr;
33990Sstevel@tonic-gate 		return (umc->umc_name ? WALK_DONE : WALK_NEXT);
34000Sstevel@tonic-gate 	}
34010Sstevel@tonic-gate 
34020Sstevel@tonic-gate 	return (WALK_NEXT);
34030Sstevel@tonic-gate }
34040Sstevel@tonic-gate 
34050Sstevel@tonic-gate /*
34060Sstevel@tonic-gate  * The second piece of ::umausers is a hash table of allocations.  Each
34070Sstevel@tonic-gate  * allocation owner is identified by its stack trace and data_size.  We then
34080Sstevel@tonic-gate  * track the total bytes of all such allocations, and the number of allocations
34090Sstevel@tonic-gate  * to report at the end.  Once we have a list of caches, we walk through the
34100Sstevel@tonic-gate  * allocated bufctls of each, and update our hash table accordingly.
34110Sstevel@tonic-gate  */
34120Sstevel@tonic-gate 
/*
 * One "allocation owner": a unique (stack trace, data_size) pair, with
 * running totals.  Owners live in a flat array that doubles as an open
 * hash table: every slot has a umo_head bucket pointer, and chains are
 * threaded through umo_next.  The signature is data_size plus the sum
 * of the stack PCs (see umu_add()).
 */
typedef struct umowner {
	struct umowner *umo_head;		/* First hash elt in bucket */
	struct umowner *umo_next;		/* Next hash elt in chain */
	size_t umo_signature;			/* Hash table signature */
	uint_t umo_num;				/* Number of allocations */
	size_t umo_data_size;			/* Size of each allocation */
	size_t umo_total_size;			/* Total bytes of allocation */
	int umo_depth;				/* Depth of stack trace */
	uintptr_t *umo_stack;			/* Stack trace */
} umowner_t;

/*
 * Per-invocation state for ::umausers: the owner hash table, a parallel
 * arena holding each owner's stack trace (umem_stack_depth entries per
 * slot), and the cache currently being walked.
 */
typedef struct umusers {
	const umem_cache_t *umu_cache;		/* Current umem cache */
	umowner_t *umu_hash;			/* Hash table of owners */
	uintptr_t *umu_stacks;			/* stacks for owners */
	int umu_nelems;				/* Number of entries in use */
	int umu_size;				/* Total number of entries */
} umusers_t;
34310Sstevel@tonic-gate 
/*
 * Record one allocation (identified by its bufctl's stack trace and by
 * data_size) in the owner hash table, growing and rehashing the table
 * when it fills up.  "size" is added to the matching owner's running
 * total; a new owner entry is created on first sight.
 */
static void
umu_add(umusers_t *umu, const umem_bufctl_audit_t *bcp,
    size_t size, size_t data_size)
{
	int i, depth = MIN(bcp->bc_depth, umem_stack_depth);
	size_t bucket, signature = data_size;
	umowner_t *umo, *umoend;

	/*
	 * If the hash table is full, double its size and rehash everything.
	 */
	if (umu->umu_nelems >= umu->umu_size) {
		int s = umu->umu_size ? umu->umu_size * 2 : 1024;
		size_t umowner_size = sizeof (umowner_t);
		size_t trace_size = umem_stack_depth * sizeof (uintptr_t);
		uintptr_t *new_stacks;

		/* UM_GC: mdb reclaims these when the dcmd completes */
		umo = mdb_alloc(umowner_size * s, UM_SLEEP | UM_GC);
		new_stacks = mdb_alloc(trace_size * s, UM_SLEEP | UM_GC);

		bcopy(umu->umu_hash, umo, umowner_size * umu->umu_size);
		bcopy(umu->umu_stacks, new_stacks, trace_size * umu->umu_size);
		umu->umu_hash = umo;
		umu->umu_stacks = new_stacks;
		umu->umu_size = s;

		/*
		 * Re-point every slot's stack buffer into the new stack
		 * arena and clear all bucket heads before rehashing.
		 */
		umoend = umu->umu_hash + umu->umu_size;
		for (umo = umu->umu_hash; umo < umoend; umo++) {
			umo->umo_head = NULL;
			umo->umo_stack = &umu->umu_stacks[
			    umem_stack_depth * (umo - umu->umu_hash)];
		}

		/* re-thread the chains for the entries already in use */
		umoend = umu->umu_hash + umu->umu_nelems;
		for (umo = umu->umu_hash; umo < umoend; umo++) {
			bucket = umo->umo_signature & (umu->umu_size - 1);
			umo->umo_next = umu->umu_hash[bucket].umo_head;
			umu->umu_hash[bucket].umo_head = umo;
		}
	}

	/*
	 * Finish computing the hash signature from the stack trace, and then
	 * see if the owner is in the hash table.  If so, update our stats.
	 */
	for (i = 0; i < depth; i++)
		signature += bcp->bc_stack[i];

	/* umu_size is always a power of two, so this masks to a bucket */
	bucket = signature & (umu->umu_size - 1);

	for (umo = umu->umu_hash[bucket].umo_head; umo; umo = umo->umo_next) {
		if (umo->umo_signature == signature) {
			size_t difference = 0;

			/*
			 * Equal signatures can collide; OR together the
			 * field differences so any mismatch in size, depth,
			 * or stack frames leaves "difference" nonzero.
			 */
			difference |= umo->umo_data_size - data_size;
			difference |= umo->umo_depth - depth;

			for (i = 0; i < depth; i++) {
				difference |= umo->umo_stack[i] -
				    bcp->bc_stack[i];
			}

			if (difference == 0) {
				umo->umo_total_size += size;
				umo->umo_num++;
				return;
			}
		}
	}

	/*
	 * If the owner is not yet hashed, grab the next element and fill it
	 * in based on the allocation information.
	 */
	umo = &umu->umu_hash[umu->umu_nelems++];
	umo->umo_next = umu->umu_hash[bucket].umo_head;
	umu->umu_hash[bucket].umo_head = umo;

	umo->umo_signature = signature;
	umo->umo_num = 1;
	umo->umo_data_size = data_size;
	umo->umo_total_size = size;
	umo->umo_depth = depth;

	for (i = 0; i < depth; i++)
		umo->umo_stack[i] = bcp->bc_stack[i];
}
35190Sstevel@tonic-gate 
35200Sstevel@tonic-gate /*
35210Sstevel@tonic-gate  * When ::umausers is invoked without the -f flag, we simply update our hash
35220Sstevel@tonic-gate  * table with the information from each allocated bufctl.
35230Sstevel@tonic-gate  */
35240Sstevel@tonic-gate /*ARGSUSED*/
35250Sstevel@tonic-gate static int
umause1(uintptr_t addr,const umem_bufctl_audit_t * bcp,umusers_t * umu)35260Sstevel@tonic-gate umause1(uintptr_t addr, const umem_bufctl_audit_t *bcp, umusers_t *umu)
35270Sstevel@tonic-gate {
35280Sstevel@tonic-gate 	const umem_cache_t *cp = umu->umu_cache;
35290Sstevel@tonic-gate 
35300Sstevel@tonic-gate 	umu_add(umu, bcp, cp->cache_bufsize, cp->cache_bufsize);
35310Sstevel@tonic-gate 	return (WALK_NEXT);
35320Sstevel@tonic-gate }
35330Sstevel@tonic-gate 
35340Sstevel@tonic-gate /*
35350Sstevel@tonic-gate  * When ::umausers is invoked with the -f flag, we print out the information
35360Sstevel@tonic-gate  * for each bufctl as well as updating the hash table.
35370Sstevel@tonic-gate  */
35380Sstevel@tonic-gate static int
umause2(uintptr_t addr,const umem_bufctl_audit_t * bcp,umusers_t * umu)35390Sstevel@tonic-gate umause2(uintptr_t addr, const umem_bufctl_audit_t *bcp, umusers_t *umu)
35400Sstevel@tonic-gate {
35410Sstevel@tonic-gate 	int i, depth = MIN(bcp->bc_depth, umem_stack_depth);
35420Sstevel@tonic-gate 	const umem_cache_t *cp = umu->umu_cache;
35430Sstevel@tonic-gate 
35440Sstevel@tonic-gate 	mdb_printf("size %d, addr %p, thread %p, cache %s\n",
35450Sstevel@tonic-gate 	    cp->cache_bufsize, addr, bcp->bc_thread, cp->cache_name);
35460Sstevel@tonic-gate 
35470Sstevel@tonic-gate 	for (i = 0; i < depth; i++)
35480Sstevel@tonic-gate 		mdb_printf("\t %a\n", bcp->bc_stack[i]);
35490Sstevel@tonic-gate 
35500Sstevel@tonic-gate 	umu_add(umu, bcp, cp->cache_bufsize, cp->cache_bufsize);
35510Sstevel@tonic-gate 	return (WALK_NEXT);
35520Sstevel@tonic-gate }
35530Sstevel@tonic-gate 
35540Sstevel@tonic-gate /*
35550Sstevel@tonic-gate  * We sort our results by allocation size before printing them.
35560Sstevel@tonic-gate  */
35570Sstevel@tonic-gate static int
umownercmp(const void * lp,const void * rp)35580Sstevel@tonic-gate umownercmp(const void *lp, const void *rp)
35590Sstevel@tonic-gate {
35600Sstevel@tonic-gate 	const umowner_t *lhs = lp;
35610Sstevel@tonic-gate 	const umowner_t *rhs = rp;
35620Sstevel@tonic-gate 
35630Sstevel@tonic-gate 	return (rhs->umo_total_size - lhs->umo_total_size);
35640Sstevel@tonic-gate }
35650Sstevel@tonic-gate 
35660Sstevel@tonic-gate /*
35670Sstevel@tonic-gate  * The main engine of ::umausers is relatively straightforward: First we
35680Sstevel@tonic-gate  * accumulate our list of umem_cache_t addresses into the umclist_t. Next we
35690Sstevel@tonic-gate  * iterate over the allocated bufctls of each cache in the list.  Finally,
35700Sstevel@tonic-gate  * we sort and print our results.
35710Sstevel@tonic-gate  */
35720Sstevel@tonic-gate /*ARGSUSED*/
35730Sstevel@tonic-gate int
umausers(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)35740Sstevel@tonic-gate umausers(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
35750Sstevel@tonic-gate {
35760Sstevel@tonic-gate 	int mem_threshold = 8192;	/* Minimum # bytes for printing */
35770Sstevel@tonic-gate 	int cnt_threshold = 100;	/* Minimum # blocks for printing */
35780Sstevel@tonic-gate 	int audited_caches = 0;		/* Number of UMF_AUDIT caches found */
35790Sstevel@tonic-gate 	int do_all_caches = 1;		/* Do all caches (no arguments) */
35800Sstevel@tonic-gate 	int opt_e = FALSE;		/* Include "small" users */
35810Sstevel@tonic-gate 	int opt_f = FALSE;		/* Print stack traces */
35820Sstevel@tonic-gate 
35830Sstevel@tonic-gate 	mdb_walk_cb_t callback = (mdb_walk_cb_t)umause1;
35840Sstevel@tonic-gate 	umowner_t *umo, *umoend;
35850Sstevel@tonic-gate 	int i, oelems;
35860Sstevel@tonic-gate 
35870Sstevel@tonic-gate 	umclist_t umc;
35880Sstevel@tonic-gate 	umusers_t umu;
35890Sstevel@tonic-gate 
35900Sstevel@tonic-gate 	if (flags & DCMD_ADDRSPEC)
35910Sstevel@tonic-gate 		return (DCMD_USAGE);
35920Sstevel@tonic-gate 
35930Sstevel@tonic-gate 	bzero(&umc, sizeof (umc));
35940Sstevel@tonic-gate 	bzero(&umu, sizeof (umu));
35950Sstevel@tonic-gate 
35960Sstevel@tonic-gate 	while ((i = mdb_getopts(argc, argv,
35970Sstevel@tonic-gate 	    'e', MDB_OPT_SETBITS, TRUE, &opt_e,
35980Sstevel@tonic-gate 	    'f', MDB_OPT_SETBITS, TRUE, &opt_f, NULL)) != argc) {
35990Sstevel@tonic-gate 
36000Sstevel@tonic-gate 		argv += i;	/* skip past options we just processed */
36010Sstevel@tonic-gate 		argc -= i;	/* adjust argc */
36020Sstevel@tonic-gate 
36030Sstevel@tonic-gate 		if (argv->a_type != MDB_TYPE_STRING || *argv->a_un.a_str == '-')
36040Sstevel@tonic-gate 			return (DCMD_USAGE);
36050Sstevel@tonic-gate 
36060Sstevel@tonic-gate 		oelems = umc.umc_nelems;
36070Sstevel@tonic-gate 		umc.umc_name = argv->a_un.a_str;
36080Sstevel@tonic-gate 		(void) mdb_walk("umem_cache", (mdb_walk_cb_t)umc_add, &umc);
36090Sstevel@tonic-gate 
36100Sstevel@tonic-gate 		if (umc.umc_nelems == oelems) {
36110Sstevel@tonic-gate 			mdb_warn("unknown umem cache: %s\n", umc.umc_name);
36120Sstevel@tonic-gate 			return (DCMD_ERR);
36130Sstevel@tonic-gate 		}
36140Sstevel@tonic-gate 
36150Sstevel@tonic-gate 		do_all_caches = 0;
36160Sstevel@tonic-gate 		argv++;
36170Sstevel@tonic-gate 		argc--;
36180Sstevel@tonic-gate 	}
36190Sstevel@tonic-gate 
36200Sstevel@tonic-gate 	if (opt_e)
36210Sstevel@tonic-gate 		mem_threshold = cnt_threshold = 0;
36220Sstevel@tonic-gate 
36230Sstevel@tonic-gate 	if (opt_f)
36240Sstevel@tonic-gate 		callback = (mdb_walk_cb_t)umause2;
36250Sstevel@tonic-gate 
36260Sstevel@tonic-gate 	if (do_all_caches) {
36270Sstevel@tonic-gate 		umc.umc_name = NULL; /* match all cache names */
36280Sstevel@tonic-gate 		(void) mdb_walk("umem_cache", (mdb_walk_cb_t)umc_add, &umc);
36290Sstevel@tonic-gate 	}
36300Sstevel@tonic-gate 
36310Sstevel@tonic-gate 	for (i = 0; i < umc.umc_nelems; i++) {
36320Sstevel@tonic-gate 		uintptr_t cp = umc.umc_caches[i];
36330Sstevel@tonic-gate 		umem_cache_t c;
36340Sstevel@tonic-gate 
36350Sstevel@tonic-gate 		if (mdb_vread(&c, sizeof (c), cp) == -1) {
36360Sstevel@tonic-gate 			mdb_warn("failed to read cache at %p", cp);
36370Sstevel@tonic-gate 			continue;
36380Sstevel@tonic-gate 		}
36390Sstevel@tonic-gate 
36400Sstevel@tonic-gate 		if (!(c.cache_flags & UMF_AUDIT)) {
36410Sstevel@tonic-gate 			if (!do_all_caches) {
36420Sstevel@tonic-gate 				mdb_warn("UMF_AUDIT is not enabled for %s\n",
36430Sstevel@tonic-gate 				    c.cache_name);
36440Sstevel@tonic-gate 			}
36450Sstevel@tonic-gate 			continue;
36460Sstevel@tonic-gate 		}
36470Sstevel@tonic-gate 
36480Sstevel@tonic-gate 		umu.umu_cache = &c;
36490Sstevel@tonic-gate 		(void) mdb_pwalk("bufctl", callback, &umu, cp);
36500Sstevel@tonic-gate 		audited_caches++;
36510Sstevel@tonic-gate 	}
36520Sstevel@tonic-gate 
36530Sstevel@tonic-gate 	if (audited_caches == 0 && do_all_caches) {
36540Sstevel@tonic-gate 		mdb_warn("UMF_AUDIT is not enabled for any caches\n");
36550Sstevel@tonic-gate 		return (DCMD_ERR);
36560Sstevel@tonic-gate 	}
36570Sstevel@tonic-gate 
36580Sstevel@tonic-gate 	qsort(umu.umu_hash, umu.umu_nelems, sizeof (umowner_t), umownercmp);
36590Sstevel@tonic-gate 	umoend = umu.umu_hash + umu.umu_nelems;
36600Sstevel@tonic-gate 
36610Sstevel@tonic-gate 	for (umo = umu.umu_hash; umo < umoend; umo++) {
36620Sstevel@tonic-gate 		if (umo->umo_total_size < mem_threshold &&
36630Sstevel@tonic-gate 		    umo->umo_num < cnt_threshold)
36640Sstevel@tonic-gate 			continue;
36650Sstevel@tonic-gate 		mdb_printf("%lu bytes for %u allocations with data size %lu:\n",
36660Sstevel@tonic-gate 		    umo->umo_total_size, umo->umo_num, umo->umo_data_size);
36670Sstevel@tonic-gate 		for (i = 0; i < umo->umo_depth; i++)
36680Sstevel@tonic-gate 			mdb_printf("\t %a\n", umo->umo_stack[i]);
36690Sstevel@tonic-gate 	}
36700Sstevel@tonic-gate 
36710Sstevel@tonic-gate 	return (DCMD_OK);
36720Sstevel@tonic-gate }
36731528Sjwadams 
/*
 * In-chunk malloc(3C) tag: libumem prepends this to every malloc buffer.
 * malloc_stat encodes (state, malloc_size) so a buffer can be validated
 * as genuinely malloc()ed (see um_umem_buffer_cb()).
 */
struct malloc_data {
	uint32_t malloc_size;
	uint32_t malloc_stat; /* == UMEM_MALLOC_ENCODE(state, malloc_size) */
};

/*
 * Largest malloc request that fits a size-distribution bucket: the
 * biggest umem buffer minus the tag overhead (64-bit allocations may
 * carry a second, padding tag — see the layout comment below).
 */
#ifdef _LP64
#define	UMI_MAX_BUCKET		(UMEM_MAXBUF - 2*sizeof (struct malloc_data))
#else
#define	UMI_MAX_BUCKET		(UMEM_MAXBUF - sizeof (struct malloc_data))
#endif

/*
 * Accumulator shared by the ::umem_malloc_* dcmds: totals gathered while
 * walking the umem_alloc_<size> caches, plus an optional per-size
 * histogram (um_bucket, indexed by malloc request size).
 */
typedef struct umem_malloc_info {
	size_t um_total;	/* total allocated buffers */
	size_t um_malloc;	/* malloc buffers */
	size_t um_malloc_size;	/* sum of malloc buffer sizes */
	size_t um_malloc_overhead; /* sum of in-chunk overheads */

	umem_cache_t *um_cp;	/* cache currently being walked */

	uint_t *um_bucket;	/* per-size counts, or NULL */
} umem_malloc_info_t;
36951528Sjwadams 
36961528Sjwadams static void
umem_malloc_print_dist(uint_t * um_bucket,size_t minmalloc,size_t maxmalloc,size_t maxbuckets,size_t minbucketsize,int geometric)36971528Sjwadams umem_malloc_print_dist(uint_t *um_bucket, size_t minmalloc, size_t maxmalloc,
36981528Sjwadams     size_t maxbuckets, size_t minbucketsize, int geometric)
36991528Sjwadams {
37004688Stomee 	uint64_t um_malloc;
37011528Sjwadams 	int minb = -1;
37021528Sjwadams 	int maxb = -1;
37031528Sjwadams 	int buckets;
37041528Sjwadams 	int nbucks;
37051528Sjwadams 	int i;
37061528Sjwadams 	int b;
37071528Sjwadams 	const int *distarray;
37081528Sjwadams 
37091528Sjwadams 	minb = (int)minmalloc;
37101528Sjwadams 	maxb = (int)maxmalloc;
37111528Sjwadams 
37121528Sjwadams 	nbucks = buckets = maxb - minb + 1;
37131528Sjwadams 
37141528Sjwadams 	um_malloc = 0;
37151528Sjwadams 	for (b = minb; b <= maxb; b++)
37161528Sjwadams 		um_malloc += um_bucket[b];
37171528Sjwadams 
37181528Sjwadams 	if (maxbuckets != 0)
37191528Sjwadams 		buckets = MIN(buckets, maxbuckets);
37201528Sjwadams 
37211528Sjwadams 	if (minbucketsize > 1) {
37221528Sjwadams 		buckets = MIN(buckets, nbucks/minbucketsize);
37231528Sjwadams 		if (buckets == 0) {
37241528Sjwadams 			buckets = 1;
37251528Sjwadams 			minbucketsize = nbucks;
37261528Sjwadams 		}
37271528Sjwadams 	}
37281528Sjwadams 
37291528Sjwadams 	if (geometric)
37304798Stomee 		distarray = dist_geometric(buckets, minb, maxb, minbucketsize);
37311528Sjwadams 	else
37324798Stomee 		distarray = dist_linear(buckets, minb, maxb);
37334798Stomee 
37344798Stomee 	dist_print_header("malloc size", 11, "count");
37351528Sjwadams 	for (i = 0; i < buckets; i++) {
37364798Stomee 		dist_print_bucket(distarray, i, um_bucket, um_malloc, 11);
37371528Sjwadams 	}
37381528Sjwadams 	mdb_printf("\n");
37391528Sjwadams }
37401528Sjwadams 
37411528Sjwadams /*
37421528Sjwadams  * A malloc()ed buffer looks like:
37431528Sjwadams  *
37441528Sjwadams  *	<----------- mi.malloc_size --->
37451528Sjwadams  *	<----------- cp.cache_bufsize ------------------>
37461528Sjwadams  *	<----------- cp.cache_chunksize -------------------------------->
37471528Sjwadams  *	+-------+-----------------------+---------------+---------------+
37481528Sjwadams  *	|/tag///| mallocsz		|/round-off/////|/debug info////|
37491528Sjwadams  *	+-------+---------------------------------------+---------------+
37501528Sjwadams  *		<-- usable space ------>
37511528Sjwadams  *
37521528Sjwadams  * mallocsz is the argument to malloc(3C).
37531528Sjwadams  * mi.malloc_size is the actual size passed to umem_alloc(), which
37541528Sjwadams  * is rounded up to the smallest available cache size, which is
37551528Sjwadams  * cache_bufsize.  If there is debugging or alignment overhead in
37561528Sjwadams  * the cache, that is reflected in a larger cache_chunksize.
37571528Sjwadams  *
37581528Sjwadams  * The tag at the beginning of the buffer is either 8-bytes or 16-bytes,
37591528Sjwadams  * depending upon the ISA's alignment requirements.  For 32-bit allocations,
37601528Sjwadams  * it is always a 8-byte tag.  For 64-bit allocations larger than 8 bytes,
37611528Sjwadams  * the tag has 8 bytes of padding before it.
37621528Sjwadams  *
37631528Sjwadams  * 32-byte, 64-byte buffers <= 8 bytes:
37641528Sjwadams  *	+-------+-------+--------- ...
37651528Sjwadams  *	|/size//|/stat//| mallocsz ...
37661528Sjwadams  *	+-------+-------+--------- ...
37671528Sjwadams  *			^
37681528Sjwadams  *			pointer returned from malloc(3C)
37691528Sjwadams  *
37701528Sjwadams  * 64-byte buffers > 8 bytes:
37711528Sjwadams  *	+---------------+-------+-------+--------- ...
37721528Sjwadams  *	|/padding///////|/size//|/stat//| mallocsz ...
37731528Sjwadams  *	+---------------+-------+-------+--------- ...
37741528Sjwadams  *					^
37751528Sjwadams  *					pointer returned from malloc(3C)
37761528Sjwadams  *
37771528Sjwadams  * The "size" field is "malloc_size", which is mallocsz + the padding.
37781528Sjwadams  * The "stat" field is derived from malloc_size, and functions as a
37791528Sjwadams  * validation that this buffer is actually from malloc(3C).
37801528Sjwadams  */
/*ARGSUSED*/
static int
um_umem_buffer_cb(uintptr_t addr, void *buf, umem_malloc_info_t *ump)
{
	/*
	 * "umem" walker callback: classify one allocated buffer from the
	 * current umem_alloc_<size> cache.  Every buffer counts toward
	 * um_total; buffers whose leading tag decodes to a malloc magic
	 * value also update the malloc totals and (optionally) the size
	 * histogram.  See the buffer-layout comment above.
	 */
	struct malloc_data md;
	size_t m_addr = addr;
	size_t overhead = sizeof (md);
	size_t mallocsz;

	ump->um_total++;

#ifdef _LP64
	/*
	 * 64-bit buffers larger than UMEM_SECOND_ALIGN carry 8 bytes of
	 * padding before the tag, so the tag sits one md further in and
	 * the per-buffer overhead doubles.
	 */
	if (ump->um_cp->cache_bufsize > UMEM_SECOND_ALIGN) {
		m_addr += overhead;
		overhead += sizeof (md);
	}
#endif

	if (mdb_vread(&md, sizeof (md), m_addr) == -1) {
		mdb_warn("unable to read malloc header at %p", m_addr);
		return (WALK_NEXT);
	}

	/* malloc_stat validates that this buffer came from malloc(3C) */
	switch (UMEM_MALLOC_DECODE(md.malloc_stat, md.malloc_size)) {
	case MALLOC_MAGIC:
#ifdef _LP64
	case MALLOC_SECOND_MAGIC:
#endif
		/* malloc_size includes the tag; subtract to get mallocsz */
		mallocsz = md.malloc_size - overhead;

		ump->um_malloc++;
		ump->um_malloc_size += mallocsz;
		ump->um_malloc_overhead += overhead;

		/* include round-off and debug overhead */
		ump->um_malloc_overhead +=
		    ump->um_cp->cache_chunksize - md.malloc_size;

		if (ump->um_bucket != NULL && mallocsz <= UMI_MAX_BUCKET)
			ump->um_bucket[mallocsz]++;

		break;
	default:
		/* not a malloc buffer (direct umem_alloc() user) */
		break;
	}

	return (WALK_NEXT);
}
38291528Sjwadams 
38301528Sjwadams int
get_umem_alloc_sizes(int ** out,size_t * out_num)38311528Sjwadams get_umem_alloc_sizes(int **out, size_t *out_num)
38321528Sjwadams {
38331528Sjwadams 	GElf_Sym sym;
38341528Sjwadams 
38351528Sjwadams 	if (umem_lookup_by_name("umem_alloc_sizes", &sym) == -1) {
38361528Sjwadams 		mdb_warn("unable to look up umem_alloc_sizes");
38371528Sjwadams 		return (-1);
38381528Sjwadams 	}
38391528Sjwadams 
38401528Sjwadams 	*out = mdb_alloc(sym.st_size, UM_SLEEP | UM_GC);
38411528Sjwadams 	*out_num = sym.st_size / sizeof (int);
38421528Sjwadams 
38431528Sjwadams 	if (mdb_vread(*out, sym.st_size, sym.st_value) == -1) {
38441528Sjwadams 		mdb_warn("unable to read umem_alloc_sizes (%p)", sym.st_value);
38451528Sjwadams 		*out = NULL;
38461528Sjwadams 		return (-1);
38471528Sjwadams 	}
38481528Sjwadams 
38491528Sjwadams 	return (0);
38501528Sjwadams }
38511528Sjwadams 
38521528Sjwadams 
38531528Sjwadams static int
um_umem_cache_cb(uintptr_t addr,umem_cache_t * cp,umem_malloc_info_t * ump)38541528Sjwadams um_umem_cache_cb(uintptr_t addr, umem_cache_t *cp, umem_malloc_info_t *ump)
38551528Sjwadams {
38561528Sjwadams 	if (strncmp(cp->cache_name, "umem_alloc_", strlen("umem_alloc_")) != 0)
38571528Sjwadams 		return (WALK_NEXT);
38581528Sjwadams 
38591528Sjwadams 	ump->um_cp = cp;
38601528Sjwadams 
38611528Sjwadams 	if (mdb_pwalk("umem", (mdb_walk_cb_t)um_umem_buffer_cb, ump, addr) ==
38621528Sjwadams 	    -1) {
38631528Sjwadams 		mdb_warn("can't walk 'umem' for cache %p", addr);
38641528Sjwadams 		return (WALK_ERR);
38651528Sjwadams 	}
38661528Sjwadams 
38671528Sjwadams 	return (WALK_NEXT);
38681528Sjwadams }
38691528Sjwadams 
/*
 * Help text for ::umem_malloc_dist.
 */
void
umem_malloc_dist_help(void)
{
	static const char *const opts =
"  -b maxbins\n"
"        Use at most maxbins bins for the data\n"
"  -B minbinsize\n"
"        Make the bins at least minbinsize bytes apart\n"
"  -d    dump the raw data out, without binning\n"
"  -g    use geometric binning instead of linear binning\n";

	mdb_printf("%s\n",
	    "report distribution of outstanding malloc()s");
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s", opts);
}
38861528Sjwadams 
38871528Sjwadams /*ARGSUSED*/
38881528Sjwadams int
umem_malloc_dist(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)38891528Sjwadams umem_malloc_dist(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
38901528Sjwadams {
38911528Sjwadams 	umem_malloc_info_t mi;
38921528Sjwadams 	uint_t geometric = 0;
38931528Sjwadams 	uint_t dump = 0;
38941528Sjwadams 	size_t maxbuckets = 0;
38951528Sjwadams 	size_t minbucketsize = 0;
38961528Sjwadams 
38971528Sjwadams 	size_t minalloc = 0;
38981528Sjwadams 	size_t maxalloc = UMI_MAX_BUCKET;
38991528Sjwadams 
39001528Sjwadams 	if (flags & DCMD_ADDRSPEC)
39011528Sjwadams 		return (DCMD_USAGE);
39021528Sjwadams 
39031528Sjwadams 	if (mdb_getopts(argc, argv,
39041528Sjwadams 	    'd', MDB_OPT_SETBITS, TRUE, &dump,
39051528Sjwadams 	    'g', MDB_OPT_SETBITS, TRUE, &geometric,
39061528Sjwadams 	    'b', MDB_OPT_UINTPTR, &maxbuckets,
39071528Sjwadams 	    'B', MDB_OPT_UINTPTR, &minbucketsize,
39081528Sjwadams 	    0) != argc)
39091528Sjwadams 		return (DCMD_USAGE);
39101528Sjwadams 
39111528Sjwadams 	bzero(&mi, sizeof (mi));
39121528Sjwadams 	mi.um_bucket = mdb_zalloc((UMI_MAX_BUCKET + 1) * sizeof (*mi.um_bucket),
39131528Sjwadams 	    UM_SLEEP | UM_GC);
39141528Sjwadams 
39151528Sjwadams 	if (mdb_walk("umem_cache", (mdb_walk_cb_t)um_umem_cache_cb,
39161528Sjwadams 	    &mi) == -1) {
39171528Sjwadams 		mdb_warn("unable to walk 'umem_cache'");
39181528Sjwadams 		return (DCMD_ERR);
39191528Sjwadams 	}
39201528Sjwadams 
39211528Sjwadams 	if (dump) {
39221528Sjwadams 		int i;
39231528Sjwadams 		for (i = minalloc; i <= maxalloc; i++)
39241528Sjwadams 			mdb_printf("%d\t%d\n", i, mi.um_bucket[i]);
39251528Sjwadams 
39261528Sjwadams 		return (DCMD_OK);
39271528Sjwadams 	}
39281528Sjwadams 
39291528Sjwadams 	umem_malloc_print_dist(mi.um_bucket, minalloc, maxalloc,
39301528Sjwadams 	    maxbuckets, minbucketsize, geometric);
39311528Sjwadams 
39321528Sjwadams 	return (DCMD_OK);
39331528Sjwadams }
39341528Sjwadams 
void
umem_malloc_info_help(void)
{
	/*
	 * Help text for the ::umem_malloc_info dcmd.  As with the other
	 * dcmd help routines, the OPTIONS header is out-dented by 2 to
	 * match mdb's standard help layout.  The -g option is compiled
	 * out under kmdb (see the matching #ifndef), so it is omitted
	 * from the help there as well.
	 */
	mdb_printf("%s\n",
	    "report information about malloc()s by cache.  ");
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s",
"  -b maxbins\n"
"        Use at most maxbins bins for the data\n"
"  -B minbinsize\n"
"        Make the bins at least minbinsize bytes apart\n"
"  -d    dump the raw distribution data without binning\n"
#ifndef _KMDB
"  -g    use geometric binning instead of linear binning\n"
#endif
	    "");
}
39541528Sjwadams int
umem_malloc_info(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)39551528Sjwadams umem_malloc_info(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
39561528Sjwadams {
39571528Sjwadams 	umem_cache_t c;
39581528Sjwadams 	umem_malloc_info_t mi;
39591528Sjwadams 
39601528Sjwadams 	int skip = 0;
39611528Sjwadams 
39621528Sjwadams 	size_t maxmalloc;
39631528Sjwadams 	size_t overhead;
39641528Sjwadams 	size_t allocated;
39651528Sjwadams 	size_t avg_malloc;
39661528Sjwadams 	size_t overhead_pct;	/* 1000 * overhead_percent */
39671528Sjwadams 
39681528Sjwadams 	uint_t verbose = 0;
39691528Sjwadams 	uint_t dump = 0;
39701528Sjwadams 	uint_t geometric = 0;
39711528Sjwadams 	size_t maxbuckets = 0;
39721528Sjwadams 	size_t minbucketsize = 0;
39731528Sjwadams 
39741528Sjwadams 	int *alloc_sizes;
39751528Sjwadams 	int idx;
39761528Sjwadams 	size_t num;
39771528Sjwadams 	size_t minmalloc;
39781528Sjwadams 
39791528Sjwadams 	if (mdb_getopts(argc, argv,
39801528Sjwadams 	    'd', MDB_OPT_SETBITS, TRUE, &dump,
39811528Sjwadams 	    'g', MDB_OPT_SETBITS, TRUE, &geometric,
39821528Sjwadams 	    'b', MDB_OPT_UINTPTR, &maxbuckets,
39831528Sjwadams 	    'B', MDB_OPT_UINTPTR, &minbucketsize,
39841528Sjwadams 	    0) != argc)
39851528Sjwadams 		return (DCMD_USAGE);
39861528Sjwadams 
39871528Sjwadams 	if (dump || geometric || (maxbuckets != 0) || (minbucketsize != 0))
39881528Sjwadams 		verbose = 1;
39891528Sjwadams 
39901528Sjwadams 	if (!(flags & DCMD_ADDRSPEC)) {
39911528Sjwadams 		if (mdb_walk_dcmd("umem_cache", "umem_malloc_info",
39921528Sjwadams 		    argc, argv) == -1) {
39931528Sjwadams 			mdb_warn("can't walk umem_cache");
39941528Sjwadams 			return (DCMD_ERR);
39951528Sjwadams 		}
39961528Sjwadams 		return (DCMD_OK);
39971528Sjwadams 	}
39981528Sjwadams 
39991528Sjwadams 	if (!mdb_vread(&c, sizeof (c), addr)) {
40001528Sjwadams 		mdb_warn("unable to read cache at %p", addr);
40011528Sjwadams 		return (DCMD_ERR);
40021528Sjwadams 	}
40031528Sjwadams 
40041528Sjwadams 	if (strncmp(c.cache_name, "umem_alloc_", strlen("umem_alloc_")) != 0) {
40051528Sjwadams 		if (!(flags & DCMD_LOOP))
40061528Sjwadams 			mdb_warn("umem_malloc_info: cache \"%s\" is not used "
40071528Sjwadams 			    "by malloc()\n", c.cache_name);
40081528Sjwadams 		skip = 1;
40091528Sjwadams 	}
40101528Sjwadams 
40111528Sjwadams 	/*
40121528Sjwadams 	 * normally, print the header only the first time.  In verbose mode,
40131528Sjwadams 	 * print the header on every non-skipped buffer
40141528Sjwadams 	 */
40151528Sjwadams 	if ((!verbose && DCMD_HDRSPEC(flags)) || (verbose && !skip))
40161528Sjwadams 		mdb_printf("%<ul>%-?s %6s %6s %8s %8s %10s %10s %6s%</ul>\n",
40171528Sjwadams 		    "CACHE", "BUFSZ", "MAXMAL",
40181528Sjwadams 		    "BUFMALLC", "AVG_MAL", "MALLOCED", "OVERHEAD", "%OVER");
40191528Sjwadams 
40201528Sjwadams 	if (skip)
40211528Sjwadams 		return (DCMD_OK);
40221528Sjwadams 
40231528Sjwadams 	maxmalloc = c.cache_bufsize - sizeof (struct malloc_data);
40241528Sjwadams #ifdef _LP64
40251528Sjwadams 	if (c.cache_bufsize > UMEM_SECOND_ALIGN)
40261528Sjwadams 		maxmalloc -= sizeof (struct malloc_data);
40271528Sjwadams #endif
40281528Sjwadams 
40291528Sjwadams 	bzero(&mi, sizeof (mi));
40301528Sjwadams 	mi.um_cp = &c;
40311528Sjwadams 	if (verbose)
40321528Sjwadams 		mi.um_bucket =
40331528Sjwadams 		    mdb_zalloc((UMI_MAX_BUCKET + 1) * sizeof (*mi.um_bucket),
40341528Sjwadams 		    UM_SLEEP | UM_GC);
40351528Sjwadams 
40361528Sjwadams 	if (mdb_pwalk("umem", (mdb_walk_cb_t)um_umem_buffer_cb, &mi, addr) ==
40371528Sjwadams 	    -1) {
40381528Sjwadams 		mdb_warn("can't walk 'umem'");
40391528Sjwadams 		return (DCMD_ERR);
40401528Sjwadams 	}
40411528Sjwadams 
40421528Sjwadams 	overhead = mi.um_malloc_overhead;
40431528Sjwadams 	allocated = mi.um_malloc_size;
40441528Sjwadams 
40451528Sjwadams 	/* do integer round off for the average */
40461528Sjwadams 	if (mi.um_malloc != 0)
40471528Sjwadams 		avg_malloc = (allocated + (mi.um_malloc - 1)/2) / mi.um_malloc;
40481528Sjwadams 	else
40491528Sjwadams 		avg_malloc = 0;
40501528Sjwadams 
40511528Sjwadams 	/*
40521528Sjwadams 	 * include per-slab overhead
40531528Sjwadams 	 *
40541528Sjwadams 	 * Each slab in a given cache is the same size, and has the same
40551528Sjwadams 	 * number of chunks in it;  we read in the first slab on the
40561528Sjwadams 	 * slab list to get the number of chunks for all slabs.  To
40571528Sjwadams 	 * compute the per-slab overhead, we just subtract the chunk usage
40581528Sjwadams 	 * from the slabsize:
40591528Sjwadams 	 *
40601528Sjwadams 	 * +------------+-------+-------+ ... --+-------+-------+-------+
40611528Sjwadams 	 * |////////////|	|	| ...	|	|///////|///////|
40621528Sjwadams 	 * |////color///| chunk	| chunk	| ...	| chunk	|/color/|/slab//|
40631528Sjwadams 	 * |////////////|	|	| ...	|	|///////|///////|
40641528Sjwadams 	 * +------------+-------+-------+ ... --+-------+-------+-------+
40651528Sjwadams 	 * |		\_______chunksize * chunks_____/		|
40661528Sjwadams 	 * \__________________________slabsize__________________________/
40671528Sjwadams 	 *
40681528Sjwadams 	 * For UMF_HASH caches, there is an additional source of overhead;
40691528Sjwadams 	 * the external umem_slab_t and per-chunk bufctl structures.  We
40701528Sjwadams 	 * include those in our per-slab overhead.
40711528Sjwadams 	 *
40721528Sjwadams 	 * Once we have a number for the per-slab overhead, we estimate
40731528Sjwadams 	 * the actual overhead by treating the malloc()ed buffers as if
40741528Sjwadams 	 * they were densely packed:
40751528Sjwadams 	 *
40761528Sjwadams 	 *	additional overhead = (# mallocs) * (per-slab) / (chunks);
40771528Sjwadams 	 *
40781528Sjwadams 	 * carefully ordering the multiply before the divide, to avoid
40791528Sjwadams 	 * round-off error.
40801528Sjwadams 	 */
40811528Sjwadams 	if (mi.um_malloc != 0) {
40821528Sjwadams 		umem_slab_t slab;
40831528Sjwadams 		uintptr_t saddr = (uintptr_t)c.cache_nullslab.slab_next;
40841528Sjwadams 
40851528Sjwadams 		if (mdb_vread(&slab, sizeof (slab), saddr) == -1) {
40861528Sjwadams 			mdb_warn("unable to read slab at %p\n", saddr);
40871528Sjwadams 		} else {
40881528Sjwadams 			long chunks = slab.slab_chunks;
40891528Sjwadams 			if (chunks != 0 && c.cache_chunksize != 0 &&
40901528Sjwadams 			    chunks <= c.cache_slabsize / c.cache_chunksize) {
40911528Sjwadams 				uintmax_t perslab =
40921528Sjwadams 				    c.cache_slabsize -
40931528Sjwadams 				    (c.cache_chunksize * chunks);
40941528Sjwadams 
40951528Sjwadams 				if (c.cache_flags & UMF_HASH) {
40961528Sjwadams 					perslab += sizeof (umem_slab_t) +
40971528Sjwadams 					    chunks *
40981528Sjwadams 					    ((c.cache_flags & UMF_AUDIT) ?
40991528Sjwadams 					    sizeof (umem_bufctl_audit_t) :
41001528Sjwadams 					    sizeof (umem_bufctl_t));
41011528Sjwadams 				}
41021528Sjwadams 				overhead +=
41031528Sjwadams 				    (perslab * (uintmax_t)mi.um_malloc)/chunks;
41041528Sjwadams 			} else {
41051528Sjwadams 				mdb_warn("invalid #chunks (%d) in slab %p\n",
41061528Sjwadams 				    chunks, saddr);
41071528Sjwadams 			}
41081528Sjwadams 		}
41091528Sjwadams 	}
41101528Sjwadams 
41111528Sjwadams 	if (allocated != 0)
41121528Sjwadams 		overhead_pct = (1000ULL * overhead) / allocated;
41131528Sjwadams 	else
41141528Sjwadams 		overhead_pct = 0;
41151528Sjwadams 
41161528Sjwadams 	mdb_printf("%0?p %6ld %6ld %8ld %8ld %10ld %10ld %3ld.%01ld%%\n",
41171528Sjwadams 	    addr, c.cache_bufsize, maxmalloc,
41181528Sjwadams 	    mi.um_malloc, avg_malloc, allocated, overhead,
41191528Sjwadams 	    overhead_pct / 10, overhead_pct % 10);
41201528Sjwadams 
41211528Sjwadams 	if (!verbose)
41221528Sjwadams 		return (DCMD_OK);
41231528Sjwadams 
41241528Sjwadams 	if (!dump)
41251528Sjwadams 		mdb_printf("\n");
41261528Sjwadams 
41271528Sjwadams 	if (get_umem_alloc_sizes(&alloc_sizes, &num) == -1)
41281528Sjwadams 		return (DCMD_ERR);
41291528Sjwadams 
41301528Sjwadams 	for (idx = 0; idx < num; idx++) {
41311528Sjwadams 		if (alloc_sizes[idx] == c.cache_bufsize)
41321528Sjwadams 			break;
41331528Sjwadams 		if (alloc_sizes[idx] == 0) {
41341528Sjwadams 			idx = num;	/* 0-terminated array */
41351528Sjwadams 			break;
41361528Sjwadams 		}
41371528Sjwadams 	}
41381528Sjwadams 	if (idx == num) {
41391528Sjwadams 		mdb_warn(
41401528Sjwadams 		    "cache %p's size (%d) not in umem_alloc_sizes\n",
41411528Sjwadams 		    addr, c.cache_bufsize);
41421528Sjwadams 		return (DCMD_ERR);
41431528Sjwadams 	}
41441528Sjwadams 
41451528Sjwadams 	minmalloc = (idx == 0)? 0 : alloc_sizes[idx - 1];
41461528Sjwadams 	if (minmalloc > 0) {
41471528Sjwadams #ifdef _LP64
41481528Sjwadams 		if (minmalloc > UMEM_SECOND_ALIGN)
41491528Sjwadams 			minmalloc -= sizeof (struct malloc_data);
41501528Sjwadams #endif
41511528Sjwadams 		minmalloc -= sizeof (struct malloc_data);
41521528Sjwadams 		minmalloc += 1;
41531528Sjwadams 	}
41541528Sjwadams 
41551528Sjwadams 	if (dump) {
41561528Sjwadams 		for (idx = minmalloc; idx <= maxmalloc; idx++)
41571528Sjwadams 			mdb_printf("%d\t%d\n", idx, mi.um_bucket[idx]);
41581528Sjwadams 		mdb_printf("\n");
41591528Sjwadams 	} else {
41601528Sjwadams 		umem_malloc_print_dist(mi.um_bucket, minmalloc, maxmalloc,
41611528Sjwadams 		    maxbuckets, minbucketsize, geometric);
41621528Sjwadams 	}
41631528Sjwadams 
41641528Sjwadams 	return (DCMD_OK);
41651528Sjwadams }
4166