/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "umem.h"

#include <sys/vmem_impl_user.h>
#include <umem_impl.h>

#include <alloca.h>
#include <limits.h>

#include "misc.h"
#include "leaky.h"
#include "dist.h"

#include "umem_pagesize.h"

#define	UM_ALLOCATED		0x1
#define	UM_FREE			0x2
#define	UM_BUFCTL		0x4
#define	UM_HASH			0x8

int umem_ready;

static int umem_stack_depth_warned;
static uint32_t umem_max_ncpus;
uint32_t umem_stack_depth;

size_t umem_pagesize;

#define	UMEM_READVAR(var)				\
	(umem_readvar(&(var), #var) == -1 &&		\
	    (mdb_warn("failed to read "#var), 1))

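/*
 * Re-read the module's cached copies of libumem's global state from the
 * target.  This must succeed quietly when libumem has not been loaded
 * yet, and it sanity-checks umem_stack_depth so that a corrupt target
 * cannot induce huge stack-trace reads later on.
 */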
int
umem_update_variables(void)
{
	size_t pagesize;

	/*
	 * Figure out which type of umem is being used; if it's not there
	 * yet, succeed quietly.
	 */
	if (umem_set_standalone() == -1) {
		umem_ready = 0;
		return (0);		/* umem not there yet */
	}

	/*
	 * Solaris 9 used a different name for umem_max_ncpus.  It's
	 * cheap backwards compatibility to check for both names.
	 */
	if (umem_readvar(&umem_max_ncpus, "umem_max_ncpus") == -1 &&
	    umem_readvar(&umem_max_ncpus, "max_ncpus") == -1) {
		mdb_warn("unable to read umem_max_ncpus or max_ncpus");
		return (-1);
	}
	if (UMEM_READVAR(umem_ready))
		return (-1);
	if (UMEM_READVAR(umem_stack_depth))
		return (-1);
	if (UMEM_READVAR(pagesize))
		return (-1);

	if (umem_stack_depth > UMEM_MAX_STACK_DEPTH) {
		if (umem_stack_depth_warned == 0) {
			mdb_warn("umem_stack_depth corrupted (%d > %d)\n",
			    umem_stack_depth, UMEM_MAX_STACK_DEPTH);
			umem_stack_depth_warned = 1;
		}
		umem_stack_depth = 0;
	}

	umem_pagesize = pagesize;

	return (0);
}

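/*
 * umem_cache walk callback: register a walker named after each cache
 * (e.g. "umem_alloc_32") so that individual caches can be walked by
 * name.
 */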
/*ARGSUSED*/
static int
umem_init_walkers(uintptr_t addr, const umem_cache_t *c, void *ignored)
{
	mdb_walker_t w;
	char descr[64];

	(void) mdb_snprintf(descr, sizeof (descr),
	    "walk the %s cache", c->cache_name);

	w.walk_name = c->cache_name;
	w.walk_descr = descr;
	w.walk_init = umem_walk_init;
	w.walk_step = umem_walk_step;
	w.walk_fini = umem_walk_fini;
	w.walk_init_arg = (void *)addr;

	if (mdb_add_walker(&w) == -1)
		mdb_warn("failed to add %s walker", c->cache_name);

	return (WALK_NEXT);
}

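/*
 * Target state-change callback: discard any stale leak-detection state,
 * re-read our cached variables, and -- the first time umem reports
 * itself fully ready -- register the per-cache walkers.
 */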
/*ARGSUSED*/
static void
umem_statechange_cb(void *arg)
{
	static int been_ready = 0;

#ifndef _KMDB
	leaky_cleanup(1);	/* state changes invalidate leaky state */
#endif

	if (umem_update_variables() == -1)
		return;

	if (been_ready)
		return;

	if (umem_ready != UMEM_READY)
		return;

	been_ready = 1;
	(void) mdb_walk("umem_cache", (mdb_walk_cb_t)umem_init_walkers, NULL);
}

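/*
 * Module initialization: add the basic umem_cache walker, prime our
 * cached variables, and keep them fresh via a state-change callback.
 */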
int
umem_init(void)
{
	mdb_walker_t w = {
		"umem_cache", "walk list of umem caches", umem_cache_walk_init,
		umem_cache_walk_step, umem_cache_walk_fini
	};

	if (mdb_add_walker(&w) == -1) {
		mdb_warn("failed to add umem_cache walker");
		return (-1);
	}

	if (umem_update_variables() == -1)
		return (-1);

	/* install a callback so that our variables are always up-to-date */
	(void) mdb_callback_add(MDB_CALLBACK_STCHG, umem_statechange_cb, NULL);
	umem_statechange_cb(NULL);

	return (0);
}

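/*
 * Print libumem's circular error-message buffer.  umem_error_begin is
 * the offset of the oldest byte, so when the buffer has wrapped we
 * print the tail (the oldest data) first, then the head.
 */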
int
umem_abort_messages(void)
{
	char *umem_error_buffer;
	uint_t umem_error_begin;
	GElf_Sym sym;
	size_t bufsize;

	if (UMEM_READVAR(umem_error_begin))
		return (DCMD_ERR);

	if (umem_lookup_by_name("umem_error_buffer", &sym) == -1) {
		mdb_warn("unable to look up umem_error_buffer");
		return (DCMD_ERR);
	}

	bufsize = (size_t)sym.st_size;

	umem_error_buffer = mdb_alloc(bufsize+1, UM_SLEEP | UM_GC);

	if (mdb_vread(umem_error_buffer, bufsize, (uintptr_t)sym.st_value)
	    != bufsize) {
		mdb_warn("unable to read umem_error_buffer");
		return (DCMD_ERR);
	}
	/* put a zero after the end of the buffer to simplify printing */
	umem_error_buffer[bufsize] = 0;

	if ((umem_error_begin % bufsize) == 0)
		mdb_printf("%s\n", umem_error_buffer);
	else {
		umem_error_buffer[(umem_error_begin % bufsize) - 1] = 0;
		mdb_printf("%s%s\n",
		    &umem_error_buffer[umem_error_begin % bufsize],
		    umem_error_buffer);
	}

	return (DCMD_OK);
}

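/*
 * Report the size of one of umem's logs, scaled to megabytes or
 * kilobytes when the size divides evenly.
 */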
static void
umem_log_status(const char *name, umem_log_header_t *val)
{
	umem_log_header_t my_lh;
	uintptr_t pos = (uintptr_t)val;
	size_t size;

	if (pos == NULL)
		return;

	if (mdb_vread(&my_lh, sizeof (umem_log_header_t), pos) == -1) {
		mdb_warn("\nunable to read umem_%s_log pointer %p",
		    name, pos);
		return;
	}

	size = my_lh.lh_chunksize * my_lh.lh_nchunks;

	if (size % (1024 * 1024) == 0)
		mdb_printf("%s=%dm ", name, size / (1024 * 1024));
	else if (size % 1024 == 0)
		mdb_printf("%s=%dk ", name, size / 1024);
	else
		mdb_printf("%s=%d ", name, size);
}

typedef struct umem_debug_flags {
	const char	*udf_name;
	uint_t		udf_flags;
	uint_t		udf_clear;	/* if 0, uses udf_flags */
} umem_debug_flags_t;

umem_debug_flags_t umem_status_flags[] = {
	{ "random",	UMF_RANDOMIZE,	UMF_RANDOM },
	{ "default",	UMF_AUDIT | UMF_DEADBEEF | UMF_REDZONE | UMF_CONTENTS },
	{ "audit",	UMF_AUDIT },
	{ "guards",	UMF_DEADBEEF | UMF_REDZONE },
	{ "nosignal",	UMF_CHECKSIGNAL },
	{ "firewall",	UMF_FIREWALL },
	{ "lite",	UMF_LITE },
	{ NULL }
};

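/*
 * Status dcmd: report umem's readiness, its concurrency level, the
 * configured logs, and finally the contents of the error-message
 * buffer.
 */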
/*ARGSUSED*/
int
umem_status(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv)
{
	int umem_logging;

	umem_log_header_t *umem_transaction_log;
	umem_log_header_t *umem_content_log;
	umem_log_header_t *umem_failure_log;
	umem_log_header_t *umem_slab_log;

	mdb_printf("Status:\t\t%s\n",
	    umem_ready == UMEM_READY_INIT_FAILED ? "initialization failed" :
	    umem_ready == UMEM_READY_STARTUP ? "uninitialized" :
	    umem_ready == UMEM_READY_INITING ? "initialization in progress" :
	    umem_ready == UMEM_READY ? "ready and active" :
	    umem_ready == 0 ? "not loaded into address space" :
	    "unknown (umem_ready invalid)");

	if (umem_ready == 0)
		return (DCMD_OK);

	mdb_printf("Concurrency:\t%d\n", umem_max_ncpus);

	if (UMEM_READVAR(umem_logging))
		goto err;
	if (UMEM_READVAR(umem_transaction_log))
		goto err;
	if (UMEM_READVAR(umem_content_log))
		goto err;
	if (UMEM_READVAR(umem_failure_log))
		goto err;
	if (UMEM_READVAR(umem_slab_log))
		goto err;

	mdb_printf("Logs:\t\t");
	umem_log_status("transaction", umem_transaction_log);
	umem_log_status("content", umem_content_log);
	umem_log_status("fail", umem_failure_log);
	umem_log_status("slab", umem_slab_log);
	if (!umem_logging)
		mdb_printf("(inactive)");
	mdb_printf("\n");

	mdb_printf("Message buffer:\n");
	return (umem_abort_messages());

err:
	mdb_printf("Message buffer:\n");
	(void) umem_abort_messages();
	return (DCMD_ERR);
}

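/*
 * The umem_cache walker follows the circular list of caches rooted at
 * umem_null_cache, stopping when the list wraps back to its starting
 * point.
 */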
typedef struct {
	uintptr_t ucw_first;
	uintptr_t ucw_current;
} umem_cache_walk_t;

int
umem_cache_walk_init(mdb_walk_state_t *wsp)
{
	umem_cache_walk_t *ucw;
	umem_cache_t c;
	uintptr_t cp;
	GElf_Sym sym;

	if (umem_lookup_by_name("umem_null_cache", &sym) == -1) {
		mdb_warn("couldn't find umem_null_cache");
		return (WALK_ERR);
	}

	cp = (uintptr_t)sym.st_value;

	if (mdb_vread(&c, sizeof (umem_cache_t), cp) == -1) {
		mdb_warn("couldn't read cache at %p", cp);
		return (WALK_ERR);
	}

	ucw = mdb_alloc(sizeof (umem_cache_walk_t), UM_SLEEP);

	ucw->ucw_first = cp;
	ucw->ucw_current = (uintptr_t)c.cache_next;
	wsp->walk_data = ucw;

	return (WALK_NEXT);
}

int
umem_cache_walk_step(mdb_walk_state_t *wsp)
{
	umem_cache_walk_t *ucw = wsp->walk_data;
	umem_cache_t c;
	int status;

	if (mdb_vread(&c, sizeof (umem_cache_t), ucw->ucw_current) == -1) {
		mdb_warn("couldn't read cache at %p", ucw->ucw_current);
		return (WALK_DONE);
	}

	status = wsp->walk_callback(ucw->ucw_current, &c, wsp->walk_cbdata);

	if ((ucw->ucw_current = (uintptr_t)c.cache_next) == ucw->ucw_first)
		return (WALK_DONE);

	return (status);
}

void
umem_cache_walk_fini(mdb_walk_state_t *wsp)
{
	umem_cache_walk_t *ucw = wsp->walk_data;
	mdb_free(ucw, sizeof (umem_cache_walk_t));
}

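/*
 * The umem_cpu walker iterates over the umem_cpus array, visiting one
 * umem_cpu_t for each of the umem_max_ncpus possible CPUs.
 */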
typedef struct {
	umem_cpu_t *ucw_cpus;
	uint32_t ucw_current;
	uint32_t ucw_max;
} umem_cpu_walk_state_t;

int
umem_cpu_walk_init(mdb_walk_state_t *wsp)
{
	umem_cpu_t *umem_cpus;

	umem_cpu_walk_state_t *ucw;

	if (umem_readvar(&umem_cpus, "umem_cpus") == -1) {
		mdb_warn("failed to read 'umem_cpus'");
		return (WALK_ERR);
	}

	ucw = mdb_alloc(sizeof (*ucw), UM_SLEEP);

	ucw->ucw_cpus = umem_cpus;
	ucw->ucw_current = 0;
	ucw->ucw_max = umem_max_ncpus;

	wsp->walk_data = ucw;
	return (WALK_NEXT);
}

int
umem_cpu_walk_step(mdb_walk_state_t *wsp)
{
	umem_cpu_t cpu;
	umem_cpu_walk_state_t *ucw = wsp->walk_data;

	uintptr_t caddr;

	if (ucw->ucw_current >= ucw->ucw_max)
		return (WALK_DONE);

	caddr = (uintptr_t)&(ucw->ucw_cpus[ucw->ucw_current]);

	if (mdb_vread(&cpu, sizeof (umem_cpu_t), caddr) == -1) {
		mdb_warn("failed to read cpu %d", ucw->ucw_current);
		return (WALK_ERR);
	}

	ucw->ucw_current++;

	return (wsp->walk_callback(caddr, &cpu, wsp->walk_cbdata));
}

void
umem_cpu_walk_fini(mdb_walk_state_t *wsp)
{
	umem_cpu_walk_state_t *ucw = wsp->walk_data;

	mdb_free(ucw, sizeof (*ucw));
}

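/*
 * umem_cpu_cache walks the per-CPU caches of a single umem cache.  It
 * layers on the umem_cpu walk, adding each CPU's cache offset to the
 * cache address supplied by the user.
 */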
int
umem_cpu_cache_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL) {
		mdb_warn("umem_cpu_cache doesn't support global walks");
		return (WALK_ERR);
	}

	if (mdb_layered_walk("umem_cpu", wsp) == -1) {
		mdb_warn("couldn't walk 'umem_cpu'");
		return (WALK_ERR);
	}

	wsp->walk_data = (void *)wsp->walk_addr;

	return (WALK_NEXT);
}

int
umem_cpu_cache_walk_step(mdb_walk_state_t *wsp)
{
	uintptr_t caddr = (uintptr_t)wsp->walk_data;
	const umem_cpu_t *cpu = wsp->walk_layer;
	umem_cpu_cache_t cc;

	caddr += cpu->cpu_cache_offset;

	if (mdb_vread(&cc, sizeof (umem_cpu_cache_t), caddr) == -1) {
		mdb_warn("couldn't read umem_cpu_cache at %p", caddr);
		return (WALK_ERR);
	}

	return (wsp->walk_callback(caddr, &cc, wsp->walk_cbdata));
}

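/*
 * umem_slab walks every slab in a given cache, starting just past the
 * cache_nullslab sentinel and stopping when the list wraps back to it.
 */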
int
umem_slab_walk_init(mdb_walk_state_t *wsp)
{
	uintptr_t caddr = wsp->walk_addr;
	umem_cache_t c;

	if (caddr == NULL) {
		mdb_warn("umem_slab doesn't support global walks\n");
		return (WALK_ERR);
	}

	if (mdb_vread(&c, sizeof (c), caddr) == -1) {
		mdb_warn("couldn't read umem_cache at %p", caddr);
		return (WALK_ERR);
	}

	wsp->walk_data =
	    (void *)(caddr + offsetof(umem_cache_t, cache_nullslab));
	wsp->walk_addr = (uintptr_t)c.cache_nullslab.slab_next;

	return (WALK_NEXT);
}

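/*
 * umem_slab_partial visits only the slabs with free buffers: it starts
 * at cache_freelist (the first slab with available chunks) rather than
 * at the head of the slab list.
 */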
int
umem_slab_walk_partial_init(mdb_walk_state_t *wsp)
{
	uintptr_t caddr = wsp->walk_addr;
	umem_cache_t c;

	if (caddr == NULL) {
		mdb_warn("umem_slab_partial doesn't support global walks\n");
		return (WALK_ERR);
	}

	if (mdb_vread(&c, sizeof (c), caddr) == -1) {
		mdb_warn("couldn't read umem_cache at %p", caddr);
		return (WALK_ERR);
	}

	wsp->walk_data =
	    (void *)(caddr + offsetof(umem_cache_t, cache_nullslab));
	wsp->walk_addr = (uintptr_t)c.cache_freelist;

	/*
	 * Some consumers (umem_walk_step(), in particular) require at
	 * least one callback if there are any buffers in the cache.  So
	 * if there are *no* partial slabs, report the last full slab, if
	 * any.
	 *
	 * Yes, this is ugly, but it's cleaner than the other possibilities.
	 */
	if ((uintptr_t)wsp->walk_data == wsp->walk_addr)
		wsp->walk_addr = (uintptr_t)c.cache_nullslab.slab_prev;

	return (WALK_NEXT);
}

int
umem_slab_walk_step(mdb_walk_state_t *wsp)
{
	umem_slab_t s;
	uintptr_t addr = wsp->walk_addr;
	uintptr_t saddr = (uintptr_t)wsp->walk_data;
	uintptr_t caddr = saddr - offsetof(umem_cache_t, cache_nullslab);

	if (addr == saddr)
		return (WALK_DONE);

	if (mdb_vread(&s, sizeof (s), addr) == -1) {
		mdb_warn("failed to read slab at %p", wsp->walk_addr);
		return (WALK_ERR);
	}

	if ((uintptr_t)s.slab_cache != caddr) {
		mdb_warn("slab %p isn't in cache %p (in cache %p)\n",
		    addr, caddr, s.slab_cache);
		return (WALK_ERR);
	}

	wsp->walk_addr = (uintptr_t)s.slab_next;

	return (wsp->walk_callback(addr, &s, wsp->walk_cbdata));
}

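/*
 * Cache dcmd: with no address, walk all caches; with an address, print
 * one line summarizing that cache's flags, buffer size, and totals.
 */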
int
umem_cache(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv)
{
	umem_cache_t c;

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_walk_dcmd("umem_cache", "umem_cache", ac, argv) == -1) {
			mdb_warn("can't walk umem_cache");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags))
		mdb_printf("%-?s %-25s %4s %8s %8s %8s\n", "ADDR", "NAME",
		    "FLAG", "CFLAG", "BUFSIZE", "BUFTOTL");

	if (mdb_vread(&c, sizeof (c), addr) == -1) {
		mdb_warn("couldn't read umem_cache at %p", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%0?p %-25s %04x %08x %8ld %8lld\n", addr, c.cache_name,
	    c.cache_flags, c.cache_cflags, c.cache_bufsize, c.cache_buftotal);

	return (DCMD_OK);
}

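/*
 * qsort()/bsearch() comparators: addrcmp orders buffer addresses
 * ascending; bufctlcmp orders audit bufctls newest-first by timestamp.
 */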
static int
addrcmp(const void *lhs, const void *rhs)
{
	uintptr_t p1 = *((uintptr_t *)lhs);
	uintptr_t p2 = *((uintptr_t *)rhs);

	if (p1 < p2)
		return (-1);
	if (p1 > p2)
		return (1);
	return (0);
}

static int
bufctlcmp(const umem_bufctl_audit_t **lhs, const umem_bufctl_audit_t **rhs)
{
	const umem_bufctl_audit_t *bcp1 = *lhs;
	const umem_bufctl_audit_t *bcp2 = *rhs;

	if (bcp1->bc_timestamp > bcp2->bc_timestamp)
		return (-1);

	if (bcp1->bc_timestamp < bcp2->bc_timestamp)
		return (1);

	return (0);
}

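/*
 * The umem_hash walker iterates the allocated-buffer hash table of a
 * UMF_HASH cache: the table is read in its entirety, and then each
 * bucket's chain of bufctls is followed.
 */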
typedef struct umem_hash_walk {
	uintptr_t *umhw_table;
	size_t umhw_nelems;
	size_t umhw_pos;
	umem_bufctl_t umhw_cur;
} umem_hash_walk_t;

int
umem_hash_walk_init(mdb_walk_state_t *wsp)
{
	umem_hash_walk_t *umhw;
	uintptr_t *hash;
	umem_cache_t c;
	uintptr_t haddr, addr = wsp->walk_addr;
	size_t nelems;
	size_t hsize;

	if (addr == NULL) {
		mdb_warn("umem_hash doesn't support global walks\n");
		return (WALK_ERR);
	}

	if (mdb_vread(&c, sizeof (c), addr) == -1) {
		mdb_warn("couldn't read cache at addr %p", addr);
		return (WALK_ERR);
	}

	if (!(c.cache_flags & UMF_HASH)) {
		mdb_warn("cache %p doesn't have a hash table\n", addr);
		return (WALK_DONE);		/* nothing to do */
	}

	umhw = mdb_zalloc(sizeof (umem_hash_walk_t), UM_SLEEP);
	umhw->umhw_cur.bc_next = NULL;
	umhw->umhw_pos = 0;

	umhw->umhw_nelems = nelems = c.cache_hash_mask + 1;
	hsize = nelems * sizeof (uintptr_t);
	haddr = (uintptr_t)c.cache_hash_table;

	umhw->umhw_table = hash = mdb_alloc(hsize, UM_SLEEP);
	if (mdb_vread(hash, hsize, haddr) == -1) {
		mdb_warn("failed to read hash table at %p", haddr);
		mdb_free(hash, hsize);
		mdb_free(umhw, sizeof (umem_hash_walk_t));
		return (WALK_ERR);
	}

	wsp->walk_data = umhw;

	return (WALK_NEXT);
}

int
umem_hash_walk_step(mdb_walk_state_t *wsp)
{
	umem_hash_walk_t *umhw = wsp->walk_data;
	uintptr_t addr = NULL;

	if ((addr = (uintptr_t)umhw->umhw_cur.bc_next) == NULL) {
		while (umhw->umhw_pos < umhw->umhw_nelems) {
			if ((addr = umhw->umhw_table[umhw->umhw_pos++]) != NULL)
				break;
		}
	}
	if (addr == NULL)
		return (WALK_DONE);

	if (mdb_vread(&umhw->umhw_cur, sizeof (umem_bufctl_t), addr) == -1) {
		mdb_warn("couldn't read umem_bufctl_t at addr %p", addr);
		return (WALK_ERR);
	}

	return (wsp->walk_callback(addr, &umhw->umhw_cur, wsp->walk_cbdata));
}

void
umem_hash_walk_fini(mdb_walk_state_t *wsp)
{
	umem_hash_walk_t *umhw = wsp->walk_data;

	if (umhw == NULL)
		return;

	mdb_free(umhw->umhw_table, umhw->umhw_nelems * sizeof (uintptr_t));
	mdb_free(umhw, sizeof (umem_hash_walk_t));
}

/*
 * Find the address of the bufctl structure for the address 'buf' in cache
 * 'cp', which is at address caddr, and place it in *out.
 */
static int
umem_hash_lookup(umem_cache_t *cp, uintptr_t caddr, void *buf, uintptr_t *out)
{
	uintptr_t bucket = (uintptr_t)UMEM_HASH(cp, buf);
	umem_bufctl_t *bcp;
	umem_bufctl_t bc;

	if (mdb_vread(&bcp, sizeof (umem_bufctl_t *), bucket) == -1) {
		mdb_warn("unable to read hash bucket for %p in cache %p",
		    buf, caddr);
		return (-1);
	}

	while (bcp != NULL) {
		if (mdb_vread(&bc, sizeof (umem_bufctl_t),
		    (uintptr_t)bcp) == -1) {
			mdb_warn("unable to read bufctl at %p", bcp);
			return (-1);
		}
		if (bc.bc_addr == buf) {
			*out = (uintptr_t)bcp;
			return (0);
		}
		bcp = bc.bc_next;
	}

	mdb_warn("unable to find bufctl for %p in cache %p\n", buf, caddr);
	return (-1);
}

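/*
 * Determine the magazine size for a cache.  The cache_magtype pointer
 * is validated against the bounds and alignment of the umem_magtype
 * array before it is dereferenced, since a corrupt cache could
 * otherwise send us off reading arbitrary memory.
 */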
int
umem_get_magsize(const umem_cache_t *cp)
{
	uintptr_t addr = (uintptr_t)cp->cache_magtype;
	GElf_Sym mt_sym;
	umem_magtype_t mt;
	int res;

	/*
	 * if cpu 0 has a non-zero magsize, it must be correct.  caches
	 * with UMF_NOMAGAZINE have disabled their magazine layers, so
	 * it is okay to return 0 for them.
	 */
	if ((res = cp->cache_cpu[0].cc_magsize) != 0 ||
	    (cp->cache_flags & UMF_NOMAGAZINE))
		return (res);

	if (umem_lookup_by_name("umem_magtype", &mt_sym) == -1) {
		mdb_warn("unable to read 'umem_magtype'");
	} else if (addr < mt_sym.st_value ||
	    addr + sizeof (mt) - 1 > mt_sym.st_value + mt_sym.st_size - 1 ||
	    ((addr - mt_sym.st_value) % sizeof (mt)) != 0) {
		mdb_warn("cache '%s' has invalid magtype pointer (%p)\n",
		    cp->cache_name, addr);
		return (0);
	}
	if (mdb_vread(&mt, sizeof (mt), addr) == -1) {
		mdb_warn("unable to read magtype at %a", addr);
		return (0);
	}
	return (mt.mt_magsize);
}

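/*
 * Per-slab callback for umem_estimate_allocated(): subtract each
 * partial slab's free chunks from the allocated-buffer estimate.
 */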
/*ARGSUSED*/
static int
umem_estimate_slab(uintptr_t addr, const umem_slab_t *sp, size_t *est)
{
	*est -= (sp->slab_chunks - sp->slab_refcnt);

	return (WALK_NEXT);
}

/*
 * Returns an upper bound on the number of allocated buffers in a given
 * cache.
 */
size_t
umem_estimate_allocated(uintptr_t addr, const umem_cache_t *cp)
{
	int magsize;
	size_t cache_est;

	cache_est = cp->cache_buftotal;

	(void) mdb_pwalk("umem_slab_partial",
	    (mdb_walk_cb_t)umem_estimate_slab, &cache_est, addr);

	if ((magsize = umem_get_magsize(cp)) != 0) {
		size_t mag_est = cp->cache_full.ml_total * magsize;

		if (cache_est >= mag_est) {
			cache_est -= mag_est;
		} else {
			mdb_warn("cache %p's magazine layer holds more buffers "
			    "than the slab layer.\n", addr);
		}
	}
	return (cache_est);
}

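/*
 * READMAG_ROUNDS reads one magazine from the target into mp and copies
 * its first 'rounds' rounds into maglist, bailing out if the
 * fudge-factor limit (magmax) would be exceeded.
 */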
#define	READMAG_ROUNDS(rounds) { \
	if (mdb_vread(mp, magbsize, (uintptr_t)ump) == -1) { \
		mdb_warn("couldn't read magazine at %p", ump); \
		goto fail; \
	} \
	for (i = 0; i < rounds; i++) { \
		maglist[magcnt++] = mp->mag_round[i]; \
		if (magcnt == magmax) { \
			mdb_warn("%d magazines exceeds fudge factor\n", \
			    magcnt); \
			goto fail; \
		} \
	} \
}

int
umem_read_magazines(umem_cache_t *cp, uintptr_t addr,
    void ***maglistp, size_t *magcntp, size_t *magmaxp, int alloc_flags)
{
	umem_magazine_t *ump, *mp;
	void **maglist = NULL;
	int i, cpu;
	size_t magsize, magmax, magbsize;
	size_t magcnt = 0;

	/*
	 * Read the magtype out of the cache, after verifying the pointer's
	 * correctness.
	 */
	magsize = umem_get_magsize(cp);
	if (magsize == 0) {
		*maglistp = NULL;
		*magcntp = 0;
		*magmaxp = 0;
		return (WALK_NEXT);
	}

	/*
	 * There are several places where we need to go buffer hunting:
	 * the per-CPU loaded magazine, the per-CPU spare full magazine,
	 * and the full magazine list in the depot.
	 *
	 * For an upper bound on the number of buffers in the magazine
	 * layer, we have the number of magazines on the cache_full
	 * list plus at most two magazines per CPU (the loaded and the
	 * spare).  Toss in 100 magazines as a fudge factor in case this
	 * is live (the number "100" comes from the same fudge factor in
	 * crash(1M)).
	 */
	magmax = (cp->cache_full.ml_total + 2 * umem_max_ncpus + 100) * magsize;
	magbsize = offsetof(umem_magazine_t, mag_round[magsize]);

	if (magbsize >= PAGESIZE / 2) {
		mdb_warn("magazine size for cache %p unreasonable (%x)\n",
		    addr, magbsize);
		return (WALK_ERR);
	}

	maglist = mdb_alloc(magmax * sizeof (void *), alloc_flags);
	mp = mdb_alloc(magbsize, alloc_flags);
	if (mp == NULL || maglist == NULL)
		goto fail;

	/*
	 * First up: the magazines in the depot (i.e. on the cache_full list).
	 */
	for (ump = cp->cache_full.ml_list; ump != NULL; ) {
		READMAG_ROUNDS(magsize);
		ump = mp->mag_next;

		if (ump == cp->cache_full.ml_list)
			break; /* cache_full list loop detected */
	}

	dprintf(("cache_full list done\n"));

	/*
	 * Now whip through the CPUs, snagging the loaded magazines
	 * and full spares.
	 */
	for (cpu = 0; cpu < umem_max_ncpus; cpu++) {
		umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu];

		dprintf(("reading cpu cache %p\n",
		    (uintptr_t)ccp - (uintptr_t)cp + addr));

		if (ccp->cc_rounds > 0 &&
		    (ump = ccp->cc_loaded) != NULL) {
			dprintf(("reading %d loaded rounds\n", ccp->cc_rounds));
			READMAG_ROUNDS(ccp->cc_rounds);
		}

		if (ccp->cc_prounds > 0 &&
		    (ump = ccp->cc_ploaded) != NULL) {
			dprintf(("reading %d previously loaded rounds\n",
			    ccp->cc_prounds));
			READMAG_ROUNDS(ccp->cc_prounds);
		}
	}

	dprintf(("magazine layer: %d buffers\n", magcnt));

	if (!(alloc_flags & UM_GC))
		mdb_free(mp, magbsize);

	*maglistp = maglist;
	*magcntp = magcnt;
	*magmaxp = magmax;

	return (WALK_NEXT);

fail:
	if (!(alloc_flags & UM_GC)) {
		if (mp)
			mdb_free(mp, magbsize);
		if (maglist)
			mdb_free(maglist, magmax * sizeof (void *));
	}
	return (WALK_ERR);
}

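/*
 * Callback shims for the generic umem walk: report either the buffer
 * address alone, or the buffer's bufctl (reading the larger audit
 * bufctl when the cache has UMF_AUDIT set).
 */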
static int
umem_walk_callback(mdb_walk_state_t *wsp, uintptr_t buf)
{
	return (wsp->walk_callback(buf, NULL, wsp->walk_cbdata));
}

static int
bufctl_walk_callback(umem_cache_t *cp, mdb_walk_state_t *wsp, uintptr_t buf)
{
	umem_bufctl_audit_t *b;
	UMEM_LOCAL_BUFCTL_AUDIT(&b);

	/*
	 * if UMF_AUDIT is not set, we know that we're looking at a
	 * umem_bufctl_t.
	 */
	if (!(cp->cache_flags & UMF_AUDIT) ||
	    mdb_vread(b, UMEM_BUFCTL_AUDIT_SIZE, buf) == -1) {
		(void) memset(b, 0, UMEM_BUFCTL_AUDIT_SIZE);
		if (mdb_vread(b, sizeof (umem_bufctl_t), buf) == -1) {
			mdb_warn("unable to read bufctl at %p", buf);
			return (WALK_ERR);
		}
	}

	return (wsp->walk_callback(buf, b, wsp->walk_cbdata));
}

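/*
 * State for the generic umem walk, which visits allocated or freed
 * buffers (optionally as bufctls) by combining the magazine layer,
 * the slab layer, and -- for allocated buffers in UMF_HASH caches --
 * the hash table.
 */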
typedef struct umem_walk {
	int umw_type;

	uintptr_t umw_addr;		/* cache address */
	umem_cache_t *umw_cp;
	size_t umw_csize;

	/*
	 * magazine layer
	 */
	void **umw_maglist;
	size_t umw_max;
	size_t umw_count;
	size_t umw_pos;

	/*
	 * slab layer
	 */
	char *umw_valid;	/* to keep track of freed buffers */
	char *umw_ubase;	/* buffer for slab data */
} umem_walk_t;

static int
umem_walk_init_common(mdb_walk_state_t *wsp, int type)
{
	umem_walk_t *umw;
	int csize;
	umem_cache_t *cp;
	size_t vm_quantum;

	size_t magmax, magcnt;
	void **maglist = NULL;
	uint_t chunksize, slabsize;
	int status = WALK_ERR;
	uintptr_t addr = wsp->walk_addr;
	const char *layered;

	type &= ~UM_HASH;

	if (addr == NULL) {
		mdb_warn("umem walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	dprintf(("walking %p\n", addr));

	/*
	 * The number of "cpus" determines how large the cache is.
	 */
	csize = UMEM_CACHE_SIZE(umem_max_ncpus);
	cp = mdb_alloc(csize, UM_SLEEP);

	if (mdb_vread(cp, csize, addr) == -1) {
		mdb_warn("couldn't read cache at addr %p", addr);
		goto out2;
	}

	/*
	 * It's easy for someone to hand us an invalid cache address.
	 * Unfortunately, it is hard for this walker to survive an
	 * invalid cache cleanly.  So we make sure that:
	 *
	 *	1. the vmem arena for the cache is readable,
	 *	2. the vmem arena's quantum is a power of 2,
	 *	3. our slabsize is a multiple of the quantum, and
	 *	4. our chunksize is >0 and less than our slabsize.
	 */
	if (mdb_vread(&vm_quantum, sizeof (vm_quantum),
	    (uintptr_t)&cp->cache_arena->vm_quantum) == -1 ||
	    vm_quantum == 0 ||
	    (vm_quantum & (vm_quantum - 1)) != 0 ||
	    cp->cache_slabsize < vm_quantum ||
	    P2PHASE(cp->cache_slabsize, vm_quantum) != 0 ||
	    cp->cache_chunksize == 0 ||
	    cp->cache_chunksize > cp->cache_slabsize) {
		mdb_warn("%p is not a valid umem_cache_t\n", addr);
		goto out2;
	}

	dprintf(("buf total is %d\n", cp->cache_buftotal));

	if (cp->cache_buftotal == 0) {
		mdb_free(cp, csize);
		return (WALK_DONE);
	}

	/*
	 * If they ask for bufctls, but it's a small-slab cache,
	 * there is nothing to report.
	 */
	if ((type & UM_BUFCTL) && !(cp->cache_flags & UMF_HASH)) {
		dprintf(("bufctl requested, not UMF_HASH (flags: %p)\n",
		    cp->cache_flags));
		mdb_free(cp, csize);
		return (WALK_DONE);
	}

	/*
	 * Read in the contents of the magazine layer
	 */
	if (umem_read_magazines(cp, addr, &maglist, &magcnt, &magmax,
	    UM_SLEEP) == WALK_ERR)
		goto out2;

	/*
	 * We have all of the buffers from the magazines;  if we are walking
	 * allocated buffers, sort them so we can bsearch them later.
	 */
	if (type & UM_ALLOCATED)
		qsort(maglist, magcnt, sizeof (void *), addrcmp);

	wsp->walk_data = umw = mdb_zalloc(sizeof (umem_walk_t), UM_SLEEP);

	umw->umw_type = type;
	umw->umw_addr = addr;
	umw->umw_cp = cp;
	umw->umw_csize = csize;
	umw->umw_maglist = maglist;
	umw->umw_max = magmax;
	umw->umw_count = magcnt;
	umw->umw_pos = 0;

	/*
	 * When walking allocated buffers in a UMF_HASH cache, we walk the
	 * hash table instead of the slab layer.
	 */
	if ((cp->cache_flags & UMF_HASH) && (type & UM_ALLOCATED)) {
		layered = "umem_hash";

		umw->umw_type |= UM_HASH;
	} else {
		/*
		 * If we are walking freed buffers, we only need the
		 * magazine layer plus the partially allocated slabs.
		 * To walk allocated buffers, we need all of the slabs.
		 */
		if (type & UM_ALLOCATED)
			layered = "umem_slab";
		else
			layered = "umem_slab_partial";

		/*
		 * for small-slab caches, we read in the entire slab.  For
		 * freed buffers, we can just walk the freelist.  For
		 * allocated buffers, we use a 'valid' array to track
		 * the freed buffers.
		 */
		if (!(cp->cache_flags & UMF_HASH)) {
			chunksize = cp->cache_chunksize;
			slabsize = cp->cache_slabsize;

			umw->umw_ubase = mdb_alloc(slabsize +
			    sizeof (umem_bufctl_t), UM_SLEEP);

			if (type & UM_ALLOCATED)
				umw->umw_valid =
				    mdb_alloc(slabsize / chunksize, UM_SLEEP);
		}
	}

	status = WALK_NEXT;

	if (mdb_layered_walk(layered, wsp) == -1) {
		mdb_warn("unable to start layered '%s' walk", layered);
		status = WALK_ERR;
	}

out1:
	if (status == WALK_ERR) {
		if (umw->umw_valid)
			mdb_free(umw->umw_valid, slabsize / chunksize);

		if (umw->umw_ubase)
			mdb_free(umw->umw_ubase, slabsize +
			    sizeof (umem_bufctl_t));

		if (umw->umw_maglist)
			mdb_free(umw->umw_maglist, umw->umw_max *
			    sizeof (uintptr_t));

		mdb_free(umw, sizeof (umem_walk_t));
		wsp->walk_data = NULL;
	}

out2:
	if (status == WALK_ERR)
		mdb_free(cp, csize);

	return (status);
}

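/*
 * Step function for the generic umem walk.  For UM_HASH walks, each
 * hash-table bufctl is reported unless its buffer is sitting in a
 * magazine.  Otherwise the magazine contents are reported once (for
 * UM_FREE walks), and then each slab's buffers are reported according
 * to the walk type.
 */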
int
umem_walk_step(mdb_walk_state_t *wsp)
{
	umem_walk_t *umw = wsp->walk_data;
	int type = umw->umw_type;
	umem_cache_t *cp = umw->umw_cp;

	void **maglist = umw->umw_maglist;
	int magcnt = umw->umw_count;

	uintptr_t chunksize, slabsize;
	uintptr_t addr;
	const umem_slab_t *sp;
	const umem_bufctl_t *bcp;
	umem_bufctl_t bc;

	int chunks;
	char *kbase;
	void *buf;
	int i, ret;

	char *valid, *ubase;

	/*
	 * first, handle the 'umem_hash' layered walk case
	 */
	if (type & UM_HASH) {
		/*
		 * We have a buffer which has been allocated out of the
		 * global layer. We need to make sure that it's not
		 * actually sitting in a magazine before we report it as
		 * an allocated buffer.
		 */
		buf = ((const umem_bufctl_t *)wsp->walk_layer)->bc_addr;

		if (magcnt > 0 &&
		    bsearch(&buf, maglist, magcnt, sizeof (void *),
		    addrcmp) != NULL)
			return (WALK_NEXT);

		if (type & UM_BUFCTL)
			return (bufctl_walk_callback(cp, wsp, wsp->walk_addr));

		return (umem_walk_callback(wsp, (uintptr_t)buf));
	}

	ret = WALK_NEXT;

	addr = umw->umw_addr;

	/*
	 * If we're walking freed buffers, report everything in the
	 * magazine layer before processing the first slab.
	 */
	if ((type & UM_FREE) && magcnt != 0) {
		umw->umw_count = 0;		/* only do this once */
		for (i = 0; i < magcnt; i++) {
			buf = maglist[i];

			if (type & UM_BUFCTL) {
				uintptr_t out;

				if (cp->cache_flags & UMF_BUFTAG) {
					umem_buftag_t *btp;
					umem_buftag_t tag;

					/* LINTED - alignment */
					btp = UMEM_BUFTAG(cp, buf);
					if (mdb_vread(&tag, sizeof (tag),
					    (uintptr_t)btp) == -1) {
						mdb_warn("reading buftag for "
						    "%p at %p", buf, btp);
						continue;
					}
					out = (uintptr_t)tag.bt_bufctl;
				} else {
					if (umem_hash_lookup(cp, addr, buf,
					    &out) == -1)
						continue;
				}
				ret = bufctl_walk_callback(cp, wsp, out);
			} else {
				ret = umem_walk_callback(wsp, (uintptr_t)buf);
			}

			if (ret != WALK_NEXT)
				return (ret);
		}
	}

	/*
	 * Handle the buffers in the current slab
	 */
	chunksize = cp->cache_chunksize;
	slabsize = cp->cache_slabsize;

	sp = wsp->walk_layer;
	chunks = sp->slab_chunks;
	kbase = sp->slab_base;

	dprintf(("kbase is %p\n", kbase));

	if (!(cp->cache_flags & UMF_HASH)) {
		valid = umw->umw_valid;
		ubase = umw->umw_ubase;

		if (mdb_vread(ubase, chunks * chunksize,
		    (uintptr_t)kbase) == -1) {
			mdb_warn("failed to read slab contents at %p", kbase);
			return (WALK_ERR);
		}

		/*
		 * Set up the valid map as fully allocated -- we'll punch
		 * out the freelist.
		 */
		if (type & UM_ALLOCATED)
			(void) memset(valid, 1, chunks);
	} else {
		valid = NULL;
		ubase = NULL;
	}

	/*
	 * walk the slab's freelist
	 */
	bcp = sp->slab_head;

	dprintf(("refcnt is %d; chunks is %d\n", sp->slab_refcnt, chunks));

12650Sstevel@tonic-gate 	/*
12660Sstevel@tonic-gate 	 * since we could be in the middle of allocating a buffer,
12670Sstevel@tonic-gate 	 * our refcnt could be one higher than it ought to be.  So we
12680Sstevel@tonic-gate 	 * check one entry further on the freelist than the count allows.
12690Sstevel@tonic-gate 	 */
12700Sstevel@tonic-gate 	for (i = sp->slab_refcnt; i <= chunks; i++) {
12710Sstevel@tonic-gate 		uint_t ndx;
12720Sstevel@tonic-gate 
12730Sstevel@tonic-gate 		dprintf(("bcp is %p\n", bcp));
12740Sstevel@tonic-gate 
12750Sstevel@tonic-gate 		if (bcp == NULL) {
12760Sstevel@tonic-gate 			if (i == chunks)
12770Sstevel@tonic-gate 				break;
12780Sstevel@tonic-gate 			mdb_warn(
12790Sstevel@tonic-gate 			    "slab %p in cache %p freelist too short by %d\n",
12800Sstevel@tonic-gate 			    sp, addr, chunks - i);
12810Sstevel@tonic-gate 			break;
12820Sstevel@tonic-gate 		}
12830Sstevel@tonic-gate 
12840Sstevel@tonic-gate 		if (cp->cache_flags & UMF_HASH) {
12850Sstevel@tonic-gate 			if (mdb_vread(&bc, sizeof (bc), (uintptr_t)bcp) == -1) {
12860Sstevel@tonic-gate 				mdb_warn("failed to read bufctl ptr at %p",
12870Sstevel@tonic-gate 				    bcp);
12880Sstevel@tonic-gate 				break;
12890Sstevel@tonic-gate 			}
12900Sstevel@tonic-gate 			buf = bc.bc_addr;
12910Sstevel@tonic-gate 		} else {
12920Sstevel@tonic-gate 			/*
12930Sstevel@tonic-gate 			 * Otherwise the buffer is in the slab which
12940Sstevel@tonic-gate 			 * we've read in;  we just need to determine
12950Sstevel@tonic-gate 			 * its offset in the slab to find the
12960Sstevel@tonic-gate 			 * umem_bufctl_t.
12970Sstevel@tonic-gate 			 */
12980Sstevel@tonic-gate 			bc = *((umem_bufctl_t *)
12990Sstevel@tonic-gate 			    ((uintptr_t)bcp - (uintptr_t)kbase +
13000Sstevel@tonic-gate 			    (uintptr_t)ubase));
13010Sstevel@tonic-gate 
13020Sstevel@tonic-gate 			buf = UMEM_BUF(cp, bcp);
13030Sstevel@tonic-gate 		}
13040Sstevel@tonic-gate 
13050Sstevel@tonic-gate 		ndx = ((uintptr_t)buf - (uintptr_t)kbase) / chunksize;
13060Sstevel@tonic-gate 
13070Sstevel@tonic-gate 		if (ndx > slabsize / cp->cache_bufsize) {
13080Sstevel@tonic-gate 			/*
13090Sstevel@tonic-gate 			 * This is very wrong; we have managed to find
13100Sstevel@tonic-gate 			 * a buffer in the slab which shouldn't
13110Sstevel@tonic-gate 			 * actually be here.  Emit a warning, and
13120Sstevel@tonic-gate 			 * try to continue.
13130Sstevel@tonic-gate 			 */
13140Sstevel@tonic-gate 			mdb_warn("buf %p is out of range for "
13150Sstevel@tonic-gate 			    "slab %p, cache %p\n", buf, sp, addr);
13160Sstevel@tonic-gate 		} else if (type & UM_ALLOCATED) {
13170Sstevel@tonic-gate 			/*
13180Sstevel@tonic-gate 			 * we have found a buffer on the slab's freelist;
13190Sstevel@tonic-gate 			 * clear its entry
13200Sstevel@tonic-gate 			 */
13210Sstevel@tonic-gate 			valid[ndx] = 0;
13220Sstevel@tonic-gate 		} else {
13230Sstevel@tonic-gate 			/*
13240Sstevel@tonic-gate 			 * Report this freed buffer
13250Sstevel@tonic-gate 			 */
13260Sstevel@tonic-gate 			if (type & UM_BUFCTL) {
13270Sstevel@tonic-gate 				ret = bufctl_walk_callback(cp, wsp,
13280Sstevel@tonic-gate 				    (uintptr_t)bcp);
13290Sstevel@tonic-gate 			} else {
13300Sstevel@tonic-gate 				ret = umem_walk_callback(wsp, (uintptr_t)buf);
13310Sstevel@tonic-gate 			}
13320Sstevel@tonic-gate 			if (ret != WALK_NEXT)
13330Sstevel@tonic-gate 				return (ret);
13340Sstevel@tonic-gate 		}
13350Sstevel@tonic-gate 
13360Sstevel@tonic-gate 		bcp = bc.bc_next;
13370Sstevel@tonic-gate 	}
13380Sstevel@tonic-gate 
13390Sstevel@tonic-gate 	if (bcp != NULL) {
13400Sstevel@tonic-gate 		dprintf(("slab %p in cache %p freelist too long (%p)\n",
13410Sstevel@tonic-gate 		    sp, addr, bcp));
13420Sstevel@tonic-gate 	}
13430Sstevel@tonic-gate 
13440Sstevel@tonic-gate 	/*
13450Sstevel@tonic-gate 	 * If we are walking freed buffers, the loop above handled reporting
13460Sstevel@tonic-gate 	 * them.
13470Sstevel@tonic-gate 	 */
13480Sstevel@tonic-gate 	if (type & UM_FREE)
13490Sstevel@tonic-gate 		return (WALK_NEXT);
13500Sstevel@tonic-gate 
13510Sstevel@tonic-gate 	if (type & UM_BUFCTL) {
13520Sstevel@tonic-gate 		mdb_warn("impossible situation: small-slab UM_BUFCTL walk for "
13530Sstevel@tonic-gate 		    "cache %p\n", addr);
13540Sstevel@tonic-gate 		return (WALK_ERR);
13550Sstevel@tonic-gate 	}
13560Sstevel@tonic-gate 
13570Sstevel@tonic-gate 	/*
13580Sstevel@tonic-gate 	 * Report allocated buffers, skipping buffers in the magazine layer.
13590Sstevel@tonic-gate 	 * We only get this far for small-slab caches.
13600Sstevel@tonic-gate 	 */
13610Sstevel@tonic-gate 	for (i = 0; ret == WALK_NEXT && i < chunks; i++) {
13620Sstevel@tonic-gate 		buf = (char *)kbase + i * chunksize;
13630Sstevel@tonic-gate 
13640Sstevel@tonic-gate 		if (!valid[i])
13650Sstevel@tonic-gate 			continue;		/* on slab freelist */
13660Sstevel@tonic-gate 
13670Sstevel@tonic-gate 		if (magcnt > 0 &&
13680Sstevel@tonic-gate 		    bsearch(&buf, maglist, magcnt, sizeof (void *),
13690Sstevel@tonic-gate 		    addrcmp) != NULL)
13700Sstevel@tonic-gate 			continue;		/* in magazine layer */
13710Sstevel@tonic-gate 
13720Sstevel@tonic-gate 		ret = umem_walk_callback(wsp, (uintptr_t)buf);
13730Sstevel@tonic-gate 	}
13740Sstevel@tonic-gate 	return (ret);
13750Sstevel@tonic-gate }
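
/*
 * Illustrative sketch (not part of the original module): the valid-map
 * technique from umem_walk_step() in miniature.  Mark every chunk
 * allocated, punch out the chunks named on a freelist, and count what
 * remains.  The freelist node type below is a hypothetical stand-in for
 * umem_bufctl_t, which chains free buffers through bc_next.
 */
typedef struct example_free_node {
	struct example_free_node *efn_next;	/* next free buffer */
	int efn_ndx;				/* chunk index of this buffer */
} example_free_node_t;

static int
example_count_allocated(const example_free_node_t *freelist, uint8_t *valid,
    int chunks)
{
	const example_free_node_t *fp;
	int i, nalloc = 0;

	for (i = 0; i < chunks; i++)
		valid[i] = 1;			/* assume fully allocated */

	for (fp = freelist; fp != NULL; fp = fp->efn_next) {
		if (fp->efn_ndx < 0 || fp->efn_ndx >= chunks)
			continue;		/* out of range; cf. ndx check */
		valid[fp->efn_ndx] = 0;		/* punch out the freelist */
	}

	for (i = 0; i < chunks; i++)
		nalloc += valid[i];		/* survivors are allocated */

	return (nalloc);
}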
13760Sstevel@tonic-gate 
13770Sstevel@tonic-gate void
13780Sstevel@tonic-gate umem_walk_fini(mdb_walk_state_t *wsp)
13790Sstevel@tonic-gate {
13800Sstevel@tonic-gate 	umem_walk_t *umw = wsp->walk_data;
13810Sstevel@tonic-gate 	uintptr_t chunksize;
13820Sstevel@tonic-gate 	uintptr_t slabsize;
13830Sstevel@tonic-gate 
13840Sstevel@tonic-gate 	if (umw == NULL)
13850Sstevel@tonic-gate 		return;
13860Sstevel@tonic-gate 
13870Sstevel@tonic-gate 	if (umw->umw_maglist != NULL)
13880Sstevel@tonic-gate 		mdb_free(umw->umw_maglist, umw->umw_max * sizeof (void *));
13890Sstevel@tonic-gate 
13900Sstevel@tonic-gate 	chunksize = umw->umw_cp->cache_chunksize;
13910Sstevel@tonic-gate 	slabsize = umw->umw_cp->cache_slabsize;
13920Sstevel@tonic-gate 
13930Sstevel@tonic-gate 	if (umw->umw_valid != NULL)
13940Sstevel@tonic-gate 		mdb_free(umw->umw_valid, slabsize / chunksize);
13950Sstevel@tonic-gate 	if (umw->umw_ubase != NULL)
13960Sstevel@tonic-gate 		mdb_free(umw->umw_ubase, slabsize + sizeof (umem_bufctl_t));
13970Sstevel@tonic-gate 
13980Sstevel@tonic-gate 	mdb_free(umw->umw_cp, umw->umw_csize);
13990Sstevel@tonic-gate 	mdb_free(umw, sizeof (umem_walk_t));
14000Sstevel@tonic-gate }
14010Sstevel@tonic-gate 
14020Sstevel@tonic-gate /*ARGSUSED*/
14030Sstevel@tonic-gate static int
14040Sstevel@tonic-gate umem_walk_all(uintptr_t addr, const umem_cache_t *c, mdb_walk_state_t *wsp)
14050Sstevel@tonic-gate {
14060Sstevel@tonic-gate 	/*
14070Sstevel@tonic-gate 	 * Buffers allocated from NOTOUCH caches can also show up as freed
14080Sstevel@tonic-gate 	 * memory in other caches.  This can be a little confusing, so we
14090Sstevel@tonic-gate 	 * don't walk NOTOUCH caches when walking all caches (thereby ensuring
14100Sstevel@tonic-gate 	 * that "::walk umem" and "::walk freemem" yield disjoint output).
14110Sstevel@tonic-gate 	 */
14120Sstevel@tonic-gate 	if (c->cache_cflags & UMC_NOTOUCH)
14130Sstevel@tonic-gate 		return (WALK_NEXT);
14140Sstevel@tonic-gate 
14150Sstevel@tonic-gate 	if (mdb_pwalk(wsp->walk_data, wsp->walk_callback,
14160Sstevel@tonic-gate 	    wsp->walk_cbdata, addr) == -1)
14170Sstevel@tonic-gate 		return (WALK_DONE);
14180Sstevel@tonic-gate 
14190Sstevel@tonic-gate 	return (WALK_NEXT);
14200Sstevel@tonic-gate }
14210Sstevel@tonic-gate 
14220Sstevel@tonic-gate #define	UMEM_WALK_ALL(name, wsp) { \
14230Sstevel@tonic-gate 	wsp->walk_data = (name); \
14240Sstevel@tonic-gate 	if (mdb_walk("umem_cache", (mdb_walk_cb_t)umem_walk_all, wsp) == -1) \
14250Sstevel@tonic-gate 		return (WALK_ERR); \
14260Sstevel@tonic-gate 	return (WALK_DONE); \
14270Sstevel@tonic-gate }
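
/*
 * For reference, UMEM_WALK_ALL("umem", wsp) expands to:
 *
 *	wsp->walk_data = "umem";
 *	if (mdb_walk("umem_cache", (mdb_walk_cb_t)umem_walk_all, wsp) == -1)
 *		return (WALK_ERR);
 *	return (WALK_DONE);
 *
 * that is, a global walk re-invokes the named walker once per umem cache,
 * with umem_walk_all() skipping UMC_NOTOUCH caches.
 */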
14280Sstevel@tonic-gate 
14290Sstevel@tonic-gate int
14300Sstevel@tonic-gate umem_walk_init(mdb_walk_state_t *wsp)
14310Sstevel@tonic-gate {
14320Sstevel@tonic-gate 	if (wsp->walk_arg != NULL)
14330Sstevel@tonic-gate 		wsp->walk_addr = (uintptr_t)wsp->walk_arg;
14340Sstevel@tonic-gate 
14350Sstevel@tonic-gate 	if (wsp->walk_addr == NULL)
14360Sstevel@tonic-gate 		UMEM_WALK_ALL("umem", wsp);
14370Sstevel@tonic-gate 	return (umem_walk_init_common(wsp, UM_ALLOCATED));
14380Sstevel@tonic-gate }
14390Sstevel@tonic-gate 
14400Sstevel@tonic-gate int
14410Sstevel@tonic-gate bufctl_walk_init(mdb_walk_state_t *wsp)
14420Sstevel@tonic-gate {
14430Sstevel@tonic-gate 	if (wsp->walk_addr == NULL)
14440Sstevel@tonic-gate 		UMEM_WALK_ALL("bufctl", wsp);
14450Sstevel@tonic-gate 	return (umem_walk_init_common(wsp, UM_ALLOCATED | UM_BUFCTL));
14460Sstevel@tonic-gate }
14470Sstevel@tonic-gate 
14480Sstevel@tonic-gate int
14490Sstevel@tonic-gate freemem_walk_init(mdb_walk_state_t *wsp)
14500Sstevel@tonic-gate {
14510Sstevel@tonic-gate 	if (wsp->walk_addr == NULL)
14520Sstevel@tonic-gate 		UMEM_WALK_ALL("freemem", wsp);
14530Sstevel@tonic-gate 	return (umem_walk_init_common(wsp, UM_FREE));
14540Sstevel@tonic-gate }
14550Sstevel@tonic-gate 
14560Sstevel@tonic-gate int
14570Sstevel@tonic-gate freectl_walk_init(mdb_walk_state_t *wsp)
14580Sstevel@tonic-gate {
14590Sstevel@tonic-gate 	if (wsp->walk_addr == NULL)
14600Sstevel@tonic-gate 		UMEM_WALK_ALL("freectl", wsp);
14610Sstevel@tonic-gate 	return (umem_walk_init_common(wsp, UM_FREE | UM_BUFCTL));
14620Sstevel@tonic-gate }
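
/*
 * Example usage of the four walkers above (illustrative):
 *
 *	> ::walk umem			- allocated buffers, all caches
 *	> addr::walk freemem		- free buffers in the cache at addr
 *	> addr::walk bufctl | ::bufctl	- allocated bufctls, formatted
 */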
14630Sstevel@tonic-gate 
14640Sstevel@tonic-gate typedef struct bufctl_history_walk {
14650Sstevel@tonic-gate 	void		*bhw_next;
14660Sstevel@tonic-gate 	umem_cache_t	*bhw_cache;
14670Sstevel@tonic-gate 	umem_slab_t	*bhw_slab;
14680Sstevel@tonic-gate 	hrtime_t	bhw_timestamp;
14690Sstevel@tonic-gate } bufctl_history_walk_t;
14700Sstevel@tonic-gate 
14710Sstevel@tonic-gate int
14720Sstevel@tonic-gate bufctl_history_walk_init(mdb_walk_state_t *wsp)
14730Sstevel@tonic-gate {
14740Sstevel@tonic-gate 	bufctl_history_walk_t *bhw;
14750Sstevel@tonic-gate 	umem_bufctl_audit_t bc;
14760Sstevel@tonic-gate 	umem_bufctl_audit_t bcn;
14770Sstevel@tonic-gate 
14780Sstevel@tonic-gate 	if (wsp->walk_addr == NULL) {
14790Sstevel@tonic-gate 		mdb_warn("bufctl_history walk doesn't support global walks\n");
14800Sstevel@tonic-gate 		return (WALK_ERR);
14810Sstevel@tonic-gate 	}
14820Sstevel@tonic-gate 
14830Sstevel@tonic-gate 	if (mdb_vread(&bc, sizeof (bc), wsp->walk_addr) == -1) {
14840Sstevel@tonic-gate 		mdb_warn("unable to read bufctl at %p", wsp->walk_addr);
14850Sstevel@tonic-gate 		return (WALK_ERR);
14860Sstevel@tonic-gate 	}
14870Sstevel@tonic-gate 
14880Sstevel@tonic-gate 	bhw = mdb_zalloc(sizeof (*bhw), UM_SLEEP);
14890Sstevel@tonic-gate 	bhw->bhw_timestamp = 0;
14900Sstevel@tonic-gate 	bhw->bhw_cache = bc.bc_cache;
14910Sstevel@tonic-gate 	bhw->bhw_slab = bc.bc_slab;
14920Sstevel@tonic-gate 
14930Sstevel@tonic-gate 	/*
14940Sstevel@tonic-gate 	 * sometimes the first log entry matches the base bufctl;  in that
14950Sstevel@tonic-gate 	 * case, skip the base bufctl.
14960Sstevel@tonic-gate 	 */
14970Sstevel@tonic-gate 	if (bc.bc_lastlog != NULL &&
14980Sstevel@tonic-gate 	    mdb_vread(&bcn, sizeof (bcn), (uintptr_t)bc.bc_lastlog) != -1 &&
14990Sstevel@tonic-gate 	    bc.bc_addr == bcn.bc_addr &&
15000Sstevel@tonic-gate 	    bc.bc_cache == bcn.bc_cache &&
15010Sstevel@tonic-gate 	    bc.bc_slab == bcn.bc_slab &&
15020Sstevel@tonic-gate 	    bc.bc_timestamp == bcn.bc_timestamp &&
15030Sstevel@tonic-gate 	    bc.bc_thread == bcn.bc_thread)
15040Sstevel@tonic-gate 		bhw->bhw_next = bc.bc_lastlog;
15050Sstevel@tonic-gate 	else
15060Sstevel@tonic-gate 		bhw->bhw_next = (void *)wsp->walk_addr;
15070Sstevel@tonic-gate 
15080Sstevel@tonic-gate 	wsp->walk_addr = (uintptr_t)bc.bc_addr;
15090Sstevel@tonic-gate 	wsp->walk_data = bhw;
15100Sstevel@tonic-gate 
15110Sstevel@tonic-gate 	return (WALK_NEXT);
15120Sstevel@tonic-gate }
15130Sstevel@tonic-gate 
15140Sstevel@tonic-gate int
15150Sstevel@tonic-gate bufctl_history_walk_step(mdb_walk_state_t *wsp)
15160Sstevel@tonic-gate {
15170Sstevel@tonic-gate 	bufctl_history_walk_t *bhw = wsp->walk_data;
15180Sstevel@tonic-gate 	uintptr_t addr = (uintptr_t)bhw->bhw_next;
15190Sstevel@tonic-gate 	uintptr_t baseaddr = wsp->walk_addr;
15200Sstevel@tonic-gate 	umem_bufctl_audit_t *b;
15210Sstevel@tonic-gate 	UMEM_LOCAL_BUFCTL_AUDIT(&b);
15220Sstevel@tonic-gate 
15230Sstevel@tonic-gate 	if (addr == NULL)
15240Sstevel@tonic-gate 		return (WALK_DONE);
15250Sstevel@tonic-gate 
15260Sstevel@tonic-gate 	if (mdb_vread(b, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
15270Sstevel@tonic-gate 		mdb_warn("unable to read bufctl at %p", bhw->bhw_next);
15280Sstevel@tonic-gate 		return (WALK_ERR);
15290Sstevel@tonic-gate 	}
15300Sstevel@tonic-gate 
15310Sstevel@tonic-gate 	/*
15320Sstevel@tonic-gate 	 * The bufctl is only valid if the address, cache, and slab are
15330Sstevel@tonic-gate 	 * correct.  We also check that the timestamp is decreasing, to
15340Sstevel@tonic-gate 	 * prevent infinite loops.
15350Sstevel@tonic-gate 	 */
15360Sstevel@tonic-gate 	if ((uintptr_t)b->bc_addr != baseaddr ||
15370Sstevel@tonic-gate 	    b->bc_cache != bhw->bhw_cache ||
15380Sstevel@tonic-gate 	    b->bc_slab != bhw->bhw_slab ||
15390Sstevel@tonic-gate 	    (bhw->bhw_timestamp != 0 && b->bc_timestamp >= bhw->bhw_timestamp))
15400Sstevel@tonic-gate 		return (WALK_DONE);
15410Sstevel@tonic-gate 
15420Sstevel@tonic-gate 	bhw->bhw_next = b->bc_lastlog;
15430Sstevel@tonic-gate 	bhw->bhw_timestamp = b->bc_timestamp;
15440Sstevel@tonic-gate 
15450Sstevel@tonic-gate 	return (wsp->walk_callback(addr, b, wsp->walk_cbdata));
15460Sstevel@tonic-gate }
15470Sstevel@tonic-gate 
15480Sstevel@tonic-gate void
15490Sstevel@tonic-gate bufctl_history_walk_fini(mdb_walk_state_t *wsp)
15500Sstevel@tonic-gate {
15510Sstevel@tonic-gate 	bufctl_history_walk_t *bhw = wsp->walk_data;
15520Sstevel@tonic-gate 
15530Sstevel@tonic-gate 	mdb_free(bhw, sizeof (*bhw));
15540Sstevel@tonic-gate }
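
/*
 * Example usage (illustrative): given a bufctl address, walk its
 * transaction history, most recent entry first:
 *
 *	> <bufctl-addr>::walk bufctl_history | ::bufctl -v
 *
 * The -h option of ::bufctl (below) drives this walker internally.
 */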
15550Sstevel@tonic-gate 
15560Sstevel@tonic-gate typedef struct umem_log_walk {
15570Sstevel@tonic-gate 	umem_bufctl_audit_t *ulw_base;
15580Sstevel@tonic-gate 	umem_bufctl_audit_t **ulw_sorted;
15590Sstevel@tonic-gate 	umem_log_header_t ulw_lh;
15600Sstevel@tonic-gate 	size_t ulw_size;
15610Sstevel@tonic-gate 	size_t ulw_maxndx;
15620Sstevel@tonic-gate 	size_t ulw_ndx;
15630Sstevel@tonic-gate } umem_log_walk_t;
15640Sstevel@tonic-gate 
15650Sstevel@tonic-gate int
15660Sstevel@tonic-gate umem_log_walk_init(mdb_walk_state_t *wsp)
15670Sstevel@tonic-gate {
15680Sstevel@tonic-gate 	uintptr_t lp = wsp->walk_addr;
15690Sstevel@tonic-gate 	umem_log_walk_t *ulw;
15700Sstevel@tonic-gate 	umem_log_header_t *lhp;
15710Sstevel@tonic-gate 	int maxndx, i, j, k;
15720Sstevel@tonic-gate 
15730Sstevel@tonic-gate 	/*
15740Sstevel@tonic-gate 	 * By default (global walk), walk the umem_transaction_log.  Otherwise
15750Sstevel@tonic-gate 	 * read the log whose umem_log_header_t is stored at walk_addr.
15760Sstevel@tonic-gate 	 */
15770Sstevel@tonic-gate 	if (lp == NULL && umem_readvar(&lp, "umem_transaction_log") == -1) {
15780Sstevel@tonic-gate 		mdb_warn("failed to read 'umem_transaction_log'");
15790Sstevel@tonic-gate 		return (WALK_ERR);
15800Sstevel@tonic-gate 	}
15810Sstevel@tonic-gate 
15820Sstevel@tonic-gate 	if (lp == NULL) {
15830Sstevel@tonic-gate 		mdb_warn("log is disabled\n");
15840Sstevel@tonic-gate 		return (WALK_ERR);
15850Sstevel@tonic-gate 	}
15860Sstevel@tonic-gate 
15870Sstevel@tonic-gate 	ulw = mdb_zalloc(sizeof (umem_log_walk_t), UM_SLEEP);
15880Sstevel@tonic-gate 	lhp = &ulw->ulw_lh;
15890Sstevel@tonic-gate 
15900Sstevel@tonic-gate 	if (mdb_vread(lhp, sizeof (umem_log_header_t), lp) == -1) {
15910Sstevel@tonic-gate 		mdb_warn("failed to read log header at %p", lp);
15920Sstevel@tonic-gate 		mdb_free(ulw, sizeof (umem_log_walk_t));
15930Sstevel@tonic-gate 		return (WALK_ERR);
15940Sstevel@tonic-gate 	}
15950Sstevel@tonic-gate 
15960Sstevel@tonic-gate 	ulw->ulw_size = lhp->lh_chunksize * lhp->lh_nchunks;
15970Sstevel@tonic-gate 	ulw->ulw_base = mdb_alloc(ulw->ulw_size, UM_SLEEP);
15980Sstevel@tonic-gate 	maxndx = lhp->lh_chunksize / UMEM_BUFCTL_AUDIT_SIZE - 1;
15990Sstevel@tonic-gate 
16000Sstevel@tonic-gate 	if (mdb_vread(ulw->ulw_base, ulw->ulw_size,
16010Sstevel@tonic-gate 	    (uintptr_t)lhp->lh_base) == -1) {
16020Sstevel@tonic-gate 		mdb_warn("failed to read log at base %p", lhp->lh_base);
16030Sstevel@tonic-gate 		mdb_free(ulw->ulw_base, ulw->ulw_size);
16040Sstevel@tonic-gate 		mdb_free(ulw, sizeof (umem_log_walk_t));
16050Sstevel@tonic-gate 		return (WALK_ERR);
16060Sstevel@tonic-gate 	}
16070Sstevel@tonic-gate 
16080Sstevel@tonic-gate 	ulw->ulw_sorted = mdb_alloc(maxndx * lhp->lh_nchunks *
16090Sstevel@tonic-gate 	    sizeof (umem_bufctl_audit_t *), UM_SLEEP);
16100Sstevel@tonic-gate 
16110Sstevel@tonic-gate 	for (i = 0, k = 0; i < lhp->lh_nchunks; i++) {
16120Sstevel@tonic-gate 		caddr_t chunk = (caddr_t)
16130Sstevel@tonic-gate 		    ((uintptr_t)ulw->ulw_base + i * lhp->lh_chunksize);
16140Sstevel@tonic-gate 
16150Sstevel@tonic-gate 		for (j = 0; j < maxndx; j++) {
16160Sstevel@tonic-gate 			/* LINTED align */
16170Sstevel@tonic-gate 			ulw->ulw_sorted[k++] = (umem_bufctl_audit_t *)chunk;
16180Sstevel@tonic-gate 			chunk += UMEM_BUFCTL_AUDIT_SIZE;
16190Sstevel@tonic-gate 		}
16200Sstevel@tonic-gate 	}
16210Sstevel@tonic-gate 
16220Sstevel@tonic-gate 	qsort(ulw->ulw_sorted, k, sizeof (umem_bufctl_audit_t *),
16230Sstevel@tonic-gate 	    (int(*)(const void *, const void *))bufctlcmp);
16240Sstevel@tonic-gate 
16250Sstevel@tonic-gate 	ulw->ulw_maxndx = k;
16260Sstevel@tonic-gate 	wsp->walk_data = ulw;
16270Sstevel@tonic-gate 
16280Sstevel@tonic-gate 	return (WALK_NEXT);
16290Sstevel@tonic-gate }
16300Sstevel@tonic-gate 
16310Sstevel@tonic-gate int
16320Sstevel@tonic-gate umem_log_walk_step(mdb_walk_state_t *wsp)
16330Sstevel@tonic-gate {
16340Sstevel@tonic-gate 	umem_log_walk_t *ulw = wsp->walk_data;
16350Sstevel@tonic-gate 	umem_bufctl_audit_t *bcp;
16360Sstevel@tonic-gate 
16370Sstevel@tonic-gate 	if (ulw->ulw_ndx == ulw->ulw_maxndx)
16380Sstevel@tonic-gate 		return (WALK_DONE);
16390Sstevel@tonic-gate 
16400Sstevel@tonic-gate 	bcp = ulw->ulw_sorted[ulw->ulw_ndx++];
16410Sstevel@tonic-gate 
16420Sstevel@tonic-gate 	return (wsp->walk_callback((uintptr_t)bcp - (uintptr_t)ulw->ulw_base +
16430Sstevel@tonic-gate 	    (uintptr_t)ulw->ulw_lh.lh_base, bcp, wsp->walk_cbdata));
16440Sstevel@tonic-gate }
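
/*
 * Worked example of the translation above (addresses illustrative): if
 * the target's log base lh_base is 0x10000 and the local copy ulw_base
 * sits at 0x80000, a bufctl found locally at 0x80340 is reported at
 * 0x80340 - 0x80000 + 0x10000 = 0x10340 -- its address in the target.
 */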
16450Sstevel@tonic-gate 
16460Sstevel@tonic-gate void
16470Sstevel@tonic-gate umem_log_walk_fini(mdb_walk_state_t *wsp)
16480Sstevel@tonic-gate {
16490Sstevel@tonic-gate 	umem_log_walk_t *ulw = wsp->walk_data;
16500Sstevel@tonic-gate 
16510Sstevel@tonic-gate 	mdb_free(ulw->ulw_base, ulw->ulw_size);
16520Sstevel@tonic-gate 	mdb_free(ulw->ulw_sorted, ulw->ulw_maxndx *
16530Sstevel@tonic-gate 	    sizeof (umem_bufctl_audit_t *));
16540Sstevel@tonic-gate 	mdb_free(ulw, sizeof (umem_log_walk_t));
16550Sstevel@tonic-gate }
16560Sstevel@tonic-gate 
16570Sstevel@tonic-gate typedef struct allocdby_bufctl {
16580Sstevel@tonic-gate 	uintptr_t abb_addr;
16590Sstevel@tonic-gate 	hrtime_t abb_ts;
16600Sstevel@tonic-gate } allocdby_bufctl_t;
16610Sstevel@tonic-gate 
16620Sstevel@tonic-gate typedef struct allocdby_walk {
16630Sstevel@tonic-gate 	const char *abw_walk;
16640Sstevel@tonic-gate 	uintptr_t abw_thread;
16650Sstevel@tonic-gate 	size_t abw_nbufs;
16660Sstevel@tonic-gate 	size_t abw_size;
16670Sstevel@tonic-gate 	allocdby_bufctl_t *abw_buf;
16680Sstevel@tonic-gate 	size_t abw_ndx;
16690Sstevel@tonic-gate } allocdby_walk_t;
16700Sstevel@tonic-gate 
16710Sstevel@tonic-gate int
16720Sstevel@tonic-gate allocdby_walk_bufctl(uintptr_t addr, const umem_bufctl_audit_t *bcp,
16730Sstevel@tonic-gate     allocdby_walk_t *abw)
16740Sstevel@tonic-gate {
16750Sstevel@tonic-gate 	if ((uintptr_t)bcp->bc_thread != abw->abw_thread)
16760Sstevel@tonic-gate 		return (WALK_NEXT);
16770Sstevel@tonic-gate 
16780Sstevel@tonic-gate 	if (abw->abw_nbufs == abw->abw_size) {
16790Sstevel@tonic-gate 		allocdby_bufctl_t *buf;
16800Sstevel@tonic-gate 		size_t oldsize = sizeof (allocdby_bufctl_t) * abw->abw_size;
16810Sstevel@tonic-gate 
16820Sstevel@tonic-gate 		buf = mdb_zalloc(oldsize << 1, UM_SLEEP);
16830Sstevel@tonic-gate 
16840Sstevel@tonic-gate 		bcopy(abw->abw_buf, buf, oldsize);
16850Sstevel@tonic-gate 		mdb_free(abw->abw_buf, oldsize);
16860Sstevel@tonic-gate 
16870Sstevel@tonic-gate 		abw->abw_size <<= 1;
16880Sstevel@tonic-gate 		abw->abw_buf = buf;
16890Sstevel@tonic-gate 	}
16900Sstevel@tonic-gate 
16910Sstevel@tonic-gate 	abw->abw_buf[abw->abw_nbufs].abb_addr = addr;
16920Sstevel@tonic-gate 	abw->abw_buf[abw->abw_nbufs].abb_ts = bcp->bc_timestamp;
16930Sstevel@tonic-gate 	abw->abw_nbufs++;
16940Sstevel@tonic-gate 
16950Sstevel@tonic-gate 	return (WALK_NEXT);
16960Sstevel@tonic-gate }
16970Sstevel@tonic-gate 
16980Sstevel@tonic-gate /*ARGSUSED*/
16990Sstevel@tonic-gate int
17000Sstevel@tonic-gate allocdby_walk_cache(uintptr_t addr, const umem_cache_t *c, allocdby_walk_t *abw)
17010Sstevel@tonic-gate {
17020Sstevel@tonic-gate 	if (mdb_pwalk(abw->abw_walk, (mdb_walk_cb_t)allocdby_walk_bufctl,
17030Sstevel@tonic-gate 	    abw, addr) == -1) {
17040Sstevel@tonic-gate 		mdb_warn("couldn't walk bufctl for cache %p", addr);
17050Sstevel@tonic-gate 		return (WALK_DONE);
17060Sstevel@tonic-gate 	}
17070Sstevel@tonic-gate 
17080Sstevel@tonic-gate 	return (WALK_NEXT);
17090Sstevel@tonic-gate }
17100Sstevel@tonic-gate 
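/*
 * qsort comparator: order allocdby_bufctl_t entries by descending
 * timestamp, so that the most recent transactions are reported first.
 */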
17110Sstevel@tonic-gate static int
17120Sstevel@tonic-gate allocdby_cmp(const allocdby_bufctl_t *lhs, const allocdby_bufctl_t *rhs)
17130Sstevel@tonic-gate {
17140Sstevel@tonic-gate 	if (lhs->abb_ts < rhs->abb_ts)
17150Sstevel@tonic-gate 		return (1);
17160Sstevel@tonic-gate 	if (lhs->abb_ts > rhs->abb_ts)
17170Sstevel@tonic-gate 		return (-1);
17180Sstevel@tonic-gate 	return (0);
17190Sstevel@tonic-gate }
17200Sstevel@tonic-gate 
17210Sstevel@tonic-gate static int
17220Sstevel@tonic-gate allocdby_walk_init_common(mdb_walk_state_t *wsp, const char *walk)
17230Sstevel@tonic-gate {
17240Sstevel@tonic-gate 	allocdby_walk_t *abw;
17250Sstevel@tonic-gate 
17260Sstevel@tonic-gate 	if (wsp->walk_addr == NULL) {
17270Sstevel@tonic-gate 		mdb_warn("allocdby walk doesn't support global walks\n");
17280Sstevel@tonic-gate 		return (WALK_ERR);
17290Sstevel@tonic-gate 	}
17300Sstevel@tonic-gate 
17310Sstevel@tonic-gate 	abw = mdb_zalloc(sizeof (allocdby_walk_t), UM_SLEEP);
17320Sstevel@tonic-gate 
17330Sstevel@tonic-gate 	abw->abw_thread = wsp->walk_addr;
17340Sstevel@tonic-gate 	abw->abw_walk = walk;
17350Sstevel@tonic-gate 	abw->abw_size = 128;	/* something reasonable */
17360Sstevel@tonic-gate 	abw->abw_buf =
17370Sstevel@tonic-gate 	    mdb_zalloc(abw->abw_size * sizeof (allocdby_bufctl_t), UM_SLEEP);
17380Sstevel@tonic-gate 
17390Sstevel@tonic-gate 	wsp->walk_data = abw;
17400Sstevel@tonic-gate 
17410Sstevel@tonic-gate 	if (mdb_walk("umem_cache",
17420Sstevel@tonic-gate 	    (mdb_walk_cb_t)allocdby_walk_cache, abw) == -1) {
17430Sstevel@tonic-gate 		mdb_warn("couldn't walk umem_cache");
17440Sstevel@tonic-gate 		allocdby_walk_fini(wsp);
17450Sstevel@tonic-gate 		return (WALK_ERR);
17460Sstevel@tonic-gate 	}
17470Sstevel@tonic-gate 
17480Sstevel@tonic-gate 	qsort(abw->abw_buf, abw->abw_nbufs, sizeof (allocdby_bufctl_t),
17490Sstevel@tonic-gate 	    (int(*)(const void *, const void *))allocdby_cmp);
17500Sstevel@tonic-gate 
17510Sstevel@tonic-gate 	return (WALK_NEXT);
17520Sstevel@tonic-gate }
17530Sstevel@tonic-gate 
17540Sstevel@tonic-gate int
17550Sstevel@tonic-gate allocdby_walk_init(mdb_walk_state_t *wsp)
17560Sstevel@tonic-gate {
17570Sstevel@tonic-gate 	return (allocdby_walk_init_common(wsp, "bufctl"));
17580Sstevel@tonic-gate }
17590Sstevel@tonic-gate 
17600Sstevel@tonic-gate int
17610Sstevel@tonic-gate freedby_walk_init(mdb_walk_state_t *wsp)
17620Sstevel@tonic-gate {
17630Sstevel@tonic-gate 	return (allocdby_walk_init_common(wsp, "freectl"));
17640Sstevel@tonic-gate }
17650Sstevel@tonic-gate 
17660Sstevel@tonic-gate int
17670Sstevel@tonic-gate allocdby_walk_step(mdb_walk_state_t *wsp)
17680Sstevel@tonic-gate {
17690Sstevel@tonic-gate 	allocdby_walk_t *abw = wsp->walk_data;
17700Sstevel@tonic-gate 	uintptr_t addr;
17710Sstevel@tonic-gate 	umem_bufctl_audit_t *bcp;
17720Sstevel@tonic-gate 	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);
17730Sstevel@tonic-gate 
17740Sstevel@tonic-gate 	if (abw->abw_ndx == abw->abw_nbufs)
17750Sstevel@tonic-gate 		return (WALK_DONE);
17760Sstevel@tonic-gate 
17770Sstevel@tonic-gate 	addr = abw->abw_buf[abw->abw_ndx++].abb_addr;
17780Sstevel@tonic-gate 
17790Sstevel@tonic-gate 	if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
17800Sstevel@tonic-gate 		mdb_warn("couldn't read bufctl at %p", addr);
17810Sstevel@tonic-gate 		return (WALK_DONE);
17820Sstevel@tonic-gate 	}
17830Sstevel@tonic-gate 
17840Sstevel@tonic-gate 	return (wsp->walk_callback(addr, bcp, wsp->walk_cbdata));
17850Sstevel@tonic-gate }
17860Sstevel@tonic-gate 
17870Sstevel@tonic-gate void
17880Sstevel@tonic-gate allocdby_walk_fini(mdb_walk_state_t *wsp)
17890Sstevel@tonic-gate {
17900Sstevel@tonic-gate 	allocdby_walk_t *abw = wsp->walk_data;
17910Sstevel@tonic-gate 
17920Sstevel@tonic-gate 	mdb_free(abw->abw_buf, sizeof (allocdby_bufctl_t) * abw->abw_size);
17930Sstevel@tonic-gate 	mdb_free(abw, sizeof (allocdby_walk_t));
17940Sstevel@tonic-gate }
17950Sstevel@tonic-gate 
17960Sstevel@tonic-gate /*ARGSUSED*/
17970Sstevel@tonic-gate int
17980Sstevel@tonic-gate allocdby_walk(uintptr_t addr, const umem_bufctl_audit_t *bcp, void *ignored)
17990Sstevel@tonic-gate {
18000Sstevel@tonic-gate 	char c[MDB_SYM_NAMLEN];
18010Sstevel@tonic-gate 	GElf_Sym sym;
18020Sstevel@tonic-gate 	int i;
18030Sstevel@tonic-gate 
18040Sstevel@tonic-gate 	mdb_printf("%0?p %12llx ", addr, bcp->bc_timestamp);
18050Sstevel@tonic-gate 	for (i = 0; i < bcp->bc_depth; i++) {
18060Sstevel@tonic-gate 		if (mdb_lookup_by_addr(bcp->bc_stack[i],
18070Sstevel@tonic-gate 		    MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1)
18080Sstevel@tonic-gate 			continue;
18090Sstevel@tonic-gate 		if (is_umem_sym(c, "umem_"))
18100Sstevel@tonic-gate 			continue;
18110Sstevel@tonic-gate 		mdb_printf("%s+0x%lx",
18120Sstevel@tonic-gate 		    c, bcp->bc_stack[i] - (uintptr_t)sym.st_value);
18130Sstevel@tonic-gate 		break;
18140Sstevel@tonic-gate 	}
18150Sstevel@tonic-gate 	mdb_printf("\n");
18160Sstevel@tonic-gate 
18170Sstevel@tonic-gate 	return (WALK_NEXT);
18180Sstevel@tonic-gate }
18190Sstevel@tonic-gate 
18200Sstevel@tonic-gate static int
18210Sstevel@tonic-gate allocdby_common(uintptr_t addr, uint_t flags, const char *w)
18220Sstevel@tonic-gate {
18230Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
18240Sstevel@tonic-gate 		return (DCMD_USAGE);
18250Sstevel@tonic-gate 
18260Sstevel@tonic-gate 	mdb_printf("%-?s %12s %s\n", "BUFCTL", "TIMESTAMP", "CALLER");
18270Sstevel@tonic-gate 
18280Sstevel@tonic-gate 	if (mdb_pwalk(w, (mdb_walk_cb_t)allocdby_walk, NULL, addr) == -1) {
18290Sstevel@tonic-gate 		mdb_warn("can't walk '%s' for %p", w, addr);
18300Sstevel@tonic-gate 		return (DCMD_ERR);
18310Sstevel@tonic-gate 	}
18320Sstevel@tonic-gate 
18330Sstevel@tonic-gate 	return (DCMD_OK);
18340Sstevel@tonic-gate }
18350Sstevel@tonic-gate 
18360Sstevel@tonic-gate /*ARGSUSED*/
18370Sstevel@tonic-gate int
18380Sstevel@tonic-gate allocdby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
18390Sstevel@tonic-gate {
18400Sstevel@tonic-gate 	return (allocdby_common(addr, flags, "allocdby"));
18410Sstevel@tonic-gate }
18420Sstevel@tonic-gate 
18430Sstevel@tonic-gate /*ARGSUSED*/
18440Sstevel@tonic-gate int
18450Sstevel@tonic-gate freedby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
18460Sstevel@tonic-gate {
18470Sstevel@tonic-gate 	return (allocdby_common(addr, flags, "freedby"));
18480Sstevel@tonic-gate }
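
/*
 * Example usage (illustrative):
 *
 *	> <thread-addr>::allocdby
 *
 * prints one line per bufctl allocated by that thread, newest first,
 * with the first non-umem frame of its stack trace.
 */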
18490Sstevel@tonic-gate 
18500Sstevel@tonic-gate typedef struct whatis {
18510Sstevel@tonic-gate 	uintptr_t w_addr;
18520Sstevel@tonic-gate 	const umem_cache_t *w_cache;
18530Sstevel@tonic-gate 	const vmem_t *w_vmem;
18540Sstevel@tonic-gate 	int w_found;
18550Sstevel@tonic-gate 	uint_t w_verbose;
18560Sstevel@tonic-gate 	uint_t w_freemem;
18570Sstevel@tonic-gate 	uint_t w_all;
18580Sstevel@tonic-gate 	uint_t w_bufctl;
18590Sstevel@tonic-gate } whatis_t;
18600Sstevel@tonic-gate 
18610Sstevel@tonic-gate static void
18620Sstevel@tonic-gate whatis_print_umem(uintptr_t addr, uintptr_t baddr, whatis_t *w)
18630Sstevel@tonic-gate {
18640Sstevel@tonic-gate 	/* LINTED pointer cast may result in improper alignment */
18650Sstevel@tonic-gate 	uintptr_t btaddr = (uintptr_t)UMEM_BUFTAG(w->w_cache, addr);
18660Sstevel@tonic-gate 	intptr_t stat;
18670Sstevel@tonic-gate 
18680Sstevel@tonic-gate 	if (w->w_cache->cache_flags & UMF_REDZONE) {
18690Sstevel@tonic-gate 		umem_buftag_t bt;
18700Sstevel@tonic-gate 
18710Sstevel@tonic-gate 		if (mdb_vread(&bt, sizeof (bt), btaddr) == -1)
18720Sstevel@tonic-gate 			goto done;
18730Sstevel@tonic-gate 
18740Sstevel@tonic-gate 		stat = (intptr_t)bt.bt_bufctl ^ bt.bt_bxstat;
18750Sstevel@tonic-gate 
18760Sstevel@tonic-gate 		if (stat != UMEM_BUFTAG_ALLOC && stat != UMEM_BUFTAG_FREE)
18770Sstevel@tonic-gate 			goto done;
18780Sstevel@tonic-gate 
18790Sstevel@tonic-gate 		/*
18800Sstevel@tonic-gate 		 * provide the bufctl ptr if it has useful information
18810Sstevel@tonic-gate 		 */
18820Sstevel@tonic-gate 		if (baddr == 0 && (w->w_cache->cache_flags & UMF_AUDIT))
18830Sstevel@tonic-gate 			baddr = (uintptr_t)bt.bt_bufctl;
18840Sstevel@tonic-gate 	}
18850Sstevel@tonic-gate 
18860Sstevel@tonic-gate done:
18870Sstevel@tonic-gate 	if (baddr == 0)
18880Sstevel@tonic-gate 		mdb_printf("%p is %p+%p, %s from %s\n",
18890Sstevel@tonic-gate 		    w->w_addr, addr, w->w_addr - addr,
18900Sstevel@tonic-gate 		    w->w_freemem == FALSE ? "allocated" : "freed",
18910Sstevel@tonic-gate 		    w->w_cache->cache_name);
18920Sstevel@tonic-gate 	else
18930Sstevel@tonic-gate 		mdb_printf("%p is %p+%p, bufctl %p %s from %s\n",
18940Sstevel@tonic-gate 		    w->w_addr, addr, w->w_addr - addr, baddr,
18950Sstevel@tonic-gate 		    w->w_freemem == FALSE ? "allocated" : "freed",
18960Sstevel@tonic-gate 		    w->w_cache->cache_name);
18970Sstevel@tonic-gate }
18980Sstevel@tonic-gate 
18990Sstevel@tonic-gate /*ARGSUSED*/
19000Sstevel@tonic-gate static int
19010Sstevel@tonic-gate whatis_walk_umem(uintptr_t addr, void *ignored, whatis_t *w)
19020Sstevel@tonic-gate {
19030Sstevel@tonic-gate 	if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize)
19040Sstevel@tonic-gate 		return (WALK_NEXT);
19050Sstevel@tonic-gate 
19060Sstevel@tonic-gate 	whatis_print_umem(addr, 0, w);
19070Sstevel@tonic-gate 	w->w_found++;
19080Sstevel@tonic-gate 	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
19090Sstevel@tonic-gate }
19100Sstevel@tonic-gate 
19110Sstevel@tonic-gate static int
19120Sstevel@tonic-gate whatis_walk_seg(uintptr_t addr, const vmem_seg_t *vs, whatis_t *w)
19130Sstevel@tonic-gate {
19140Sstevel@tonic-gate 	if (w->w_addr < vs->vs_start || w->w_addr >= vs->vs_end)
19150Sstevel@tonic-gate 		return (WALK_NEXT);
19160Sstevel@tonic-gate 
19170Sstevel@tonic-gate 	mdb_printf("%p is %p+%p ", w->w_addr,
19180Sstevel@tonic-gate 	    vs->vs_start, w->w_addr - vs->vs_start);
19190Sstevel@tonic-gate 
19200Sstevel@tonic-gate 	/*
19210Sstevel@tonic-gate 	 * Always provide the vmem_seg pointer if it has a stack trace.
19220Sstevel@tonic-gate 	 */
19230Sstevel@tonic-gate 	if (w->w_bufctl == TRUE ||
19240Sstevel@tonic-gate 	    (vs->vs_type == VMEM_ALLOC && vs->vs_depth != 0)) {
19250Sstevel@tonic-gate 		mdb_printf("(vmem_seg %p) ", addr);
19260Sstevel@tonic-gate 	}
19270Sstevel@tonic-gate 
19280Sstevel@tonic-gate 	mdb_printf("%sfrom %s vmem arena\n", w->w_freemem == TRUE ?
19290Sstevel@tonic-gate 	    "freed " : "", w->w_vmem->vm_name);
19300Sstevel@tonic-gate 
19310Sstevel@tonic-gate 	w->w_found++;
19320Sstevel@tonic-gate 	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
19330Sstevel@tonic-gate }
19340Sstevel@tonic-gate 
19350Sstevel@tonic-gate static int
19360Sstevel@tonic-gate whatis_walk_vmem(uintptr_t addr, const vmem_t *vmem, whatis_t *w)
19370Sstevel@tonic-gate {
19380Sstevel@tonic-gate 	const char *nm = vmem->vm_name;
19390Sstevel@tonic-gate 	w->w_vmem = vmem;
19400Sstevel@tonic-gate 	w->w_freemem = FALSE;
19410Sstevel@tonic-gate 
19420Sstevel@tonic-gate 	if (w->w_verbose)
19430Sstevel@tonic-gate 		mdb_printf("Searching vmem arena %s...\n", nm);
19440Sstevel@tonic-gate 
19450Sstevel@tonic-gate 	if (mdb_pwalk("vmem_alloc",
19460Sstevel@tonic-gate 	    (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) {
19470Sstevel@tonic-gate 		mdb_warn("can't walk vmem seg for %p", addr);
19480Sstevel@tonic-gate 		return (WALK_NEXT);
19490Sstevel@tonic-gate 	}
19500Sstevel@tonic-gate 
19510Sstevel@tonic-gate 	if (w->w_found && w->w_all == FALSE)
19520Sstevel@tonic-gate 		return (WALK_DONE);
19530Sstevel@tonic-gate 
19540Sstevel@tonic-gate 	if (w->w_verbose)
19550Sstevel@tonic-gate 		mdb_printf("Searching vmem arena %s for free virtual...\n", nm);
19560Sstevel@tonic-gate 
19570Sstevel@tonic-gate 	w->w_freemem = TRUE;
19580Sstevel@tonic-gate 
19590Sstevel@tonic-gate 	if (mdb_pwalk("vmem_free",
19600Sstevel@tonic-gate 	    (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) {
19610Sstevel@tonic-gate 		mdb_warn("can't walk vmem seg for %p", addr);
19620Sstevel@tonic-gate 		return (WALK_NEXT);
19630Sstevel@tonic-gate 	}
19640Sstevel@tonic-gate 
19650Sstevel@tonic-gate 	return (w->w_found && w->w_all == FALSE ? WALK_DONE : WALK_NEXT);
19660Sstevel@tonic-gate }
19670Sstevel@tonic-gate 
19680Sstevel@tonic-gate /*ARGSUSED*/
19690Sstevel@tonic-gate static int
19700Sstevel@tonic-gate whatis_walk_bufctl(uintptr_t baddr, const umem_bufctl_t *bcp, whatis_t *w)
19710Sstevel@tonic-gate {
19720Sstevel@tonic-gate 	uintptr_t addr;
19730Sstevel@tonic-gate 
19740Sstevel@tonic-gate 	if (bcp == NULL)
19750Sstevel@tonic-gate 		return (WALK_NEXT);
19760Sstevel@tonic-gate 
19770Sstevel@tonic-gate 	addr = (uintptr_t)bcp->bc_addr;
19780Sstevel@tonic-gate 
19790Sstevel@tonic-gate 	if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize)
19800Sstevel@tonic-gate 		return (WALK_NEXT);
19810Sstevel@tonic-gate 
19820Sstevel@tonic-gate 	whatis_print_umem(addr, baddr, w);
19830Sstevel@tonic-gate 	w->w_found++;
19840Sstevel@tonic-gate 	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
19850Sstevel@tonic-gate }
19860Sstevel@tonic-gate 
19870Sstevel@tonic-gate static int
19880Sstevel@tonic-gate whatis_walk_cache(uintptr_t addr, const umem_cache_t *c, whatis_t *w)
19890Sstevel@tonic-gate {
19900Sstevel@tonic-gate 	char *walk, *freewalk;
19910Sstevel@tonic-gate 	mdb_walk_cb_t func;
19920Sstevel@tonic-gate 
19930Sstevel@tonic-gate 	if (w->w_bufctl == FALSE) {
19940Sstevel@tonic-gate 		walk = "umem";
19950Sstevel@tonic-gate 		freewalk = "freemem";
19960Sstevel@tonic-gate 		func = (mdb_walk_cb_t)whatis_walk_umem;
19970Sstevel@tonic-gate 	} else {
19980Sstevel@tonic-gate 		walk = "bufctl";
19990Sstevel@tonic-gate 		freewalk = "freectl";
20000Sstevel@tonic-gate 		func = (mdb_walk_cb_t)whatis_walk_bufctl;
20010Sstevel@tonic-gate 	}
20020Sstevel@tonic-gate 
20030Sstevel@tonic-gate 	if (w->w_verbose)
20040Sstevel@tonic-gate 		mdb_printf("Searching %s...\n", c->cache_name);
20050Sstevel@tonic-gate 
20060Sstevel@tonic-gate 	w->w_cache = c;
20070Sstevel@tonic-gate 	w->w_freemem = FALSE;
20080Sstevel@tonic-gate 
20090Sstevel@tonic-gate 	if (mdb_pwalk(walk, func, w, addr) == -1) {
20100Sstevel@tonic-gate 		mdb_warn("can't find %s walker", walk);
20110Sstevel@tonic-gate 		return (WALK_DONE);
20120Sstevel@tonic-gate 	}
20130Sstevel@tonic-gate 
20140Sstevel@tonic-gate 	if (w->w_found && w->w_all == FALSE)
20150Sstevel@tonic-gate 		return (WALK_DONE);
20160Sstevel@tonic-gate 
20170Sstevel@tonic-gate 	/*
20180Sstevel@tonic-gate 	 * We have searched for allocated memory; now search for freed memory.
20190Sstevel@tonic-gate 	 */
20200Sstevel@tonic-gate 	if (w->w_verbose)
20210Sstevel@tonic-gate 		mdb_printf("Searching %s for free memory...\n", c->cache_name);
20220Sstevel@tonic-gate 
20230Sstevel@tonic-gate 	w->w_freemem = TRUE;
20240Sstevel@tonic-gate 
20250Sstevel@tonic-gate 	if (mdb_pwalk(freewalk, func, w, addr) == -1) {
20260Sstevel@tonic-gate 		mdb_warn("can't find %s walker", freewalk);
20270Sstevel@tonic-gate 		return (WALK_DONE);
20280Sstevel@tonic-gate 	}
20290Sstevel@tonic-gate 
20300Sstevel@tonic-gate 	return (w->w_found && w->w_all == FALSE ? WALK_DONE : WALK_NEXT);
20310Sstevel@tonic-gate }
20320Sstevel@tonic-gate 
20330Sstevel@tonic-gate static int
20340Sstevel@tonic-gate whatis_walk_touch(uintptr_t addr, const umem_cache_t *c, whatis_t *w)
20350Sstevel@tonic-gate {
20360Sstevel@tonic-gate 	if (c->cache_cflags & UMC_NOTOUCH)
20370Sstevel@tonic-gate 		return (WALK_NEXT);
20380Sstevel@tonic-gate 
20390Sstevel@tonic-gate 	return (whatis_walk_cache(addr, c, w));
20400Sstevel@tonic-gate }
20410Sstevel@tonic-gate 
20420Sstevel@tonic-gate static int
20430Sstevel@tonic-gate whatis_walk_notouch(uintptr_t addr, const umem_cache_t *c, whatis_t *w)
20440Sstevel@tonic-gate {
20450Sstevel@tonic-gate 	if (!(c->cache_cflags & UMC_NOTOUCH))
20460Sstevel@tonic-gate 		return (WALK_NEXT);
20470Sstevel@tonic-gate 
20480Sstevel@tonic-gate 	return (whatis_walk_cache(addr, c, w));
20490Sstevel@tonic-gate }
20500Sstevel@tonic-gate 
20510Sstevel@tonic-gate int
20520Sstevel@tonic-gate whatis(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
20530Sstevel@tonic-gate {
20540Sstevel@tonic-gate 	whatis_t w;
20550Sstevel@tonic-gate 
20560Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
20570Sstevel@tonic-gate 		return (DCMD_USAGE);
20580Sstevel@tonic-gate 
20590Sstevel@tonic-gate 	w.w_verbose = FALSE;
20600Sstevel@tonic-gate 	w.w_bufctl = FALSE;
20610Sstevel@tonic-gate 	w.w_all = FALSE;
20620Sstevel@tonic-gate 
20630Sstevel@tonic-gate 	if (mdb_getopts(argc, argv,
20640Sstevel@tonic-gate 	    'v', MDB_OPT_SETBITS, TRUE, &w.w_verbose,
20650Sstevel@tonic-gate 	    'a', MDB_OPT_SETBITS, TRUE, &w.w_all,
20660Sstevel@tonic-gate 	    'b', MDB_OPT_SETBITS, TRUE, &w.w_bufctl, NULL) != argc)
20670Sstevel@tonic-gate 		return (DCMD_USAGE);
20680Sstevel@tonic-gate 
20690Sstevel@tonic-gate 	w.w_addr = addr;
20700Sstevel@tonic-gate 	w.w_found = 0;
20710Sstevel@tonic-gate 
20720Sstevel@tonic-gate 	/*
20730Sstevel@tonic-gate 	 * Mappings and threads should eventually be added here.
20740Sstevel@tonic-gate 	 */
20750Sstevel@tonic-gate 	if (mdb_walk("umem_cache",
20760Sstevel@tonic-gate 	    (mdb_walk_cb_t)whatis_walk_touch, &w) == -1) {
20770Sstevel@tonic-gate 		mdb_warn("couldn't find umem_cache walker");
20780Sstevel@tonic-gate 		return (DCMD_ERR);
20790Sstevel@tonic-gate 	}
20800Sstevel@tonic-gate 
20810Sstevel@tonic-gate 	if (w.w_found && w.w_all == FALSE)
20820Sstevel@tonic-gate 		return (DCMD_OK);
20830Sstevel@tonic-gate 
20840Sstevel@tonic-gate 	if (mdb_walk("umem_cache",
20850Sstevel@tonic-gate 	    (mdb_walk_cb_t)whatis_walk_notouch, &w) == -1) {
20860Sstevel@tonic-gate 		mdb_warn("couldn't find umem_cache walker");
20870Sstevel@tonic-gate 		return (DCMD_ERR);
20880Sstevel@tonic-gate 	}
20890Sstevel@tonic-gate 
20900Sstevel@tonic-gate 	if (w.w_found && w.w_all == FALSE)
20910Sstevel@tonic-gate 		return (DCMD_OK);
20920Sstevel@tonic-gate 
20930Sstevel@tonic-gate 	if (mdb_walk("vmem_postfix",
20940Sstevel@tonic-gate 	    (mdb_walk_cb_t)whatis_walk_vmem, &w) == -1) {
20950Sstevel@tonic-gate 		mdb_warn("couldn't find vmem_postfix walker");
20960Sstevel@tonic-gate 		return (DCMD_ERR);
20970Sstevel@tonic-gate 	}
20980Sstevel@tonic-gate 
20990Sstevel@tonic-gate 	if (w.w_found == 0)
21000Sstevel@tonic-gate 		mdb_printf("%p is unknown\n", addr);
21010Sstevel@tonic-gate 
21020Sstevel@tonic-gate 	return (DCMD_OK);
21030Sstevel@tonic-gate }
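
/*
 * Example usage (illustrative):
 *
 *	> addr::whatis		- report the first match for addr
 *	> addr::whatis -va	- verbose; report all matches
 *	> addr::whatis -b	- search by bufctl, reporting bufctl addresses
 */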
21040Sstevel@tonic-gate 
21050Sstevel@tonic-gate typedef struct umem_log_cpu {
21060Sstevel@tonic-gate 	uintptr_t umc_low;
21070Sstevel@tonic-gate 	uintptr_t umc_high;
21080Sstevel@tonic-gate } umem_log_cpu_t;
21090Sstevel@tonic-gate 
21100Sstevel@tonic-gate int
21110Sstevel@tonic-gate umem_log_walk(uintptr_t addr, const umem_bufctl_audit_t *b, umem_log_cpu_t *umc)
21120Sstevel@tonic-gate {
21130Sstevel@tonic-gate 	int i;
21140Sstevel@tonic-gate 
21150Sstevel@tonic-gate 	for (i = 0; i < umem_max_ncpus; i++) {
21160Sstevel@tonic-gate 		if (addr >= umc[i].umc_low && addr < umc[i].umc_high)
21170Sstevel@tonic-gate 			break;
21180Sstevel@tonic-gate 	}
21190Sstevel@tonic-gate 
21200Sstevel@tonic-gate 	if (i == umem_max_ncpus)
21210Sstevel@tonic-gate 		mdb_printf("   ");
21220Sstevel@tonic-gate 	else
21230Sstevel@tonic-gate 		mdb_printf("%3d", i);
21240Sstevel@tonic-gate 
21250Sstevel@tonic-gate 	mdb_printf(" %0?p %0?p %16llx %0?p\n", addr, b->bc_addr,
21260Sstevel@tonic-gate 	    b->bc_timestamp, b->bc_thread);
21270Sstevel@tonic-gate 
21280Sstevel@tonic-gate 	return (WALK_NEXT);
21290Sstevel@tonic-gate }
21300Sstevel@tonic-gate 
21310Sstevel@tonic-gate /*ARGSUSED*/
21320Sstevel@tonic-gate int
21330Sstevel@tonic-gate umem_log(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
21340Sstevel@tonic-gate {
21350Sstevel@tonic-gate 	umem_log_header_t lh;
21360Sstevel@tonic-gate 	umem_cpu_log_header_t clh;
21370Sstevel@tonic-gate 	uintptr_t lhp, clhp;
21380Sstevel@tonic-gate 	umem_log_cpu_t *umc;
21390Sstevel@tonic-gate 	int i;
21400Sstevel@tonic-gate 
21410Sstevel@tonic-gate 	if (umem_readvar(&lhp, "umem_transaction_log") == -1) {
21420Sstevel@tonic-gate 		mdb_warn("failed to read 'umem_transaction_log'");
21430Sstevel@tonic-gate 		return (DCMD_ERR);
21440Sstevel@tonic-gate 	}
21450Sstevel@tonic-gate 
21460Sstevel@tonic-gate 	if (lhp == NULL) {
21470Sstevel@tonic-gate 		mdb_warn("no umem transaction log\n");
21480Sstevel@tonic-gate 		return (DCMD_ERR);
21490Sstevel@tonic-gate 	}
21500Sstevel@tonic-gate 
21510Sstevel@tonic-gate 	if (mdb_vread(&lh, sizeof (umem_log_header_t), lhp) == -1) {
21520Sstevel@tonic-gate 		mdb_warn("failed to read log header at %p", lhp);
21530Sstevel@tonic-gate 		return (DCMD_ERR);
21540Sstevel@tonic-gate 	}
21550Sstevel@tonic-gate 
21560Sstevel@tonic-gate 	clhp = lhp + ((uintptr_t)&lh.lh_cpu[0] - (uintptr_t)&lh);
21570Sstevel@tonic-gate 
21580Sstevel@tonic-gate 	umc = mdb_zalloc(sizeof (umem_log_cpu_t) * umem_max_ncpus,
21590Sstevel@tonic-gate 	    UM_SLEEP | UM_GC);
21600Sstevel@tonic-gate 
21610Sstevel@tonic-gate 	for (i = 0; i < umem_max_ncpus; i++) {
21620Sstevel@tonic-gate 		if (mdb_vread(&clh, sizeof (clh), clhp) == -1) {
21630Sstevel@tonic-gate 			mdb_warn("cannot read cpu %d's log header at %p",
21640Sstevel@tonic-gate 			    i, clhp);
21650Sstevel@tonic-gate 			return (DCMD_ERR);
21660Sstevel@tonic-gate 		}
21670Sstevel@tonic-gate 
21680Sstevel@tonic-gate 		umc[i].umc_low = clh.clh_chunk * lh.lh_chunksize +
21690Sstevel@tonic-gate 		    (uintptr_t)lh.lh_base;
21700Sstevel@tonic-gate 		umc[i].umc_high = (uintptr_t)clh.clh_current;
21710Sstevel@tonic-gate 
21720Sstevel@tonic-gate 		clhp += sizeof (umem_cpu_log_header_t);
21730Sstevel@tonic-gate 	}
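
	/*
	 * Each CPU owns one chunk of the log at a time: entries in the
	 * half-open range [umc_low, umc_high) belong to that CPU's
	 * current chunk.  umem_log_walk() uses these ranges to label
	 * each entry with a CPU id, or blanks if no range matches.
	 */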
21740Sstevel@tonic-gate 
21750Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags)) {
21760Sstevel@tonic-gate 		mdb_printf("%3s %-?s %-?s %16s %-?s\n", "CPU", "ADDR",
21770Sstevel@tonic-gate 		    "BUFADDR", "TIMESTAMP", "THREAD");
21780Sstevel@tonic-gate 	}
21790Sstevel@tonic-gate 
21800Sstevel@tonic-gate 	/*
21810Sstevel@tonic-gate 	 * If we have been passed an address, we'll just print out that
21820Sstevel@tonic-gate 	 * log entry.
21830Sstevel@tonic-gate 	 */
21840Sstevel@tonic-gate 	if (flags & DCMD_ADDRSPEC) {
21850Sstevel@tonic-gate 		umem_bufctl_audit_t *bp;
21860Sstevel@tonic-gate 		UMEM_LOCAL_BUFCTL_AUDIT(&bp);
21870Sstevel@tonic-gate 
21880Sstevel@tonic-gate 		if (mdb_vread(bp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
21890Sstevel@tonic-gate 			mdb_warn("failed to read bufctl at %p", addr);
21900Sstevel@tonic-gate 			return (DCMD_ERR);
21910Sstevel@tonic-gate 		}
21920Sstevel@tonic-gate 
21930Sstevel@tonic-gate 		(void) umem_log_walk(addr, bp, umc);
21940Sstevel@tonic-gate 
21950Sstevel@tonic-gate 		return (DCMD_OK);
21960Sstevel@tonic-gate 	}
21970Sstevel@tonic-gate 
21980Sstevel@tonic-gate 	if (mdb_walk("umem_log", (mdb_walk_cb_t)umem_log_walk, umc) == -1) {
21990Sstevel@tonic-gate 		mdb_warn("can't find umem log walker");
22000Sstevel@tonic-gate 		return (DCMD_ERR);
22010Sstevel@tonic-gate 	}
22020Sstevel@tonic-gate 
22030Sstevel@tonic-gate 	return (DCMD_OK);
22040Sstevel@tonic-gate }
22050Sstevel@tonic-gate 
22060Sstevel@tonic-gate typedef struct bufctl_history_cb {
22070Sstevel@tonic-gate 	int		bhc_flags;
22080Sstevel@tonic-gate 	int		bhc_argc;
22090Sstevel@tonic-gate 	const mdb_arg_t	*bhc_argv;
22100Sstevel@tonic-gate 	int		bhc_ret;
22110Sstevel@tonic-gate } bufctl_history_cb_t;
22120Sstevel@tonic-gate 
22130Sstevel@tonic-gate /*ARGSUSED*/
22140Sstevel@tonic-gate static int
22150Sstevel@tonic-gate bufctl_history_callback(uintptr_t addr, const void *ign, void *arg)
22160Sstevel@tonic-gate {
22170Sstevel@tonic-gate 	bufctl_history_cb_t *bhc = arg;
22180Sstevel@tonic-gate 
22190Sstevel@tonic-gate 	bhc->bhc_ret =
22200Sstevel@tonic-gate 	    bufctl(addr, bhc->bhc_flags, bhc->bhc_argc, bhc->bhc_argv);
22210Sstevel@tonic-gate 
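	/*
	 * Clear DCMD_LOOPFIRST after the first history entry so that
	 * ::bufctl prints its header only once per history, not once
	 * per entry.
	 */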
22220Sstevel@tonic-gate 	bhc->bhc_flags &= ~DCMD_LOOPFIRST;
22230Sstevel@tonic-gate 
22240Sstevel@tonic-gate 	return ((bhc->bhc_ret == DCMD_OK)? WALK_NEXT : WALK_DONE);
22250Sstevel@tonic-gate }
22260Sstevel@tonic-gate 
22270Sstevel@tonic-gate void
22280Sstevel@tonic-gate bufctl_help(void)
22290Sstevel@tonic-gate {
22300Sstevel@tonic-gate 	mdb_printf("%s\n",
22310Sstevel@tonic-gate "Display the contents of umem_bufctl_audit_ts, with optional filtering.\n");
22320Sstevel@tonic-gate 	mdb_dec_indent(2);
22330Sstevel@tonic-gate 	mdb_printf("%<b>OPTIONS%</b>\n");
22340Sstevel@tonic-gate 	mdb_inc_indent(2);
22350Sstevel@tonic-gate 	mdb_printf("%s",
22360Sstevel@tonic-gate "  -v    Display the full content of the bufctl, including its stack trace\n"
22370Sstevel@tonic-gate "  -h    Retrieve the bufctl's transaction history, if available\n"
22380Sstevel@tonic-gate "  -a addr\n"
22390Sstevel@tonic-gate "        filter out bufctls not involving the buffer at addr\n"
22400Sstevel@tonic-gate "  -c caller\n"
22410Sstevel@tonic-gate "        filter out bufctls without the function/PC in their stack trace\n"
22420Sstevel@tonic-gate "  -e earliest\n"
22430Sstevel@tonic-gate "        filter out bufctls timestamped before earliest\n"
22440Sstevel@tonic-gate "  -l latest\n"
22450Sstevel@tonic-gate "        filter out bufctls timestamped after latest\n"
22460Sstevel@tonic-gate "  -t thread\n"
22470Sstevel@tonic-gate "        filter out bufctls not involving thread\n");
22480Sstevel@tonic-gate }
22490Sstevel@tonic-gate 
22500Sstevel@tonic-gate int
22510Sstevel@tonic-gate bufctl(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
22520Sstevel@tonic-gate {
22530Sstevel@tonic-gate 	uint_t verbose = FALSE;
22540Sstevel@tonic-gate 	uint_t history = FALSE;
22550Sstevel@tonic-gate 	uint_t in_history = FALSE;
22560Sstevel@tonic-gate 	uintptr_t caller = NULL, thread = NULL;
22570Sstevel@tonic-gate 	uintptr_t laddr, haddr, baddr = NULL;
22580Sstevel@tonic-gate 	hrtime_t earliest = 0, latest = 0;
22590Sstevel@tonic-gate 	int i, depth;
22600Sstevel@tonic-gate 	char c[MDB_SYM_NAMLEN];
22610Sstevel@tonic-gate 	GElf_Sym sym;
22620Sstevel@tonic-gate 	umem_bufctl_audit_t *bcp;
22630Sstevel@tonic-gate 	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);
22640Sstevel@tonic-gate 
22650Sstevel@tonic-gate 	if (mdb_getopts(argc, argv,
22660Sstevel@tonic-gate 	    'v', MDB_OPT_SETBITS, TRUE, &verbose,
22670Sstevel@tonic-gate 	    'h', MDB_OPT_SETBITS, TRUE, &history,
22680Sstevel@tonic-gate 	    'H', MDB_OPT_SETBITS, TRUE, &in_history,		/* internal */
22690Sstevel@tonic-gate 	    'c', MDB_OPT_UINTPTR, &caller,
22700Sstevel@tonic-gate 	    't', MDB_OPT_UINTPTR, &thread,
22710Sstevel@tonic-gate 	    'e', MDB_OPT_UINT64, &earliest,
22720Sstevel@tonic-gate 	    'l', MDB_OPT_UINT64, &latest,
22730Sstevel@tonic-gate 	    'a', MDB_OPT_UINTPTR, &baddr, NULL) != argc)
22740Sstevel@tonic-gate 		return (DCMD_USAGE);
22750Sstevel@tonic-gate 
22760Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
22770Sstevel@tonic-gate 		return (DCMD_USAGE);
22780Sstevel@tonic-gate 
22790Sstevel@tonic-gate 	if (in_history && !history)
22800Sstevel@tonic-gate 		return (DCMD_USAGE);
22810Sstevel@tonic-gate 
22820Sstevel@tonic-gate 	if (history && !in_history) {
22830Sstevel@tonic-gate 		mdb_arg_t *nargv = mdb_zalloc(sizeof (*nargv) * (argc + 1),
22840Sstevel@tonic-gate 		    UM_SLEEP | UM_GC);
22850Sstevel@tonic-gate 		bufctl_history_cb_t bhc;
22860Sstevel@tonic-gate 
22870Sstevel@tonic-gate 		nargv[0].a_type = MDB_TYPE_STRING;
22880Sstevel@tonic-gate 		nargv[0].a_un.a_str = "-H";		/* prevent recursion */
22890Sstevel@tonic-gate 
22900Sstevel@tonic-gate 		for (i = 0; i < argc; i++)
22910Sstevel@tonic-gate 			nargv[i + 1] = argv[i];
22920Sstevel@tonic-gate 
22930Sstevel@tonic-gate 		/*
22940Sstevel@tonic-gate 		 * When in history mode, we treat each element as if it
22950Sstevel@tonic-gate 		 * were in a separate loop, so that the headers group
22960Sstevel@tonic-gate 		 * bufctls with similar histories.
22970Sstevel@tonic-gate 		 */
22980Sstevel@tonic-gate 		bhc.bhc_flags = flags | DCMD_LOOP | DCMD_LOOPFIRST;
22990Sstevel@tonic-gate 		bhc.bhc_argc = argc + 1;
23000Sstevel@tonic-gate 		bhc.bhc_argv = nargv;
23010Sstevel@tonic-gate 		bhc.bhc_ret = DCMD_OK;
23020Sstevel@tonic-gate 
23030Sstevel@tonic-gate 		if (mdb_pwalk("bufctl_history", bufctl_history_callback, &bhc,
23040Sstevel@tonic-gate 		    addr) == -1) {
23050Sstevel@tonic-gate 			mdb_warn("unable to walk bufctl_history");
23060Sstevel@tonic-gate 			return (DCMD_ERR);
23070Sstevel@tonic-gate 		}
23080Sstevel@tonic-gate 
23090Sstevel@tonic-gate 		if (bhc.bhc_ret == DCMD_OK && !(flags & DCMD_PIPE_OUT))
23100Sstevel@tonic-gate 			mdb_printf("\n");
23110Sstevel@tonic-gate 
23120Sstevel@tonic-gate 		return (bhc.bhc_ret);
23130Sstevel@tonic-gate 	}
23140Sstevel@tonic-gate 
23150Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
23160Sstevel@tonic-gate 		if (verbose) {
23170Sstevel@tonic-gate 			mdb_printf("%16s %16s %16s %16s\n"
23180Sstevel@tonic-gate 			    "%<u>%16s %16s %16s %16s%</u>\n",
23190Sstevel@tonic-gate 			    "ADDR", "BUFADDR", "TIMESTAMP", "THREAD",
23200Sstevel@tonic-gate 			    "", "CACHE", "LASTLOG", "CONTENTS");
23210Sstevel@tonic-gate 		} else {
23220Sstevel@tonic-gate 			mdb_printf("%<u>%-?s %-?s %-12s %5s %s%</u>\n",
23230Sstevel@tonic-gate 			    "ADDR", "BUFADDR", "TIMESTAMP", "THRD", "CALLER");
23240Sstevel@tonic-gate 		}
23250Sstevel@tonic-gate 	}
23260Sstevel@tonic-gate 
23270Sstevel@tonic-gate 	if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
23280Sstevel@tonic-gate 		mdb_warn("couldn't read bufctl at %p", addr);
23290Sstevel@tonic-gate 		return (DCMD_ERR);
23300Sstevel@tonic-gate 	}
23310Sstevel@tonic-gate 
23320Sstevel@tonic-gate 	/*
23330Sstevel@tonic-gate 	 * Guard against bogus bc_depth in case the bufctl is corrupt or
23340Sstevel@tonic-gate 	 * the address does not really refer to a bufctl.
23350Sstevel@tonic-gate 	 */
23360Sstevel@tonic-gate 	depth = MIN(bcp->bc_depth, umem_stack_depth);
23370Sstevel@tonic-gate 
23380Sstevel@tonic-gate 	if (caller != NULL) {
23390Sstevel@tonic-gate 		laddr = caller;
23400Sstevel@tonic-gate 		haddr = caller + sizeof (caller);
23410Sstevel@tonic-gate 
23420Sstevel@tonic-gate 		if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, sizeof (c),
23430Sstevel@tonic-gate 		    &sym) != -1 && caller == (uintptr_t)sym.st_value) {
23440Sstevel@tonic-gate 			/*
23450Sstevel@tonic-gate 			 * We were provided an exact symbol value; any
23460Sstevel@tonic-gate 			 * address in the function is valid.
23470Sstevel@tonic-gate 			 */
23480Sstevel@tonic-gate 			laddr = (uintptr_t)sym.st_value;
23490Sstevel@tonic-gate 			haddr = (uintptr_t)sym.st_value + sym.st_size;
23500Sstevel@tonic-gate 		}
23510Sstevel@tonic-gate 
23520Sstevel@tonic-gate 		for (i = 0; i < depth; i++)
23530Sstevel@tonic-gate 			if (bcp->bc_stack[i] >= laddr &&
23540Sstevel@tonic-gate 			    bcp->bc_stack[i] < haddr)
23550Sstevel@tonic-gate 				break;
23560Sstevel@tonic-gate 
23570Sstevel@tonic-gate 		if (i == depth)
23580Sstevel@tonic-gate 			return (DCMD_OK);
23590Sstevel@tonic-gate 	}
23600Sstevel@tonic-gate 
23610Sstevel@tonic-gate 	if (thread != NULL && (uintptr_t)bcp->bc_thread != thread)
23620Sstevel@tonic-gate 		return (DCMD_OK);
23630Sstevel@tonic-gate 
23640Sstevel@tonic-gate 	if (earliest != 0 && bcp->bc_timestamp < earliest)
23650Sstevel@tonic-gate 		return (DCMD_OK);
23660Sstevel@tonic-gate 
23670Sstevel@tonic-gate 	if (latest != 0 && bcp->bc_timestamp > latest)
23680Sstevel@tonic-gate 		return (DCMD_OK);
23690Sstevel@tonic-gate 
23700Sstevel@tonic-gate 	if (baddr != 0 && (uintptr_t)bcp->bc_addr != baddr)
23710Sstevel@tonic-gate 		return (DCMD_OK);
23720Sstevel@tonic-gate 
23730Sstevel@tonic-gate 	if (flags & DCMD_PIPE_OUT) {
23740Sstevel@tonic-gate 		mdb_printf("%#r\n", addr);
23750Sstevel@tonic-gate 		return (DCMD_OK);
23760Sstevel@tonic-gate 	}
23770Sstevel@tonic-gate 
23780Sstevel@tonic-gate 	if (verbose) {
23790Sstevel@tonic-gate 		mdb_printf(
23800Sstevel@tonic-gate 		    "%<b>%16p%</b> %16p %16llx %16d\n"
23810Sstevel@tonic-gate 		    "%16s %16p %16p %16p\n",
23820Sstevel@tonic-gate 		    addr, bcp->bc_addr, bcp->bc_timestamp, bcp->bc_thread,
23830Sstevel@tonic-gate 		    "", bcp->bc_cache, bcp->bc_lastlog, bcp->bc_contents);
23840Sstevel@tonic-gate 
23850Sstevel@tonic-gate 		mdb_inc_indent(17);
23860Sstevel@tonic-gate 		for (i = 0; i < depth; i++)
23870Sstevel@tonic-gate 			mdb_printf("%a\n", bcp->bc_stack[i]);
23880Sstevel@tonic-gate 		mdb_dec_indent(17);
23890Sstevel@tonic-gate 		mdb_printf("\n");
23900Sstevel@tonic-gate 	} else {
23910Sstevel@tonic-gate 		mdb_printf("%0?p %0?p %12llx %5d", addr, bcp->bc_addr,
23920Sstevel@tonic-gate 		    bcp->bc_timestamp, bcp->bc_thread);
23930Sstevel@tonic-gate 
23940Sstevel@tonic-gate 		for (i = 0; i < depth; i++) {
23950Sstevel@tonic-gate 			if (mdb_lookup_by_addr(bcp->bc_stack[i],
23960Sstevel@tonic-gate 			    MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1)
23970Sstevel@tonic-gate 				continue;
23980Sstevel@tonic-gate 			if (is_umem_sym(c, "umem_"))
23990Sstevel@tonic-gate 				continue;
24000Sstevel@tonic-gate 			mdb_printf(" %a\n", bcp->bc_stack[i]);
24010Sstevel@tonic-gate 			break;
24020Sstevel@tonic-gate 		}
24030Sstevel@tonic-gate 
24040Sstevel@tonic-gate 		if (i >= depth)
24050Sstevel@tonic-gate 			mdb_printf("\n");
24060Sstevel@tonic-gate 	}
24070Sstevel@tonic-gate 
24080Sstevel@tonic-gate 	return (DCMD_OK);
24090Sstevel@tonic-gate }
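
/*
 * Example usage (illustrative):
 *
 *	> <bufctl-addr>::bufctl -v
 *	> ::walk bufctl | ::bufctl -t <thread-addr>
 *
 * When invoked in a pipeline (DCMD_PIPE_OUT), matching bufctl addresses
 * are emitted instead of formatted output, so the filters compose.
 */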
24100Sstevel@tonic-gate 
24110Sstevel@tonic-gate /*ARGSUSED*/
24120Sstevel@tonic-gate int
24130Sstevel@tonic-gate bufctl_audit(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
24140Sstevel@tonic-gate {
24150Sstevel@tonic-gate 	mdb_arg_t a;
24160Sstevel@tonic-gate 
24170Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
24180Sstevel@tonic-gate 		return (DCMD_USAGE);
24190Sstevel@tonic-gate 
24200Sstevel@tonic-gate 	if (argc != 0)
24210Sstevel@tonic-gate 		return (DCMD_USAGE);
24220Sstevel@tonic-gate 
24230Sstevel@tonic-gate 	a.a_type = MDB_TYPE_STRING;
24240Sstevel@tonic-gate 	a.a_un.a_str = "-v";
24250Sstevel@tonic-gate 
24260Sstevel@tonic-gate 	return (bufctl(addr, flags, 1, &a));
24270Sstevel@tonic-gate }
24280Sstevel@tonic-gate 
24290Sstevel@tonic-gate typedef struct umem_verify {
24300Sstevel@tonic-gate 	uint64_t *umv_buf;		/* buffer to read cache contents into */
24310Sstevel@tonic-gate 	size_t umv_size;		/* number of bytes in umv_buf */
24320Sstevel@tonic-gate 	int umv_corruption;		/* > 0 if corruption found. */
24330Sstevel@tonic-gate 	int umv_besilent;		/* if set, suppress corruption warnings */
24340Sstevel@tonic-gate 	struct umem_cache umv_cache;	/* the cache we're operating on */
24350Sstevel@tonic-gate } umem_verify_t;
24360Sstevel@tonic-gate 
24370Sstevel@tonic-gate /*
24380Sstevel@tonic-gate  * verify_pattern()
24390Sstevel@tonic-gate  *	verify that buf is filled with the pattern pat.
24400Sstevel@tonic-gate  */
24410Sstevel@tonic-gate static int64_t
24420Sstevel@tonic-gate verify_pattern(uint64_t *buf_arg, size_t size, uint64_t pat)
24430Sstevel@tonic-gate {
24440Sstevel@tonic-gate 	/*LINTED*/
24450Sstevel@tonic-gate 	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
24460Sstevel@tonic-gate 	uint64_t *buf;
24470Sstevel@tonic-gate 
24480Sstevel@tonic-gate 	for (buf = buf_arg; buf < bufend; buf++)
24490Sstevel@tonic-gate 		if (*buf != pat)
24500Sstevel@tonic-gate 			return ((uintptr_t)buf - (uintptr_t)buf_arg);
24510Sstevel@tonic-gate 	return (-1);
24520Sstevel@tonic-gate }
24530Sstevel@tonic-gate 
24540Sstevel@tonic-gate /*
24550Sstevel@tonic-gate  * verify_buftag()
24560Sstevel@tonic-gate  *	verify that btp->bt_bxstat == (bcp ^ pat)
24570Sstevel@tonic-gate  */
24580Sstevel@tonic-gate static int
24590Sstevel@tonic-gate verify_buftag(umem_buftag_t *btp, uintptr_t pat)
24600Sstevel@tonic-gate {
24610Sstevel@tonic-gate 	return (btp->bt_bxstat == ((intptr_t)btp->bt_bufctl ^ pat) ? 0 : -1);
24620Sstevel@tonic-gate }
24630Sstevel@tonic-gate 
24640Sstevel@tonic-gate /*
24650Sstevel@tonic-gate  * verify_free()
24660Sstevel@tonic-gate  *	verify the integrity of a free block of memory by checking
24670Sstevel@tonic-gate  *	that it is filled with 0xdeadbeef and that its buftag is sane.
24680Sstevel@tonic-gate  */
24690Sstevel@tonic-gate /*ARGSUSED1*/
24700Sstevel@tonic-gate static int
24710Sstevel@tonic-gate verify_free(uintptr_t addr, const void *data, void *private)
24720Sstevel@tonic-gate {
24730Sstevel@tonic-gate 	umem_verify_t *umv = (umem_verify_t *)private;
24740Sstevel@tonic-gate 	uint64_t *buf = umv->umv_buf;	/* buf to validate */
24750Sstevel@tonic-gate 	int64_t corrupt;		/* corruption offset */
24760Sstevel@tonic-gate 	umem_buftag_t *buftagp;		/* ptr to buftag */
24770Sstevel@tonic-gate 	umem_cache_t *cp = &umv->umv_cache;
24780Sstevel@tonic-gate 	int besilent = umv->umv_besilent;
24790Sstevel@tonic-gate 
24800Sstevel@tonic-gate 	/*LINTED*/
24810Sstevel@tonic-gate 	buftagp = UMEM_BUFTAG(cp, buf);
24820Sstevel@tonic-gate 
24830Sstevel@tonic-gate 	/*
24840Sstevel@tonic-gate 	 * Read the buffer to check.
24850Sstevel@tonic-gate 	 */
24860Sstevel@tonic-gate 	if (mdb_vread(buf, umv->umv_size, addr) == -1) {
24870Sstevel@tonic-gate 		if (!besilent)
24880Sstevel@tonic-gate 			mdb_warn("couldn't read %p", addr);
24890Sstevel@tonic-gate 		return (WALK_NEXT);
24900Sstevel@tonic-gate 	}
24910Sstevel@tonic-gate 
24920Sstevel@tonic-gate 	if ((corrupt = verify_pattern(buf, cp->cache_verify,
24930Sstevel@tonic-gate 	    UMEM_FREE_PATTERN)) >= 0) {
24940Sstevel@tonic-gate 		if (!besilent)
24950Sstevel@tonic-gate 			mdb_printf("buffer %p (free) seems corrupted, at %p\n",
24960Sstevel@tonic-gate 			    addr, (uintptr_t)addr + corrupt);
24970Sstevel@tonic-gate 		goto corrupt;
24980Sstevel@tonic-gate 	}
24990Sstevel@tonic-gate 
25000Sstevel@tonic-gate 	if ((cp->cache_flags & UMF_HASH) &&
25010Sstevel@tonic-gate 	    buftagp->bt_redzone != UMEM_REDZONE_PATTERN) {
25020Sstevel@tonic-gate 		if (!besilent)
25030Sstevel@tonic-gate 			mdb_printf("buffer %p (free) seems to "
25040Sstevel@tonic-gate 			    "have a corrupt redzone pattern\n", addr);
25050Sstevel@tonic-gate 		goto corrupt;
25060Sstevel@tonic-gate 	}
25070Sstevel@tonic-gate 
25080Sstevel@tonic-gate 	/*
25090Sstevel@tonic-gate 	 * confirm bufctl pointer integrity.
25100Sstevel@tonic-gate 	 */
25110Sstevel@tonic-gate 	if (verify_buftag(buftagp, UMEM_BUFTAG_FREE) == -1) {
25120Sstevel@tonic-gate 		if (!besilent)
25130Sstevel@tonic-gate 			mdb_printf("buffer %p (free) has a corrupt "
25140Sstevel@tonic-gate 			    "buftag\n", addr);
25150Sstevel@tonic-gate 		goto corrupt;
25160Sstevel@tonic-gate 	}
25170Sstevel@tonic-gate 
25180Sstevel@tonic-gate 	return (WALK_NEXT);
25190Sstevel@tonic-gate corrupt:
25200Sstevel@tonic-gate 	umv->umv_corruption++;
25210Sstevel@tonic-gate 	return (WALK_NEXT);
25220Sstevel@tonic-gate }
25230Sstevel@tonic-gate 
25240Sstevel@tonic-gate /*
25250Sstevel@tonic-gate  * verify_alloc()
25260Sstevel@tonic-gate  *	Verify that the buftag of an allocated buffer makes sense with respect
25270Sstevel@tonic-gate  *	to the buffer.
25280Sstevel@tonic-gate  */
25290Sstevel@tonic-gate /*ARGSUSED1*/
25300Sstevel@tonic-gate static int
25310Sstevel@tonic-gate verify_alloc(uintptr_t addr, const void *data, void *private)
25320Sstevel@tonic-gate {
25330Sstevel@tonic-gate 	umem_verify_t *umv = (umem_verify_t *)private;
25340Sstevel@tonic-gate 	umem_cache_t *cp = &umv->umv_cache;
25350Sstevel@tonic-gate 	uint64_t *buf = umv->umv_buf;	/* buf to validate */
25360Sstevel@tonic-gate 	/*LINTED*/
25370Sstevel@tonic-gate 	umem_buftag_t *buftagp = UMEM_BUFTAG(cp, buf);
25380Sstevel@tonic-gate 	uint32_t *ip = (uint32_t *)buftagp;
25390Sstevel@tonic-gate 	uint8_t *bp = (uint8_t *)buf;
25400Sstevel@tonic-gate 	int looks_ok = 0, size_ok = 1;	/* flags for finding corruption */
25410Sstevel@tonic-gate 	int besilent = umv->umv_besilent;
25420Sstevel@tonic-gate 
25430Sstevel@tonic-gate 	/*
25440Sstevel@tonic-gate 	 * Read the buffer to check.
25450Sstevel@tonic-gate 	 */
25460Sstevel@tonic-gate 	if (mdb_vread(buf, umv->umv_size, addr) == -1) {
25470Sstevel@tonic-gate 		if (!besilent)
25480Sstevel@tonic-gate 			mdb_warn("couldn't read %p", addr);
25490Sstevel@tonic-gate 		return (WALK_NEXT);
25500Sstevel@tonic-gate 	}
25510Sstevel@tonic-gate 
25520Sstevel@tonic-gate 	/*
25530Sstevel@tonic-gate 	 * There are two cases to handle:
25540Sstevel@tonic-gate 	 * 1. If the buf was alloc'd using umem_cache_alloc, it will have
25550Sstevel@tonic-gate 	 *    0xfeedfacefeedface at the end of it
25560Sstevel@tonic-gate 	 * 2. If the buf was alloc'd using umem_alloc, it will have
25570Sstevel@tonic-gate 	 *    0xbb just past the end of the region in use.  At the buftag,
25580Sstevel@tonic-gate 	 *    it will have 0xfeedface (or, if the whole buffer is in use,
25590Sstevel@tonic-gate 	 *    0xfeedface & bb000000 or 0xfeedfacf & 000000bb depending on
25600Sstevel@tonic-gate 	 *    endianness), followed by 32 bits containing the offset of the
25610Sstevel@tonic-gate 	 *    0xbb byte in the buffer.
25620Sstevel@tonic-gate 	 *
25630Sstevel@tonic-gate 	 * Finally, the two 32-bit words that comprise the second half of the
25640Sstevel@tonic-gate 	 * buftag should xor to UMEM_BUFTAG_ALLOC
25650Sstevel@tonic-gate 	 */
25660Sstevel@tonic-gate 
25670Sstevel@tonic-gate 	if (buftagp->bt_redzone == UMEM_REDZONE_PATTERN)
25680Sstevel@tonic-gate 		looks_ok = 1;
25690Sstevel@tonic-gate 	else if (!UMEM_SIZE_VALID(ip[1]))
25700Sstevel@tonic-gate 		size_ok = 0;
25710Sstevel@tonic-gate 	else if (bp[UMEM_SIZE_DECODE(ip[1])] == UMEM_REDZONE_BYTE)
25720Sstevel@tonic-gate 		looks_ok = 1;
25730Sstevel@tonic-gate 	else
25740Sstevel@tonic-gate 		size_ok = 0;
25750Sstevel@tonic-gate 
25760Sstevel@tonic-gate 	if (!size_ok) {
25770Sstevel@tonic-gate 		if (!besilent)
25780Sstevel@tonic-gate 			mdb_printf("buffer %p (allocated) has a corrupt "
25790Sstevel@tonic-gate 			    "redzone size encoding\n", addr);
25800Sstevel@tonic-gate 		goto corrupt;
25810Sstevel@tonic-gate 	}
25820Sstevel@tonic-gate 
25830Sstevel@tonic-gate 	if (!looks_ok) {
25840Sstevel@tonic-gate 		if (!besilent)
25850Sstevel@tonic-gate 			mdb_printf("buffer %p (allocated) has a corrupt "
25860Sstevel@tonic-gate 			    "redzone signature\n", addr);
25870Sstevel@tonic-gate 		goto corrupt;
25880Sstevel@tonic-gate 	}
25890Sstevel@tonic-gate 
25900Sstevel@tonic-gate 	if (verify_buftag(buftagp, UMEM_BUFTAG_ALLOC) == -1) {
25910Sstevel@tonic-gate 		if (!besilent)
25920Sstevel@tonic-gate 			mdb_printf("buffer %p (allocated) has a "
25930Sstevel@tonic-gate 			    "corrupt buftag\n", addr);
25940Sstevel@tonic-gate 		goto corrupt;
25950Sstevel@tonic-gate 	}
25960Sstevel@tonic-gate 
25970Sstevel@tonic-gate 	return (WALK_NEXT);
25980Sstevel@tonic-gate corrupt:
25990Sstevel@tonic-gate 	umv->umv_corruption++;
26000Sstevel@tonic-gate 	return (WALK_NEXT);
26010Sstevel@tonic-gate }
26020Sstevel@tonic-gate 
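/*
 * ::umem_verify checks buffer redzones.  With no address it walks every
 * umem_cache and prints a one-line integrity summary per cache; with a
 * cache address it checks that single cache verbosely (the address below
 * is illustrative):
 *
 *	> ::umem_verify
 *	> 0x810b000::umem_verify
 *
 * Allocated buffers are checked whenever UMF_REDZONE is set on the cache;
 * free buffers are also checked when UMF_DEADBEEF is set.
 */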
26030Sstevel@tonic-gate /*ARGSUSED2*/
26040Sstevel@tonic-gate int
26050Sstevel@tonic-gate umem_verify(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
26060Sstevel@tonic-gate {
26070Sstevel@tonic-gate 	if (flags & DCMD_ADDRSPEC) {
26080Sstevel@tonic-gate 		int check_alloc = 0, check_free = 0;
26090Sstevel@tonic-gate 		umem_verify_t umv;
26100Sstevel@tonic-gate 
26110Sstevel@tonic-gate 		if (mdb_vread(&umv.umv_cache, sizeof (umv.umv_cache),
26120Sstevel@tonic-gate 		    addr) == -1) {
26130Sstevel@tonic-gate 			mdb_warn("couldn't read umem_cache %p", addr);
26140Sstevel@tonic-gate 			return (DCMD_ERR);
26150Sstevel@tonic-gate 		}
26160Sstevel@tonic-gate 
26170Sstevel@tonic-gate 		umv.umv_size = umv.umv_cache.cache_buftag +
26180Sstevel@tonic-gate 		    sizeof (umem_buftag_t);
26190Sstevel@tonic-gate 		umv.umv_buf = mdb_alloc(umv.umv_size, UM_SLEEP | UM_GC);
26200Sstevel@tonic-gate 		umv.umv_corruption = 0;
26210Sstevel@tonic-gate 
26220Sstevel@tonic-gate 		if ((umv.umv_cache.cache_flags & UMF_REDZONE)) {
26230Sstevel@tonic-gate 			check_alloc = 1;
26240Sstevel@tonic-gate 			if (umv.umv_cache.cache_flags & UMF_DEADBEEF)
26250Sstevel@tonic-gate 				check_free = 1;
26260Sstevel@tonic-gate 		} else {
26270Sstevel@tonic-gate 			if (!(flags & DCMD_LOOP)) {
26280Sstevel@tonic-gate 				mdb_warn("cache %p (%s) does not have "
26290Sstevel@tonic-gate 				    "redzone checking enabled\n", addr,
26300Sstevel@tonic-gate 				    umv.umv_cache.cache_name);
26310Sstevel@tonic-gate 			}
26320Sstevel@tonic-gate 			return (DCMD_ERR);
26330Sstevel@tonic-gate 		}
26340Sstevel@tonic-gate 
26350Sstevel@tonic-gate 		if (flags & DCMD_LOOP) {
26360Sstevel@tonic-gate 			/*
26370Sstevel@tonic-gate 			 * table mode, don't print out every corrupt buffer
26380Sstevel@tonic-gate 			 */
26390Sstevel@tonic-gate 			umv.umv_besilent = 1;
26400Sstevel@tonic-gate 		} else {
26410Sstevel@tonic-gate 			mdb_printf("Summary for cache '%s'\n",
26420Sstevel@tonic-gate 			    umv.umv_cache.cache_name);
26430Sstevel@tonic-gate 			mdb_inc_indent(2);
26440Sstevel@tonic-gate 			umv.umv_besilent = 0;
26450Sstevel@tonic-gate 		}
26460Sstevel@tonic-gate 
26470Sstevel@tonic-gate 		if (check_alloc)
26480Sstevel@tonic-gate 			(void) mdb_pwalk("umem", verify_alloc, &umv, addr);
26490Sstevel@tonic-gate 		if (check_free)
26500Sstevel@tonic-gate 			(void) mdb_pwalk("freemem", verify_free, &umv, addr);
26510Sstevel@tonic-gate 
26520Sstevel@tonic-gate 		if (flags & DCMD_LOOP) {
26530Sstevel@tonic-gate 			if (umv.umv_corruption == 0) {
26540Sstevel@tonic-gate 				mdb_printf("%-*s %?p clean\n",
26550Sstevel@tonic-gate 				    UMEM_CACHE_NAMELEN,
26560Sstevel@tonic-gate 				    umv.umv_cache.cache_name, addr);
26570Sstevel@tonic-gate 			} else {
26580Sstevel@tonic-gate 				char *s = "";	/* optional s in "buffer[s]" */
26590Sstevel@tonic-gate 				if (umv.umv_corruption > 1)
26600Sstevel@tonic-gate 					s = "s";
26610Sstevel@tonic-gate 
26620Sstevel@tonic-gate 				mdb_printf("%-*s %?p %d corrupt buffer%s\n",
26630Sstevel@tonic-gate 				    UMEM_CACHE_NAMELEN,
26640Sstevel@tonic-gate 				    umv.umv_cache.cache_name, addr,
26650Sstevel@tonic-gate 				    umv.umv_corruption, s);
26660Sstevel@tonic-gate 			}
26670Sstevel@tonic-gate 		} else {
26680Sstevel@tonic-gate 			/*
26690Sstevel@tonic-gate 			 * This is the more verbose mode, used when the user
26700Sstevel@tonic-gate 			 * has typed addr::umem_verify.  If the cache was clean,
26710Sstevel@tonic-gate 			 * nothing will have been printed yet, so say something.
26720Sstevel@tonic-gate 			 */
26730Sstevel@tonic-gate 			if (umv.umv_corruption == 0)
26740Sstevel@tonic-gate 				mdb_printf("clean\n");
26750Sstevel@tonic-gate 
26760Sstevel@tonic-gate 			mdb_dec_indent(2);
26770Sstevel@tonic-gate 		}
26780Sstevel@tonic-gate 	} else {
26790Sstevel@tonic-gate 		/*
26800Sstevel@tonic-gate 		 * If the user didn't specify a cache to verify, we'll walk all
26810Sstevel@tonic-gate 		 * umem_caches, specifying ourselves as a callback for each...
26820Sstevel@tonic-gate 		 * this is the equivalent of '::walk umem_cache .::umem_verify'
26830Sstevel@tonic-gate 		 */
26840Sstevel@tonic-gate 		mdb_printf("%<u>%-*s %-?s %-20s%</u>\n", UMEM_CACHE_NAMELEN,
26850Sstevel@tonic-gate 		    "Cache Name", "Addr", "Cache Integrity");
26860Sstevel@tonic-gate 		(void) mdb_walk_dcmd("umem_cache", "umem_verify", 0, NULL);
26870Sstevel@tonic-gate 	}
26880Sstevel@tonic-gate 
26890Sstevel@tonic-gate 	return (DCMD_OK);
26900Sstevel@tonic-gate }
26910Sstevel@tonic-gate 
26920Sstevel@tonic-gate typedef struct vmem_node {
26930Sstevel@tonic-gate 	struct vmem_node *vn_next;
26940Sstevel@tonic-gate 	struct vmem_node *vn_parent;
26950Sstevel@tonic-gate 	struct vmem_node *vn_sibling;
26960Sstevel@tonic-gate 	struct vmem_node *vn_children;
26970Sstevel@tonic-gate 	uintptr_t vn_addr;
26980Sstevel@tonic-gate 	int vn_marked;
26990Sstevel@tonic-gate 	vmem_t vn_vmem;
27000Sstevel@tonic-gate } vmem_node_t;
27010Sstevel@tonic-gate 
27020Sstevel@tonic-gate typedef struct vmem_walk {
27030Sstevel@tonic-gate 	vmem_node_t *vw_root;
27040Sstevel@tonic-gate 	vmem_node_t *vw_current;
27050Sstevel@tonic-gate } vmem_walk_t;
27060Sstevel@tonic-gate 
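/*
 * The "vmem" walker visits every arena in the process.  Since arenas
 * import from one another, vmem_walk_init() snapshots the global vmem_list
 * into vmem_node_ts and links each node beneath its vm_source parent,
 * building a forest whose roots are arenas with no source.
 */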
27070Sstevel@tonic-gate int
27080Sstevel@tonic-gate vmem_walk_init(mdb_walk_state_t *wsp)
27090Sstevel@tonic-gate {
27100Sstevel@tonic-gate 	uintptr_t vaddr, paddr;
27110Sstevel@tonic-gate 	vmem_node_t *head = NULL, *root = NULL, *current = NULL, *parent, *vp;
27120Sstevel@tonic-gate 	vmem_walk_t *vw;
27130Sstevel@tonic-gate 
27140Sstevel@tonic-gate 	if (umem_readvar(&vaddr, "vmem_list") == -1) {
27150Sstevel@tonic-gate 		mdb_warn("couldn't read 'vmem_list'");
27160Sstevel@tonic-gate 		return (WALK_ERR);
27170Sstevel@tonic-gate 	}
27180Sstevel@tonic-gate 
27190Sstevel@tonic-gate 	while (vaddr != NULL) {
27200Sstevel@tonic-gate 		vp = mdb_zalloc(sizeof (vmem_node_t), UM_SLEEP);
27210Sstevel@tonic-gate 		vp->vn_addr = vaddr;
27220Sstevel@tonic-gate 		vp->vn_next = head;
27230Sstevel@tonic-gate 		head = vp;
27240Sstevel@tonic-gate 
27250Sstevel@tonic-gate 		if (vaddr == wsp->walk_addr)
27260Sstevel@tonic-gate 			current = vp;
27270Sstevel@tonic-gate 
27280Sstevel@tonic-gate 		if (mdb_vread(&vp->vn_vmem, sizeof (vmem_t), vaddr) == -1) {
27290Sstevel@tonic-gate 			mdb_warn("couldn't read vmem_t at %p", vaddr);
27300Sstevel@tonic-gate 			goto err;
27310Sstevel@tonic-gate 		}
27320Sstevel@tonic-gate 
27330Sstevel@tonic-gate 		vaddr = (uintptr_t)vp->vn_vmem.vm_next;
27340Sstevel@tonic-gate 	}
27350Sstevel@tonic-gate 
27360Sstevel@tonic-gate 	for (vp = head; vp != NULL; vp = vp->vn_next) {
27370Sstevel@tonic-gate 
27380Sstevel@tonic-gate 		if ((paddr = (uintptr_t)vp->vn_vmem.vm_source) == NULL) {
27390Sstevel@tonic-gate 			vp->vn_sibling = root;
27400Sstevel@tonic-gate 			root = vp;
27410Sstevel@tonic-gate 			continue;
27420Sstevel@tonic-gate 		}
27430Sstevel@tonic-gate 
27440Sstevel@tonic-gate 		for (parent = head; parent != NULL; parent = parent->vn_next) {
27450Sstevel@tonic-gate 			if (parent->vn_addr != paddr)
27460Sstevel@tonic-gate 				continue;
27470Sstevel@tonic-gate 			vp->vn_sibling = parent->vn_children;
27480Sstevel@tonic-gate 			parent->vn_children = vp;
27490Sstevel@tonic-gate 			vp->vn_parent = parent;
27500Sstevel@tonic-gate 			break;
27510Sstevel@tonic-gate 		}
27520Sstevel@tonic-gate 
27530Sstevel@tonic-gate 		if (parent == NULL) {
27540Sstevel@tonic-gate 			mdb_warn("couldn't find %p's parent (%p)\n",
27550Sstevel@tonic-gate 			    vp->vn_addr, paddr);
27560Sstevel@tonic-gate 			goto err;
27570Sstevel@tonic-gate 		}
27580Sstevel@tonic-gate 	}
27590Sstevel@tonic-gate 
27600Sstevel@tonic-gate 	vw = mdb_zalloc(sizeof (vmem_walk_t), UM_SLEEP);
27610Sstevel@tonic-gate 	vw->vw_root = root;
27620Sstevel@tonic-gate 
27630Sstevel@tonic-gate 	if (current != NULL)
27640Sstevel@tonic-gate 		vw->vw_current = current;
27650Sstevel@tonic-gate 	else
27660Sstevel@tonic-gate 		vw->vw_current = root;
27670Sstevel@tonic-gate 
27680Sstevel@tonic-gate 	wsp->walk_data = vw;
27690Sstevel@tonic-gate 	return (WALK_NEXT);
27700Sstevel@tonic-gate err:
27710Sstevel@tonic-gate 	for (vp = head; head != NULL; vp = head) {
27720Sstevel@tonic-gate 		head = vp->vn_next;
27730Sstevel@tonic-gate 		mdb_free(vp, sizeof (vmem_node_t));
27740Sstevel@tonic-gate 	}
27750Sstevel@tonic-gate 
27760Sstevel@tonic-gate 	return (WALK_ERR);
27770Sstevel@tonic-gate }
27780Sstevel@tonic-gate 
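/*
 * vmem_walk_step() traverses the forest built above in pre-order: an
 * arena's callback fires before its children's.  The "vmem_postfix"
 * variant below visits children first.
 */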
27790Sstevel@tonic-gate int
27800Sstevel@tonic-gate vmem_walk_step(mdb_walk_state_t *wsp)
27810Sstevel@tonic-gate {
27820Sstevel@tonic-gate 	vmem_walk_t *vw = wsp->walk_data;
27830Sstevel@tonic-gate 	vmem_node_t *vp;
27840Sstevel@tonic-gate 	int rval;
27850Sstevel@tonic-gate 
27860Sstevel@tonic-gate 	if ((vp = vw->vw_current) == NULL)
27870Sstevel@tonic-gate 		return (WALK_DONE);
27880Sstevel@tonic-gate 
27890Sstevel@tonic-gate 	rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata);
27900Sstevel@tonic-gate 
27910Sstevel@tonic-gate 	if (vp->vn_children != NULL) {
27920Sstevel@tonic-gate 		vw->vw_current = vp->vn_children;
27930Sstevel@tonic-gate 		return (rval);
27940Sstevel@tonic-gate 	}
27950Sstevel@tonic-gate 
27960Sstevel@tonic-gate 	do {
27970Sstevel@tonic-gate 		vw->vw_current = vp->vn_sibling;
27980Sstevel@tonic-gate 		vp = vp->vn_parent;
27990Sstevel@tonic-gate 	} while (vw->vw_current == NULL && vp != NULL);
28000Sstevel@tonic-gate 
28010Sstevel@tonic-gate 	return (rval);
28020Sstevel@tonic-gate }
28030Sstevel@tonic-gate 
28040Sstevel@tonic-gate /*
28050Sstevel@tonic-gate  * The "vmem_postfix" walk walks the vmem arenas in post-fix order; all
28060Sstevel@tonic-gate  * children are visited before their parent.  We perform the postfix walk
28070Sstevel@tonic-gate  * iteratively (rather than recursively) to allow mdb to regain control
28080Sstevel@tonic-gate  * after each callback.
28090Sstevel@tonic-gate  */
28100Sstevel@tonic-gate int
28110Sstevel@tonic-gate vmem_postfix_walk_step(mdb_walk_state_t *wsp)
28120Sstevel@tonic-gate {
28130Sstevel@tonic-gate 	vmem_walk_t *vw = wsp->walk_data;
28140Sstevel@tonic-gate 	vmem_node_t *vp = vw->vw_current;
28150Sstevel@tonic-gate 	int rval;
28160Sstevel@tonic-gate 
28170Sstevel@tonic-gate 	/*
28180Sstevel@tonic-gate 	 * If this node is marked, then we know that we have already visited
28190Sstevel@tonic-gate 	 * all of its children.  If the node has any siblings, they need to
28200Sstevel@tonic-gate 	 * be visited next; otherwise, we need to visit the parent.  Note
28210Sstevel@tonic-gate 	 * that vp->vn_marked will only be zero on the first invocation of
28220Sstevel@tonic-gate 	 * the step function.
28230Sstevel@tonic-gate 	 */
28240Sstevel@tonic-gate 	if (vp->vn_marked) {
28250Sstevel@tonic-gate 		if (vp->vn_sibling != NULL)
28260Sstevel@tonic-gate 			vp = vp->vn_sibling;
28270Sstevel@tonic-gate 		else if (vp->vn_parent != NULL)
28280Sstevel@tonic-gate 			vp = vp->vn_parent;
28290Sstevel@tonic-gate 		else {
28300Sstevel@tonic-gate 			/*
28310Sstevel@tonic-gate 			 * We have neither a parent, nor a sibling, and we
28320Sstevel@tonic-gate 			 * have already been visited; we're done.
28330Sstevel@tonic-gate 			 */
28340Sstevel@tonic-gate 			return (WALK_DONE);
28350Sstevel@tonic-gate 		}
28360Sstevel@tonic-gate 	}
28370Sstevel@tonic-gate 
28380Sstevel@tonic-gate 	/*
28390Sstevel@tonic-gate 	 * Before we visit this node, visit its children.
28400Sstevel@tonic-gate 	 */
28410Sstevel@tonic-gate 	while (vp->vn_children != NULL && !vp->vn_children->vn_marked)
28420Sstevel@tonic-gate 		vp = vp->vn_children;
28430Sstevel@tonic-gate 
28440Sstevel@tonic-gate 	vp->vn_marked = 1;
28450Sstevel@tonic-gate 	vw->vw_current = vp;
28460Sstevel@tonic-gate 	rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata);
28470Sstevel@tonic-gate 
28480Sstevel@tonic-gate 	return (rval);
28490Sstevel@tonic-gate }
28500Sstevel@tonic-gate 
28510Sstevel@tonic-gate void
28520Sstevel@tonic-gate vmem_walk_fini(mdb_walk_state_t *wsp)
28530Sstevel@tonic-gate {
28540Sstevel@tonic-gate 	vmem_walk_t *vw = wsp->walk_data;
28550Sstevel@tonic-gate 	vmem_node_t *root = vw->vw_root;
28560Sstevel@tonic-gate 	int done;
28570Sstevel@tonic-gate 
28580Sstevel@tonic-gate 	if (root == NULL)
28590Sstevel@tonic-gate 		return;
28600Sstevel@tonic-gate 
28610Sstevel@tonic-gate 	if ((vw->vw_root = root->vn_children) != NULL)
28620Sstevel@tonic-gate 		vmem_walk_fini(wsp);
28630Sstevel@tonic-gate 
28640Sstevel@tonic-gate 	vw->vw_root = root->vn_sibling;
28650Sstevel@tonic-gate 	done = (root->vn_sibling == NULL && root->vn_parent == NULL);
28660Sstevel@tonic-gate 	mdb_free(root, sizeof (vmem_node_t));
28670Sstevel@tonic-gate 
28680Sstevel@tonic-gate 	if (done) {
28690Sstevel@tonic-gate 		mdb_free(vw, sizeof (vmem_walk_t));
28700Sstevel@tonic-gate 	} else {
28710Sstevel@tonic-gate 		vmem_walk_fini(wsp);
28720Sstevel@tonic-gate 	}
28730Sstevel@tonic-gate }
28740Sstevel@tonic-gate 
28750Sstevel@tonic-gate typedef struct vmem_seg_walk {
28760Sstevel@tonic-gate 	uint8_t vsw_type;
28770Sstevel@tonic-gate 	uintptr_t vsw_start;
28780Sstevel@tonic-gate 	uintptr_t vsw_current;
28790Sstevel@tonic-gate } vmem_seg_walk_t;
28800Sstevel@tonic-gate 
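/*
 * The vmem_seg walkers iterate over an arena's circular segment list,
 * which is threaded through the vm_seg0 sentinel embedded in the vmem_t;
 * the walk ends when it returns to its starting point.  Each variant
 * below just filters on a different vs_type.
 */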
28810Sstevel@tonic-gate /*ARGSUSED*/
28820Sstevel@tonic-gate int
28830Sstevel@tonic-gate vmem_seg_walk_common_init(mdb_walk_state_t *wsp, uint8_t type, char *name)
28840Sstevel@tonic-gate {
28850Sstevel@tonic-gate 	vmem_seg_walk_t *vsw;
28860Sstevel@tonic-gate 
28870Sstevel@tonic-gate 	if (wsp->walk_addr == NULL) {
28880Sstevel@tonic-gate 		mdb_warn("vmem_%s does not support global walks\n", name);
28890Sstevel@tonic-gate 		return (WALK_ERR);
28900Sstevel@tonic-gate 	}
28910Sstevel@tonic-gate 
28920Sstevel@tonic-gate 	wsp->walk_data = vsw = mdb_alloc(sizeof (vmem_seg_walk_t), UM_SLEEP);
28930Sstevel@tonic-gate 
28940Sstevel@tonic-gate 	vsw->vsw_type = type;
28950Sstevel@tonic-gate 	vsw->vsw_start = wsp->walk_addr + OFFSETOF(vmem_t, vm_seg0);
28960Sstevel@tonic-gate 	vsw->vsw_current = vsw->vsw_start;
28970Sstevel@tonic-gate 
28980Sstevel@tonic-gate 	return (WALK_NEXT);
28990Sstevel@tonic-gate }
29000Sstevel@tonic-gate 
29010Sstevel@tonic-gate /*
29020Sstevel@tonic-gate  * vmem segments can't have type 0 (this should be added to vmem_impl.h).
29030Sstevel@tonic-gate  */
29040Sstevel@tonic-gate #define	VMEM_NONE	0
29050Sstevel@tonic-gate 
29060Sstevel@tonic-gate int
29070Sstevel@tonic-gate vmem_alloc_walk_init(mdb_walk_state_t *wsp)
29080Sstevel@tonic-gate {
29090Sstevel@tonic-gate 	return (vmem_seg_walk_common_init(wsp, VMEM_ALLOC, "alloc"));
29100Sstevel@tonic-gate }
29110Sstevel@tonic-gate 
29120Sstevel@tonic-gate int
29130Sstevel@tonic-gate vmem_free_walk_init(mdb_walk_state_t *wsp)
29140Sstevel@tonic-gate {
29150Sstevel@tonic-gate 	return (vmem_seg_walk_common_init(wsp, VMEM_FREE, "free"));
29160Sstevel@tonic-gate }
29170Sstevel@tonic-gate 
29180Sstevel@tonic-gate int
29190Sstevel@tonic-gate vmem_span_walk_init(mdb_walk_state_t *wsp)
29200Sstevel@tonic-gate {
29210Sstevel@tonic-gate 	return (vmem_seg_walk_common_init(wsp, VMEM_SPAN, "span"));
29220Sstevel@tonic-gate }
29230Sstevel@tonic-gate 
29240Sstevel@tonic-gate int
29250Sstevel@tonic-gate vmem_seg_walk_init(mdb_walk_state_t *wsp)
29260Sstevel@tonic-gate {
29270Sstevel@tonic-gate 	return (vmem_seg_walk_common_init(wsp, VMEM_NONE, "seg"));
29280Sstevel@tonic-gate }
29290Sstevel@tonic-gate 
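/*
 * The target's vmem_seg_size can be smaller than our sizeof (vmem_seg_t),
 * since the debugging fields at the tail of the structure are optional;
 * the step function therefore reads only vmem_seg_size bytes and
 * zero-fills the remainder of the local copy.
 */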
29300Sstevel@tonic-gate int
29310Sstevel@tonic-gate vmem_seg_walk_step(mdb_walk_state_t *wsp)
29320Sstevel@tonic-gate {
29330Sstevel@tonic-gate 	vmem_seg_t seg;
29340Sstevel@tonic-gate 	vmem_seg_walk_t *vsw = wsp->walk_data;
29350Sstevel@tonic-gate 	uintptr_t addr = vsw->vsw_current;
29360Sstevel@tonic-gate 	static size_t seg_size = 0;
29370Sstevel@tonic-gate 	int rval;
29380Sstevel@tonic-gate 
29390Sstevel@tonic-gate 	if (!seg_size) {
29400Sstevel@tonic-gate 		if (umem_readvar(&seg_size, "vmem_seg_size") == -1) {
29410Sstevel@tonic-gate 			mdb_warn("failed to read 'vmem_seg_size'");
29420Sstevel@tonic-gate 			seg_size = sizeof (vmem_seg_t);
29430Sstevel@tonic-gate 		}
29440Sstevel@tonic-gate 	}
29450Sstevel@tonic-gate 
29460Sstevel@tonic-gate 	if (seg_size < sizeof (seg))
29470Sstevel@tonic-gate 		bzero((caddr_t)&seg + seg_size, sizeof (seg) - seg_size);
29480Sstevel@tonic-gate 
29490Sstevel@tonic-gate 	if (mdb_vread(&seg, seg_size, addr) == -1) {
29500Sstevel@tonic-gate 		mdb_warn("couldn't read vmem_seg at %p", addr);
29510Sstevel@tonic-gate 		return (WALK_ERR);
29520Sstevel@tonic-gate 	}
29530Sstevel@tonic-gate 
29540Sstevel@tonic-gate 	vsw->vsw_current = (uintptr_t)seg.vs_anext;
29550Sstevel@tonic-gate 	if (vsw->vsw_type != VMEM_NONE && seg.vs_type != vsw->vsw_type) {
29560Sstevel@tonic-gate 		rval = WALK_NEXT;
29570Sstevel@tonic-gate 	} else {
29580Sstevel@tonic-gate 		rval = wsp->walk_callback(addr, &seg, wsp->walk_cbdata);
29590Sstevel@tonic-gate 	}
29600Sstevel@tonic-gate 
29610Sstevel@tonic-gate 	if (vsw->vsw_current == vsw->vsw_start)
29620Sstevel@tonic-gate 		return (WALK_DONE);
29630Sstevel@tonic-gate 
29640Sstevel@tonic-gate 	return (rval);
29650Sstevel@tonic-gate }
29660Sstevel@tonic-gate 
29670Sstevel@tonic-gate void
29680Sstevel@tonic-gate vmem_seg_walk_fini(mdb_walk_state_t *wsp)
29690Sstevel@tonic-gate {
29700Sstevel@tonic-gate 	vmem_seg_walk_t *vsw = wsp->walk_data;
29710Sstevel@tonic-gate 
29720Sstevel@tonic-gate 	mdb_free(vsw, sizeof (vmem_seg_walk_t));
29730Sstevel@tonic-gate }
29740Sstevel@tonic-gate 
29750Sstevel@tonic-gate #define	VMEM_NAMEWIDTH	22
29760Sstevel@tonic-gate 
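/*
 * ::vmem prints a one-line summary per arena, indenting each arena's name
 * beneath the arena it imports from.  With no address it summarizes every
 * arena (the address below is illustrative):
 *
 *	> ::vmem
 *	> 0x814e2d8::vmem
 */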
29770Sstevel@tonic-gate int
29780Sstevel@tonic-gate vmem(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
29790Sstevel@tonic-gate {
29800Sstevel@tonic-gate 	vmem_t v, parent;
29810Sstevel@tonic-gate 	uintptr_t paddr;
29820Sstevel@tonic-gate 	int ident = 0;
29830Sstevel@tonic-gate 	char c[VMEM_NAMEWIDTH];
29840Sstevel@tonic-gate 
29850Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC)) {
29860Sstevel@tonic-gate 		if (mdb_walk_dcmd("vmem", "vmem", argc, argv) == -1) {
29870Sstevel@tonic-gate 			mdb_warn("can't walk vmem");
29880Sstevel@tonic-gate 			return (DCMD_ERR);
29890Sstevel@tonic-gate 		}
29900Sstevel@tonic-gate 		return (DCMD_OK);
29910Sstevel@tonic-gate 	}
29920Sstevel@tonic-gate 
29930Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags))
29940Sstevel@tonic-gate 		mdb_printf("%-?s %-*s %10s %12s %9s %5s\n",
29950Sstevel@tonic-gate 		    "ADDR", VMEM_NAMEWIDTH, "NAME", "INUSE",
29960Sstevel@tonic-gate 		    "TOTAL", "SUCCEED", "FAIL");
29970Sstevel@tonic-gate 
29980Sstevel@tonic-gate 	if (mdb_vread(&v, sizeof (v), addr) == -1) {
29990Sstevel@tonic-gate 		mdb_warn("couldn't read vmem at %p", addr);
30000Sstevel@tonic-gate 		return (DCMD_ERR);
30010Sstevel@tonic-gate 	}
30020Sstevel@tonic-gate 
30030Sstevel@tonic-gate 	for (paddr = (uintptr_t)v.vm_source; paddr != NULL; ident += 2) {
30040Sstevel@tonic-gate 		if (mdb_vread(&parent, sizeof (parent), paddr) == -1) {
30050Sstevel@tonic-gate 			mdb_warn("couldn't trace %p's ancestry", addr);
30060Sstevel@tonic-gate 			ident = 0;
30070Sstevel@tonic-gate 			break;
30080Sstevel@tonic-gate 		}
30090Sstevel@tonic-gate 		paddr = (uintptr_t)parent.vm_source;
30100Sstevel@tonic-gate 	}
30110Sstevel@tonic-gate 
30120Sstevel@tonic-gate 	(void) mdb_snprintf(c, VMEM_NAMEWIDTH, "%*s%s", ident, "", v.vm_name);
30130Sstevel@tonic-gate 
30140Sstevel@tonic-gate 	mdb_printf("%0?p %-*s %10llu %12llu %9llu %5llu\n",
30150Sstevel@tonic-gate 	    addr, VMEM_NAMEWIDTH, c,
30160Sstevel@tonic-gate 	    v.vm_kstat.vk_mem_inuse, v.vm_kstat.vk_mem_total,
30170Sstevel@tonic-gate 	    v.vm_kstat.vk_alloc, v.vm_kstat.vk_fail);
30180Sstevel@tonic-gate 
30190Sstevel@tonic-gate 	return (DCMD_OK);
30200Sstevel@tonic-gate }
30210Sstevel@tonic-gate 
30220Sstevel@tonic-gate void
30230Sstevel@tonic-gate vmem_seg_help(void)
30240Sstevel@tonic-gate {
30250Sstevel@tonic-gate 	mdb_printf("%s\n",
30260Sstevel@tonic-gate "Display the contents of vmem_seg_ts, with optional filtering.\n"
30270Sstevel@tonic-gate "\n"
30280Sstevel@tonic-gate "A vmem_seg_t represents a range of addresses (or arbitrary numbers)\n"
30290Sstevel@tonic-gate "covering a single chunk of data.  Only ALLOC segments have debugging\n"
30300Sstevel@tonic-gate "information.\n");
30310Sstevel@tonic-gate 	mdb_dec_indent(2);
30320Sstevel@tonic-gate 	mdb_printf("%<b>OPTIONS%</b>\n");
30330Sstevel@tonic-gate 	mdb_inc_indent(2);
30340Sstevel@tonic-gate 	mdb_printf("%s",
30350Sstevel@tonic-gate "  -v    Display the full content of the vmem_seg, including its stack trace\n"
30360Sstevel@tonic-gate "  -s    Report the size of the segment, instead of the end address\n"
30370Sstevel@tonic-gate "  -c caller\n"
30380Sstevel@tonic-gate "        filter out segments without the function/PC in their stack trace\n"
30390Sstevel@tonic-gate "  -e earliest\n"
30400Sstevel@tonic-gate "        filter out segments timestamped before earliest\n"
30410Sstevel@tonic-gate "  -l latest\n"
30420Sstevel@tonic-gate "        filter out segments timestamped after latest\n"
30430Sstevel@tonic-gate "  -m minsize\n"
30440Sstevel@tonic-gate "        filter out segments smaller than minsize\n"
30450Sstevel@tonic-gate "  -M maxsize\n"
30460Sstevel@tonic-gate "        filter out segments larger than maxsize\n"
30470Sstevel@tonic-gate "  -t thread\n"
30480Sstevel@tonic-gate "        filter out segments not involving thread\n"
30490Sstevel@tonic-gate "  -T type\n"
30500Sstevel@tonic-gate "        filter out segments not of type 'type'\n"
30510Sstevel@tonic-gate "        type is one of: ALLOC/FREE/SPAN/ROTOR/WALKER\n");
30520Sstevel@tonic-gate }
30530Sstevel@tonic-gate 
30540Sstevel@tonic-gate 
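/*
 * ::vmem_seg is typically fed from the walkers above, combined with the
 * filters described in vmem_seg_help().  For example (vmem_addr is a
 * hypothetical arena address):
 *
 *	> vmem_addr::walk vmem_alloc | ::vmem_seg -v
 *	> vmem_addr::walk vmem_seg | ::vmem_seg -T SPAN -s
 */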
30550Sstevel@tonic-gate /*ARGSUSED*/
30560Sstevel@tonic-gate int
30570Sstevel@tonic-gate vmem_seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
30580Sstevel@tonic-gate {
30590Sstevel@tonic-gate 	vmem_seg_t vs;
30600Sstevel@tonic-gate 	uintptr_t *stk = vs.vs_stack;
30610Sstevel@tonic-gate 	uintptr_t sz;
30620Sstevel@tonic-gate 	uint8_t t;
30630Sstevel@tonic-gate 	const char *type = NULL;
30640Sstevel@tonic-gate 	GElf_Sym sym;
30650Sstevel@tonic-gate 	char c[MDB_SYM_NAMLEN];
30660Sstevel@tonic-gate 	int no_debug;
30670Sstevel@tonic-gate 	int i;
30680Sstevel@tonic-gate 	int depth;
30690Sstevel@tonic-gate 	uintptr_t laddr, haddr;
30700Sstevel@tonic-gate 
30710Sstevel@tonic-gate 	uintptr_t caller = NULL, thread = NULL;
30720Sstevel@tonic-gate 	uintptr_t minsize = 0, maxsize = 0;
30730Sstevel@tonic-gate 
30740Sstevel@tonic-gate 	hrtime_t earliest = 0, latest = 0;
30750Sstevel@tonic-gate 
30760Sstevel@tonic-gate 	uint_t size = 0;
30770Sstevel@tonic-gate 	uint_t verbose = 0;
30780Sstevel@tonic-gate 
30790Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
30800Sstevel@tonic-gate 		return (DCMD_USAGE);
30810Sstevel@tonic-gate 
30820Sstevel@tonic-gate 	if (mdb_getopts(argc, argv,
30830Sstevel@tonic-gate 	    'c', MDB_OPT_UINTPTR, &caller,
30840Sstevel@tonic-gate 	    'e', MDB_OPT_UINT64, &earliest,
30850Sstevel@tonic-gate 	    'l', MDB_OPT_UINT64, &latest,
30860Sstevel@tonic-gate 	    's', MDB_OPT_SETBITS, TRUE, &size,
30870Sstevel@tonic-gate 	    'm', MDB_OPT_UINTPTR, &minsize,
30880Sstevel@tonic-gate 	    'M', MDB_OPT_UINTPTR, &maxsize,
30890Sstevel@tonic-gate 	    't', MDB_OPT_UINTPTR, &thread,
30900Sstevel@tonic-gate 	    'T', MDB_OPT_STR, &type,
30910Sstevel@tonic-gate 	    'v', MDB_OPT_SETBITS, TRUE, &verbose,
30920Sstevel@tonic-gate 	    NULL) != argc)
30930Sstevel@tonic-gate 		return (DCMD_USAGE);
30940Sstevel@tonic-gate 
30950Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
30960Sstevel@tonic-gate 		if (verbose) {
30970Sstevel@tonic-gate 			mdb_printf("%16s %4s %16s %16s %16s\n"
30980Sstevel@tonic-gate 			    "%<u>%16s %4s %16s %16s %16s%</u>\n",
30990Sstevel@tonic-gate 			    "ADDR", "TYPE", "START", "END", "SIZE",
31000Sstevel@tonic-gate 			    "", "", "THREAD", "TIMESTAMP", "");
31010Sstevel@tonic-gate 		} else {
31020Sstevel@tonic-gate 			mdb_printf("%?s %4s %?s %?s %s\n", "ADDR", "TYPE",
31030Sstevel@tonic-gate 			    "START", size ? "SIZE" : "END", "WHO");
31040Sstevel@tonic-gate 		}
31050Sstevel@tonic-gate 	}
31060Sstevel@tonic-gate 
31070Sstevel@tonic-gate 	if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
31080Sstevel@tonic-gate 		mdb_warn("couldn't read vmem_seg at %p", addr);
31090Sstevel@tonic-gate 		return (DCMD_ERR);
31100Sstevel@tonic-gate 	}
31110Sstevel@tonic-gate 
31120Sstevel@tonic-gate 	if (type != NULL) {
31130Sstevel@tonic-gate 		if (strcmp(type, "ALLC") == 0 || strcmp(type, "ALLOC") == 0)
31140Sstevel@tonic-gate 			t = VMEM_ALLOC;
31150Sstevel@tonic-gate 		else if (strcmp(type, "FREE") == 0)
31160Sstevel@tonic-gate 			t = VMEM_FREE;
31170Sstevel@tonic-gate 		else if (strcmp(type, "SPAN") == 0)
31180Sstevel@tonic-gate 			t = VMEM_SPAN;
31190Sstevel@tonic-gate 		else if (strcmp(type, "ROTR") == 0 ||
31200Sstevel@tonic-gate 		    strcmp(type, "ROTOR") == 0)
31210Sstevel@tonic-gate 			t = VMEM_ROTOR;
31220Sstevel@tonic-gate 		else if (strcmp(type, "WLKR") == 0 ||
31230Sstevel@tonic-gate 		    strcmp(type, "WALKER") == 0)
31240Sstevel@tonic-gate 			t = VMEM_WALKER;
31250Sstevel@tonic-gate 		else {
31260Sstevel@tonic-gate 			mdb_warn("\"%s\" is not a recognized vmem_seg type\n",
31270Sstevel@tonic-gate 			    type);
31280Sstevel@tonic-gate 			return (DCMD_ERR);
31290Sstevel@tonic-gate 		}
31300Sstevel@tonic-gate 
31310Sstevel@tonic-gate 		if (vs.vs_type != t)
31320Sstevel@tonic-gate 			return (DCMD_OK);
31330Sstevel@tonic-gate 	}
31340Sstevel@tonic-gate 
31350Sstevel@tonic-gate 	sz = vs.vs_end - vs.vs_start;
31360Sstevel@tonic-gate 
31370Sstevel@tonic-gate 	if (minsize != 0 && sz < minsize)
31380Sstevel@tonic-gate 		return (DCMD_OK);
31390Sstevel@tonic-gate 
31400Sstevel@tonic-gate 	if (maxsize != 0 && sz > maxsize)
31410Sstevel@tonic-gate 		return (DCMD_OK);
31420Sstevel@tonic-gate 
31430Sstevel@tonic-gate 	t = vs.vs_type;
31440Sstevel@tonic-gate 	depth = vs.vs_depth;
31450Sstevel@tonic-gate 
31460Sstevel@tonic-gate 	/*
31470Sstevel@tonic-gate 	 * debug info, when present, is only accurate for VMEM_ALLOC segments
31480Sstevel@tonic-gate 	 */
31490Sstevel@tonic-gate 	no_debug = (t != VMEM_ALLOC) ||
31500Sstevel@tonic-gate 	    (depth == 0 || depth > VMEM_STACK_DEPTH);
31510Sstevel@tonic-gate 
31520Sstevel@tonic-gate 	if (no_debug) {
31530Sstevel@tonic-gate 		if (caller != NULL || thread != NULL || earliest != 0 ||
31540Sstevel@tonic-gate 		    latest != 0)
31550Sstevel@tonic-gate 			return (DCMD_OK);		/* not enough info */
31560Sstevel@tonic-gate 	} else {
31570Sstevel@tonic-gate 		if (caller != NULL) {
31580Sstevel@tonic-gate 			laddr = caller;
31590Sstevel@tonic-gate 			haddr = caller + sizeof (caller);
31600Sstevel@tonic-gate 
31610Sstevel@tonic-gate 			if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c,
31620Sstevel@tonic-gate 			    sizeof (c), &sym) != -1 &&
31630Sstevel@tonic-gate 			    caller == (uintptr_t)sym.st_value) {
31640Sstevel@tonic-gate 				/*
31650Sstevel@tonic-gate 				 * We were provided an exact symbol value; any
31660Sstevel@tonic-gate 				 * address in the function is valid.
31670Sstevel@tonic-gate 				 */
31680Sstevel@tonic-gate 				laddr = (uintptr_t)sym.st_value;
31690Sstevel@tonic-gate 				haddr = (uintptr_t)sym.st_value + sym.st_size;
31700Sstevel@tonic-gate 			}
31710Sstevel@tonic-gate 
31720Sstevel@tonic-gate 			for (i = 0; i < depth; i++)
31730Sstevel@tonic-gate 				if (vs.vs_stack[i] >= laddr &&
31740Sstevel@tonic-gate 				    vs.vs_stack[i] < haddr)
31750Sstevel@tonic-gate 					break;
31760Sstevel@tonic-gate 
31770Sstevel@tonic-gate 			if (i == depth)
31780Sstevel@tonic-gate 				return (DCMD_OK);
31790Sstevel@tonic-gate 		}
31800Sstevel@tonic-gate 
31810Sstevel@tonic-gate 		if (thread != NULL && (uintptr_t)vs.vs_thread != thread)
31820Sstevel@tonic-gate 			return (DCMD_OK);
31830Sstevel@tonic-gate 
31840Sstevel@tonic-gate 		if (earliest != 0 && vs.vs_timestamp < earliest)
31850Sstevel@tonic-gate 			return (DCMD_OK);
31860Sstevel@tonic-gate 
31870Sstevel@tonic-gate 		if (latest != 0 && vs.vs_timestamp > latest)
31880Sstevel@tonic-gate 			return (DCMD_OK);
31890Sstevel@tonic-gate 	}
31900Sstevel@tonic-gate 
31910Sstevel@tonic-gate 	type = (t == VMEM_ALLOC ? "ALLC" :
31920Sstevel@tonic-gate 	    t == VMEM_FREE ? "FREE" :
31930Sstevel@tonic-gate 	    t == VMEM_SPAN ? "SPAN" :
31940Sstevel@tonic-gate 	    t == VMEM_ROTOR ? "ROTR" :
31950Sstevel@tonic-gate 	    t == VMEM_WALKER ? "WLKR" :
31960Sstevel@tonic-gate 	    "????");
31970Sstevel@tonic-gate 
31980Sstevel@tonic-gate 	if (flags & DCMD_PIPE_OUT) {
31990Sstevel@tonic-gate 		mdb_printf("%#r\n", addr);
32000Sstevel@tonic-gate 		return (DCMD_OK);
32010Sstevel@tonic-gate 	}
32020Sstevel@tonic-gate 
32030Sstevel@tonic-gate 	if (verbose) {
32040Sstevel@tonic-gate 		mdb_printf("%<b>%16p%</b> %4s %16p %16p %16d\n",
32050Sstevel@tonic-gate 		    addr, type, vs.vs_start, vs.vs_end, sz);
32060Sstevel@tonic-gate 
32070Sstevel@tonic-gate 		if (no_debug)
32080Sstevel@tonic-gate 			return (DCMD_OK);
32090Sstevel@tonic-gate 
32100Sstevel@tonic-gate 		mdb_printf("%16s %4s %16d %16llx\n",
32110Sstevel@tonic-gate 		    "", "", vs.vs_thread, vs.vs_timestamp);
32120Sstevel@tonic-gate 
32130Sstevel@tonic-gate 		mdb_inc_indent(17);
32140Sstevel@tonic-gate 		for (i = 0; i < depth; i++) {
32150Sstevel@tonic-gate 			mdb_printf("%a\n", stk[i]);
32160Sstevel@tonic-gate 		}
32170Sstevel@tonic-gate 		mdb_dec_indent(17);
32180Sstevel@tonic-gate 		mdb_printf("\n");
32190Sstevel@tonic-gate 	} else {
32200Sstevel@tonic-gate 		mdb_printf("%0?p %4s %0?p %0?p", addr, type,
32210Sstevel@tonic-gate 		    vs.vs_start, size ? sz : vs.vs_end);
32220Sstevel@tonic-gate 
32230Sstevel@tonic-gate 		if (no_debug) {
32240Sstevel@tonic-gate 			mdb_printf("\n");
32250Sstevel@tonic-gate 			return (DCMD_OK);
32260Sstevel@tonic-gate 		}
32270Sstevel@tonic-gate 
32280Sstevel@tonic-gate 		for (i = 0; i < depth; i++) {
32290Sstevel@tonic-gate 			if (mdb_lookup_by_addr(stk[i], MDB_SYM_FUZZY,
32300Sstevel@tonic-gate 			    c, sizeof (c), &sym) == -1)
32310Sstevel@tonic-gate 				continue;
32320Sstevel@tonic-gate 			if (is_umem_sym(c, "vmem_"))
32330Sstevel@tonic-gate 				continue;
32340Sstevel@tonic-gate 			break;
32350Sstevel@tonic-gate 		}
32360Sstevel@tonic-gate 		if (i < depth)
			mdb_printf(" %a\n", stk[i]);
		else
			mdb_printf("\n");
32370Sstevel@tonic-gate 	}
32380Sstevel@tonic-gate 	return (DCMD_OK);
32390Sstevel@tonic-gate }
32400Sstevel@tonic-gate 
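/*
 * showbc() prints one transaction log entry.  *newest latches the first
 * timestamp seen (the walk hands us the most recent record first), so
 * each entry is reported as "T-sec.nsec": its age relative to the newest
 * entry in the log.
 */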
32410Sstevel@tonic-gate /*ARGSUSED*/
32420Sstevel@tonic-gate static int
32430Sstevel@tonic-gate showbc(uintptr_t addr, const umem_bufctl_audit_t *bcp, hrtime_t *newest)
32440Sstevel@tonic-gate {
32450Sstevel@tonic-gate 	char name[UMEM_CACHE_NAMELEN + 1];
32460Sstevel@tonic-gate 	hrtime_t delta;
32470Sstevel@tonic-gate 	int i, depth;
32480Sstevel@tonic-gate 
32490Sstevel@tonic-gate 	if (bcp->bc_timestamp == 0)
32500Sstevel@tonic-gate 		return (WALK_DONE);
32510Sstevel@tonic-gate 
32520Sstevel@tonic-gate 	if (*newest == 0)
32530Sstevel@tonic-gate 		*newest = bcp->bc_timestamp;
32540Sstevel@tonic-gate 
32550Sstevel@tonic-gate 	delta = *newest - bcp->bc_timestamp;
32560Sstevel@tonic-gate 	depth = MIN(bcp->bc_depth, umem_stack_depth);
32570Sstevel@tonic-gate 
32580Sstevel@tonic-gate 	if (mdb_readstr(name, sizeof (name), (uintptr_t)
32590Sstevel@tonic-gate 	    &bcp->bc_cache->cache_name) <= 0)
32600Sstevel@tonic-gate 		(void) mdb_snprintf(name, sizeof (name), "%a", bcp->bc_cache);
32610Sstevel@tonic-gate 
32620Sstevel@tonic-gate 	mdb_printf("\nT-%lld.%09lld  addr=%p  %s\n",
32630Sstevel@tonic-gate 	    delta / NANOSEC, delta % NANOSEC, bcp->bc_addr, name);
32640Sstevel@tonic-gate 
32650Sstevel@tonic-gate 	for (i = 0; i < depth; i++)
32660Sstevel@tonic-gate 		mdb_printf("\t %a\n", bcp->bc_stack[i]);
32670Sstevel@tonic-gate 
32680Sstevel@tonic-gate 	return (WALK_NEXT);
32690Sstevel@tonic-gate }
32700Sstevel@tonic-gate 
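/*
 * ::umalog displays a umem log; an optional argument selects which one:
 *
 *	> ::umalog		(umem_transaction_log)
 *	> ::umalog fail		(umem_failure_log)
 *	> ::umalog slab		(umem_slab_log)
 */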
32710Sstevel@tonic-gate int
32720Sstevel@tonic-gate umalog(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
32730Sstevel@tonic-gate {
32740Sstevel@tonic-gate 	const char *logname = "umem_transaction_log";
32750Sstevel@tonic-gate 	hrtime_t newest = 0;
32760Sstevel@tonic-gate 
32770Sstevel@tonic-gate 	if ((flags & DCMD_ADDRSPEC) || argc > 1)
32780Sstevel@tonic-gate 		return (DCMD_USAGE);
32790Sstevel@tonic-gate 
32800Sstevel@tonic-gate 	if (argc > 0) {
32810Sstevel@tonic-gate 		if (argv->a_type != MDB_TYPE_STRING)
32820Sstevel@tonic-gate 			return (DCMD_USAGE);
32830Sstevel@tonic-gate 		if (strcmp(argv->a_un.a_str, "fail") == 0)
32840Sstevel@tonic-gate 			logname = "umem_failure_log";
32850Sstevel@tonic-gate 		else if (strcmp(argv->a_un.a_str, "slab") == 0)
32860Sstevel@tonic-gate 			logname = "umem_slab_log";
32870Sstevel@tonic-gate 		else
32880Sstevel@tonic-gate 			return (DCMD_USAGE);
32890Sstevel@tonic-gate 	}
32900Sstevel@tonic-gate 
32910Sstevel@tonic-gate 	if (umem_readvar(&addr, logname) == -1) {
32920Sstevel@tonic-gate 		mdb_warn("failed to read %s log header pointer", logname);
32930Sstevel@tonic-gate 		return (DCMD_ERR);
32940Sstevel@tonic-gate 	}
32950Sstevel@tonic-gate 
32960Sstevel@tonic-gate 	if (mdb_pwalk("umem_log", (mdb_walk_cb_t)showbc, &newest, addr) == -1) {
32970Sstevel@tonic-gate 		mdb_warn("failed to walk umem log");
32980Sstevel@tonic-gate 		return (DCMD_ERR);
32990Sstevel@tonic-gate 	}
33000Sstevel@tonic-gate 
33010Sstevel@tonic-gate 	return (DCMD_OK);
33020Sstevel@tonic-gate }
33030Sstevel@tonic-gate 
33040Sstevel@tonic-gate /*
33050Sstevel@tonic-gate  * As the final lure for die-hard crash(1M) users, we provide ::umausers here.
33060Sstevel@tonic-gate  * The first piece is a structure which we use to accumulate umem_cache_t
33070Sstevel@tonic-gate  * addresses of interest.  The umc_add is used as a callback for the umem_cache
33080Sstevel@tonic-gate  * walker; we either add all caches, or ones named explicitly as arguments.
33090Sstevel@tonic-gate  */
33100Sstevel@tonic-gate 
33110Sstevel@tonic-gate typedef struct umclist {
33120Sstevel@tonic-gate 	const char *umc_name;			/* Name to match (or NULL) */
33130Sstevel@tonic-gate 	uintptr_t *umc_caches;			/* List of umem_cache_t addrs */
33140Sstevel@tonic-gate 	int umc_nelems;				/* Num entries in umc_caches */
33150Sstevel@tonic-gate 	int umc_size;				/* Size of umc_caches array */
33160Sstevel@tonic-gate } umclist_t;
33170Sstevel@tonic-gate 
33180Sstevel@tonic-gate static int
33190Sstevel@tonic-gate umc_add(uintptr_t addr, const umem_cache_t *cp, umclist_t *umc)
33200Sstevel@tonic-gate {
33210Sstevel@tonic-gate 	void *p;
33220Sstevel@tonic-gate 	int s;
33230Sstevel@tonic-gate 
33240Sstevel@tonic-gate 	if (umc->umc_name == NULL ||
33250Sstevel@tonic-gate 	    strcmp(cp->cache_name, umc->umc_name) == 0) {
33260Sstevel@tonic-gate 		/*
33270Sstevel@tonic-gate 		 * If we have a match, grow our array (if necessary), and then
33280Sstevel@tonic-gate 		 * add the virtual address of the matching cache to our list.
33290Sstevel@tonic-gate 		 */
33300Sstevel@tonic-gate 		if (umc->umc_nelems >= umc->umc_size) {
33310Sstevel@tonic-gate 			s = umc->umc_size ? umc->umc_size * 2 : 256;
33320Sstevel@tonic-gate 			p = mdb_alloc(sizeof (uintptr_t) * s, UM_SLEEP | UM_GC);
33330Sstevel@tonic-gate 
33340Sstevel@tonic-gate 			bcopy(umc->umc_caches, p,
33350Sstevel@tonic-gate 			    sizeof (uintptr_t) * umc->umc_size);
33360Sstevel@tonic-gate 
33370Sstevel@tonic-gate 			umc->umc_caches = p;
33380Sstevel@tonic-gate 			umc->umc_size = s;
33390Sstevel@tonic-gate 		}
33400Sstevel@tonic-gate 
33410Sstevel@tonic-gate 		umc->umc_caches[umc->umc_nelems++] = addr;
33420Sstevel@tonic-gate 		return (umc->umc_name ? WALK_DONE : WALK_NEXT);
33430Sstevel@tonic-gate 	}
33440Sstevel@tonic-gate 
33450Sstevel@tonic-gate 	return (WALK_NEXT);
33460Sstevel@tonic-gate }
33470Sstevel@tonic-gate 
33480Sstevel@tonic-gate /*
33490Sstevel@tonic-gate  * The second piece of ::umausers is a hash table of allocations.  Each
33500Sstevel@tonic-gate  * allocation owner is identified by its stack trace and data_size.  We then
33510Sstevel@tonic-gate  * track the total bytes of all such allocations, and the number of allocations
33520Sstevel@tonic-gate  * to report at the end.  Once we have a list of caches, we walk through the
33530Sstevel@tonic-gate  * allocated bufctls of each, and update our hash table accordingly.
33540Sstevel@tonic-gate  */
33550Sstevel@tonic-gate 
33560Sstevel@tonic-gate typedef struct umowner {
33570Sstevel@tonic-gate 	struct umowner *umo_head;		/* First hash elt in bucket */
33580Sstevel@tonic-gate 	struct umowner *umo_next;		/* Next hash elt in chain */
33590Sstevel@tonic-gate 	size_t umo_signature;			/* Hash table signature */
33600Sstevel@tonic-gate 	uint_t umo_num;				/* Number of allocations */
33610Sstevel@tonic-gate 	size_t umo_data_size;			/* Size of each allocation */
33620Sstevel@tonic-gate 	size_t umo_total_size;			/* Total bytes of allocation */
33630Sstevel@tonic-gate 	int umo_depth;				/* Depth of stack trace */
33640Sstevel@tonic-gate 	uintptr_t *umo_stack;			/* Stack trace */
33650Sstevel@tonic-gate } umowner_t;
33660Sstevel@tonic-gate 
33670Sstevel@tonic-gate typedef struct umusers {
33680Sstevel@tonic-gate 	const umem_cache_t *umu_cache;		/* Current umem cache */
33690Sstevel@tonic-gate 	umowner_t *umu_hash;			/* Hash table of owners */
33700Sstevel@tonic-gate 	uintptr_t *umu_stacks;			/* stacks for owners */
33710Sstevel@tonic-gate 	int umu_nelems;				/* Number of entries in use */
33720Sstevel@tonic-gate 	int umu_size;				/* Total number of entries */
33730Sstevel@tonic-gate } umusers_t;
33740Sstevel@tonic-gate 
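/*
 * umu_add() buckets one allocation by owner.  The hash signature is the
 * data size plus the sum of the stack PCs, and the table size is always a
 * power of two, so "signature & (umu_size - 1)" selects a bucket.  Growth
 * must relink every chain, since bucket indices change with the mask.
 */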
33750Sstevel@tonic-gate static void
33760Sstevel@tonic-gate umu_add(umusers_t *umu, const umem_bufctl_audit_t *bcp,
33770Sstevel@tonic-gate     size_t size, size_t data_size)
33780Sstevel@tonic-gate {
33790Sstevel@tonic-gate 	int i, depth = MIN(bcp->bc_depth, umem_stack_depth);
33800Sstevel@tonic-gate 	size_t bucket, signature = data_size;
33810Sstevel@tonic-gate 	umowner_t *umo, *umoend;
33820Sstevel@tonic-gate 
33830Sstevel@tonic-gate 	/*
33840Sstevel@tonic-gate 	 * If the hash table is full, double its size and rehash everything.
33850Sstevel@tonic-gate 	 */
33860Sstevel@tonic-gate 	if (umu->umu_nelems >= umu->umu_size) {
33870Sstevel@tonic-gate 		int s = umu->umu_size ? umu->umu_size * 2 : 1024;
33880Sstevel@tonic-gate 		size_t umowner_size = sizeof (umowner_t);
33890Sstevel@tonic-gate 		size_t trace_size = umem_stack_depth * sizeof (uintptr_t);
33900Sstevel@tonic-gate 		uintptr_t *new_stacks;
33910Sstevel@tonic-gate 
33920Sstevel@tonic-gate 		umo = mdb_alloc(umowner_size * s, UM_SLEEP | UM_GC);
33930Sstevel@tonic-gate 		new_stacks = mdb_alloc(trace_size * s, UM_SLEEP | UM_GC);
33940Sstevel@tonic-gate 
33950Sstevel@tonic-gate 		bcopy(umu->umu_hash, umo, umowner_size * umu->umu_size);
33960Sstevel@tonic-gate 		bcopy(umu->umu_stacks, new_stacks, trace_size * umu->umu_size);
33970Sstevel@tonic-gate 		umu->umu_hash = umo;
33980Sstevel@tonic-gate 		umu->umu_stacks = new_stacks;
33990Sstevel@tonic-gate 		umu->umu_size = s;
34000Sstevel@tonic-gate 
34010Sstevel@tonic-gate 		umoend = umu->umu_hash + umu->umu_size;
34020Sstevel@tonic-gate 		for (umo = umu->umu_hash; umo < umoend; umo++) {
34030Sstevel@tonic-gate 			umo->umo_head = NULL;
34040Sstevel@tonic-gate 			umo->umo_stack = &umu->umu_stacks[
34050Sstevel@tonic-gate 			    umem_stack_depth * (umo - umu->umu_hash)];
34060Sstevel@tonic-gate 		}
34070Sstevel@tonic-gate 
34080Sstevel@tonic-gate 		umoend = umu->umu_hash + umu->umu_nelems;
34090Sstevel@tonic-gate 		for (umo = umu->umu_hash; umo < umoend; umo++) {
34100Sstevel@tonic-gate 			bucket = umo->umo_signature & (umu->umu_size - 1);
34110Sstevel@tonic-gate 			umo->umo_next = umu->umu_hash[bucket].umo_head;
34120Sstevel@tonic-gate 			umu->umu_hash[bucket].umo_head = umo;
34130Sstevel@tonic-gate 		}
34140Sstevel@tonic-gate 	}
34150Sstevel@tonic-gate 
34160Sstevel@tonic-gate 	/*
34170Sstevel@tonic-gate 	 * Finish computing the hash signature from the stack trace, and then
34180Sstevel@tonic-gate 	 * see if the owner is in the hash table.  If so, update our stats.
34190Sstevel@tonic-gate 	 */
34200Sstevel@tonic-gate 	for (i = 0; i < depth; i++)
34210Sstevel@tonic-gate 		signature += bcp->bc_stack[i];
34220Sstevel@tonic-gate 
34230Sstevel@tonic-gate 	bucket = signature & (umu->umu_size - 1);
34240Sstevel@tonic-gate 
34250Sstevel@tonic-gate 	for (umo = umu->umu_hash[bucket].umo_head; umo; umo = umo->umo_next) {
34260Sstevel@tonic-gate 		if (umo->umo_signature == signature) {
34270Sstevel@tonic-gate 			size_t difference = 0;
34280Sstevel@tonic-gate 
34290Sstevel@tonic-gate 			difference |= umo->umo_data_size - data_size;
34300Sstevel@tonic-gate 			difference |= umo->umo_depth - depth;
34310Sstevel@tonic-gate 
34320Sstevel@tonic-gate 			for (i = 0; i < depth; i++) {
34330Sstevel@tonic-gate 				difference |= umo->umo_stack[i] -
34340Sstevel@tonic-gate 				    bcp->bc_stack[i];
34350Sstevel@tonic-gate 			}
34360Sstevel@tonic-gate 
34370Sstevel@tonic-gate 			if (difference == 0) {
34380Sstevel@tonic-gate 				umo->umo_total_size += size;
34390Sstevel@tonic-gate 				umo->umo_num++;
34400Sstevel@tonic-gate 				return;
34410Sstevel@tonic-gate 			}
34420Sstevel@tonic-gate 		}
34430Sstevel@tonic-gate 	}
34440Sstevel@tonic-gate 
34450Sstevel@tonic-gate 	/*
34460Sstevel@tonic-gate 	 * If the owner is not yet hashed, grab the next element and fill it
34470Sstevel@tonic-gate 	 * in based on the allocation information.
34480Sstevel@tonic-gate 	 */
34490Sstevel@tonic-gate 	umo = &umu->umu_hash[umu->umu_nelems++];
34500Sstevel@tonic-gate 	umo->umo_next = umu->umu_hash[bucket].umo_head;
34510Sstevel@tonic-gate 	umu->umu_hash[bucket].umo_head = umo;
34520Sstevel@tonic-gate 
34530Sstevel@tonic-gate 	umo->umo_signature = signature;
34540Sstevel@tonic-gate 	umo->umo_num = 1;
34550Sstevel@tonic-gate 	umo->umo_data_size = data_size;
34560Sstevel@tonic-gate 	umo->umo_total_size = size;
34570Sstevel@tonic-gate 	umo->umo_depth = depth;
34580Sstevel@tonic-gate 
34590Sstevel@tonic-gate 	for (i = 0; i < depth; i++)
34600Sstevel@tonic-gate 		umo->umo_stack[i] = bcp->bc_stack[i];
34610Sstevel@tonic-gate }
34620Sstevel@tonic-gate 
34630Sstevel@tonic-gate /*
34640Sstevel@tonic-gate  * When ::umausers is invoked without the -f flag, we simply update our hash
34650Sstevel@tonic-gate  * table with the information from each allocated bufctl.
34660Sstevel@tonic-gate  */
34670Sstevel@tonic-gate /*ARGSUSED*/
34680Sstevel@tonic-gate static int
34690Sstevel@tonic-gate umause1(uintptr_t addr, const umem_bufctl_audit_t *bcp, umusers_t *umu)
34700Sstevel@tonic-gate {
34710Sstevel@tonic-gate 	const umem_cache_t *cp = umu->umu_cache;
34720Sstevel@tonic-gate 
34730Sstevel@tonic-gate 	umu_add(umu, bcp, cp->cache_bufsize, cp->cache_bufsize);
34740Sstevel@tonic-gate 	return (WALK_NEXT);
34750Sstevel@tonic-gate }
34760Sstevel@tonic-gate 
34770Sstevel@tonic-gate /*
34780Sstevel@tonic-gate  * When ::umausers is invoked with the -f flag, we print out the information
34790Sstevel@tonic-gate  * for each bufctl as well as updating the hash table.
34800Sstevel@tonic-gate  */
34810Sstevel@tonic-gate static int
34820Sstevel@tonic-gate umause2(uintptr_t addr, const umem_bufctl_audit_t *bcp, umusers_t *umu)
34830Sstevel@tonic-gate {
34840Sstevel@tonic-gate 	int i, depth = MIN(bcp->bc_depth, umem_stack_depth);
34850Sstevel@tonic-gate 	const umem_cache_t *cp = umu->umu_cache;
34860Sstevel@tonic-gate 
34870Sstevel@tonic-gate 	mdb_printf("size %d, addr %p, thread %p, cache %s\n",
34880Sstevel@tonic-gate 	    cp->cache_bufsize, addr, bcp->bc_thread, cp->cache_name);
34890Sstevel@tonic-gate 
34900Sstevel@tonic-gate 	for (i = 0; i < depth; i++)
34910Sstevel@tonic-gate 		mdb_printf("\t %a\n", bcp->bc_stack[i]);
34920Sstevel@tonic-gate 
34930Sstevel@tonic-gate 	umu_add(umu, bcp, cp->cache_bufsize, cp->cache_bufsize);
34940Sstevel@tonic-gate 	return (WALK_NEXT);
34950Sstevel@tonic-gate }
34960Sstevel@tonic-gate 
34970Sstevel@tonic-gate /*
34980Sstevel@tonic-gate  * We sort our results by allocation size before printing them.
34990Sstevel@tonic-gate  */
35000Sstevel@tonic-gate static int
35010Sstevel@tonic-gate umownercmp(const void *lp, const void *rp)
35020Sstevel@tonic-gate {
35030Sstevel@tonic-gate 	const umowner_t *lhs = lp;
35040Sstevel@tonic-gate 	const umowner_t *rhs = rp;
35050Sstevel@tonic-gate 
35060Sstevel@tonic-gate 	return (rhs->umo_total_size - lhs->umo_total_size);
35070Sstevel@tonic-gate }
35080Sstevel@tonic-gate 
35090Sstevel@tonic-gate /*
35100Sstevel@tonic-gate  * The main engine of ::umausers is relatively straightforward: First we
35110Sstevel@tonic-gate  * accumulate our list of umem_cache_t addresses into the umclist_t. Next we
35120Sstevel@tonic-gate  * iterate over the allocated bufctls of each cache in the list.  Finally,
35130Sstevel@tonic-gate  * we sort and print our results.
35140Sstevel@tonic-gate  */
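/*
 * Example invocations (the cache name is illustrative):
 *
 *	> ::umausers			(all UMF_AUDIT caches)
 *	> ::umausers umem_alloc_256	(a single cache, by name)
 *	> ::umausers -ef		(include small users, print stacks)
 */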
35150Sstevel@tonic-gate /*ARGSUSED*/
35160Sstevel@tonic-gate int
35170Sstevel@tonic-gate umausers(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
35180Sstevel@tonic-gate {
35190Sstevel@tonic-gate 	int mem_threshold = 8192;	/* Minimum # bytes for printing */
35200Sstevel@tonic-gate 	int cnt_threshold = 100;	/* Minimum # blocks for printing */
35210Sstevel@tonic-gate 	int audited_caches = 0;		/* Number of UMF_AUDIT caches found */
35220Sstevel@tonic-gate 	int do_all_caches = 1;		/* Do all caches (no arguments) */
35230Sstevel@tonic-gate 	int opt_e = FALSE;		/* Include "small" users */
35240Sstevel@tonic-gate 	int opt_f = FALSE;		/* Print stack traces */
35250Sstevel@tonic-gate 
35260Sstevel@tonic-gate 	mdb_walk_cb_t callback = (mdb_walk_cb_t)umause1;
35270Sstevel@tonic-gate 	umowner_t *umo, *umoend;
35280Sstevel@tonic-gate 	int i, oelems;
35290Sstevel@tonic-gate 
35300Sstevel@tonic-gate 	umclist_t umc;
35310Sstevel@tonic-gate 	umusers_t umu;
35320Sstevel@tonic-gate 
35330Sstevel@tonic-gate 	if (flags & DCMD_ADDRSPEC)
35340Sstevel@tonic-gate 		return (DCMD_USAGE);
35350Sstevel@tonic-gate 
35360Sstevel@tonic-gate 	bzero(&umc, sizeof (umc));
35370Sstevel@tonic-gate 	bzero(&umu, sizeof (umu));
35380Sstevel@tonic-gate 
35390Sstevel@tonic-gate 	while ((i = mdb_getopts(argc, argv,
35400Sstevel@tonic-gate 	    'e', MDB_OPT_SETBITS, TRUE, &opt_e,
35410Sstevel@tonic-gate 	    'f', MDB_OPT_SETBITS, TRUE, &opt_f, NULL)) != argc) {
35420Sstevel@tonic-gate 
35430Sstevel@tonic-gate 		argv += i;	/* skip past options we just processed */
35440Sstevel@tonic-gate 		argc -= i;	/* adjust argc */
35450Sstevel@tonic-gate 
35460Sstevel@tonic-gate 		if (argv->a_type != MDB_TYPE_STRING || *argv->a_un.a_str == '-')
35470Sstevel@tonic-gate 			return (DCMD_USAGE);
35480Sstevel@tonic-gate 
35490Sstevel@tonic-gate 		oelems = umc.umc_nelems;
35500Sstevel@tonic-gate 		umc.umc_name = argv->a_un.a_str;
35510Sstevel@tonic-gate 		(void) mdb_walk("umem_cache", (mdb_walk_cb_t)umc_add, &umc);
35520Sstevel@tonic-gate 
35530Sstevel@tonic-gate 		if (umc.umc_nelems == oelems) {
35540Sstevel@tonic-gate 			mdb_warn("unknown umem cache: %s\n", umc.umc_name);
35550Sstevel@tonic-gate 			return (DCMD_ERR);
35560Sstevel@tonic-gate 		}
35570Sstevel@tonic-gate 
35580Sstevel@tonic-gate 		do_all_caches = 0;
35590Sstevel@tonic-gate 		argv++;
35600Sstevel@tonic-gate 		argc--;
35610Sstevel@tonic-gate 	}
35620Sstevel@tonic-gate 
35630Sstevel@tonic-gate 	if (opt_e)
35640Sstevel@tonic-gate 		mem_threshold = cnt_threshold = 0;
35650Sstevel@tonic-gate 
35660Sstevel@tonic-gate 	if (opt_f)
35670Sstevel@tonic-gate 		callback = (mdb_walk_cb_t)umause2;
35680Sstevel@tonic-gate 
35690Sstevel@tonic-gate 	if (do_all_caches) {
35700Sstevel@tonic-gate 		umc.umc_name = NULL; /* match all cache names */
35710Sstevel@tonic-gate 		(void) mdb_walk("umem_cache", (mdb_walk_cb_t)umc_add, &umc);
35720Sstevel@tonic-gate 	}
35730Sstevel@tonic-gate 
35740Sstevel@tonic-gate 	for (i = 0; i < umc.umc_nelems; i++) {
35750Sstevel@tonic-gate 		uintptr_t cp = umc.umc_caches[i];
35760Sstevel@tonic-gate 		umem_cache_t c;
35770Sstevel@tonic-gate 
35780Sstevel@tonic-gate 		if (mdb_vread(&c, sizeof (c), cp) == -1) {
35790Sstevel@tonic-gate 			mdb_warn("failed to read cache at %p", cp);
35800Sstevel@tonic-gate 			continue;
35810Sstevel@tonic-gate 		}
35820Sstevel@tonic-gate 
35830Sstevel@tonic-gate 		if (!(c.cache_flags & UMF_AUDIT)) {
35840Sstevel@tonic-gate 			if (!do_all_caches) {
35850Sstevel@tonic-gate 				mdb_warn("UMF_AUDIT is not enabled for %s\n",
35860Sstevel@tonic-gate 				    c.cache_name);
35870Sstevel@tonic-gate 			}
35880Sstevel@tonic-gate 			continue;
35890Sstevel@tonic-gate 		}
35900Sstevel@tonic-gate 
35910Sstevel@tonic-gate 		umu.umu_cache = &c;
35920Sstevel@tonic-gate 		(void) mdb_pwalk("bufctl", callback, &umu, cp);
35930Sstevel@tonic-gate 		audited_caches++;
35940Sstevel@tonic-gate 	}
35950Sstevel@tonic-gate 
35960Sstevel@tonic-gate 	if (audited_caches == 0 && do_all_caches) {
35970Sstevel@tonic-gate 		mdb_warn("UMF_AUDIT is not enabled for any caches\n");
35980Sstevel@tonic-gate 		return (DCMD_ERR);
35990Sstevel@tonic-gate 	}
36000Sstevel@tonic-gate 
36010Sstevel@tonic-gate 	qsort(umu.umu_hash, umu.umu_nelems, sizeof (umowner_t), umownercmp);
36020Sstevel@tonic-gate 	umoend = umu.umu_hash + umu.umu_nelems;
36030Sstevel@tonic-gate 
36040Sstevel@tonic-gate 	for (umo = umu.umu_hash; umo < umoend; umo++) {
36050Sstevel@tonic-gate 		if (umo->umo_total_size < mem_threshold &&
36060Sstevel@tonic-gate 		    umo->umo_num < cnt_threshold)
36070Sstevel@tonic-gate 			continue;
36080Sstevel@tonic-gate 		mdb_printf("%lu bytes for %u allocations with data size %lu:\n",
36090Sstevel@tonic-gate 		    umo->umo_total_size, umo->umo_num, umo->umo_data_size);
36100Sstevel@tonic-gate 		for (i = 0; i < umo->umo_depth; i++)
36110Sstevel@tonic-gate 			mdb_printf("\t %a\n", umo->umo_stack[i]);
36120Sstevel@tonic-gate 	}
36130Sstevel@tonic-gate 
36140Sstevel@tonic-gate 	return (DCMD_OK);
36150Sstevel@tonic-gate }
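
/*
 * Typical invocations of ::umausers (illustrative sketches; the cache
 * name is only an example, and auditing is typically enabled by running
 * the target with UMEM_DEBUG=audit):
 *
 *	> ::umausers			(large users across audited caches)
 *	> ::umausers -ef umem_alloc_64	(every user of one cache, with
 *					 full stack traces)
 */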
36161528Sjwadams 
36171528Sjwadams struct malloc_data {
36181528Sjwadams 	uint32_t malloc_size;
36191528Sjwadams 	uint32_t malloc_stat; /* == UMEM_MALLOC_ENCODE(state, malloc_size) */
36201528Sjwadams };
36211528Sjwadams 
36221528Sjwadams #ifdef _LP64
36231528Sjwadams #define	UMI_MAX_BUCKET		(UMEM_MAXBUF - 2*sizeof (struct malloc_data))
36241528Sjwadams #else
36251528Sjwadams #define	UMI_MAX_BUCKET		(UMEM_MAXBUF - sizeof (struct malloc_data))
36261528Sjwadams #endif
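
/*
 * Sketch of how malloc_stat validates a chunk (illustrative; the
 * UMEM_MALLOC_ENCODE()/UMEM_MALLOC_DECODE() macros and MALLOC_MAGIC are
 * defined in the libumem sources, and "md" is a struct malloc_data as
 * above):
 *
 *	md.malloc_stat = UMEM_MALLOC_ENCODE(MALLOC_MAGIC, md.malloc_size);
 *	...
 *	if (UMEM_MALLOC_DECODE(md.malloc_stat, md.malloc_size) ==
 *	    MALLOC_MAGIC)
 *		... the chunk is a live malloc(3C) buffer ...
 *
 * um_umem_buffer_cb() below performs exactly this decode-and-compare.
 */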
36271528Sjwadams 
36281528Sjwadams typedef struct umem_malloc_info {
36291528Sjwadams 	size_t um_total;	/* total allocated buffers */
36301528Sjwadams 	size_t um_malloc;	/* malloc buffers */
36311528Sjwadams 	size_t um_malloc_size;	/* sum of malloc buffer sizes */
36321528Sjwadams 	size_t um_malloc_overhead; /* sum of in-chunk overheads */
36331528Sjwadams 
36341528Sjwadams 	umem_cache_t *um_cp;
36351528Sjwadams 
36361528Sjwadams 	uint_t *um_bucket;
36371528Sjwadams } umem_malloc_info_t;
36381528Sjwadams 
36391528Sjwadams static void
36401528Sjwadams umem_malloc_print_dist(uint_t *um_bucket, size_t minmalloc, size_t maxmalloc,
36411528Sjwadams     size_t maxbuckets, size_t minbucketsize, int geometric)
36421528Sjwadams {
36434688Stomee 	uint64_t um_malloc;
36441528Sjwadams 	int minb = -1;
36451528Sjwadams 	int maxb = -1;
36461528Sjwadams 	int buckets;
36471528Sjwadams 	int nbucks;
36481528Sjwadams 	int i;
36491528Sjwadams 	int b;
36501528Sjwadams 	const int *distarray;
36511528Sjwadams 
36521528Sjwadams 	minb = (int)minmalloc;
36531528Sjwadams 	maxb = (int)maxmalloc;
36541528Sjwadams 
36551528Sjwadams 	nbucks = buckets = maxb - minb + 1;
36561528Sjwadams 
36571528Sjwadams 	um_malloc = 0;
36581528Sjwadams 	for (b = minb; b <= maxb; b++)
36591528Sjwadams 		um_malloc += um_bucket[b];
36601528Sjwadams 
36611528Sjwadams 	if (maxbuckets != 0)
36621528Sjwadams 		buckets = MIN(buckets, maxbuckets);
36631528Sjwadams 
36641528Sjwadams 	if (minbucketsize > 1) {
36651528Sjwadams 		buckets = MIN(buckets, nbucks/minbucketsize);
36661528Sjwadams 		if (buckets == 0) {
36671528Sjwadams 			buckets = 1;
36681528Sjwadams 			minbucketsize = nbucks;
36691528Sjwadams 		}
36701528Sjwadams 	}
36711528Sjwadams 
36721528Sjwadams 	if (geometric)
3673*4798Stomee 		distarray = dist_geometric(buckets, minb, maxb, minbucketsize);
36741528Sjwadams 	else
3675*4798Stomee 		distarray = dist_linear(buckets, minb, maxb);
3676*4798Stomee 
3677*4798Stomee 	dist_print_header("malloc size", 11, "count");
36781528Sjwadams 	for (i = 0; i < buckets; i++) {
3679*4798Stomee 		dist_print_bucket(distarray, i, um_bucket, um_malloc, 11);
36801528Sjwadams 	}
36811528Sjwadams 	mdb_printf("\n");
36821528Sjwadams }
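
/*
 * Binning example (hypothetical numbers): between minmalloc and
 * maxmalloc there is one um_bucket[] counter per byte of request size.
 * "-b 32" folds those counters into at most 32 printed rows;
 * dist_linear() spaces the rows evenly, while dist_geometric() ("-g")
 * grows them roughly exponentially, preserving resolution for the
 * small, common sizes.
 */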
36831528Sjwadams 
36841528Sjwadams /*
36851528Sjwadams  * A malloc()ed buffer looks like:
36861528Sjwadams  *
36871528Sjwadams  *	<----------- mi.malloc_size --->
36881528Sjwadams  *	<----------- cp.cache_bufsize ------------------>
36891528Sjwadams  *	<----------- cp.cache_chunksize -------------------------------->
36901528Sjwadams  *	+-------+-----------------------+---------------+---------------+
36911528Sjwadams  *	|/tag///| mallocsz		|/round-off/////|/debug info////|
36921528Sjwadams  *	+-------+-----------------------+---------------+---------------+
36931528Sjwadams  *		<-- usable space ------>
36941528Sjwadams  *
36951528Sjwadams  * mallocsz is the argument to malloc(3C).
36961528Sjwadams  * mi.malloc_size is the actual size passed to umem_alloc(), which
36971528Sjwadams  * is rounded up to the smallest available cache size, which is
36981528Sjwadams  * cache_bufsize.  If there is debugging or alignment overhead in
36991528Sjwadams  * the cache, that is reflected in a larger cache_chunksize.
37001528Sjwadams  *
37011528Sjwadams  * The tag at the beginning of the buffer is either 8 or 16 bytes,
37021528Sjwadams  * depending upon the ISA's alignment requirements.  For 32-bit allocations,
37031528Sjwadams  * it is always an 8-byte tag.  For 64-bit allocations larger than 8 bytes,
37041528Sjwadams  * the tag has 8 bytes of padding before it.
37051528Sjwadams  *
37061528Sjwadams  * 32-bit allocations, and 64-bit buffers <= 8 bytes:
37071528Sjwadams  *	+-------+-------+--------- ...
37081528Sjwadams  *	|/size//|/stat//| mallocsz ...
37091528Sjwadams  *	+-------+-------+--------- ...
37101528Sjwadams  *			^
37111528Sjwadams  *			pointer returned from malloc(3C)
37121528Sjwadams  *
37131528Sjwadams  * 64-bit buffers > 8 bytes:
37141528Sjwadams  *	+---------------+-------+-------+--------- ...
37151528Sjwadams  *	|/padding///////|/size//|/stat//| mallocsz ...
37161528Sjwadams  *	+---------------+-------+-------+--------- ...
37171528Sjwadams  *					^
37181528Sjwadams  *					pointer returned from malloc(3C)
37191528Sjwadams  *
37201528Sjwadams  * The "size" field is "malloc_size": mallocsz plus the tag/padding overhead.
37211528Sjwadams  * The "stat" field is derived from malloc_size, and functions as a
37221528Sjwadams  * validation that this buffer is actually from malloc(3C).
37231528Sjwadams  */
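/*
 * Worked example (hypothetical request): in a 32-bit process,
 * malloc(100) records malloc_size = 108 (the 100-byte request plus the
 * 8-byte tag) and passes 108 to umem_alloc(), which satisfies it from
 * the smallest umem_alloc_<size> cache with cache_bufsize >= 108.
 * um_umem_buffer_cb() inverts this: it reads the tag, validates the
 * magic, and recovers mallocsz = malloc_size - overhead = 108 - 8 = 100.
 */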
37241528Sjwadams /*ARGSUSED*/
37251528Sjwadams static int
37261528Sjwadams um_umem_buffer_cb(uintptr_t addr, void *buf, umem_malloc_info_t *ump)
37271528Sjwadams {
37281528Sjwadams 	struct malloc_data md;
37291528Sjwadams 	size_t m_addr = addr;
37301528Sjwadams 	size_t overhead = sizeof (md);
37311528Sjwadams 	size_t mallocsz;
37321528Sjwadams 
37331528Sjwadams 	ump->um_total++;
37341528Sjwadams 
37351528Sjwadams #ifdef _LP64
37361528Sjwadams 	if (ump->um_cp->cache_bufsize > UMEM_SECOND_ALIGN) {
37371528Sjwadams 		m_addr += overhead;
37381528Sjwadams 		overhead += sizeof (md);
37391528Sjwadams 	}
37401528Sjwadams #endif
37411528Sjwadams 
37421528Sjwadams 	if (mdb_vread(&md, sizeof (md), m_addr) == -1) {
37431528Sjwadams 		mdb_warn("unable to read malloc header at %p", m_addr);
37441528Sjwadams 		return (WALK_NEXT);
37451528Sjwadams 	}
37461528Sjwadams 
37471528Sjwadams 	switch (UMEM_MALLOC_DECODE(md.malloc_stat, md.malloc_size)) {
37481528Sjwadams 	case MALLOC_MAGIC:
37491528Sjwadams #ifdef _LP64
37501528Sjwadams 	case MALLOC_SECOND_MAGIC:
37511528Sjwadams #endif
37521528Sjwadams 		mallocsz = md.malloc_size - overhead;
37531528Sjwadams 
37541528Sjwadams 		ump->um_malloc++;
37551528Sjwadams 		ump->um_malloc_size += mallocsz;
37561528Sjwadams 		ump->um_malloc_overhead += overhead;
37571528Sjwadams 
37581528Sjwadams 		/* include round-off and debug overhead */
37591528Sjwadams 		ump->um_malloc_overhead +=
37601528Sjwadams 		    ump->um_cp->cache_chunksize - md.malloc_size;
37611528Sjwadams 
37621528Sjwadams 		if (ump->um_bucket != NULL && mallocsz <= UMI_MAX_BUCKET)
37631528Sjwadams 			ump->um_bucket[mallocsz]++;
37641528Sjwadams 
37651528Sjwadams 		break;
37661528Sjwadams 	default:
37671528Sjwadams 		break;
37681528Sjwadams 	}
37691528Sjwadams 
37701528Sjwadams 	return (WALK_NEXT);
37711528Sjwadams }
37721528Sjwadams 
37731528Sjwadams int
37741528Sjwadams get_umem_alloc_sizes(int **out, size_t *out_num)
37751528Sjwadams {
37761528Sjwadams 	GElf_Sym sym;
37771528Sjwadams 
37781528Sjwadams 	if (umem_lookup_by_name("umem_alloc_sizes", &sym) == -1) {
37791528Sjwadams 		mdb_warn("unable to look up umem_alloc_sizes");
37801528Sjwadams 		return (-1);
37811528Sjwadams 	}
37821528Sjwadams 
37831528Sjwadams 	*out = mdb_alloc(sym.st_size, UM_SLEEP | UM_GC);
37841528Sjwadams 	*out_num = sym.st_size / sizeof (int);
37851528Sjwadams 
37861528Sjwadams 	if (mdb_vread(*out, sym.st_size, sym.st_value) == -1) {
37871528Sjwadams 		mdb_warn("unable to read umem_alloc_sizes (%p)", sym.st_value);
37881528Sjwadams 		*out = NULL;
37891528Sjwadams 		return (-1);
37901528Sjwadams 	}
37911528Sjwadams 
37921528Sjwadams 	return (0);
37931528Sjwadams }
37941528Sjwadams 
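/*
 * Example caller of get_umem_alloc_sizes() (a sketch only; the array is
 * 0-terminated when it is not full, as the scan in umem_malloc_info()
 * below assumes):
 *
 *	int *sizes;
 *	size_t n, i;
 *
 *	if (get_umem_alloc_sizes(&sizes, &n) == 0) {
 *		for (i = 0; i < n && sizes[i] != 0; i++)
 *			mdb_printf("%d\n", sizes[i]);
 *	}
 */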
37951528Sjwadams 
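/*
 * umem_cache walker callback for ::umem_malloc_dist: visit only the
 * umem_alloc_<size> caches that back malloc(3C), and tally every
 * allocated buffer in each of them via um_umem_buffer_cb().
 */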
37961528Sjwadams static int
37971528Sjwadams um_umem_cache_cb(uintptr_t addr, umem_cache_t *cp, umem_malloc_info_t *ump)
37981528Sjwadams {
37991528Sjwadams 	if (strncmp(cp->cache_name, "umem_alloc_", strlen("umem_alloc_")) != 0)
38001528Sjwadams 		return (WALK_NEXT);
38011528Sjwadams 
38021528Sjwadams 	ump->um_cp = cp;
38031528Sjwadams 
38041528Sjwadams 	if (mdb_pwalk("umem", (mdb_walk_cb_t)um_umem_buffer_cb, ump, addr) ==
38051528Sjwadams 	    -1) {
38061528Sjwadams 		mdb_warn("can't walk 'umem' for cache %p", addr);
38071528Sjwadams 		return (WALK_ERR);
38081528Sjwadams 	}
38091528Sjwadams 
38101528Sjwadams 	return (WALK_NEXT);
38111528Sjwadams }
38121528Sjwadams 
38131528Sjwadams void
38141528Sjwadams umem_malloc_dist_help(void)
38151528Sjwadams {
38161528Sjwadams 	mdb_printf("%s\n",
38171528Sjwadams 	    "report distribution of outstanding malloc()s");
38181528Sjwadams 	mdb_dec_indent(2);
38191528Sjwadams 	mdb_printf("%<b>OPTIONS%</b>\n");
38201528Sjwadams 	mdb_inc_indent(2);
38211528Sjwadams 	mdb_printf("%s",
38221528Sjwadams "  -b maxbins\n"
38231528Sjwadams "        Use at most maxbins bins for the data\n"
38241528Sjwadams "  -B minbinsize\n"
38251528Sjwadams "        Make the bins at least minbinsize bytes apart\n"
38261528Sjwadams "  -d    dump the raw data out, without binning\n"
38271528Sjwadams "  -g    use geometric binning instead of linear binning\n");
38281528Sjwadams }
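
/*
 * Typical invocations (illustrative only):
 *
 *	> ::umem_malloc_dist		(linear bins over all malloc sizes)
 *	> ::umem_malloc_dist -g -b 20	(at most 20 geometric bins)
 *	> ::umem_malloc_dist -d		(raw per-size counts, unbinned)
 */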
38291528Sjwadams 
38301528Sjwadams /*ARGSUSED*/
38311528Sjwadams int
38321528Sjwadams umem_malloc_dist(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
38331528Sjwadams {
38341528Sjwadams 	umem_malloc_info_t mi;
38351528Sjwadams 	uint_t geometric = 0;
38361528Sjwadams 	uint_t dump = 0;
38371528Sjwadams 	size_t maxbuckets = 0;
38381528Sjwadams 	size_t minbucketsize = 0;
38391528Sjwadams 
38401528Sjwadams 	size_t minalloc = 0;
38411528Sjwadams 	size_t maxalloc = UMI_MAX_BUCKET;
38421528Sjwadams 
38431528Sjwadams 	if (flags & DCMD_ADDRSPEC)
38441528Sjwadams 		return (DCMD_USAGE);
38451528Sjwadams 
38461528Sjwadams 	if (mdb_getopts(argc, argv,
38471528Sjwadams 	    'd', MDB_OPT_SETBITS, TRUE, &dump,
38481528Sjwadams 	    'g', MDB_OPT_SETBITS, TRUE, &geometric,
38491528Sjwadams 	    'b', MDB_OPT_UINTPTR, &maxbuckets,
38501528Sjwadams 	    'B', MDB_OPT_UINTPTR, &minbucketsize,
38511528Sjwadams 	    0) != argc)
38521528Sjwadams 		return (DCMD_USAGE);
38531528Sjwadams 
38541528Sjwadams 	bzero(&mi, sizeof (mi));
38551528Sjwadams 	mi.um_bucket = mdb_zalloc((UMI_MAX_BUCKET + 1) * sizeof (*mi.um_bucket),
38561528Sjwadams 	    UM_SLEEP | UM_GC);
38571528Sjwadams 
38581528Sjwadams 	if (mdb_walk("umem_cache", (mdb_walk_cb_t)um_umem_cache_cb,
38591528Sjwadams 	    &mi) == -1) {
38601528Sjwadams 		mdb_warn("unable to walk 'umem_cache'");
38611528Sjwadams 		return (DCMD_ERR);
38621528Sjwadams 	}
38631528Sjwadams 
38641528Sjwadams 	if (dump) {
38651528Sjwadams 		int i;
38661528Sjwadams 		for (i = minalloc; i <= maxalloc; i++)
38671528Sjwadams 			mdb_printf("%d\t%d\n", i, mi.um_bucket[i]);
38681528Sjwadams 
38691528Sjwadams 		return (DCMD_OK);
38701528Sjwadams 	}
38711528Sjwadams 
38721528Sjwadams 	umem_malloc_print_dist(mi.um_bucket, minalloc, maxalloc,
38731528Sjwadams 	    maxbuckets, minbucketsize, geometric);
38741528Sjwadams 
38751528Sjwadams 	return (DCMD_OK);
38761528Sjwadams }
38771528Sjwadams 
38781528Sjwadams void
38791528Sjwadams umem_malloc_info_help(void)
38801528Sjwadams {
38811528Sjwadams 	mdb_printf("%s\n",
38821528Sjwadams 	    "report information about malloc()s by cache");
38831528Sjwadams 	mdb_dec_indent(2);
38841528Sjwadams 	mdb_printf("%<b>OPTIONS%</b>\n");
38851528Sjwadams 	mdb_inc_indent(2);
38861528Sjwadams 	mdb_printf("%s",
38871528Sjwadams "  -b maxbins\n"
38881528Sjwadams "        Use at most maxbins bins for the data\n"
38891528Sjwadams "  -B minbinsize\n"
38901528Sjwadams "        Make the bins at least minbinsize bytes apart\n"
38911528Sjwadams "  -d    dump the raw distribution data without binning\n"
38921528Sjwadams #ifndef _KMDB
38931528Sjwadams "  -g    use geometric binning instead of linear binning\n"
38941528Sjwadams #endif
38951528Sjwadams 	    "");
38961528Sjwadams }
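
/*
 * Typical invocations (illustrative only; "addr" stands for a
 * umem_cache_t address):
 *
 *	> ::umem_malloc_info		(one summary line per malloc cache)
 *	> addr::umem_malloc_info -g	(add a geometric distribution of
 *					 request sizes for that cache)
 */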
38971528Sjwadams int
38981528Sjwadams umem_malloc_info(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
38991528Sjwadams {
39001528Sjwadams 	umem_cache_t c;
39011528Sjwadams 	umem_malloc_info_t mi;
39021528Sjwadams 
39031528Sjwadams 	int skip = 0;
39041528Sjwadams 
39051528Sjwadams 	size_t maxmalloc;
39061528Sjwadams 	size_t overhead;
39071528Sjwadams 	size_t allocated;
39081528Sjwadams 	size_t avg_malloc;
39091528Sjwadams 	size_t overhead_pct;	/* 1000 * overhead_percent */
39101528Sjwadams 
39111528Sjwadams 	uint_t verbose = 0;
39121528Sjwadams 	uint_t dump = 0;
39131528Sjwadams 	uint_t geometric = 0;
39141528Sjwadams 	size_t maxbuckets = 0;
39151528Sjwadams 	size_t minbucketsize = 0;
39161528Sjwadams 
39171528Sjwadams 	int *alloc_sizes;
39181528Sjwadams 	int idx;
39191528Sjwadams 	size_t num;
39201528Sjwadams 	size_t minmalloc;
39211528Sjwadams 
39221528Sjwadams 	if (mdb_getopts(argc, argv,
39231528Sjwadams 	    'd', MDB_OPT_SETBITS, TRUE, &dump,
39241528Sjwadams 	    'g', MDB_OPT_SETBITS, TRUE, &geometric,
39251528Sjwadams 	    'b', MDB_OPT_UINTPTR, &maxbuckets,
39261528Sjwadams 	    'B', MDB_OPT_UINTPTR, &minbucketsize,
39271528Sjwadams 	    0) != argc)
39281528Sjwadams 		return (DCMD_USAGE);
39291528Sjwadams 
39301528Sjwadams 	if (dump || geometric || (maxbuckets != 0) || (minbucketsize != 0))
39311528Sjwadams 		verbose = 1;
39321528Sjwadams 
39331528Sjwadams 	if (!(flags & DCMD_ADDRSPEC)) {
39341528Sjwadams 		if (mdb_walk_dcmd("umem_cache", "umem_malloc_info",
39351528Sjwadams 		    argc, argv) == -1) {
39361528Sjwadams 			mdb_warn("can't walk umem_cache");
39371528Sjwadams 			return (DCMD_ERR);
39381528Sjwadams 		}
39391528Sjwadams 		return (DCMD_OK);
39401528Sjwadams 	}
39411528Sjwadams 
39421528Sjwadams 	if (mdb_vread(&c, sizeof (c), addr) == -1) {
39431528Sjwadams 		mdb_warn("unable to read cache at %p", addr);
39441528Sjwadams 		return (DCMD_ERR);
39451528Sjwadams 	}
39461528Sjwadams 
39471528Sjwadams 	if (strncmp(c.cache_name, "umem_alloc_", strlen("umem_alloc_")) != 0) {
39481528Sjwadams 		if (!(flags & DCMD_LOOP))
39491528Sjwadams 			mdb_warn("umem_malloc_info: cache \"%s\" is not used "
39501528Sjwadams 			    "by malloc()\n", c.cache_name);
39511528Sjwadams 		skip = 1;
39521528Sjwadams 	}
39531528Sjwadams 
39541528Sjwadams 	/*
39551528Sjwadams 	 * Normally, print the header only the first time.  In verbose mode,
39561528Sjwadams 	 * print the header for every non-skipped cache.
39571528Sjwadams 	 */
39581528Sjwadams 	if ((!verbose && DCMD_HDRSPEC(flags)) || (verbose && !skip))
39591528Sjwadams 		mdb_printf("%<ul>%-?s %6s %6s %8s %8s %10s %10s %6s%</ul>\n",
39601528Sjwadams 		    "CACHE", "BUFSZ", "MAXMAL",
39611528Sjwadams 		    "BUFMALLC", "AVG_MAL", "MALLOCED", "OVERHEAD", "%OVER");
39621528Sjwadams 
39631528Sjwadams 	if (skip)
39641528Sjwadams 		return (DCMD_OK);
39651528Sjwadams 
39661528Sjwadams 	maxmalloc = c.cache_bufsize - sizeof (struct malloc_data);
39671528Sjwadams #ifdef _LP64
39681528Sjwadams 	if (c.cache_bufsize > UMEM_SECOND_ALIGN)
39691528Sjwadams 		maxmalloc -= sizeof (struct malloc_data);
39701528Sjwadams #endif
39711528Sjwadams 
39721528Sjwadams 	bzero(&mi, sizeof (mi));
39731528Sjwadams 	mi.um_cp = &c;
39741528Sjwadams 	if (verbose)
39751528Sjwadams 		mi.um_bucket =
39761528Sjwadams 		    mdb_zalloc((UMI_MAX_BUCKET + 1) * sizeof (*mi.um_bucket),
39771528Sjwadams 		    UM_SLEEP | UM_GC);
39781528Sjwadams 
39791528Sjwadams 	if (mdb_pwalk("umem", (mdb_walk_cb_t)um_umem_buffer_cb, &mi, addr) ==
39801528Sjwadams 	    -1) {
39811528Sjwadams 		mdb_warn("can't walk 'umem'");
39821528Sjwadams 		return (DCMD_ERR);
39831528Sjwadams 	}
39841528Sjwadams 
39851528Sjwadams 	overhead = mi.um_malloc_overhead;
39861528Sjwadams 	allocated = mi.um_malloc_size;
39871528Sjwadams 
39881528Sjwadams 	/* do integer round off for the average */
39891528Sjwadams 	if (mi.um_malloc != 0)
39901528Sjwadams 		avg_malloc = (allocated + (mi.um_malloc - 1)/2) / mi.um_malloc;
39911528Sjwadams 	else
39921528Sjwadams 		avg_malloc = 0;
39931528Sjwadams 
39941528Sjwadams 	/*
39951528Sjwadams 	 * include per-slab overhead
39961528Sjwadams 	 *
39971528Sjwadams 	 * Each slab in a given cache is the same size, and has the same
39981528Sjwadams 	 * number of chunks in it;  we read in the first slab on the
39991528Sjwadams 	 * slab list to get the number of chunks for all slabs.  To
40001528Sjwadams 	 * compute the per-slab overhead, we just subtract the chunk usage
40011528Sjwadams 	 * from the slabsize:
40021528Sjwadams 	 *
40031528Sjwadams 	 * +------------+-------+-------+ ... --+-------+-------+-------+
40041528Sjwadams 	 * |////////////|	|	| ...	|	|///////|///////|
40051528Sjwadams 	 * |////color///| chunk	| chunk	| ...	| chunk	|/color/|/slab//|
40061528Sjwadams 	 * |////////////|	|	| ...	|	|///////|///////|
40071528Sjwadams 	 * +------------+-------+-------+ ... --+-------+-------+-------+
40081528Sjwadams 	 * |		\_______chunksize * chunks_____/		|
40091528Sjwadams 	 * \__________________________slabsize__________________________/
40101528Sjwadams 	 *
40111528Sjwadams 	 * For UMF_HASH caches, there is an additional source of overhead:
40121528Sjwadams 	 * the external umem_slab_t and per-chunk bufctl structures.  We
40131528Sjwadams 	 * include those in our per-slab overhead.
40141528Sjwadams 	 *
40151528Sjwadams 	 * Once we have a number for the per-slab overhead, we estimate
40161528Sjwadams 	 * the actual overhead by treating the malloc()ed buffers as if
40171528Sjwadams 	 * they were densely packed:
40181528Sjwadams 	 *
40191528Sjwadams 	 *	additional overhead = (# mallocs) * (per-slab) / (chunks);
40201528Sjwadams 	 *
40211528Sjwadams 	 * carefully ordering the multiply before the divide, to avoid
40221528Sjwadams 	 * round-off error.
40231528Sjwadams 	 */
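	/*
	 * For example (hypothetical slab, ignoring any UMF_HASH overhead):
	 * an 8192-byte slab holding 15 chunks of 512 bytes leaves
	 * 8192 - 15 * 512 = 512 bytes of per-slab overhead; with 10
	 * outstanding malloc()s in this cache, we attribute
	 * 512 * 10 / 15 = 341 of those bytes to malloc().
	 */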
40241528Sjwadams 	if (mi.um_malloc != 0) {
40251528Sjwadams 		umem_slab_t slab;
40261528Sjwadams 		uintptr_t saddr = (uintptr_t)c.cache_nullslab.slab_next;
40271528Sjwadams 
40281528Sjwadams 		if (mdb_vread(&slab, sizeof (slab), saddr) == -1) {
40291528Sjwadams 			mdb_warn("unable to read slab at %p\n", saddr);
40301528Sjwadams 		} else {
40311528Sjwadams 			long chunks = slab.slab_chunks;
40321528Sjwadams 			if (chunks != 0 && c.cache_chunksize != 0 &&
40331528Sjwadams 			    chunks <= c.cache_slabsize / c.cache_chunksize) {
40341528Sjwadams 				uintmax_t perslab =
40351528Sjwadams 				    c.cache_slabsize -
40361528Sjwadams 				    (c.cache_chunksize * chunks);
40371528Sjwadams 
40381528Sjwadams 				if (c.cache_flags & UMF_HASH) {
40391528Sjwadams 					perslab += sizeof (umem_slab_t) +
40401528Sjwadams 					    chunks *
40411528Sjwadams 					    ((c.cache_flags & UMF_AUDIT) ?
40421528Sjwadams 					    sizeof (umem_bufctl_audit_t) :
40431528Sjwadams 					    sizeof (umem_bufctl_t));
40441528Sjwadams 				}
40451528Sjwadams 				overhead +=
40461528Sjwadams 				    (perslab * (uintmax_t)mi.um_malloc)/chunks;
40471528Sjwadams 			} else {
40481528Sjwadams 				mdb_warn("invalid #chunks (%ld) in slab %p\n",
40491528Sjwadams 				    chunks, saddr);
40501528Sjwadams 			}
40511528Sjwadams 		}
40521528Sjwadams 	}
40531528Sjwadams 
40541528Sjwadams 	if (allocated != 0)
40551528Sjwadams 		overhead_pct = (1000ULL * overhead) / allocated;
40561528Sjwadams 	else
40571528Sjwadams 		overhead_pct = 0;
40581528Sjwadams 
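	/*
	 * overhead_pct is in tenths of a percent: e.g. (hypothetically)
	 * overhead = 123 and allocated = 1000 give overhead_pct = 123,
	 * printed below as "12.3%".
	 */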
40591528Sjwadams 	mdb_printf("%0?p %6ld %6ld %8ld %8ld %10ld %10ld %3ld.%01ld%%\n",
40601528Sjwadams 	    addr, c.cache_bufsize, maxmalloc,
40611528Sjwadams 	    mi.um_malloc, avg_malloc, allocated, overhead,
40621528Sjwadams 	    overhead_pct / 10, overhead_pct % 10);
40631528Sjwadams 
40641528Sjwadams 	if (!verbose)
40651528Sjwadams 		return (DCMD_OK);
40661528Sjwadams 
40671528Sjwadams 	if (!dump)
40681528Sjwadams 		mdb_printf("\n");
40691528Sjwadams 
40701528Sjwadams 	if (get_umem_alloc_sizes(&alloc_sizes, &num) == -1)
40711528Sjwadams 		return (DCMD_ERR);
40721528Sjwadams 
40731528Sjwadams 	for (idx = 0; idx < num; idx++) {
40741528Sjwadams 		if (alloc_sizes[idx] == c.cache_bufsize)
40751528Sjwadams 			break;
40761528Sjwadams 		if (alloc_sizes[idx] == 0) {
40771528Sjwadams 			idx = num;	/* 0-terminated array */
40781528Sjwadams 			break;
40791528Sjwadams 		}
40801528Sjwadams 	}
40811528Sjwadams 	if (idx == num) {
40821528Sjwadams 		mdb_warn(
40831528Sjwadams 		    "cache %p's size (%ld) not in umem_alloc_sizes\n",
40841528Sjwadams 		    addr, c.cache_bufsize);
40851528Sjwadams 		return (DCMD_ERR);
40861528Sjwadams 	}
40871528Sjwadams 
40881528Sjwadams 	minmalloc = (idx == 0) ? 0 : alloc_sizes[idx - 1];
40891528Sjwadams 	if (minmalloc > 0) {
40901528Sjwadams #ifdef _LP64
40911528Sjwadams 		if (minmalloc > UMEM_SECOND_ALIGN)
40921528Sjwadams 			minmalloc -= sizeof (struct malloc_data);
40931528Sjwadams #endif
40941528Sjwadams 		minmalloc -= sizeof (struct malloc_data);
40951528Sjwadams 		minmalloc += 1;
40961528Sjwadams 	}
40971528Sjwadams 
40981528Sjwadams 	if (dump) {
40991528Sjwadams 		for (idx = minmalloc; idx <= maxmalloc; idx++)
41001528Sjwadams 			mdb_printf("%d\t%d\n", idx, mi.um_bucket[idx]);
41011528Sjwadams 		mdb_printf("\n");
41021528Sjwadams 	} else {
41031528Sjwadams 		umem_malloc_print_dist(mi.um_bucket, minmalloc, maxmalloc,
41041528Sjwadams 		    maxbuckets, minbucketsize, geometric);
41051528Sjwadams 	}
41061528Sjwadams 
41071528Sjwadams 	return (DCMD_OK);
41081528Sjwadams }
4109