/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "umem.h"

#include <sys/vmem_impl_user.h>
#include <umem_impl.h>

#include <alloca.h>
#include <limits.h>

#include "misc.h"
#include "leaky.h"
#include "dist.h"

#include "umem_pagesize.h"

#define	UM_ALLOCATED	0x1
#define	UM_FREE		0x2
#define	UM_BUFCTL	0x4
#define	UM_HASH		0x8

int umem_ready;

static int umem_stack_depth_warned;
static uint32_t umem_max_ncpus;
uint32_t umem_stack_depth;

size_t umem_pagesize;

#define	UMEM_READVAR(var)				\
	(umem_readvar(&(var), #var) == -1 &&		\
	    (mdb_warn("failed to read "#var), 1))
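/*
 * A note on UMEM_READVAR: the comma expression (mdb_warn(...), 1) makes
 * the whole macro evaluate to 1 exactly when umem_readvar() fails, so
 * callers can treat it as a boolean "read failed" test, as in the
 * pattern used throughout this file:
 *
 *	if (UMEM_READVAR(umem_ready))
 *		return (-1);	(a warning has already been printed)
 */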
int
umem_update_variables(void)
{
	size_t pagesize;

	/*
	 * Figure out which type of umem is being used; if it's not there
	 * yet, succeed quietly.
	 */
	if (umem_set_standalone() == -1) {
		umem_ready = 0;
		return (0);		/* umem not there yet */
	}

	/*
	 * Solaris 9 used a different name for umem_max_ncpus.  It's
	 * cheap backwards compatibility to check for both names.
	 */
	if (umem_readvar(&umem_max_ncpus, "umem_max_ncpus") == -1 &&
	    umem_readvar(&umem_max_ncpus, "max_ncpus") == -1) {
		mdb_warn("unable to read umem_max_ncpus or max_ncpus");
		return (-1);
	}
	if (UMEM_READVAR(umem_ready))
		return (-1);
	if (UMEM_READVAR(umem_stack_depth))
		return (-1);
	if (UMEM_READVAR(pagesize))
		return (-1);

	if (umem_stack_depth > UMEM_MAX_STACK_DEPTH) {
		if (umem_stack_depth_warned == 0) {
			mdb_warn("umem_stack_depth corrupted (%d > %d)\n",
			    umem_stack_depth, UMEM_MAX_STACK_DEPTH);
			umem_stack_depth_warned = 1;
		}
		umem_stack_depth = 0;
	}

	umem_pagesize = pagesize;

	return (0);
}

/*ARGSUSED*/
static int
umem_init_walkers(uintptr_t addr, const umem_cache_t *c, void *ignored)
{
	mdb_walker_t w;
	char descr[64];

	(void) mdb_snprintf(descr, sizeof (descr),
	    "walk the %s cache", c->cache_name);

	w.walk_name = c->cache_name;
	w.walk_descr = descr;
	w.walk_init = umem_walk_init;
	w.walk_step = umem_walk_step;
	w.walk_fini = umem_walk_fini;
	w.walk_init_arg = (void *)addr;

	if (mdb_add_walker(&w) == -1)
		mdb_warn("failed to add %s walker", c->cache_name);

	return (WALK_NEXT);
}
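/*
 * The net effect is one walker per cache, named after the cache itself.
 * For example (assuming the usual cache names libumem creates in the
 * target):
 *
 *	> ::walk umem_alloc_128
 *
 * walks the buffers of the 128-byte cache, just as if the generic buffer
 * walker had been seeded with that cache's address.
 */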
/*ARGSUSED*/
static void
umem_statechange_cb(void *arg)
{
	static int been_ready = 0;

#ifndef _KMDB
	leaky_cleanup(1);	/* state changes invalidate leaky state */
#endif

	if (umem_update_variables() == -1)
		return;

	if (been_ready)
		return;

	if (umem_ready != UMEM_READY)
		return;

	been_ready = 1;
	(void) mdb_walk("umem_cache", (mdb_walk_cb_t)umem_init_walkers, NULL);
}

int
umem_init(void)
{
	mdb_walker_t w = {
		"umem_cache", "walk list of umem caches", umem_cache_walk_init,
		umem_cache_walk_step, umem_cache_walk_fini
	};

	if (mdb_add_walker(&w) == -1) {
		mdb_warn("failed to add umem_cache walker");
		return (-1);
	}

	if (umem_update_variables() == -1)
		return (-1);

	/* install a callback so that our variables are always up-to-date */
	(void) mdb_callback_add(MDB_CALLBACK_STCHG, umem_statechange_cb, NULL);
	umem_statechange_cb(NULL);

	return (0);
}

int
umem_abort_messages(void)
{
	char *umem_error_buffer;
	uint_t umem_error_begin;
	GElf_Sym sym;
	size_t bufsize;

	if (UMEM_READVAR(umem_error_begin))
		return (DCMD_ERR);

	if (umem_lookup_by_name("umem_error_buffer", &sym) == -1) {
		mdb_warn("unable to look up umem_error_buffer");
		return (DCMD_ERR);
	}

	bufsize = (size_t)sym.st_size;

	umem_error_buffer = mdb_alloc(bufsize+1, UM_SLEEP | UM_GC);

	if (mdb_vread(umem_error_buffer, bufsize, (uintptr_t)sym.st_value)
	    != bufsize) {
		mdb_warn("unable to read umem_error_buffer");
		return (DCMD_ERR);
	}
	/* put a zero after the end of the buffer to simplify printing */
	umem_error_buffer[bufsize] = 0;

	if ((umem_error_begin % bufsize) == 0)
		mdb_printf("%s\n", umem_error_buffer);
	else {
		umem_error_buffer[(umem_error_begin % bufsize) - 1] = 0;
		mdb_printf("%s%s\n",
		    &umem_error_buffer[umem_error_begin % bufsize],
		    umem_error_buffer);
	}

	return (DCMD_OK);
}
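/*
 * umem_error_buffer is a circular buffer, with umem_error_begin marking
 * the logical start of its contents.  When the contents have wrapped,
 * the byte just before the logical start (normally already a NUL
 * terminating the newest message) is forced to zero, and the buffer is
 * printed tail-then-head to restore chronological order.  Sketch: with
 * bufsize 8 and begin 5, we print &buf[5] (indices 5..7) followed by
 * buf itself (indices 0..3, buf[4] serving as the terminator).
 */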
static void
umem_log_status(const char *name, umem_log_header_t *val)
{
	umem_log_header_t my_lh;
	uintptr_t pos = (uintptr_t)val;
	size_t size;

	if (pos == NULL)
		return;

	if (mdb_vread(&my_lh, sizeof (umem_log_header_t), pos) == -1) {
		mdb_warn("\nunable to read umem_%s_log pointer %p",
		    name, pos);
		return;
	}

	size = my_lh.lh_chunksize * my_lh.lh_nchunks;

	if (size % (1024 * 1024) == 0)
		mdb_printf("%s=%dm ", name, size / (1024 * 1024));
	else if (size % 1024 == 0)
		mdb_printf("%s=%dk ", name, size / 1024);
	else
		mdb_printf("%s=%d ", name, size);
}

typedef struct umem_debug_flags {
	const char	*udf_name;
	uint_t		udf_flags;
	uint_t		udf_clear;	/* if 0, uses udf_flags */
} umem_debug_flags_t;

umem_debug_flags_t umem_status_flags[] = {
	{ "random",	UMF_RANDOMIZE,	UMF_RANDOM },
	{ "default",	UMF_AUDIT | UMF_DEADBEEF | UMF_REDZONE | UMF_CONTENTS },
	{ "audit",	UMF_AUDIT },
	{ "guards",	UMF_DEADBEEF | UMF_REDZONE },
	{ "nosignal",	UMF_CHECKSIGNAL },
	{ "firewall",	UMF_FIREWALL },
	{ "lite",	UMF_LITE },
	{ NULL }
};

/*ARGSUSED*/
int
umem_status(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv)
{
	int umem_logging;

	umem_log_header_t *umem_transaction_log;
	umem_log_header_t *umem_content_log;
	umem_log_header_t *umem_failure_log;
	umem_log_header_t *umem_slab_log;

	mdb_printf("Status:\t\t%s\n",
	    umem_ready == UMEM_READY_INIT_FAILED ? "initialization failed" :
	    umem_ready == UMEM_READY_STARTUP ? "uninitialized" :
	    umem_ready == UMEM_READY_INITING ? "initialization in process" :
	    umem_ready == UMEM_READY ? "ready and active" :
	    umem_ready == 0 ? "not loaded into address space" :
	    "unknown (umem_ready invalid)");

	if (umem_ready == 0)
		return (DCMD_OK);

	mdb_printf("Concurrency:\t%d\n", umem_max_ncpus);

	if (UMEM_READVAR(umem_logging))
		goto err;
	if (UMEM_READVAR(umem_transaction_log))
		goto err;
	if (UMEM_READVAR(umem_content_log))
		goto err;
	if (UMEM_READVAR(umem_failure_log))
		goto err;
	if (UMEM_READVAR(umem_slab_log))
		goto err;

	mdb_printf("Logs:\t\t");
	umem_log_status("transaction", umem_transaction_log);
	umem_log_status("content", umem_content_log);
	umem_log_status("fail", umem_failure_log);
	umem_log_status("slab", umem_slab_log);
	if (!umem_logging)
		mdb_printf("(inactive)");
	mdb_printf("\n");

	mdb_printf("Message buffer:\n");
	return (umem_abort_messages());

err:
	mdb_printf("Message buffer:\n");
	(void) umem_abort_messages();
	return (DCMD_ERR);
}
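/*
 * The log sizes printed by umem_log_status() are lh_chunksize *
 * lh_nchunks, reported in the largest unit that divides evenly: e.g.
 * 64 chunks of 16384 bytes is 1048576 bytes and prints as
 * "transaction=1m", while 24 chunks of that size (393216 bytes) prints
 * as "transaction=384k".
 */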
typedef struct {
	uintptr_t ucw_first;
	uintptr_t ucw_current;
} umem_cache_walk_t;

int
umem_cache_walk_init(mdb_walk_state_t *wsp)
{
	umem_cache_walk_t *ucw;
	umem_cache_t c;
	uintptr_t cp;
	GElf_Sym sym;

	if (umem_lookup_by_name("umem_null_cache", &sym) == -1) {
		mdb_warn("couldn't find umem_null_cache");
		return (WALK_ERR);
	}

	cp = (uintptr_t)sym.st_value;

	if (mdb_vread(&c, sizeof (umem_cache_t), cp) == -1) {
		mdb_warn("couldn't read cache at %p", cp);
		return (WALK_ERR);
	}

	ucw = mdb_alloc(sizeof (umem_cache_walk_t), UM_SLEEP);

	ucw->ucw_first = cp;
	ucw->ucw_current = (uintptr_t)c.cache_next;
	wsp->walk_data = ucw;

	return (WALK_NEXT);
}

int
umem_cache_walk_step(mdb_walk_state_t *wsp)
{
	umem_cache_walk_t *ucw = wsp->walk_data;
	umem_cache_t c;
	int status;

	if (mdb_vread(&c, sizeof (umem_cache_t), ucw->ucw_current) == -1) {
		mdb_warn("couldn't read cache at %p", ucw->ucw_current);
		return (WALK_DONE);
	}

	status = wsp->walk_callback(ucw->ucw_current, &c, wsp->walk_cbdata);

	if ((ucw->ucw_current = (uintptr_t)c.cache_next) == ucw->ucw_first)
		return (WALK_DONE);

	return (status);
}

void
umem_cache_walk_fini(mdb_walk_state_t *wsp)
{
	umem_cache_walk_t *ucw = wsp->walk_data;
	mdb_free(ucw, sizeof (umem_cache_walk_t));
}
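/*
 * The caches live on a circular list anchored at umem_null_cache; the
 * walk starts at its cache_next and stops when it comes back around to
 * the anchor.  A typical use is to feed every cache to a dcmd, e.g.:
 *
 *	> ::walk umem_cache | ::umem_cache
 */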
typedef struct {
	umem_cpu_t *ucw_cpus;
	uint32_t ucw_current;
	uint32_t ucw_max;
} umem_cpu_walk_state_t;

int
umem_cpu_walk_init(mdb_walk_state_t *wsp)
{
	umem_cpu_t *umem_cpus;

	umem_cpu_walk_state_t *ucw;

	if (umem_readvar(&umem_cpus, "umem_cpus") == -1) {
		mdb_warn("failed to read 'umem_cpus'");
		return (WALK_ERR);
	}

	ucw = mdb_alloc(sizeof (*ucw), UM_SLEEP);

	ucw->ucw_cpus = umem_cpus;
	ucw->ucw_current = 0;
	ucw->ucw_max = umem_max_ncpus;

	wsp->walk_data = ucw;
	return (WALK_NEXT);
}

int
umem_cpu_walk_step(mdb_walk_state_t *wsp)
{
	umem_cpu_t cpu;
	umem_cpu_walk_state_t *ucw = wsp->walk_data;

	uintptr_t caddr;

	if (ucw->ucw_current >= ucw->ucw_max)
		return (WALK_DONE);

	caddr = (uintptr_t)&(ucw->ucw_cpus[ucw->ucw_current]);

	if (mdb_vread(&cpu, sizeof (umem_cpu_t), caddr) == -1) {
		mdb_warn("failed to read cpu %d", ucw->ucw_current);
		return (WALK_ERR);
	}

	ucw->ucw_current++;

	return (wsp->walk_callback(caddr, &cpu, wsp->walk_cbdata));
}

void
umem_cpu_walk_fini(mdb_walk_state_t *wsp)
{
	umem_cpu_walk_state_t *ucw = wsp->walk_data;

	mdb_free(ucw, sizeof (*ucw));
}
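/*
 * Note that ucw_cpus is a target-space pointer read out of umem_cpus, so
 * the &ucw->ucw_cpus[i] arithmetic above computes a target address
 * without ever dereferencing it locally; each umem_cpu_t is then copied
 * in with mdb_vread().  The equivalent address computation is:
 *
 *	caddr = (uintptr_t)umem_cpus + i * sizeof (umem_cpu_t);
 */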
int
umem_cpu_cache_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL) {
		mdb_warn("umem_cpu_cache doesn't support global walks");
		return (WALK_ERR);
	}

	if (mdb_layered_walk("umem_cpu", wsp) == -1) {
		mdb_warn("couldn't walk 'umem_cpu'");
		return (WALK_ERR);
	}

	wsp->walk_data = (void *)wsp->walk_addr;

	return (WALK_NEXT);
}

int
umem_cpu_cache_walk_step(mdb_walk_state_t *wsp)
{
	uintptr_t caddr = (uintptr_t)wsp->walk_data;
	const umem_cpu_t *cpu = wsp->walk_layer;
	umem_cpu_cache_t cc;

	caddr += cpu->cpu_cache_offset;

	if (mdb_vread(&cc, sizeof (umem_cpu_cache_t), caddr) == -1) {
		mdb_warn("couldn't read umem_cpu_cache at %p", caddr);
		return (WALK_ERR);
	}

	return (wsp->walk_callback(caddr, &cc, wsp->walk_cbdata));
}
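/*
 * This is a layered walk: the "umem_cpu" walker supplies one umem_cpu_t
 * per step via wsp->walk_layer, and each CPU's cpu_cache_offset locates
 * that CPU's umem_cpu_cache_t within the cache whose address seeded the
 * walk.  So "addr::walk umem_cpu_cache" visits the per-CPU magazine
 * state of cache addr, one CPU at a time.
 */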
int
umem_slab_walk_init(mdb_walk_state_t *wsp)
{
	uintptr_t caddr = wsp->walk_addr;
	umem_cache_t c;

	if (caddr == NULL) {
		mdb_warn("umem_slab doesn't support global walks\n");
		return (WALK_ERR);
	}

	if (mdb_vread(&c, sizeof (c), caddr) == -1) {
		mdb_warn("couldn't read umem_cache at %p", caddr);
		return (WALK_ERR);
	}

	wsp->walk_data =
	    (void *)(caddr + offsetof(umem_cache_t, cache_nullslab));
	wsp->walk_addr = (uintptr_t)c.cache_nullslab.slab_next;

	return (WALK_NEXT);
}

int
umem_slab_walk_partial_init(mdb_walk_state_t *wsp)
{
	uintptr_t caddr = wsp->walk_addr;
	umem_cache_t c;

	if (caddr == NULL) {
		mdb_warn("umem_slab_partial doesn't support global walks\n");
		return (WALK_ERR);
	}

	if (mdb_vread(&c, sizeof (c), caddr) == -1) {
		mdb_warn("couldn't read umem_cache at %p", caddr);
		return (WALK_ERR);
	}

	wsp->walk_data =
	    (void *)(caddr + offsetof(umem_cache_t, cache_nullslab));
	wsp->walk_addr = (uintptr_t)c.cache_freelist;

	/*
	 * Some consumers (umem_walk_step(), in particular) require at
	 * least one callback if there are any buffers in the cache.  So
	 * if there are *no* partial slabs, report the last full slab, if
	 * any.
	 *
	 * Yes, this is ugly, but it's cleaner than the other possibilities.
	 */
	if ((uintptr_t)wsp->walk_data == wsp->walk_addr)
		wsp->walk_addr = (uintptr_t)c.cache_nullslab.slab_prev;

	return (WALK_NEXT);
}
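/*
 * Both slab walks use the cache's embedded cache_nullslab as the list
 * sentinel: walk_data holds the sentinel's address, and the step
 * function below terminates when a slab's next pointer comes back
 * around to it.  The partial walk just starts at cache_freelist rather
 * than at the head of the slab list, on the expectation that the slabs
 * ahead of the freelist are fully allocated.
 */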
int
umem_slab_walk_step(mdb_walk_state_t *wsp)
{
	umem_slab_t s;
	uintptr_t addr = wsp->walk_addr;
	uintptr_t saddr = (uintptr_t)wsp->walk_data;
	uintptr_t caddr = saddr - offsetof(umem_cache_t, cache_nullslab);

	if (addr == saddr)
		return (WALK_DONE);

	if (mdb_vread(&s, sizeof (s), addr) == -1) {
		mdb_warn("failed to read slab at %p", wsp->walk_addr);
		return (WALK_ERR);
	}

	if ((uintptr_t)s.slab_cache != caddr) {
		mdb_warn("slab %p isn't in cache %p (in cache %p)\n",
		    addr, caddr, s.slab_cache);
		return (WALK_ERR);
	}

	wsp->walk_addr = (uintptr_t)s.slab_next;

	return (wsp->walk_callback(addr, &s, wsp->walk_cbdata));
}

int
umem_cache(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv)
{
	umem_cache_t c;

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_walk_dcmd("umem_cache", "umem_cache", ac, argv) == -1) {
			mdb_warn("can't walk umem_cache");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags))
		mdb_printf("%-?s %-25s %4s %8s %8s %8s\n", "ADDR", "NAME",
		    "FLAG", "CFLAG", "BUFSIZE", "BUFTOTL");

	if (mdb_vread(&c, sizeof (c), addr) == -1) {
		mdb_warn("couldn't read umem_cache at %p", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%0?p %-25s %04x %08x %8ld %8lld\n", addr, c.cache_name,
	    c.cache_flags, c.cache_cflags, c.cache_bufsize, c.cache_buftotal);

	return (DCMD_OK);
}
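/*
 * Without an address, ::umem_cache applies itself to every cache via the
 * umem_cache walker, yielding one line per cache under the header.  An
 * output sketch (hypothetical 32-bit target, not from a live session):
 *
 *	ADDR     NAME                      FLAG    CFLAG  BUFSIZE  BUFTOTL
 *	08062010 umem_alloc_24             0200 00080000       24     1012
 */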
static int
addrcmp(const void *lhs, const void *rhs)
{
	uintptr_t p1 = *((uintptr_t *)lhs);
	uintptr_t p2 = *((uintptr_t *)rhs);

	if (p1 < p2)
		return (-1);
	if (p1 > p2)
		return (1);
	return (0);
}

static int
bufctlcmp(const umem_bufctl_audit_t **lhs, const umem_bufctl_audit_t **rhs)
{
	const umem_bufctl_audit_t *bcp1 = *lhs;
	const umem_bufctl_audit_t *bcp2 = *rhs;

	if (bcp1->bc_timestamp > bcp2->bc_timestamp)
		return (-1);

	if (bcp1->bc_timestamp < bcp2->bc_timestamp)
		return (1);

	return (0);
}

typedef struct umem_hash_walk {
	uintptr_t *umhw_table;
	size_t umhw_nelems;
	size_t umhw_pos;
	umem_bufctl_t umhw_cur;
} umem_hash_walk_t;

int
umem_hash_walk_init(mdb_walk_state_t *wsp)
{
	umem_hash_walk_t *umhw;
	uintptr_t *hash;
	umem_cache_t c;
	uintptr_t haddr, addr = wsp->walk_addr;
	size_t nelems;
	size_t hsize;

	if (addr == NULL) {
		mdb_warn("umem_hash doesn't support global walks\n");
		return (WALK_ERR);
	}

	if (mdb_vread(&c, sizeof (c), addr) == -1) {
		mdb_warn("couldn't read cache at addr %p", addr);
		return (WALK_ERR);
	}

	if (!(c.cache_flags & UMF_HASH)) {
		mdb_warn("cache %p doesn't have a hash table\n", addr);
		return (WALK_DONE);		/* nothing to do */
	}

	umhw = mdb_zalloc(sizeof (umem_hash_walk_t), UM_SLEEP);
	umhw->umhw_cur.bc_next = NULL;
	umhw->umhw_pos = 0;

	umhw->umhw_nelems = nelems = c.cache_hash_mask + 1;
	hsize = nelems * sizeof (uintptr_t);
	haddr = (uintptr_t)c.cache_hash_table;

	umhw->umhw_table = hash = mdb_alloc(hsize, UM_SLEEP);
	if (mdb_vread(hash, hsize, haddr) == -1) {
		mdb_warn("failed to read hash table at %p", haddr);
		mdb_free(hash, hsize);
		mdb_free(umhw, sizeof (umem_hash_walk_t));
		return (WALK_ERR);
	}

	wsp->walk_data = umhw;

	return (WALK_NEXT);
}

int
umem_hash_walk_step(mdb_walk_state_t *wsp)
{
	umem_hash_walk_t *umhw = wsp->walk_data;
	uintptr_t addr = NULL;

	if ((addr = (uintptr_t)umhw->umhw_cur.bc_next) == NULL) {
		while (umhw->umhw_pos < umhw->umhw_nelems) {
			if ((addr = umhw->umhw_table[umhw->umhw_pos++]) != NULL)
				break;
		}
	}
	if (addr == NULL)
		return (WALK_DONE);

	if (mdb_vread(&umhw->umhw_cur, sizeof (umem_bufctl_t), addr) == -1) {
		mdb_warn("couldn't read umem_bufctl_t at addr %p", addr);
		return (WALK_ERR);
	}

	return (wsp->walk_callback(addr, &umhw->umhw_cur, wsp->walk_cbdata));
}

void
umem_hash_walk_fini(mdb_walk_state_t *wsp)
{
	umem_hash_walk_t *umhw = wsp->walk_data;

	if (umhw == NULL)
		return;

	mdb_free(umhw->umhw_table, umhw->umhw_nelems * sizeof (uintptr_t));
	mdb_free(umhw, sizeof (umem_hash_walk_t));
}
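/*
 * addrcmp() gives qsort()/bsearch() a total order on buffer addresses
 * without risking the truncation a plain pointer subtraction could
 * suffer, and bufctlcmp() orders bufctl audit records newest-first by
 * bc_timestamp.  The hash walk itself snapshots the bucket array once,
 * then chases each bucket's bc_next chain one bufctl at a time.
 */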
/*
 * Find the address of the bufctl structure for the address 'buf' in cache
 * 'cp', which is at address caddr, and place it in *out.
 */
static int
umem_hash_lookup(umem_cache_t *cp, uintptr_t caddr, void *buf, uintptr_t *out)
{
	uintptr_t bucket = (uintptr_t)UMEM_HASH(cp, buf);
	umem_bufctl_t *bcp;
	umem_bufctl_t bc;

	if (mdb_vread(&bcp, sizeof (umem_bufctl_t *), bucket) == -1) {
		mdb_warn("unable to read hash bucket for %p in cache %p",
		    buf, caddr);
		return (-1);
	}

	while (bcp != NULL) {
		if (mdb_vread(&bc, sizeof (umem_bufctl_t),
		    (uintptr_t)bcp) == -1) {
			mdb_warn("unable to read bufctl at %p", bcp);
			return (-1);
		}
		if (bc.bc_addr == buf) {
			*out = (uintptr_t)bcp;
			return (0);
		}
		bcp = bc.bc_next;
	}

	mdb_warn("unable to find bufctl for %p in cache %p\n", buf, caddr);
	return (-1);
}
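/*
 * One subtlety above: UMEM_HASH(cp, buf) yields the *target* address of
 * the bucket slot (cp is a local copy, but the cache_hash_table pointer
 * inside it refers to target memory), so the bucket head must itself be
 * fetched with mdb_vread() before the chain walk can begin.
 */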
int
umem_get_magsize(const umem_cache_t *cp)
{
	uintptr_t addr = (uintptr_t)cp->cache_magtype;
	GElf_Sym mt_sym;
	umem_magtype_t mt;
	int res;

	/*
	 * if cpu 0 has a non-zero magsize, it must be correct.  caches
	 * with UMF_NOMAGAZINE have disabled their magazine layers, so
	 * it is okay to return 0 for them.
	 */
	if ((res = cp->cache_cpu[0].cc_magsize) != 0 ||
	    (cp->cache_flags & UMF_NOMAGAZINE))
		return (res);

	if (umem_lookup_by_name("umem_magtype", &mt_sym) == -1) {
		mdb_warn("unable to read 'umem_magtype'");
	} else if (addr < mt_sym.st_value ||
	    addr + sizeof (mt) - 1 > mt_sym.st_value + mt_sym.st_size - 1 ||
	    ((addr - mt_sym.st_value) % sizeof (mt)) != 0) {
		mdb_warn("cache '%s' has invalid magtype pointer (%p)\n",
		    cp->cache_name, addr);
		return (0);
	}
	if (mdb_vread(&mt, sizeof (mt), addr) == -1) {
		mdb_warn("unable to read magtype at %a", addr);
		return (0);
	}
	return (mt.mt_magsize);
}
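/*
 * The sanity check above accepts cache_magtype only if it points at one
 * of the elements of the target's umem_magtype[] array: it must lie
 * within [st_value, st_value + st_size), leave room for a whole
 * umem_magtype_t, and sit at an offset that is an exact multiple of
 * sizeof (umem_magtype_t) from the array's start.
 */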
/*ARGSUSED*/
static int
umem_estimate_slab(uintptr_t addr, const umem_slab_t *sp, size_t *est)
{
	*est -= (sp->slab_chunks - sp->slab_refcnt);

	return (WALK_NEXT);
}

/*
 * Returns an upper bound on the number of allocated buffers in a given
 * cache.
 */
size_t
umem_estimate_allocated(uintptr_t addr, const umem_cache_t *cp)
{
	int magsize;
	size_t cache_est;

	cache_est = cp->cache_buftotal;

	(void) mdb_pwalk("umem_slab_partial",
	    (mdb_walk_cb_t)umem_estimate_slab, &cache_est, addr);

	if ((magsize = umem_get_magsize(cp)) != 0) {
		size_t mag_est = cp->cache_full.ml_total * magsize;

		if (cache_est >= mag_est) {
			cache_est -= mag_est;
		} else {
			mdb_warn("cache %p's magazine layer holds more buffers "
			    "than the slab layer.\n", addr);
		}
	}
	return (cache_est);
}
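/*
 * A worked example of the estimate: a cache with cache_buftotal = 100
 * whose partial slabs hold 20 free chunks in total starts at
 * 100 - 20 = 80; if the depot holds 2 full magazines of 15 rounds each,
 * mag_est = 30 and the bound drops to 80 - 30 = 50 allocated buffers.
 * Loaded per-CPU magazines are not subtracted, which is why this is an
 * upper bound rather than an exact count.
 */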
#define	READMAG_ROUNDS(rounds) { \
	if (mdb_vread(mp, magbsize, (uintptr_t)ump) == -1) { \
		mdb_warn("couldn't read magazine at %p", ump); \
		goto fail; \
	} \
	for (i = 0; i < rounds; i++) { \
		maglist[magcnt++] = mp->mag_round[i]; \
		if (magcnt == magmax) { \
			mdb_warn("%d magazines exceeds fudge factor\n", \
			    magcnt); \
			goto fail; \
		} \
	} \
}

int
umem_read_magazines(umem_cache_t *cp, uintptr_t addr,
    void ***maglistp, size_t *magcntp, size_t *magmaxp, int alloc_flags)
{
	umem_magazine_t *ump, *mp;
	void **maglist = NULL;
	int i, cpu;
	size_t magsize, magmax, magbsize;
	size_t magcnt = 0;

	/*
	 * Read the magtype out of the cache, after verifying the pointer's
	 * correctness.
	 */
	magsize = umem_get_magsize(cp);
	if (magsize == 0) {
		*maglistp = NULL;
		*magcntp = 0;
		*magmaxp = 0;
		return (WALK_NEXT);
	}

	/*
	 * There are several places where we need to go buffer hunting:
	 * the per-CPU loaded magazine, the per-CPU spare full magazine,
	 * and the full magazine list in the depot.
	 *
	 * For an upper bound on the number of buffers in the magazine
	 * layer, we have the number of magazines on the cache_full
	 * list plus at most two magazines per CPU (the loaded and the
	 * spare).  Toss in 100 magazines as a fudge factor in case this
	 * is live (the number "100" comes from the same fudge factor in
	 * crash(1M)).
	 */
	magmax = (cp->cache_full.ml_total + 2 * umem_max_ncpus + 100) * magsize;
	magbsize = offsetof(umem_magazine_t, mag_round[magsize]);

	if (magbsize >= PAGESIZE / 2) {
		mdb_warn("magazine size for cache %p unreasonable (%x)\n",
		    addr, magbsize);
		return (WALK_ERR);
	}

	maglist = mdb_alloc(magmax * sizeof (void *), alloc_flags);
	mp = mdb_alloc(magbsize, alloc_flags);
	if (mp == NULL || maglist == NULL)
		goto fail;

	/*
	 * First up: the magazines in the depot (i.e. on the cache_full list).
	 */
	for (ump = cp->cache_full.ml_list; ump != NULL; ) {
		READMAG_ROUNDS(magsize);
		ump = mp->mag_next;

		if (ump == cp->cache_full.ml_list)
			break;	/* cache_full list loop detected */
	}

	dprintf(("cache_full list done\n"));

	/*
	 * Now whip through the CPUs, snagging the loaded magazines
	 * and full spares.
	 */
	for (cpu = 0; cpu < umem_max_ncpus; cpu++) {
		umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu];

		dprintf(("reading cpu cache %p\n",
		    (uintptr_t)ccp - (uintptr_t)cp + addr));

		if (ccp->cc_rounds > 0 &&
		    (ump = ccp->cc_loaded) != NULL) {
			dprintf(("reading %d loaded rounds\n", ccp->cc_rounds));
			READMAG_ROUNDS(ccp->cc_rounds);
		}

		if (ccp->cc_prounds > 0 &&
		    (ump = ccp->cc_ploaded) != NULL) {
			dprintf(("reading %d previously loaded rounds\n",
			    ccp->cc_prounds));
			READMAG_ROUNDS(ccp->cc_prounds);
		}
	}

	dprintf(("magazine layer: %d buffers\n", magcnt));

	if (!(alloc_flags & UM_GC))
		mdb_free(mp, magbsize);

	*maglistp = maglist;
	*magcntp = magcnt;
	*magmaxp = magmax;

	return (WALK_NEXT);

fail:
	if (!(alloc_flags & UM_GC)) {
		if (mp)
			mdb_free(mp, magbsize);
		if (maglist)
			mdb_free(maglist, magmax * sizeof (void *));
	}
	return (WALK_ERR);
}
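/*
 * To make the bound concrete: on a target with umem_max_ncpus = 4, a
 * 15-round magtype, and 6 full magazines in the depot, magmax is
 * (6 + 2 * 4 + 100) * 15 = 1710 pointers of headroom, the 100 extra
 * magazines covering anything that moves while a live process is being
 * examined.
 */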
static int
umem_walk_callback(mdb_walk_state_t *wsp, uintptr_t buf)
{
	return (wsp->walk_callback(buf, NULL, wsp->walk_cbdata));
}

static int
bufctl_walk_callback(umem_cache_t *cp, mdb_walk_state_t *wsp, uintptr_t buf)
{
	umem_bufctl_audit_t *b;
	UMEM_LOCAL_BUFCTL_AUDIT(&b);

	/*
	 * if UMF_AUDIT is not set, we know that we're looking at a
	 * umem_bufctl_t.
	 */
	if (!(cp->cache_flags & UMF_AUDIT) ||
	    mdb_vread(b, UMEM_BUFCTL_AUDIT_SIZE, buf) == -1) {
		(void) memset(b, 0, UMEM_BUFCTL_AUDIT_SIZE);
		if (mdb_vread(b, sizeof (umem_bufctl_t), buf) == -1) {
			mdb_warn("unable to read bufctl at %p", buf);
			return (WALK_ERR);
		}
	}

	return (wsp->walk_callback(buf, b, wsp->walk_cbdata));
}

typedef struct umem_walk {
	int umw_type;

	int umw_addr;			/* cache address */
	umem_cache_t *umw_cp;
	size_t umw_csize;

	/*
	 * magazine layer
	 */
	void **umw_maglist;
	size_t umw_max;
	size_t umw_count;
	size_t umw_pos;

	/*
	 * slab layer
	 */
	char *umw_valid;	/* to keep track of freed buffers */
	char *umw_ubase;	/* buffer for slab data */
} umem_walk_t;

static int
umem_walk_init_common(mdb_walk_state_t *wsp, int type)
{
	umem_walk_t *umw;
	int csize;
	umem_cache_t *cp;
	size_t vm_quantum;

	size_t magmax, magcnt;
	void **maglist = NULL;
	uint_t chunksize, slabsize;
	int status = WALK_ERR;
	uintptr_t addr = wsp->walk_addr;
	const char *layered;

	type &= ~UM_HASH;

	if (addr == NULL) {
		mdb_warn("umem walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	dprintf(("walking %p\n", addr));

	/*
	 * The number of "cpus" determines how large the cache is.
	 */
	csize = UMEM_CACHE_SIZE(umem_max_ncpus);
	cp = mdb_alloc(csize, UM_SLEEP);

	if (mdb_vread(cp, csize, addr) == -1) {
		mdb_warn("couldn't read cache at addr %p", addr);
		goto out2;
	}

	/*
	 * It's easy for someone to hand us an invalid cache address.
	 * Unfortunately, it is hard for this walker to survive an
	 * invalid cache cleanly.  So we make sure that:
	 *
	 *	1. the vmem arena for the cache is readable,
	 *	2. the vmem arena's quantum is a power of 2,
	 *	3. our slabsize is a multiple of the quantum, and
	 *	4. our chunksize is >0 and less than our slabsize.
	 */
	if (mdb_vread(&vm_quantum, sizeof (vm_quantum),
	    (uintptr_t)&cp->cache_arena->vm_quantum) == -1 ||
	    vm_quantum == 0 ||
	    (vm_quantum & (vm_quantum - 1)) != 0 ||
	    cp->cache_slabsize < vm_quantum ||
	    P2PHASE(cp->cache_slabsize, vm_quantum) != 0 ||
	    cp->cache_chunksize == 0 ||
	    cp->cache_chunksize > cp->cache_slabsize) {
		mdb_warn("%p is not a valid umem_cache_t\n", addr);
		goto out2;
	}

	dprintf(("buf total is %d\n", cp->cache_buftotal));

	if (cp->cache_buftotal == 0) {
		mdb_free(cp, csize);
		return (WALK_DONE);
	}

	/*
	 * If they ask for bufctls, but it's a small-slab cache,
	 * there is nothing to report.
	 */
	if ((type & UM_BUFCTL) && !(cp->cache_flags & UMF_HASH)) {
		dprintf(("bufctl requested, not UMF_HASH (flags: %p)\n",
		    cp->cache_flags));
		mdb_free(cp, csize);
		return (WALK_DONE);
	}

	/*
	 * Read in the contents of the magazine layer
	 */
	if (umem_read_magazines(cp, addr, &maglist, &magcnt, &magmax,
	    UM_SLEEP) == WALK_ERR)
		goto out2;

	/*
	 * We have all of the buffers from the magazines; if we are walking
	 * allocated buffers, sort them so we can bsearch them later.
	 */
	if (type & UM_ALLOCATED)
		qsort(maglist, magcnt, sizeof (void *), addrcmp);

	wsp->walk_data = umw = mdb_zalloc(sizeof (umem_walk_t), UM_SLEEP);

	umw->umw_type = type;
	umw->umw_addr = addr;
	umw->umw_cp = cp;
	umw->umw_csize = csize;
	umw->umw_maglist = maglist;
	umw->umw_max = magmax;
	umw->umw_count = magcnt;
	umw->umw_pos = 0;

	/*
	 * When walking allocated buffers in a UMF_HASH cache, we walk the
	 * hash table instead of the slab layer.
	 */
	if ((cp->cache_flags & UMF_HASH) && (type & UM_ALLOCATED)) {
		layered = "umem_hash";

		umw->umw_type |= UM_HASH;
	} else {
		/*
		 * If we are walking freed buffers, we only need the
		 * magazine layer plus the partially allocated slabs.
		 * To walk allocated buffers, we need all of the slabs.
		 */
		if (type & UM_ALLOCATED)
			layered = "umem_slab";
		else
			layered = "umem_slab_partial";

		/*
		 * for small-slab caches, we read in the entire slab.  For
		 * freed buffers, we can just walk the freelist.  For
		 * allocated buffers, we use a 'valid' array to track
		 * the freed buffers.
		 */
		if (!(cp->cache_flags & UMF_HASH)) {
			chunksize = cp->cache_chunksize;
			slabsize = cp->cache_slabsize;

			umw->umw_ubase = mdb_alloc(slabsize +
			    sizeof (umem_bufctl_t), UM_SLEEP);

			if (type & UM_ALLOCATED)
				umw->umw_valid =
				    mdb_alloc(slabsize / chunksize, UM_SLEEP);
		}
	}

	status = WALK_NEXT;

	if (mdb_layered_walk(layered, wsp) == -1) {
		mdb_warn("unable to start layered '%s' walk", layered);
		status = WALK_ERR;
	}

out1:
	if (status == WALK_ERR) {
		if (umw->umw_valid)
			mdb_free(umw->umw_valid, slabsize / chunksize);

		if (umw->umw_ubase)
			mdb_free(umw->umw_ubase, slabsize +
			    sizeof (umem_bufctl_t));

		if (umw->umw_maglist)
			mdb_free(umw->umw_maglist, umw->umw_max *
			    sizeof (uintptr_t));

		mdb_free(umw, sizeof (umem_walk_t));
		wsp->walk_data = NULL;
	}

out2:
	if (status == WALK_ERR)
		mdb_free(cp, csize);

	return (status);
}
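/*
 * The maglist built above is the "not really allocated" filter for the
 * allocated-buffer walk: any buffer found in the slab or hash layer is
 * first looked up in the sorted maglist (via bsearch() with addrcmp())
 * and skipped if it is actually sitting in a magazine.  Sorting once up
 * front makes each of those membership tests O(log n).
 */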
int
umem_walk_step(mdb_walk_state_t *wsp)
{
	umem_walk_t *umw = wsp->walk_data;
	int type = umw->umw_type;
	umem_cache_t *cp = umw->umw_cp;

	void **maglist = umw->umw_maglist;
	int magcnt = umw->umw_count;

	uintptr_t chunksize, slabsize;
	uintptr_t addr;
	const umem_slab_t *sp;
	const umem_bufctl_t *bcp;
	umem_bufctl_t bc;

	int chunks;
	char *kbase;
	void *buf;
	int i, ret;

	char *valid, *ubase;

	/*
	 * first, handle the 'umem_hash' layered walk case
	 */
	if (type & UM_HASH) {
		/*
		 * We have a buffer which has been allocated out of the
		 * global layer.  We need to make sure that it's not
		 * actually sitting in a magazine before we report it as
		 * an allocated buffer.
		 */
		buf = ((const umem_bufctl_t *)wsp->walk_layer)->bc_addr;

		if (magcnt > 0 &&
		    bsearch(&buf, maglist, magcnt, sizeof (void *),
		    addrcmp) != NULL)
			return (WALK_NEXT);

		if (type & UM_BUFCTL)
			return (bufctl_walk_callback(cp, wsp, wsp->walk_addr));

		return (umem_walk_callback(wsp, (uintptr_t)buf));
	}

	ret = WALK_NEXT;

	addr = umw->umw_addr;

	/*
	 * If we're walking freed buffers, report everything in the
	 * magazine layer before processing the first slab.
	 */
	if ((type & UM_FREE) && magcnt != 0) {
		umw->umw_count = 0;		/* only do this once */
		for (i = 0; i < magcnt; i++) {
			buf = maglist[i];

			if (type & UM_BUFCTL) {
				uintptr_t out;

				if (cp->cache_flags & UMF_BUFTAG) {
					umem_buftag_t *btp;
					umem_buftag_t tag;

					/* LINTED - alignment */
					btp = UMEM_BUFTAG(cp, buf);
					if (mdb_vread(&tag, sizeof (tag),
					    (uintptr_t)btp) == -1) {
						mdb_warn("reading buftag for "
						    "%p at %p", buf, btp);
						continue;
					}
					out = (uintptr_t)tag.bt_bufctl;
				} else {
					if (umem_hash_lookup(cp, addr, buf,
					    &out) == -1)
						continue;
				}
				ret = bufctl_walk_callback(cp, wsp, out);
			} else {
				ret = umem_walk_callback(wsp, (uintptr_t)buf);
			}

			if (ret != WALK_NEXT)
				return (ret);
		}
	}

	/*
	 * Handle the buffers in the current slab
	 */
	chunksize = cp->cache_chunksize;
	slabsize = cp->cache_slabsize;

	sp = wsp->walk_layer;
	chunks = sp->slab_chunks;
	kbase = sp->slab_base;

	dprintf(("kbase is %p\n", kbase));

	if (!(cp->cache_flags & UMF_HASH)) {
		valid = umw->umw_valid;
		ubase = umw->umw_ubase;

		if (mdb_vread(ubase, chunks * chunksize,
		    (uintptr_t)kbase) == -1) {
			mdb_warn("failed to read slab contents at %p", kbase);
			return (WALK_ERR);
		}

		/*
		 * Set up the valid map as fully allocated -- we'll punch
		 * out the freelist.
		 */
		if (type & UM_ALLOCATED)
			(void) memset(valid, 1, chunks);
	} else {
		valid = NULL;
		ubase = NULL;
	}
	/*
	 * walk the slab's freelist
	 */
	bcp = sp->slab_head;

	dprintf(("refcnt is %d; chunks is %d\n", sp->slab_refcnt, chunks));

	/*
	 * since we could be in the middle of allocating a buffer,
	 * our refcnt could be one higher than it ought to be.  So we
	 * check one further on the freelist than the count allows.
	 */
	for (i = sp->slab_refcnt; i <= chunks; i++) {
		uint_t ndx;

		dprintf(("bcp is %p\n", bcp));

		if (bcp == NULL) {
			if (i == chunks)
				break;
			mdb_warn(
			    "slab %p in cache %p freelist too short by %d\n",
			    sp, addr, chunks - i);
			break;
		}

		if (cp->cache_flags & UMF_HASH) {
			if (mdb_vread(&bc, sizeof (bc), (uintptr_t)bcp) == -1) {
				mdb_warn("failed to read bufctl ptr at %p",
				    bcp);
				break;
			}
			buf = bc.bc_addr;
		} else {
			/*
			 * Otherwise the buffer is in the slab which
			 * we've read in; we just need to determine
			 * its offset in the slab to find the
			 * umem_bufctl_t.
			 */
			bc = *((umem_bufctl_t *)
			    ((uintptr_t)bcp - (uintptr_t)kbase +
			    (uintptr_t)ubase));

			buf = UMEM_BUF(cp, bcp);
		}

		ndx = ((uintptr_t)buf - (uintptr_t)kbase) / chunksize;

		if (ndx > slabsize / cp->cache_bufsize) {
			/*
			 * This is very wrong; we have managed to find
			 * a buffer in the slab which shouldn't
			 * actually be here.  Emit a warning, and
			 * try to continue.
			 */
13110Sstevel@tonic-gate */ 13120Sstevel@tonic-gate mdb_warn("buf %p is out of range for " 13130Sstevel@tonic-gate "slab %p, cache %p\n", buf, sp, addr); 13140Sstevel@tonic-gate } else if (type & UM_ALLOCATED) { 13150Sstevel@tonic-gate /* 13160Sstevel@tonic-gate * we have found a buffer on the slab's freelist; 13170Sstevel@tonic-gate * clear its entry 13180Sstevel@tonic-gate */ 13190Sstevel@tonic-gate valid[ndx] = 0; 13200Sstevel@tonic-gate } else { 13210Sstevel@tonic-gate /* 13220Sstevel@tonic-gate * Report this freed buffer 13230Sstevel@tonic-gate */ 13240Sstevel@tonic-gate if (type & UM_BUFCTL) { 13250Sstevel@tonic-gate ret = bufctl_walk_callback(cp, wsp, 13260Sstevel@tonic-gate (uintptr_t)bcp); 13270Sstevel@tonic-gate } else { 13280Sstevel@tonic-gate ret = umem_walk_callback(wsp, (uintptr_t)buf); 13290Sstevel@tonic-gate } 13300Sstevel@tonic-gate if (ret != WALK_NEXT) 13310Sstevel@tonic-gate return (ret); 13320Sstevel@tonic-gate } 13330Sstevel@tonic-gate 13340Sstevel@tonic-gate bcp = bc.bc_next; 13350Sstevel@tonic-gate } 13360Sstevel@tonic-gate 13370Sstevel@tonic-gate if (bcp != NULL) { 13380Sstevel@tonic-gate dprintf(("slab %p in cache %p freelist too long (%p)\n", 13390Sstevel@tonic-gate sp, addr, bcp)); 13400Sstevel@tonic-gate } 13410Sstevel@tonic-gate 13420Sstevel@tonic-gate /* 13430Sstevel@tonic-gate * If we are walking freed buffers, the loop above handled reporting 13440Sstevel@tonic-gate * them. 13450Sstevel@tonic-gate */ 13460Sstevel@tonic-gate if (type & UM_FREE) 13470Sstevel@tonic-gate return (WALK_NEXT); 13480Sstevel@tonic-gate 13490Sstevel@tonic-gate if (type & UM_BUFCTL) { 13500Sstevel@tonic-gate mdb_warn("impossible situation: small-slab UM_BUFCTL walk for " 13510Sstevel@tonic-gate "cache %p\n", addr); 13520Sstevel@tonic-gate return (WALK_ERR); 13530Sstevel@tonic-gate } 13540Sstevel@tonic-gate 13550Sstevel@tonic-gate /* 13560Sstevel@tonic-gate * Report allocated buffers, skipping buffers in the magazine layer. 13570Sstevel@tonic-gate * We only get this far for small-slab caches. 
13580Sstevel@tonic-gate */ 13590Sstevel@tonic-gate for (i = 0; ret == WALK_NEXT && i < chunks; i++) { 13600Sstevel@tonic-gate buf = (char *)kbase + i * chunksize; 13610Sstevel@tonic-gate 13620Sstevel@tonic-gate if (!valid[i]) 13630Sstevel@tonic-gate continue; /* on slab freelist */ 13640Sstevel@tonic-gate 13650Sstevel@tonic-gate if (magcnt > 0 && 13660Sstevel@tonic-gate bsearch(&buf, maglist, magcnt, sizeof (void *), 13670Sstevel@tonic-gate addrcmp) != NULL) 13680Sstevel@tonic-gate continue; /* in magazine layer */ 13690Sstevel@tonic-gate 13700Sstevel@tonic-gate ret = umem_walk_callback(wsp, (uintptr_t)buf); 13710Sstevel@tonic-gate } 13720Sstevel@tonic-gate return (ret); 13730Sstevel@tonic-gate } 13740Sstevel@tonic-gate 13750Sstevel@tonic-gate void 13760Sstevel@tonic-gate umem_walk_fini(mdb_walk_state_t *wsp) 13770Sstevel@tonic-gate { 13780Sstevel@tonic-gate umem_walk_t *umw = wsp->walk_data; 13790Sstevel@tonic-gate uintptr_t chunksize; 13800Sstevel@tonic-gate uintptr_t slabsize; 13810Sstevel@tonic-gate 13820Sstevel@tonic-gate if (umw == NULL) 13830Sstevel@tonic-gate return; 13840Sstevel@tonic-gate 13850Sstevel@tonic-gate if (umw->umw_maglist != NULL) 13860Sstevel@tonic-gate mdb_free(umw->umw_maglist, umw->umw_max * sizeof (void *)); 13870Sstevel@tonic-gate 13880Sstevel@tonic-gate chunksize = umw->umw_cp->cache_chunksize; 13890Sstevel@tonic-gate slabsize = umw->umw_cp->cache_slabsize; 13900Sstevel@tonic-gate 13910Sstevel@tonic-gate if (umw->umw_valid != NULL) 13920Sstevel@tonic-gate mdb_free(umw->umw_valid, slabsize / chunksize); 13930Sstevel@tonic-gate if (umw->umw_ubase != NULL) 13940Sstevel@tonic-gate mdb_free(umw->umw_ubase, slabsize + sizeof (umem_bufctl_t)); 13950Sstevel@tonic-gate 13960Sstevel@tonic-gate mdb_free(umw->umw_cp, umw->umw_csize); 13970Sstevel@tonic-gate mdb_free(umw, sizeof (umem_walk_t)); 13980Sstevel@tonic-gate } 13990Sstevel@tonic-gate 14000Sstevel@tonic-gate /*ARGSUSED*/ 14010Sstevel@tonic-gate static int 14020Sstevel@tonic-gate umem_walk_all(uintptr_t addr, const umem_cache_t *c, mdb_walk_state_t *wsp) 14030Sstevel@tonic-gate { 14040Sstevel@tonic-gate /* 14050Sstevel@tonic-gate * Buffers allocated from NOTOUCH caches can also show up as freed 14060Sstevel@tonic-gate * memory in other caches. This can be a little confusing, so we 14070Sstevel@tonic-gate * don't walk NOTOUCH caches when walking all caches (thereby assuring 14080Sstevel@tonic-gate * that "::walk umem" and "::walk freemem" yield disjoint output). 
14090Sstevel@tonic-gate */ 14100Sstevel@tonic-gate if (c->cache_cflags & UMC_NOTOUCH) 14110Sstevel@tonic-gate return (WALK_NEXT); 14120Sstevel@tonic-gate 14130Sstevel@tonic-gate if (mdb_pwalk(wsp->walk_data, wsp->walk_callback, 14140Sstevel@tonic-gate wsp->walk_cbdata, addr) == -1) 14150Sstevel@tonic-gate return (WALK_DONE); 14160Sstevel@tonic-gate 14170Sstevel@tonic-gate return (WALK_NEXT); 14180Sstevel@tonic-gate } 14190Sstevel@tonic-gate 14200Sstevel@tonic-gate #define UMEM_WALK_ALL(name, wsp) { \ 14210Sstevel@tonic-gate wsp->walk_data = (name); \ 14220Sstevel@tonic-gate if (mdb_walk("umem_cache", (mdb_walk_cb_t)umem_walk_all, wsp) == -1) \ 14230Sstevel@tonic-gate return (WALK_ERR); \ 14240Sstevel@tonic-gate return (WALK_DONE); \ 14250Sstevel@tonic-gate } 14260Sstevel@tonic-gate 14270Sstevel@tonic-gate int 14280Sstevel@tonic-gate umem_walk_init(mdb_walk_state_t *wsp) 14290Sstevel@tonic-gate { 14300Sstevel@tonic-gate if (wsp->walk_arg != NULL) 14310Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)wsp->walk_arg; 14320Sstevel@tonic-gate 14330Sstevel@tonic-gate if (wsp->walk_addr == NULL) 14340Sstevel@tonic-gate UMEM_WALK_ALL("umem", wsp); 14350Sstevel@tonic-gate return (umem_walk_init_common(wsp, UM_ALLOCATED)); 14360Sstevel@tonic-gate } 14370Sstevel@tonic-gate 14380Sstevel@tonic-gate int 14390Sstevel@tonic-gate bufctl_walk_init(mdb_walk_state_t *wsp) 14400Sstevel@tonic-gate { 14410Sstevel@tonic-gate if (wsp->walk_addr == NULL) 14420Sstevel@tonic-gate UMEM_WALK_ALL("bufctl", wsp); 14430Sstevel@tonic-gate return (umem_walk_init_common(wsp, UM_ALLOCATED | UM_BUFCTL)); 14440Sstevel@tonic-gate } 14450Sstevel@tonic-gate 14460Sstevel@tonic-gate int 14470Sstevel@tonic-gate freemem_walk_init(mdb_walk_state_t *wsp) 14480Sstevel@tonic-gate { 14490Sstevel@tonic-gate if (wsp->walk_addr == NULL) 14500Sstevel@tonic-gate UMEM_WALK_ALL("freemem", wsp); 14510Sstevel@tonic-gate return (umem_walk_init_common(wsp, UM_FREE)); 14520Sstevel@tonic-gate } 14530Sstevel@tonic-gate 14540Sstevel@tonic-gate int 14550Sstevel@tonic-gate freectl_walk_init(mdb_walk_state_t *wsp) 14560Sstevel@tonic-gate { 14570Sstevel@tonic-gate if (wsp->walk_addr == NULL) 14580Sstevel@tonic-gate UMEM_WALK_ALL("freectl", wsp); 14590Sstevel@tonic-gate return (umem_walk_init_common(wsp, UM_FREE | UM_BUFCTL)); 14600Sstevel@tonic-gate } 14610Sstevel@tonic-gate 14620Sstevel@tonic-gate typedef struct bufctl_history_walk { 14630Sstevel@tonic-gate void *bhw_next; 14640Sstevel@tonic-gate umem_cache_t *bhw_cache; 14650Sstevel@tonic-gate umem_slab_t *bhw_slab; 14660Sstevel@tonic-gate hrtime_t bhw_timestamp; 14670Sstevel@tonic-gate } bufctl_history_walk_t; 14680Sstevel@tonic-gate 14690Sstevel@tonic-gate int 14700Sstevel@tonic-gate bufctl_history_walk_init(mdb_walk_state_t *wsp) 14710Sstevel@tonic-gate { 14720Sstevel@tonic-gate bufctl_history_walk_t *bhw; 14730Sstevel@tonic-gate umem_bufctl_audit_t bc; 14740Sstevel@tonic-gate umem_bufctl_audit_t bcn; 14750Sstevel@tonic-gate 14760Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 14770Sstevel@tonic-gate mdb_warn("bufctl_history walk doesn't support global walks\n"); 14780Sstevel@tonic-gate return (WALK_ERR); 14790Sstevel@tonic-gate } 14800Sstevel@tonic-gate 14810Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), wsp->walk_addr) == -1) { 14820Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", wsp->walk_addr); 14830Sstevel@tonic-gate return (WALK_ERR); 14840Sstevel@tonic-gate } 14850Sstevel@tonic-gate 14860Sstevel@tonic-gate bhw = mdb_zalloc(sizeof (*bhw), UM_SLEEP); 14870Sstevel@tonic-gate 
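/*
 * Editorial note (illustrative sketch; the ex_* names are invented for
 * illustration, not part of the original code): this walker follows
 * the standard MDB walker lifecycle used throughout this file --
 * walk_init allocates per-walk state into wsp->walk_data, walk_step
 * emits one item through wsp->walk_callback, and walk_fini frees the
 * state:
 *
 *	typedef struct ex_walk {
 *		uintptr_t ew_next;		// next address to report
 *	} ex_walk_t;
 *
 *	static int
 *	ex_walk_init(mdb_walk_state_t *wsp)
 *	{
 *		ex_walk_t *ew = mdb_zalloc(sizeof (*ew), UM_SLEEP);
 *		ew->ew_next = wsp->walk_addr;	// starting point from user
 *		wsp->walk_data = ew;
 *		return (WALK_NEXT);
 *	}
 *
 *	static int
 *	ex_walk_step(mdb_walk_state_t *wsp)
 *	{
 *		ex_walk_t *ew = wsp->walk_data;
 *		uintptr_t addr = ew->ew_next;
 *
 *		if (addr == NULL)
 *			return (WALK_DONE);
 *		ew->ew_next = NULL;		// single item, for brevity
 *		return (wsp->walk_callback(addr, NULL, wsp->walk_cbdata));
 *	}
 *
 *	static void
 *	ex_walk_fini(mdb_walk_state_t *wsp)
 *	{
 *		mdb_free(wsp->walk_data, sizeof (ex_walk_t));
 *	}
 */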
bhw->bhw_timestamp = 0; 14880Sstevel@tonic-gate bhw->bhw_cache = bc.bc_cache; 14890Sstevel@tonic-gate bhw->bhw_slab = bc.bc_slab; 14900Sstevel@tonic-gate 14910Sstevel@tonic-gate /* 14920Sstevel@tonic-gate * sometimes the first log entry matches the base bufctl; in that 14930Sstevel@tonic-gate * case, skip the base bufctl. 14940Sstevel@tonic-gate */ 14950Sstevel@tonic-gate if (bc.bc_lastlog != NULL && 14960Sstevel@tonic-gate mdb_vread(&bcn, sizeof (bcn), (uintptr_t)bc.bc_lastlog) != -1 && 14970Sstevel@tonic-gate bc.bc_addr == bcn.bc_addr && 14980Sstevel@tonic-gate bc.bc_cache == bcn.bc_cache && 14990Sstevel@tonic-gate bc.bc_slab == bcn.bc_slab && 15000Sstevel@tonic-gate bc.bc_timestamp == bcn.bc_timestamp && 15010Sstevel@tonic-gate bc.bc_thread == bcn.bc_thread) 15020Sstevel@tonic-gate bhw->bhw_next = bc.bc_lastlog; 15030Sstevel@tonic-gate else 15040Sstevel@tonic-gate bhw->bhw_next = (void *)wsp->walk_addr; 15050Sstevel@tonic-gate 15060Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)bc.bc_addr; 15070Sstevel@tonic-gate wsp->walk_data = bhw; 15080Sstevel@tonic-gate 15090Sstevel@tonic-gate return (WALK_NEXT); 15100Sstevel@tonic-gate } 15110Sstevel@tonic-gate 15120Sstevel@tonic-gate int 15130Sstevel@tonic-gate bufctl_history_walk_step(mdb_walk_state_t *wsp) 15140Sstevel@tonic-gate { 15150Sstevel@tonic-gate bufctl_history_walk_t *bhw = wsp->walk_data; 15160Sstevel@tonic-gate uintptr_t addr = (uintptr_t)bhw->bhw_next; 15170Sstevel@tonic-gate uintptr_t baseaddr = wsp->walk_addr; 15180Sstevel@tonic-gate umem_bufctl_audit_t *b; 15190Sstevel@tonic-gate UMEM_LOCAL_BUFCTL_AUDIT(&b); 15200Sstevel@tonic-gate 15210Sstevel@tonic-gate if (addr == NULL) 15220Sstevel@tonic-gate return (WALK_DONE); 15230Sstevel@tonic-gate 15240Sstevel@tonic-gate if (mdb_vread(b, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) { 15250Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", bhw->bhw_next); 15260Sstevel@tonic-gate return (WALK_ERR); 15270Sstevel@tonic-gate } 15280Sstevel@tonic-gate 15290Sstevel@tonic-gate /* 15300Sstevel@tonic-gate * The bufctl is only valid if the address, cache, and slab are 15310Sstevel@tonic-gate * correct. We also check that the timestamp is decreasing, to 15320Sstevel@tonic-gate * prevent infinite loops. 
15330Sstevel@tonic-gate */ 15340Sstevel@tonic-gate if ((uintptr_t)b->bc_addr != baseaddr || 15350Sstevel@tonic-gate b->bc_cache != bhw->bhw_cache || 15360Sstevel@tonic-gate b->bc_slab != bhw->bhw_slab || 15370Sstevel@tonic-gate (bhw->bhw_timestamp != 0 && b->bc_timestamp >= bhw->bhw_timestamp)) 15380Sstevel@tonic-gate return (WALK_DONE); 15390Sstevel@tonic-gate 15400Sstevel@tonic-gate bhw->bhw_next = b->bc_lastlog; 15410Sstevel@tonic-gate bhw->bhw_timestamp = b->bc_timestamp; 15420Sstevel@tonic-gate 15430Sstevel@tonic-gate return (wsp->walk_callback(addr, b, wsp->walk_cbdata)); 15440Sstevel@tonic-gate } 15450Sstevel@tonic-gate 15460Sstevel@tonic-gate void 15470Sstevel@tonic-gate bufctl_history_walk_fini(mdb_walk_state_t *wsp) 15480Sstevel@tonic-gate { 15490Sstevel@tonic-gate bufctl_history_walk_t *bhw = wsp->walk_data; 15500Sstevel@tonic-gate 15510Sstevel@tonic-gate mdb_free(bhw, sizeof (*bhw)); 15520Sstevel@tonic-gate } 15530Sstevel@tonic-gate 15540Sstevel@tonic-gate typedef struct umem_log_walk { 15550Sstevel@tonic-gate umem_bufctl_audit_t *ulw_base; 15560Sstevel@tonic-gate umem_bufctl_audit_t **ulw_sorted; 15570Sstevel@tonic-gate umem_log_header_t ulw_lh; 15580Sstevel@tonic-gate size_t ulw_size; 15590Sstevel@tonic-gate size_t ulw_maxndx; 15600Sstevel@tonic-gate size_t ulw_ndx; 15610Sstevel@tonic-gate } umem_log_walk_t; 15620Sstevel@tonic-gate 15630Sstevel@tonic-gate int 15640Sstevel@tonic-gate umem_log_walk_init(mdb_walk_state_t *wsp) 15650Sstevel@tonic-gate { 15660Sstevel@tonic-gate uintptr_t lp = wsp->walk_addr; 15670Sstevel@tonic-gate umem_log_walk_t *ulw; 15680Sstevel@tonic-gate umem_log_header_t *lhp; 15690Sstevel@tonic-gate int maxndx, i, j, k; 15700Sstevel@tonic-gate 15710Sstevel@tonic-gate /* 15720Sstevel@tonic-gate * By default (global walk), walk the umem_transaction_log. Otherwise 15730Sstevel@tonic-gate * read the log whose umem_log_header_t is stored at walk_addr. 
15740Sstevel@tonic-gate */ 15750Sstevel@tonic-gate if (lp == NULL && umem_readvar(&lp, "umem_transaction_log") == -1) { 15760Sstevel@tonic-gate mdb_warn("failed to read 'umem_transaction_log'"); 15770Sstevel@tonic-gate return (WALK_ERR); 15780Sstevel@tonic-gate } 15790Sstevel@tonic-gate 15800Sstevel@tonic-gate if (lp == NULL) { 15810Sstevel@tonic-gate mdb_warn("log is disabled\n"); 15820Sstevel@tonic-gate return (WALK_ERR); 15830Sstevel@tonic-gate } 15840Sstevel@tonic-gate 15850Sstevel@tonic-gate ulw = mdb_zalloc(sizeof (umem_log_walk_t), UM_SLEEP); 15860Sstevel@tonic-gate lhp = &ulw->ulw_lh; 15870Sstevel@tonic-gate 15880Sstevel@tonic-gate if (mdb_vread(lhp, sizeof (umem_log_header_t), lp) == -1) { 15890Sstevel@tonic-gate mdb_warn("failed to read log header at %p", lp); 15900Sstevel@tonic-gate mdb_free(ulw, sizeof (umem_log_walk_t)); 15910Sstevel@tonic-gate return (WALK_ERR); 15920Sstevel@tonic-gate } 15930Sstevel@tonic-gate 15940Sstevel@tonic-gate ulw->ulw_size = lhp->lh_chunksize * lhp->lh_nchunks; 15950Sstevel@tonic-gate ulw->ulw_base = mdb_alloc(ulw->ulw_size, UM_SLEEP); 15960Sstevel@tonic-gate maxndx = lhp->lh_chunksize / UMEM_BUFCTL_AUDIT_SIZE - 1; 15970Sstevel@tonic-gate 15980Sstevel@tonic-gate if (mdb_vread(ulw->ulw_base, ulw->ulw_size, 15990Sstevel@tonic-gate (uintptr_t)lhp->lh_base) == -1) { 16000Sstevel@tonic-gate mdb_warn("failed to read log at base %p", lhp->lh_base); 16010Sstevel@tonic-gate mdb_free(ulw->ulw_base, ulw->ulw_size); 16020Sstevel@tonic-gate mdb_free(ulw, sizeof (umem_log_walk_t)); 16030Sstevel@tonic-gate return (WALK_ERR); 16040Sstevel@tonic-gate } 16050Sstevel@tonic-gate 16060Sstevel@tonic-gate ulw->ulw_sorted = mdb_alloc(maxndx * lhp->lh_nchunks * 16070Sstevel@tonic-gate sizeof (umem_bufctl_audit_t *), UM_SLEEP); 16080Sstevel@tonic-gate 16090Sstevel@tonic-gate for (i = 0, k = 0; i < lhp->lh_nchunks; i++) { 16100Sstevel@tonic-gate caddr_t chunk = (caddr_t) 16110Sstevel@tonic-gate ((uintptr_t)ulw->ulw_base + i * lhp->lh_chunksize); 16120Sstevel@tonic-gate 16130Sstevel@tonic-gate for (j = 0; j < maxndx; j++) { 16140Sstevel@tonic-gate /* LINTED align */ 16150Sstevel@tonic-gate ulw->ulw_sorted[k++] = (umem_bufctl_audit_t *)chunk; 16160Sstevel@tonic-gate chunk += UMEM_BUFCTL_AUDIT_SIZE; 16170Sstevel@tonic-gate } 16180Sstevel@tonic-gate } 16190Sstevel@tonic-gate 16200Sstevel@tonic-gate qsort(ulw->ulw_sorted, k, sizeof (umem_bufctl_audit_t *), 16210Sstevel@tonic-gate (int(*)(const void *, const void *))bufctlcmp); 16220Sstevel@tonic-gate 16230Sstevel@tonic-gate ulw->ulw_maxndx = k; 16240Sstevel@tonic-gate wsp->walk_data = ulw; 16250Sstevel@tonic-gate 16260Sstevel@tonic-gate return (WALK_NEXT); 16270Sstevel@tonic-gate } 16280Sstevel@tonic-gate 16290Sstevel@tonic-gate int 16300Sstevel@tonic-gate umem_log_walk_step(mdb_walk_state_t *wsp) 16310Sstevel@tonic-gate { 16320Sstevel@tonic-gate umem_log_walk_t *ulw = wsp->walk_data; 16330Sstevel@tonic-gate umem_bufctl_audit_t *bcp; 16340Sstevel@tonic-gate 16350Sstevel@tonic-gate if (ulw->ulw_ndx == ulw->ulw_maxndx) 16360Sstevel@tonic-gate return (WALK_DONE); 16370Sstevel@tonic-gate 16380Sstevel@tonic-gate bcp = ulw->ulw_sorted[ulw->ulw_ndx++]; 16390Sstevel@tonic-gate 16400Sstevel@tonic-gate return (wsp->walk_callback((uintptr_t)bcp - (uintptr_t)ulw->ulw_base + 16410Sstevel@tonic-gate (uintptr_t)ulw->ulw_lh.lh_base, bcp, wsp->walk_cbdata)); 16420Sstevel@tonic-gate } 16430Sstevel@tonic-gate 16440Sstevel@tonic-gate void 16450Sstevel@tonic-gate umem_log_walk_fini(mdb_walk_state_t *wsp) 16460Sstevel@tonic-gate { 
16470Sstevel@tonic-gate umem_log_walk_t *ulw = wsp->walk_data; 16480Sstevel@tonic-gate 16490Sstevel@tonic-gate mdb_free(ulw->ulw_base, ulw->ulw_size); 16500Sstevel@tonic-gate mdb_free(ulw->ulw_sorted, ulw->ulw_maxndx * 16510Sstevel@tonic-gate sizeof (umem_bufctl_audit_t *)); 16520Sstevel@tonic-gate mdb_free(ulw, sizeof (umem_log_walk_t)); 16530Sstevel@tonic-gate } 16540Sstevel@tonic-gate 16550Sstevel@tonic-gate typedef struct allocdby_bufctl { 16560Sstevel@tonic-gate uintptr_t abb_addr; 16570Sstevel@tonic-gate hrtime_t abb_ts; 16580Sstevel@tonic-gate } allocdby_bufctl_t; 16590Sstevel@tonic-gate 16600Sstevel@tonic-gate typedef struct allocdby_walk { 16610Sstevel@tonic-gate const char *abw_walk; 16620Sstevel@tonic-gate uintptr_t abw_thread; 16630Sstevel@tonic-gate size_t abw_nbufs; 16640Sstevel@tonic-gate size_t abw_size; 16650Sstevel@tonic-gate allocdby_bufctl_t *abw_buf; 16660Sstevel@tonic-gate size_t abw_ndx; 16670Sstevel@tonic-gate } allocdby_walk_t; 16680Sstevel@tonic-gate 16690Sstevel@tonic-gate int 16700Sstevel@tonic-gate allocdby_walk_bufctl(uintptr_t addr, const umem_bufctl_audit_t *bcp, 16710Sstevel@tonic-gate allocdby_walk_t *abw) 16720Sstevel@tonic-gate { 16730Sstevel@tonic-gate if ((uintptr_t)bcp->bc_thread != abw->abw_thread) 16740Sstevel@tonic-gate return (WALK_NEXT); 16750Sstevel@tonic-gate 16760Sstevel@tonic-gate if (abw->abw_nbufs == abw->abw_size) { 16770Sstevel@tonic-gate allocdby_bufctl_t *buf; 16780Sstevel@tonic-gate size_t oldsize = sizeof (allocdby_bufctl_t) * abw->abw_size; 16790Sstevel@tonic-gate 16800Sstevel@tonic-gate buf = mdb_zalloc(oldsize << 1, UM_SLEEP); 16810Sstevel@tonic-gate 16820Sstevel@tonic-gate bcopy(abw->abw_buf, buf, oldsize); 16830Sstevel@tonic-gate mdb_free(abw->abw_buf, oldsize); 16840Sstevel@tonic-gate 16850Sstevel@tonic-gate abw->abw_size <<= 1; 16860Sstevel@tonic-gate abw->abw_buf = buf; 16870Sstevel@tonic-gate } 16880Sstevel@tonic-gate 16890Sstevel@tonic-gate abw->abw_buf[abw->abw_nbufs].abb_addr = addr; 16900Sstevel@tonic-gate abw->abw_buf[abw->abw_nbufs].abb_ts = bcp->bc_timestamp; 16910Sstevel@tonic-gate abw->abw_nbufs++; 16920Sstevel@tonic-gate 16930Sstevel@tonic-gate return (WALK_NEXT); 16940Sstevel@tonic-gate } 16950Sstevel@tonic-gate 16960Sstevel@tonic-gate /*ARGSUSED*/ 16970Sstevel@tonic-gate int 16980Sstevel@tonic-gate allocdby_walk_cache(uintptr_t addr, const umem_cache_t *c, allocdby_walk_t *abw) 16990Sstevel@tonic-gate { 17000Sstevel@tonic-gate if (mdb_pwalk(abw->abw_walk, (mdb_walk_cb_t)allocdby_walk_bufctl, 17010Sstevel@tonic-gate abw, addr) == -1) { 17020Sstevel@tonic-gate mdb_warn("couldn't walk bufctl for cache %p", addr); 17030Sstevel@tonic-gate return (WALK_DONE); 17040Sstevel@tonic-gate } 17050Sstevel@tonic-gate 17060Sstevel@tonic-gate return (WALK_NEXT); 17070Sstevel@tonic-gate } 17080Sstevel@tonic-gate 17090Sstevel@tonic-gate static int 17100Sstevel@tonic-gate allocdby_cmp(const allocdby_bufctl_t *lhs, const allocdby_bufctl_t *rhs) 17110Sstevel@tonic-gate { 17120Sstevel@tonic-gate if (lhs->abb_ts < rhs->abb_ts) 17130Sstevel@tonic-gate return (1); 17140Sstevel@tonic-gate if (lhs->abb_ts > rhs->abb_ts) 17150Sstevel@tonic-gate return (-1); 17160Sstevel@tonic-gate return (0); 17170Sstevel@tonic-gate } 17180Sstevel@tonic-gate 17190Sstevel@tonic-gate static int 17200Sstevel@tonic-gate allocdby_walk_init_common(mdb_walk_state_t *wsp, const char *walk) 17210Sstevel@tonic-gate { 17220Sstevel@tonic-gate allocdby_walk_t *abw; 17230Sstevel@tonic-gate 17240Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 17250Sstevel@tonic-gate 
mdb_warn("allocdby walk doesn't support global walks\n"); 17260Sstevel@tonic-gate return (WALK_ERR); 17270Sstevel@tonic-gate } 17280Sstevel@tonic-gate 17290Sstevel@tonic-gate abw = mdb_zalloc(sizeof (allocdby_walk_t), UM_SLEEP); 17300Sstevel@tonic-gate 17310Sstevel@tonic-gate abw->abw_thread = wsp->walk_addr; 17320Sstevel@tonic-gate abw->abw_walk = walk; 17330Sstevel@tonic-gate abw->abw_size = 128; /* something reasonable */ 17340Sstevel@tonic-gate abw->abw_buf = 17350Sstevel@tonic-gate mdb_zalloc(abw->abw_size * sizeof (allocdby_bufctl_t), UM_SLEEP); 17360Sstevel@tonic-gate 17370Sstevel@tonic-gate wsp->walk_data = abw; 17380Sstevel@tonic-gate 17390Sstevel@tonic-gate if (mdb_walk("umem_cache", 17400Sstevel@tonic-gate (mdb_walk_cb_t)allocdby_walk_cache, abw) == -1) { 17410Sstevel@tonic-gate mdb_warn("couldn't walk umem_cache"); 17420Sstevel@tonic-gate allocdby_walk_fini(wsp); 17430Sstevel@tonic-gate return (WALK_ERR); 17440Sstevel@tonic-gate } 17450Sstevel@tonic-gate 17460Sstevel@tonic-gate qsort(abw->abw_buf, abw->abw_nbufs, sizeof (allocdby_bufctl_t), 17470Sstevel@tonic-gate (int(*)(const void *, const void *))allocdby_cmp); 17480Sstevel@tonic-gate 17490Sstevel@tonic-gate return (WALK_NEXT); 17500Sstevel@tonic-gate } 17510Sstevel@tonic-gate 17520Sstevel@tonic-gate int 17530Sstevel@tonic-gate allocdby_walk_init(mdb_walk_state_t *wsp) 17540Sstevel@tonic-gate { 17550Sstevel@tonic-gate return (allocdby_walk_init_common(wsp, "bufctl")); 17560Sstevel@tonic-gate } 17570Sstevel@tonic-gate 17580Sstevel@tonic-gate int 17590Sstevel@tonic-gate freedby_walk_init(mdb_walk_state_t *wsp) 17600Sstevel@tonic-gate { 17610Sstevel@tonic-gate return (allocdby_walk_init_common(wsp, "freectl")); 17620Sstevel@tonic-gate } 17630Sstevel@tonic-gate 17640Sstevel@tonic-gate int 17650Sstevel@tonic-gate allocdby_walk_step(mdb_walk_state_t *wsp) 17660Sstevel@tonic-gate { 17670Sstevel@tonic-gate allocdby_walk_t *abw = wsp->walk_data; 17680Sstevel@tonic-gate uintptr_t addr; 17690Sstevel@tonic-gate umem_bufctl_audit_t *bcp; 17700Sstevel@tonic-gate UMEM_LOCAL_BUFCTL_AUDIT(&bcp); 17710Sstevel@tonic-gate 17720Sstevel@tonic-gate if (abw->abw_ndx == abw->abw_nbufs) 17730Sstevel@tonic-gate return (WALK_DONE); 17740Sstevel@tonic-gate 17750Sstevel@tonic-gate addr = abw->abw_buf[abw->abw_ndx++].abb_addr; 17760Sstevel@tonic-gate 17770Sstevel@tonic-gate if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) { 17780Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr); 17790Sstevel@tonic-gate return (WALK_DONE); 17800Sstevel@tonic-gate } 17810Sstevel@tonic-gate 17820Sstevel@tonic-gate return (wsp->walk_callback(addr, bcp, wsp->walk_cbdata)); 17830Sstevel@tonic-gate } 17840Sstevel@tonic-gate 17850Sstevel@tonic-gate void 17860Sstevel@tonic-gate allocdby_walk_fini(mdb_walk_state_t *wsp) 17870Sstevel@tonic-gate { 17880Sstevel@tonic-gate allocdby_walk_t *abw = wsp->walk_data; 17890Sstevel@tonic-gate 17900Sstevel@tonic-gate mdb_free(abw->abw_buf, sizeof (allocdby_bufctl_t) * abw->abw_size); 17910Sstevel@tonic-gate mdb_free(abw, sizeof (allocdby_walk_t)); 17920Sstevel@tonic-gate } 17930Sstevel@tonic-gate 17940Sstevel@tonic-gate /*ARGSUSED*/ 17950Sstevel@tonic-gate int 17960Sstevel@tonic-gate allocdby_walk(uintptr_t addr, const umem_bufctl_audit_t *bcp, void *ignored) 17970Sstevel@tonic-gate { 17980Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 17990Sstevel@tonic-gate GElf_Sym sym; 18000Sstevel@tonic-gate int i; 18010Sstevel@tonic-gate 18020Sstevel@tonic-gate mdb_printf("%0?p %12llx ", addr, bcp->bc_timestamp); 18030Sstevel@tonic-gate 
for (i = 0; i < bcp->bc_depth; i++) { 18040Sstevel@tonic-gate if (mdb_lookup_by_addr(bcp->bc_stack[i], 18050Sstevel@tonic-gate MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1) 18060Sstevel@tonic-gate continue; 18070Sstevel@tonic-gate if (is_umem_sym(c, "umem_")) 18080Sstevel@tonic-gate continue; 18090Sstevel@tonic-gate mdb_printf("%s+0x%lx", 18100Sstevel@tonic-gate c, bcp->bc_stack[i] - (uintptr_t)sym.st_value); 18110Sstevel@tonic-gate break; 18120Sstevel@tonic-gate } 18130Sstevel@tonic-gate mdb_printf("\n"); 18140Sstevel@tonic-gate 18150Sstevel@tonic-gate return (WALK_NEXT); 18160Sstevel@tonic-gate } 18170Sstevel@tonic-gate 18180Sstevel@tonic-gate static int 18190Sstevel@tonic-gate allocdby_common(uintptr_t addr, uint_t flags, const char *w) 18200Sstevel@tonic-gate { 18210Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 18220Sstevel@tonic-gate return (DCMD_USAGE); 18230Sstevel@tonic-gate 18240Sstevel@tonic-gate mdb_printf("%-?s %12s %s\n", "BUFCTL", "TIMESTAMP", "CALLER"); 18250Sstevel@tonic-gate 18260Sstevel@tonic-gate if (mdb_pwalk(w, (mdb_walk_cb_t)allocdby_walk, NULL, addr) == -1) { 18270Sstevel@tonic-gate mdb_warn("can't walk '%s' for %p", w, addr); 18280Sstevel@tonic-gate return (DCMD_ERR); 18290Sstevel@tonic-gate } 18300Sstevel@tonic-gate 18310Sstevel@tonic-gate return (DCMD_OK); 18320Sstevel@tonic-gate } 18330Sstevel@tonic-gate 18340Sstevel@tonic-gate /*ARGSUSED*/ 18350Sstevel@tonic-gate int 18360Sstevel@tonic-gate allocdby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 18370Sstevel@tonic-gate { 18380Sstevel@tonic-gate return (allocdby_common(addr, flags, "allocdby")); 18390Sstevel@tonic-gate } 18400Sstevel@tonic-gate 18410Sstevel@tonic-gate /*ARGSUSED*/ 18420Sstevel@tonic-gate int 18430Sstevel@tonic-gate freedby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 18440Sstevel@tonic-gate { 18450Sstevel@tonic-gate return (allocdby_common(addr, flags, "freedby")); 18460Sstevel@tonic-gate } 18470Sstevel@tonic-gate 18480Sstevel@tonic-gate typedef struct whatis { 18490Sstevel@tonic-gate uintptr_t w_addr; 18500Sstevel@tonic-gate const umem_cache_t *w_cache; 18510Sstevel@tonic-gate const vmem_t *w_vmem; 18520Sstevel@tonic-gate int w_found; 18530Sstevel@tonic-gate uint_t w_all; 18540Sstevel@tonic-gate uint_t w_bufctl; 1855*10388SJonathan.Adams@Sun.COM uint_t w_freemem; 1856*10388SJonathan.Adams@Sun.COM uint_t w_quiet; 1857*10388SJonathan.Adams@Sun.COM uint_t w_verbose; 18580Sstevel@tonic-gate } whatis_t; 18590Sstevel@tonic-gate 1860*10388SJonathan.Adams@Sun.COM /* nicely report pointers as offsets from a base */ 1861*10388SJonathan.Adams@Sun.COM static void 1862*10388SJonathan.Adams@Sun.COM whatis_report_pointer(uintptr_t addr, uintptr_t base, const char *description) 1863*10388SJonathan.Adams@Sun.COM { 1864*10388SJonathan.Adams@Sun.COM if (addr == base) 1865*10388SJonathan.Adams@Sun.COM mdb_printf("%p is %s", 1866*10388SJonathan.Adams@Sun.COM addr, description); 1867*10388SJonathan.Adams@Sun.COM else 1868*10388SJonathan.Adams@Sun.COM mdb_printf("%p is %p+%p, %s", 1869*10388SJonathan.Adams@Sun.COM addr, base, addr - base, description); 1870*10388SJonathan.Adams@Sun.COM } 1871*10388SJonathan.Adams@Sun.COM 1872*10388SJonathan.Adams@Sun.COM /* call one of our dcmd functions with "-v" and the provided address */ 1873*10388SJonathan.Adams@Sun.COM static void 1874*10388SJonathan.Adams@Sun.COM whatis_call_printer(mdb_dcmd_f *dcmd, uintptr_t addr) 1875*10388SJonathan.Adams@Sun.COM { 1876*10388SJonathan.Adams@Sun.COM mdb_arg_t a; 1877*10388SJonathan.Adams@Sun.COM a.a_type = 
MDB_TYPE_STRING; 1878*10388SJonathan.Adams@Sun.COM a.a_un.a_str = "-v"; 1879*10388SJonathan.Adams@Sun.COM 1880*10388SJonathan.Adams@Sun.COM (void) (*dcmd)(addr, DCMD_ADDRSPEC, 1, &a); 1881*10388SJonathan.Adams@Sun.COM } 1882*10388SJonathan.Adams@Sun.COM 18830Sstevel@tonic-gate static void 18840Sstevel@tonic-gate whatis_print_umem(uintptr_t addr, uintptr_t baddr, whatis_t *w) 18850Sstevel@tonic-gate { 1886*10388SJonathan.Adams@Sun.COM const umem_cache_t *cp = w->w_cache; 18870Sstevel@tonic-gate /* LINTED pointer cast may result in improper alignment */ 1888*10388SJonathan.Adams@Sun.COM uintptr_t btaddr = (uintptr_t)UMEM_BUFTAG(cp, addr); 18890Sstevel@tonic-gate intptr_t stat; 1890*10388SJonathan.Adams@Sun.COM int call_printer; 1891*10388SJonathan.Adams@Sun.COM 1892*10388SJonathan.Adams@Sun.COM if (cp->cache_flags & UMF_REDZONE) { 18930Sstevel@tonic-gate umem_buftag_t bt; 18940Sstevel@tonic-gate 18950Sstevel@tonic-gate if (mdb_vread(&bt, sizeof (bt), btaddr) == -1) 18960Sstevel@tonic-gate goto done; 18970Sstevel@tonic-gate 18980Sstevel@tonic-gate stat = (intptr_t)bt.bt_bufctl ^ bt.bt_bxstat; 18990Sstevel@tonic-gate 19000Sstevel@tonic-gate if (stat != UMEM_BUFTAG_ALLOC && stat != UMEM_BUFTAG_FREE) 19010Sstevel@tonic-gate goto done; 19020Sstevel@tonic-gate 19030Sstevel@tonic-gate /* 19040Sstevel@tonic-gate * provide the bufctl ptr if it has useful information 19050Sstevel@tonic-gate */ 1906*10388SJonathan.Adams@Sun.COM if (baddr == 0 && (cp->cache_flags & UMF_AUDIT)) 19070Sstevel@tonic-gate baddr = (uintptr_t)bt.bt_bufctl; 19080Sstevel@tonic-gate } 19090Sstevel@tonic-gate 19100Sstevel@tonic-gate done: 1911*10388SJonathan.Adams@Sun.COM call_printer = 1912*10388SJonathan.Adams@Sun.COM (!w->w_quiet && baddr != 0 && (cp->cache_flags & UMF_AUDIT)); 1913*10388SJonathan.Adams@Sun.COM 1914*10388SJonathan.Adams@Sun.COM whatis_report_pointer(w->w_addr, addr, ""); 1915*10388SJonathan.Adams@Sun.COM 1916*10388SJonathan.Adams@Sun.COM if (baddr != 0 && !call_printer) 1917*10388SJonathan.Adams@Sun.COM mdb_printf("bufctl %p ", baddr); 1918*10388SJonathan.Adams@Sun.COM 1919*10388SJonathan.Adams@Sun.COM mdb_printf("%s from %s%s\n", 1920*10388SJonathan.Adams@Sun.COM (w->w_freemem == FALSE) ? "allocated" : "freed", cp->cache_name, 1921*10388SJonathan.Adams@Sun.COM call_printer ? ":" : ""); 1922*10388SJonathan.Adams@Sun.COM 1923*10388SJonathan.Adams@Sun.COM if (call_printer) 1924*10388SJonathan.Adams@Sun.COM whatis_call_printer(bufctl, baddr); 19250Sstevel@tonic-gate } 19260Sstevel@tonic-gate 19270Sstevel@tonic-gate /*ARGSUSED*/ 19280Sstevel@tonic-gate static int 19290Sstevel@tonic-gate whatis_walk_umem(uintptr_t addr, void *ignored, whatis_t *w) 19300Sstevel@tonic-gate { 19310Sstevel@tonic-gate if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize) 19320Sstevel@tonic-gate return (WALK_NEXT); 19330Sstevel@tonic-gate 19340Sstevel@tonic-gate whatis_print_umem(addr, 0, w); 19350Sstevel@tonic-gate w->w_found++; 19360Sstevel@tonic-gate return (w->w_all == TRUE ? 
WALK_NEXT : WALK_DONE); 19370Sstevel@tonic-gate } 19380Sstevel@tonic-gate 19390Sstevel@tonic-gate static int 19400Sstevel@tonic-gate whatis_walk_seg(uintptr_t addr, const vmem_seg_t *vs, whatis_t *w) 19410Sstevel@tonic-gate { 19420Sstevel@tonic-gate if (w->w_addr < vs->vs_start || w->w_addr >= vs->vs_end) 19430Sstevel@tonic-gate return (WALK_NEXT); 19440Sstevel@tonic-gate 1945*10388SJonathan.Adams@Sun.COM whatis_report_pointer(w->w_addr, vs->vs_start, ""); 19460Sstevel@tonic-gate 19470Sstevel@tonic-gate /* 1948*10388SJonathan.Adams@Sun.COM * If we're not going to print it anyway, provide the vmem_seg pointer 1949*10388SJonathan.Adams@Sun.COM * if it has a stack trace. 19500Sstevel@tonic-gate */ 1951*10388SJonathan.Adams@Sun.COM if (w->w_quiet && (w->w_bufctl || 1952*10388SJonathan.Adams@Sun.COM (vs->vs_type == VMEM_ALLOC && vs->vs_depth != 0))) { 1953*10388SJonathan.Adams@Sun.COM mdb_printf("vmem_seg %p ", addr); 19540Sstevel@tonic-gate } 19550Sstevel@tonic-gate 1956*10388SJonathan.Adams@Sun.COM mdb_printf("%s from %s vmem arena%s\n", 1957*10388SJonathan.Adams@Sun.COM (w->w_freemem == FALSE) ? "allocated" : "freed", 1958*10388SJonathan.Adams@Sun.COM w->w_vmem->vm_name, !w->w_quiet ? ":" : ""); 1959*10388SJonathan.Adams@Sun.COM 1960*10388SJonathan.Adams@Sun.COM if (!w->w_quiet) 1961*10388SJonathan.Adams@Sun.COM whatis_call_printer(vmem_seg, addr); 19620Sstevel@tonic-gate 19630Sstevel@tonic-gate w->w_found++; 19640Sstevel@tonic-gate return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE); 19650Sstevel@tonic-gate } 19660Sstevel@tonic-gate 19670Sstevel@tonic-gate static int 19680Sstevel@tonic-gate whatis_walk_vmem(uintptr_t addr, const vmem_t *vmem, whatis_t *w) 19690Sstevel@tonic-gate { 19700Sstevel@tonic-gate const char *nm = vmem->vm_name; 19710Sstevel@tonic-gate w->w_vmem = vmem; 19720Sstevel@tonic-gate w->w_freemem = FALSE; 19730Sstevel@tonic-gate 19740Sstevel@tonic-gate if (w->w_verbose) 19750Sstevel@tonic-gate mdb_printf("Searching vmem arena %s...\n", nm); 19760Sstevel@tonic-gate 19770Sstevel@tonic-gate if (mdb_pwalk("vmem_alloc", 19780Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) { 19790Sstevel@tonic-gate mdb_warn("can't walk vmem seg for %p", addr); 19800Sstevel@tonic-gate return (WALK_NEXT); 19810Sstevel@tonic-gate } 19820Sstevel@tonic-gate 19830Sstevel@tonic-gate if (w->w_found && w->w_all == FALSE) 19840Sstevel@tonic-gate return (WALK_DONE); 19850Sstevel@tonic-gate 19860Sstevel@tonic-gate if (w->w_verbose) 19870Sstevel@tonic-gate mdb_printf("Searching vmem arena %s for free virtual...\n", nm); 19880Sstevel@tonic-gate 19890Sstevel@tonic-gate w->w_freemem = TRUE; 19900Sstevel@tonic-gate 19910Sstevel@tonic-gate if (mdb_pwalk("vmem_free", 19920Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) { 19930Sstevel@tonic-gate mdb_warn("can't walk vmem seg for %p", addr); 19940Sstevel@tonic-gate return (WALK_NEXT); 19950Sstevel@tonic-gate } 19960Sstevel@tonic-gate 19970Sstevel@tonic-gate return (w->w_found && w->w_all == FALSE ? 
WALK_DONE : WALK_NEXT); 19980Sstevel@tonic-gate } 19990Sstevel@tonic-gate 20000Sstevel@tonic-gate /*ARGSUSED*/ 20010Sstevel@tonic-gate static int 20020Sstevel@tonic-gate whatis_walk_bufctl(uintptr_t baddr, const umem_bufctl_t *bcp, whatis_t *w) 20030Sstevel@tonic-gate { 20040Sstevel@tonic-gate uintptr_t addr; 20050Sstevel@tonic-gate 20060Sstevel@tonic-gate if (bcp == NULL) 20070Sstevel@tonic-gate return (WALK_NEXT); 20080Sstevel@tonic-gate 20090Sstevel@tonic-gate addr = (uintptr_t)bcp->bc_addr; 20100Sstevel@tonic-gate 20110Sstevel@tonic-gate if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize) 20120Sstevel@tonic-gate return (WALK_NEXT); 20130Sstevel@tonic-gate 20140Sstevel@tonic-gate whatis_print_umem(addr, baddr, w); 20150Sstevel@tonic-gate w->w_found++; 20160Sstevel@tonic-gate return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE); 20170Sstevel@tonic-gate } 20180Sstevel@tonic-gate 20190Sstevel@tonic-gate static int 20200Sstevel@tonic-gate whatis_walk_cache(uintptr_t addr, const umem_cache_t *c, whatis_t *w) 20210Sstevel@tonic-gate { 20220Sstevel@tonic-gate char *walk, *freewalk; 20230Sstevel@tonic-gate mdb_walk_cb_t func; 20240Sstevel@tonic-gate 2025*10388SJonathan.Adams@Sun.COM /* For caches with auditing info, we always walk the bufctls */ 2026*10388SJonathan.Adams@Sun.COM if (w->w_bufctl || (c->cache_flags & UMF_AUDIT)) { 2027*10388SJonathan.Adams@Sun.COM walk = "bufctl"; 2028*10388SJonathan.Adams@Sun.COM freewalk = "freectl"; 2029*10388SJonathan.Adams@Sun.COM func = (mdb_walk_cb_t)whatis_walk_bufctl; 2030*10388SJonathan.Adams@Sun.COM } else { 20310Sstevel@tonic-gate walk = "umem"; 20320Sstevel@tonic-gate freewalk = "freemem"; 20330Sstevel@tonic-gate func = (mdb_walk_cb_t)whatis_walk_umem; 20340Sstevel@tonic-gate } 20350Sstevel@tonic-gate 20360Sstevel@tonic-gate if (w->w_verbose) 20370Sstevel@tonic-gate mdb_printf("Searching %s...\n", c->cache_name); 20380Sstevel@tonic-gate 20390Sstevel@tonic-gate w->w_cache = c; 20400Sstevel@tonic-gate w->w_freemem = FALSE; 20410Sstevel@tonic-gate 20420Sstevel@tonic-gate if (mdb_pwalk(walk, func, w, addr) == -1) { 20430Sstevel@tonic-gate mdb_warn("can't find %s walker", walk); 20440Sstevel@tonic-gate return (WALK_DONE); 20450Sstevel@tonic-gate } 20460Sstevel@tonic-gate 20470Sstevel@tonic-gate if (w->w_found && w->w_all == FALSE) 20480Sstevel@tonic-gate return (WALK_DONE); 20490Sstevel@tonic-gate 20500Sstevel@tonic-gate /* 20510Sstevel@tonic-gate * We have searched for allocated memory; now search for freed memory. 20520Sstevel@tonic-gate */ 20530Sstevel@tonic-gate if (w->w_verbose) 20540Sstevel@tonic-gate mdb_printf("Searching %s for free memory...\n", c->cache_name); 20550Sstevel@tonic-gate 20560Sstevel@tonic-gate w->w_freemem = TRUE; 20570Sstevel@tonic-gate 20580Sstevel@tonic-gate if (mdb_pwalk(freewalk, func, w, addr) == -1) { 20590Sstevel@tonic-gate mdb_warn("can't find %s walker", freewalk); 20600Sstevel@tonic-gate return (WALK_DONE); 20610Sstevel@tonic-gate } 20620Sstevel@tonic-gate 20630Sstevel@tonic-gate return (w->w_found && w->w_all == FALSE ? 
WALK_DONE : WALK_NEXT); 20640Sstevel@tonic-gate } 20650Sstevel@tonic-gate 20660Sstevel@tonic-gate static int 20670Sstevel@tonic-gate whatis_walk_touch(uintptr_t addr, const umem_cache_t *c, whatis_t *w) 20680Sstevel@tonic-gate { 20690Sstevel@tonic-gate if (c->cache_cflags & UMC_NOTOUCH) 20700Sstevel@tonic-gate return (WALK_NEXT); 20710Sstevel@tonic-gate 20720Sstevel@tonic-gate return (whatis_walk_cache(addr, c, w)); 20730Sstevel@tonic-gate } 20740Sstevel@tonic-gate 20750Sstevel@tonic-gate static int 20760Sstevel@tonic-gate whatis_walk_notouch(uintptr_t addr, const umem_cache_t *c, whatis_t *w) 20770Sstevel@tonic-gate { 20780Sstevel@tonic-gate if (!(c->cache_cflags & UMC_NOTOUCH)) 20790Sstevel@tonic-gate return (WALK_NEXT); 20800Sstevel@tonic-gate 20810Sstevel@tonic-gate return (whatis_walk_cache(addr, c, w)); 20820Sstevel@tonic-gate } 20830Sstevel@tonic-gate 20840Sstevel@tonic-gate int 20850Sstevel@tonic-gate whatis(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 20860Sstevel@tonic-gate { 20870Sstevel@tonic-gate whatis_t w; 20880Sstevel@tonic-gate 20890Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 20900Sstevel@tonic-gate return (DCMD_USAGE); 20910Sstevel@tonic-gate 2092*10388SJonathan.Adams@Sun.COM w.w_all = FALSE; 20930Sstevel@tonic-gate w.w_bufctl = FALSE; 2094*10388SJonathan.Adams@Sun.COM w.w_quiet = FALSE; 2095*10388SJonathan.Adams@Sun.COM w.w_verbose = FALSE; 20960Sstevel@tonic-gate 20970Sstevel@tonic-gate if (mdb_getopts(argc, argv, 2098*10388SJonathan.Adams@Sun.COM 'a', MDB_OPT_SETBITS, TRUE, &w.w_all, 2099*10388SJonathan.Adams@Sun.COM 'b', MDB_OPT_SETBITS, TRUE, &w.w_bufctl, 2100*10388SJonathan.Adams@Sun.COM 'q', MDB_OPT_SETBITS, TRUE, &w.w_quiet, 21010Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &w.w_verbose, 2102*10388SJonathan.Adams@Sun.COM NULL) != argc) 21030Sstevel@tonic-gate return (DCMD_USAGE); 21040Sstevel@tonic-gate 21050Sstevel@tonic-gate w.w_addr = addr; 21060Sstevel@tonic-gate w.w_found = 0; 21070Sstevel@tonic-gate 21080Sstevel@tonic-gate /* 21090Sstevel@tonic-gate * Mappings and threads should eventually be added here. 
21100Sstevel@tonic-gate */ 21110Sstevel@tonic-gate if (mdb_walk("umem_cache", 21120Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_touch, &w) == -1) { 21130Sstevel@tonic-gate mdb_warn("couldn't find umem_cache walker"); 21140Sstevel@tonic-gate return (DCMD_ERR); 21150Sstevel@tonic-gate } 21160Sstevel@tonic-gate 21170Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 21180Sstevel@tonic-gate return (DCMD_OK); 21190Sstevel@tonic-gate 21200Sstevel@tonic-gate if (mdb_walk("umem_cache", 21210Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_notouch, &w) == -1) { 21220Sstevel@tonic-gate mdb_warn("couldn't find umem_cache walker"); 21230Sstevel@tonic-gate return (DCMD_ERR); 21240Sstevel@tonic-gate } 21250Sstevel@tonic-gate 21260Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 21270Sstevel@tonic-gate return (DCMD_OK); 21280Sstevel@tonic-gate 21290Sstevel@tonic-gate if (mdb_walk("vmem_postfix", 21300Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_vmem, &w) == -1) { 21310Sstevel@tonic-gate mdb_warn("couldn't find vmem_postfix walker"); 21320Sstevel@tonic-gate return (DCMD_ERR); 21330Sstevel@tonic-gate } 21340Sstevel@tonic-gate 21350Sstevel@tonic-gate if (w.w_found == 0) 21360Sstevel@tonic-gate mdb_printf("%p is unknown\n", addr); 21370Sstevel@tonic-gate 21380Sstevel@tonic-gate return (DCMD_OK); 21390Sstevel@tonic-gate } 21400Sstevel@tonic-gate 21410Sstevel@tonic-gate typedef struct umem_log_cpu { 21420Sstevel@tonic-gate uintptr_t umc_low; 21430Sstevel@tonic-gate uintptr_t umc_high; 21440Sstevel@tonic-gate } umem_log_cpu_t; 21450Sstevel@tonic-gate 21460Sstevel@tonic-gate int 21470Sstevel@tonic-gate umem_log_walk(uintptr_t addr, const umem_bufctl_audit_t *b, umem_log_cpu_t *umc) 21480Sstevel@tonic-gate { 21490Sstevel@tonic-gate int i; 21500Sstevel@tonic-gate 21510Sstevel@tonic-gate for (i = 0; i < umem_max_ncpus; i++) { 21520Sstevel@tonic-gate if (addr >= umc[i].umc_low && addr < umc[i].umc_high) 21530Sstevel@tonic-gate break; 21540Sstevel@tonic-gate } 21550Sstevel@tonic-gate 21560Sstevel@tonic-gate if (i == umem_max_ncpus) 21570Sstevel@tonic-gate mdb_printf(" "); 21580Sstevel@tonic-gate else 21590Sstevel@tonic-gate mdb_printf("%3d", i); 21600Sstevel@tonic-gate 21610Sstevel@tonic-gate mdb_printf(" %0?p %0?p %16llx %0?p\n", addr, b->bc_addr, 21620Sstevel@tonic-gate b->bc_timestamp, b->bc_thread); 21630Sstevel@tonic-gate 21640Sstevel@tonic-gate return (WALK_NEXT); 21650Sstevel@tonic-gate } 21660Sstevel@tonic-gate 21670Sstevel@tonic-gate /*ARGSUSED*/ 21680Sstevel@tonic-gate int 21690Sstevel@tonic-gate umem_log(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 21700Sstevel@tonic-gate { 21710Sstevel@tonic-gate umem_log_header_t lh; 21720Sstevel@tonic-gate umem_cpu_log_header_t clh; 21730Sstevel@tonic-gate uintptr_t lhp, clhp; 21740Sstevel@tonic-gate umem_log_cpu_t *umc; 21750Sstevel@tonic-gate int i; 21760Sstevel@tonic-gate 21770Sstevel@tonic-gate if (umem_readvar(&lhp, "umem_transaction_log") == -1) { 21780Sstevel@tonic-gate mdb_warn("failed to read 'umem_transaction_log'"); 21790Sstevel@tonic-gate return (DCMD_ERR); 21800Sstevel@tonic-gate } 21810Sstevel@tonic-gate 21820Sstevel@tonic-gate if (lhp == NULL) { 21830Sstevel@tonic-gate mdb_warn("no umem transaction log\n"); 21840Sstevel@tonic-gate return (DCMD_ERR); 21850Sstevel@tonic-gate } 21860Sstevel@tonic-gate 21870Sstevel@tonic-gate if (mdb_vread(&lh, sizeof (umem_log_header_t), lhp) == -1) { 21880Sstevel@tonic-gate mdb_warn("failed to read log header at %p", lhp); 21890Sstevel@tonic-gate return (DCMD_ERR); 21900Sstevel@tonic-gate } 
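/*
 * Editorial note: the clhp computation below is a hand-rolled
 * offsetof() -- it adds the offset of the lh_cpu[0] member (computed
 * from the local copy, lh) to the log header's target address. A
 * sketch of the equivalent, assuming <stddef.h> were used here:
 *
 *	clhp = lhp + offsetof(umem_log_header_t, lh_cpu[0]);
 */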
21910Sstevel@tonic-gate 21920Sstevel@tonic-gate clhp = lhp + ((uintptr_t)&lh.lh_cpu[0] - (uintptr_t)&lh); 21930Sstevel@tonic-gate 21940Sstevel@tonic-gate umc = mdb_zalloc(sizeof (umem_log_cpu_t) * umem_max_ncpus, 21950Sstevel@tonic-gate UM_SLEEP | UM_GC); 21960Sstevel@tonic-gate 21970Sstevel@tonic-gate for (i = 0; i < umem_max_ncpus; i++) { 21980Sstevel@tonic-gate if (mdb_vread(&clh, sizeof (clh), clhp) == -1) { 21990Sstevel@tonic-gate mdb_warn("cannot read cpu %d's log header at %p", 22000Sstevel@tonic-gate i, clhp); 22010Sstevel@tonic-gate return (DCMD_ERR); 22020Sstevel@tonic-gate } 22030Sstevel@tonic-gate 22040Sstevel@tonic-gate umc[i].umc_low = clh.clh_chunk * lh.lh_chunksize + 22050Sstevel@tonic-gate (uintptr_t)lh.lh_base; 22060Sstevel@tonic-gate umc[i].umc_high = (uintptr_t)clh.clh_current; 22070Sstevel@tonic-gate 22080Sstevel@tonic-gate clhp += sizeof (umem_cpu_log_header_t); 22090Sstevel@tonic-gate } 22100Sstevel@tonic-gate 22110Sstevel@tonic-gate if (DCMD_HDRSPEC(flags)) { 22120Sstevel@tonic-gate mdb_printf("%3s %-?s %-?s %16s %-?s\n", "CPU", "ADDR", 22130Sstevel@tonic-gate "BUFADDR", "TIMESTAMP", "THREAD"); 22140Sstevel@tonic-gate } 22150Sstevel@tonic-gate 22160Sstevel@tonic-gate /* 22170Sstevel@tonic-gate * If we have been passed an address, we'll just print out that 22180Sstevel@tonic-gate * log entry. 22190Sstevel@tonic-gate */ 22200Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 22210Sstevel@tonic-gate umem_bufctl_audit_t *bp; 22220Sstevel@tonic-gate UMEM_LOCAL_BUFCTL_AUDIT(&bp); 22230Sstevel@tonic-gate 22240Sstevel@tonic-gate if (mdb_vread(bp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) { 22250Sstevel@tonic-gate mdb_warn("failed to read bufctl at %p", addr); 22260Sstevel@tonic-gate return (DCMD_ERR); 22270Sstevel@tonic-gate } 22280Sstevel@tonic-gate 22290Sstevel@tonic-gate (void) umem_log_walk(addr, bp, umc); 22300Sstevel@tonic-gate 22310Sstevel@tonic-gate return (DCMD_OK); 22320Sstevel@tonic-gate } 22330Sstevel@tonic-gate 22340Sstevel@tonic-gate if (mdb_walk("umem_log", (mdb_walk_cb_t)umem_log_walk, umc) == -1) { 22350Sstevel@tonic-gate mdb_warn("can't find umem log walker"); 22360Sstevel@tonic-gate return (DCMD_ERR); 22370Sstevel@tonic-gate } 22380Sstevel@tonic-gate 22390Sstevel@tonic-gate return (DCMD_OK); 22400Sstevel@tonic-gate } 22410Sstevel@tonic-gate 22420Sstevel@tonic-gate typedef struct bufctl_history_cb { 22430Sstevel@tonic-gate int bhc_flags; 22440Sstevel@tonic-gate int bhc_argc; 22450Sstevel@tonic-gate const mdb_arg_t *bhc_argv; 22460Sstevel@tonic-gate int bhc_ret; 22470Sstevel@tonic-gate } bufctl_history_cb_t; 22480Sstevel@tonic-gate 22490Sstevel@tonic-gate /*ARGSUSED*/ 22500Sstevel@tonic-gate static int 22510Sstevel@tonic-gate bufctl_history_callback(uintptr_t addr, const void *ign, void *arg) 22520Sstevel@tonic-gate { 22530Sstevel@tonic-gate bufctl_history_cb_t *bhc = arg; 22540Sstevel@tonic-gate 22550Sstevel@tonic-gate bhc->bhc_ret = 22560Sstevel@tonic-gate bufctl(addr, bhc->bhc_flags, bhc->bhc_argc, bhc->bhc_argv); 22570Sstevel@tonic-gate 22580Sstevel@tonic-gate bhc->bhc_flags &= ~DCMD_LOOPFIRST; 22590Sstevel@tonic-gate 22600Sstevel@tonic-gate return ((bhc->bhc_ret == DCMD_OK)? 
WALK_NEXT : WALK_DONE); 22610Sstevel@tonic-gate } 22620Sstevel@tonic-gate 22630Sstevel@tonic-gate void 22640Sstevel@tonic-gate bufctl_help(void) 22650Sstevel@tonic-gate { 22660Sstevel@tonic-gate mdb_printf("%s\n", 22670Sstevel@tonic-gate "Display the contents of umem_bufctl_audit_ts, with optional filtering.\n"); 22680Sstevel@tonic-gate mdb_dec_indent(2); 22690Sstevel@tonic-gate mdb_printf("%<b>OPTIONS%</b>\n"); 22700Sstevel@tonic-gate mdb_inc_indent(2); 22710Sstevel@tonic-gate mdb_printf("%s", 22720Sstevel@tonic-gate " -v Display the full content of the bufctl, including its stack trace\n" 22730Sstevel@tonic-gate " -h retrieve the bufctl's transaction history, if available\n" 22740Sstevel@tonic-gate " -a addr\n" 22750Sstevel@tonic-gate " filter out bufctls not involving the buffer at addr\n" 22760Sstevel@tonic-gate " -c caller\n" 22770Sstevel@tonic-gate " filter out bufctls without the function/PC in their stack trace\n" 22780Sstevel@tonic-gate " -e earliest\n" 22790Sstevel@tonic-gate " filter out bufctls timestamped before earliest\n" 22800Sstevel@tonic-gate " -l latest\n" 22810Sstevel@tonic-gate " filter out bufctls timestamped after latest\n" 22820Sstevel@tonic-gate " -t thread\n" 22830Sstevel@tonic-gate " filter out bufctls not involving thread\n"); 22840Sstevel@tonic-gate } 22850Sstevel@tonic-gate 22860Sstevel@tonic-gate int 22870Sstevel@tonic-gate bufctl(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 22880Sstevel@tonic-gate { 22890Sstevel@tonic-gate uint_t verbose = FALSE; 22900Sstevel@tonic-gate uint_t history = FALSE; 22910Sstevel@tonic-gate uint_t in_history = FALSE; 22920Sstevel@tonic-gate uintptr_t caller = NULL, thread = NULL; 22930Sstevel@tonic-gate uintptr_t laddr, haddr, baddr = NULL; 22940Sstevel@tonic-gate hrtime_t earliest = 0, latest = 0; 22950Sstevel@tonic-gate int i, depth; 22960Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 22970Sstevel@tonic-gate GElf_Sym sym; 22980Sstevel@tonic-gate umem_bufctl_audit_t *bcp; 22990Sstevel@tonic-gate UMEM_LOCAL_BUFCTL_AUDIT(&bcp); 23000Sstevel@tonic-gate 23010Sstevel@tonic-gate if (mdb_getopts(argc, argv, 23020Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &verbose, 23030Sstevel@tonic-gate 'h', MDB_OPT_SETBITS, TRUE, &history, 23040Sstevel@tonic-gate 'H', MDB_OPT_SETBITS, TRUE, &in_history, /* internal */ 23050Sstevel@tonic-gate 'c', MDB_OPT_UINTPTR, &caller, 23060Sstevel@tonic-gate 't', MDB_OPT_UINTPTR, &thread, 23070Sstevel@tonic-gate 'e', MDB_OPT_UINT64, &earliest, 23080Sstevel@tonic-gate 'l', MDB_OPT_UINT64, &latest, 23090Sstevel@tonic-gate 'a', MDB_OPT_UINTPTR, &baddr, NULL) != argc) 23100Sstevel@tonic-gate return (DCMD_USAGE); 23110Sstevel@tonic-gate 23120Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 23130Sstevel@tonic-gate return (DCMD_USAGE); 23140Sstevel@tonic-gate 23150Sstevel@tonic-gate if (in_history && !history) 23160Sstevel@tonic-gate return (DCMD_USAGE); 23170Sstevel@tonic-gate 23180Sstevel@tonic-gate if (history && !in_history) { 23190Sstevel@tonic-gate mdb_arg_t *nargv = mdb_zalloc(sizeof (*nargv) * (argc + 1), 23200Sstevel@tonic-gate UM_SLEEP | UM_GC); 23210Sstevel@tonic-gate bufctl_history_cb_t bhc; 23220Sstevel@tonic-gate 23230Sstevel@tonic-gate nargv[0].a_type = MDB_TYPE_STRING; 23240Sstevel@tonic-gate nargv[0].a_un.a_str = "-H"; /* prevent recursion */ 23250Sstevel@tonic-gate 23260Sstevel@tonic-gate for (i = 0; i < argc; i++) 23270Sstevel@tonic-gate nargv[i + 1] = argv[i]; 23280Sstevel@tonic-gate 23290Sstevel@tonic-gate /* 23300Sstevel@tonic-gate * When in history mode, we treat each element as if 
it 23310Sstevel@tonic-gate * were in a separate loop, so that the headers group
23320Sstevel@tonic-gate * bufctls with similar histories.
23330Sstevel@tonic-gate */
23340Sstevel@tonic-gate bhc.bhc_flags = flags | DCMD_LOOP | DCMD_LOOPFIRST;
23350Sstevel@tonic-gate bhc.bhc_argc = argc + 1;
23360Sstevel@tonic-gate bhc.bhc_argv = nargv;
23370Sstevel@tonic-gate bhc.bhc_ret = DCMD_OK;
23380Sstevel@tonic-gate
23390Sstevel@tonic-gate if (mdb_pwalk("bufctl_history", bufctl_history_callback, &bhc,
23400Sstevel@tonic-gate addr) == -1) {
23410Sstevel@tonic-gate mdb_warn("unable to walk bufctl_history");
23420Sstevel@tonic-gate return (DCMD_ERR);
23430Sstevel@tonic-gate }
23440Sstevel@tonic-gate
23450Sstevel@tonic-gate if (bhc.bhc_ret == DCMD_OK && !(flags & DCMD_PIPE_OUT))
23460Sstevel@tonic-gate mdb_printf("\n");
23470Sstevel@tonic-gate
23480Sstevel@tonic-gate return (bhc.bhc_ret);
23490Sstevel@tonic-gate }
23500Sstevel@tonic-gate
23510Sstevel@tonic-gate if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
23520Sstevel@tonic-gate if (verbose) {
23530Sstevel@tonic-gate mdb_printf("%16s %16s %16s %16s\n"
23540Sstevel@tonic-gate "%<u>%16s %16s %16s %16s%</u>\n",
23550Sstevel@tonic-gate "ADDR", "BUFADDR", "TIMESTAMP", "THREAD",
23560Sstevel@tonic-gate "", "CACHE", "LASTLOG", "CONTENTS");
23570Sstevel@tonic-gate } else {
23580Sstevel@tonic-gate mdb_printf("%<u>%-?s %-?s %-12s %5s %s%</u>\n",
23590Sstevel@tonic-gate "ADDR", "BUFADDR", "TIMESTAMP", "THRD", "CALLER");
23600Sstevel@tonic-gate }
23610Sstevel@tonic-gate }
23620Sstevel@tonic-gate
23630Sstevel@tonic-gate if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
23640Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr);
23650Sstevel@tonic-gate return (DCMD_ERR);
23660Sstevel@tonic-gate }
23670Sstevel@tonic-gate
23680Sstevel@tonic-gate /*
23690Sstevel@tonic-gate * Guard against bogus bc_depth in case the bufctl is corrupt or
23700Sstevel@tonic-gate * the address does not really refer to a bufctl.
23710Sstevel@tonic-gate */
23720Sstevel@tonic-gate depth = MIN(bcp->bc_depth, umem_stack_depth);
23730Sstevel@tonic-gate
23740Sstevel@tonic-gate if (caller != NULL) {
23750Sstevel@tonic-gate laddr = caller;
23760Sstevel@tonic-gate haddr = caller + sizeof (caller);
23770Sstevel@tonic-gate
23780Sstevel@tonic-gate if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, sizeof (c),
23790Sstevel@tonic-gate &sym) != -1 && caller == (uintptr_t)sym.st_value) {
23800Sstevel@tonic-gate /*
23810Sstevel@tonic-gate * We were provided an exact symbol value; any
23820Sstevel@tonic-gate * address in the function is valid.
23830Sstevel@tonic-gate */ 23840Sstevel@tonic-gate laddr = (uintptr_t)sym.st_value; 23850Sstevel@tonic-gate haddr = (uintptr_t)sym.st_value + sym.st_size; 23860Sstevel@tonic-gate } 23870Sstevel@tonic-gate 23880Sstevel@tonic-gate for (i = 0; i < depth; i++) 23890Sstevel@tonic-gate if (bcp->bc_stack[i] >= laddr && 23900Sstevel@tonic-gate bcp->bc_stack[i] < haddr) 23910Sstevel@tonic-gate break; 23920Sstevel@tonic-gate 23930Sstevel@tonic-gate if (i == depth) 23940Sstevel@tonic-gate return (DCMD_OK); 23950Sstevel@tonic-gate } 23960Sstevel@tonic-gate 23970Sstevel@tonic-gate if (thread != NULL && (uintptr_t)bcp->bc_thread != thread) 23980Sstevel@tonic-gate return (DCMD_OK); 23990Sstevel@tonic-gate 24000Sstevel@tonic-gate if (earliest != 0 && bcp->bc_timestamp < earliest) 24010Sstevel@tonic-gate return (DCMD_OK); 24020Sstevel@tonic-gate 24030Sstevel@tonic-gate if (latest != 0 && bcp->bc_timestamp > latest) 24040Sstevel@tonic-gate return (DCMD_OK); 24050Sstevel@tonic-gate 24060Sstevel@tonic-gate if (baddr != 0 && (uintptr_t)bcp->bc_addr != baddr) 24070Sstevel@tonic-gate return (DCMD_OK); 24080Sstevel@tonic-gate 24090Sstevel@tonic-gate if (flags & DCMD_PIPE_OUT) { 24100Sstevel@tonic-gate mdb_printf("%#r\n", addr); 24110Sstevel@tonic-gate return (DCMD_OK); 24120Sstevel@tonic-gate } 24130Sstevel@tonic-gate 24140Sstevel@tonic-gate if (verbose) { 24150Sstevel@tonic-gate mdb_printf( 24160Sstevel@tonic-gate "%<b>%16p%</b> %16p %16llx %16d\n" 24170Sstevel@tonic-gate "%16s %16p %16p %16p\n", 24180Sstevel@tonic-gate addr, bcp->bc_addr, bcp->bc_timestamp, bcp->bc_thread, 24190Sstevel@tonic-gate "", bcp->bc_cache, bcp->bc_lastlog, bcp->bc_contents); 24200Sstevel@tonic-gate 24210Sstevel@tonic-gate mdb_inc_indent(17); 24220Sstevel@tonic-gate for (i = 0; i < depth; i++) 24230Sstevel@tonic-gate mdb_printf("%a\n", bcp->bc_stack[i]); 24240Sstevel@tonic-gate mdb_dec_indent(17); 24250Sstevel@tonic-gate mdb_printf("\n"); 24260Sstevel@tonic-gate } else { 24270Sstevel@tonic-gate mdb_printf("%0?p %0?p %12llx %5d", addr, bcp->bc_addr, 24280Sstevel@tonic-gate bcp->bc_timestamp, bcp->bc_thread); 24290Sstevel@tonic-gate 24300Sstevel@tonic-gate for (i = 0; i < depth; i++) { 24310Sstevel@tonic-gate if (mdb_lookup_by_addr(bcp->bc_stack[i], 24320Sstevel@tonic-gate MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1) 24330Sstevel@tonic-gate continue; 24340Sstevel@tonic-gate if (is_umem_sym(c, "umem_")) 24350Sstevel@tonic-gate continue; 24360Sstevel@tonic-gate mdb_printf(" %a\n", bcp->bc_stack[i]); 24370Sstevel@tonic-gate break; 24380Sstevel@tonic-gate } 24390Sstevel@tonic-gate 24400Sstevel@tonic-gate if (i >= depth) 24410Sstevel@tonic-gate mdb_printf("\n"); 24420Sstevel@tonic-gate } 24430Sstevel@tonic-gate 24440Sstevel@tonic-gate return (DCMD_OK); 24450Sstevel@tonic-gate } 24460Sstevel@tonic-gate 24470Sstevel@tonic-gate /*ARGSUSED*/ 24480Sstevel@tonic-gate int 24490Sstevel@tonic-gate bufctl_audit(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 24500Sstevel@tonic-gate { 24510Sstevel@tonic-gate mdb_arg_t a; 24520Sstevel@tonic-gate 24530Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 24540Sstevel@tonic-gate return (DCMD_USAGE); 24550Sstevel@tonic-gate 24560Sstevel@tonic-gate if (argc != 0) 24570Sstevel@tonic-gate return (DCMD_USAGE); 24580Sstevel@tonic-gate 24590Sstevel@tonic-gate a.a_type = MDB_TYPE_STRING; 24600Sstevel@tonic-gate a.a_un.a_str = "-v"; 24610Sstevel@tonic-gate 24620Sstevel@tonic-gate return (bufctl(addr, flags, 1, &a)); 24630Sstevel@tonic-gate } 24640Sstevel@tonic-gate 24650Sstevel@tonic-gate typedef struct 
umem_verify {
24660Sstevel@tonic-gate uint64_t *umv_buf; /* buffer to read cache contents into */
24670Sstevel@tonic-gate size_t umv_size; /* number of bytes in umv_buf */
24680Sstevel@tonic-gate int umv_corruption; /* > 0 if corruption found. */
24690Sstevel@tonic-gate int umv_besilent; /* if nonzero, don't report corruption sites */
24700Sstevel@tonic-gate struct umem_cache umv_cache; /* the cache we're operating on */
24710Sstevel@tonic-gate } umem_verify_t;
24720Sstevel@tonic-gate
24730Sstevel@tonic-gate /*
24740Sstevel@tonic-gate * verify_pattern()
24750Sstevel@tonic-gate * verify that buf is filled with the pattern pat.
24760Sstevel@tonic-gate */
24770Sstevel@tonic-gate static int64_t
24780Sstevel@tonic-gate verify_pattern(uint64_t *buf_arg, size_t size, uint64_t pat)
24790Sstevel@tonic-gate {
24800Sstevel@tonic-gate /*LINTED*/
24810Sstevel@tonic-gate uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
24820Sstevel@tonic-gate uint64_t *buf;
24830Sstevel@tonic-gate
24840Sstevel@tonic-gate for (buf = buf_arg; buf < bufend; buf++)
24850Sstevel@tonic-gate if (*buf != pat)
24860Sstevel@tonic-gate return ((uintptr_t)buf - (uintptr_t)buf_arg);
24870Sstevel@tonic-gate return (-1);
24880Sstevel@tonic-gate }
24890Sstevel@tonic-gate
24900Sstevel@tonic-gate /*
24910Sstevel@tonic-gate * verify_buftag()
24920Sstevel@tonic-gate * verify that btp->bt_bxstat == (bcp ^ pat)
24930Sstevel@tonic-gate */
24940Sstevel@tonic-gate static int
24950Sstevel@tonic-gate verify_buftag(umem_buftag_t *btp, uintptr_t pat)
24960Sstevel@tonic-gate {
24970Sstevel@tonic-gate return (btp->bt_bxstat == ((intptr_t)btp->bt_bufctl ^ pat) ? 0 : -1);
24980Sstevel@tonic-gate }
24990Sstevel@tonic-gate
25000Sstevel@tonic-gate /*
25010Sstevel@tonic-gate * verify_free()
25020Sstevel@tonic-gate * verify the integrity of a free block of memory by checking
25030Sstevel@tonic-gate * that it is filled with 0xdeadbeef and that its buftag is sane.
25040Sstevel@tonic-gate */
25050Sstevel@tonic-gate /*ARGSUSED1*/
25060Sstevel@tonic-gate static int
25070Sstevel@tonic-gate verify_free(uintptr_t addr, const void *data, void *private)
25080Sstevel@tonic-gate {
25090Sstevel@tonic-gate umem_verify_t *umv = (umem_verify_t *)private;
25100Sstevel@tonic-gate uint64_t *buf = umv->umv_buf; /* buf to validate */
25110Sstevel@tonic-gate int64_t corrupt; /* corruption offset */
25120Sstevel@tonic-gate umem_buftag_t *buftagp; /* ptr to buftag */
25130Sstevel@tonic-gate umem_cache_t *cp = &umv->umv_cache;
25140Sstevel@tonic-gate int besilent = umv->umv_besilent;
25150Sstevel@tonic-gate
25160Sstevel@tonic-gate /*LINTED*/
25170Sstevel@tonic-gate buftagp = UMEM_BUFTAG(cp, buf);
25180Sstevel@tonic-gate
25190Sstevel@tonic-gate /*
25200Sstevel@tonic-gate * Read the buffer to check.
25210Sstevel@tonic-gate */ 25220Sstevel@tonic-gate if (mdb_vread(buf, umv->umv_size, addr) == -1) { 25230Sstevel@tonic-gate if (!besilent) 25240Sstevel@tonic-gate mdb_warn("couldn't read %p", addr); 25250Sstevel@tonic-gate return (WALK_NEXT); 25260Sstevel@tonic-gate } 25270Sstevel@tonic-gate 25280Sstevel@tonic-gate if ((corrupt = verify_pattern(buf, cp->cache_verify, 25290Sstevel@tonic-gate UMEM_FREE_PATTERN)) >= 0) { 25300Sstevel@tonic-gate if (!besilent) 25310Sstevel@tonic-gate mdb_printf("buffer %p (free) seems corrupted, at %p\n", 25320Sstevel@tonic-gate addr, (uintptr_t)addr + corrupt); 25330Sstevel@tonic-gate goto corrupt; 25340Sstevel@tonic-gate } 25350Sstevel@tonic-gate 25360Sstevel@tonic-gate if ((cp->cache_flags & UMF_HASH) && 25370Sstevel@tonic-gate buftagp->bt_redzone != UMEM_REDZONE_PATTERN) { 25380Sstevel@tonic-gate if (!besilent) 25390Sstevel@tonic-gate mdb_printf("buffer %p (free) seems to " 25400Sstevel@tonic-gate "have a corrupt redzone pattern\n", addr); 25410Sstevel@tonic-gate goto corrupt; 25420Sstevel@tonic-gate } 25430Sstevel@tonic-gate 25440Sstevel@tonic-gate /* 25450Sstevel@tonic-gate * confirm bufctl pointer integrity. 25460Sstevel@tonic-gate */ 25470Sstevel@tonic-gate if (verify_buftag(buftagp, UMEM_BUFTAG_FREE) == -1) { 25480Sstevel@tonic-gate if (!besilent) 25490Sstevel@tonic-gate mdb_printf("buffer %p (free) has a corrupt " 25500Sstevel@tonic-gate "buftag\n", addr); 25510Sstevel@tonic-gate goto corrupt; 25520Sstevel@tonic-gate } 25530Sstevel@tonic-gate 25540Sstevel@tonic-gate return (WALK_NEXT); 25550Sstevel@tonic-gate corrupt: 25560Sstevel@tonic-gate umv->umv_corruption++; 25570Sstevel@tonic-gate return (WALK_NEXT); 25580Sstevel@tonic-gate } 25590Sstevel@tonic-gate 25600Sstevel@tonic-gate /* 25610Sstevel@tonic-gate * verify_alloc() 25620Sstevel@tonic-gate * Verify that the buftag of an allocated buffer makes sense with respect 25630Sstevel@tonic-gate * to the buffer. 25640Sstevel@tonic-gate */ 25650Sstevel@tonic-gate /*ARGSUSED1*/ 25660Sstevel@tonic-gate static int 25670Sstevel@tonic-gate verify_alloc(uintptr_t addr, const void *data, void *private) 25680Sstevel@tonic-gate { 25690Sstevel@tonic-gate umem_verify_t *umv = (umem_verify_t *)private; 25700Sstevel@tonic-gate umem_cache_t *cp = &umv->umv_cache; 25710Sstevel@tonic-gate uint64_t *buf = umv->umv_buf; /* buf to validate */ 25720Sstevel@tonic-gate /*LINTED*/ 25730Sstevel@tonic-gate umem_buftag_t *buftagp = UMEM_BUFTAG(cp, buf); 25740Sstevel@tonic-gate uint32_t *ip = (uint32_t *)buftagp; 25750Sstevel@tonic-gate uint8_t *bp = (uint8_t *)buf; 25760Sstevel@tonic-gate int looks_ok = 0, size_ok = 1; /* flags for finding corruption */ 25770Sstevel@tonic-gate int besilent = umv->umv_besilent; 25780Sstevel@tonic-gate 25790Sstevel@tonic-gate /* 25800Sstevel@tonic-gate * Read the buffer to check. 25810Sstevel@tonic-gate */ 25820Sstevel@tonic-gate if (mdb_vread(buf, umv->umv_size, addr) == -1) { 25830Sstevel@tonic-gate if (!besilent) 25840Sstevel@tonic-gate mdb_warn("couldn't read %p", addr); 25850Sstevel@tonic-gate return (WALK_NEXT); 25860Sstevel@tonic-gate } 25870Sstevel@tonic-gate 25880Sstevel@tonic-gate /* 25890Sstevel@tonic-gate * There are two cases to handle: 25900Sstevel@tonic-gate * 1. If the buf was alloc'd using umem_cache_alloc, it will have 25910Sstevel@tonic-gate * 0xfeedfacefeedface at the end of it 25920Sstevel@tonic-gate * 2. If the buf was alloc'd using umem_alloc, it will have 25930Sstevel@tonic-gate * 0xbb just past the end of the region in use. 
	 *    At the buftag, it will have 0xfeedface (or, if the whole buffer
	 *    is in use, 0xfeedface & bb000000 or 0xfeedfacf & 000000bb
	 *    depending on endianness), followed by 32 bits containing the
	 *    offset of the 0xbb byte in the buffer.
	 *
	 * Finally, the two 32-bit words that comprise the second half of the
	 * buftag should xor to UMEM_BUFTAG_ALLOC.
	 */

	if (buftagp->bt_redzone == UMEM_REDZONE_PATTERN)
		looks_ok = 1;
	else if (!UMEM_SIZE_VALID(ip[1]))
		size_ok = 0;
	else if (bp[UMEM_SIZE_DECODE(ip[1])] == UMEM_REDZONE_BYTE)
		looks_ok = 1;
	else
		size_ok = 0;

	if (!size_ok) {
		if (!besilent)
			mdb_printf("buffer %p (allocated) has a corrupt "
			    "redzone size encoding\n", addr);
		goto corrupt;
	}

	if (!looks_ok) {
		if (!besilent)
			mdb_printf("buffer %p (allocated) has a corrupt "
			    "redzone signature\n", addr);
		goto corrupt;
	}

	if (verify_buftag(buftagp, UMEM_BUFTAG_ALLOC) == -1) {
		if (!besilent)
			mdb_printf("buffer %p (allocated) has a "
			    "corrupt buftag\n", addr);
		goto corrupt;
	}

	return (WALK_NEXT);
corrupt:
	umv->umv_corruption++;
	return (WALK_NEXT);
}

/*ARGSUSED2*/
int
umem_verify(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	if (flags & DCMD_ADDRSPEC) {
		int check_alloc = 0, check_free = 0;
		umem_verify_t umv;

		if (mdb_vread(&umv.umv_cache, sizeof (umv.umv_cache),
		    addr) == -1) {
			mdb_warn("couldn't read umem_cache %p", addr);
			return (DCMD_ERR);
		}

		umv.umv_size = umv.umv_cache.cache_buftag +
		    sizeof (umem_buftag_t);
		umv.umv_buf = mdb_alloc(umv.umv_size, UM_SLEEP | UM_GC);
		umv.umv_corruption = 0;

		if (umv.umv_cache.cache_flags & UMF_REDZONE) {
			check_alloc = 1;
			if (umv.umv_cache.cache_flags & UMF_DEADBEEF)
				check_free = 1;
		} else {
			if (!(flags & DCMD_LOOP)) {
				mdb_warn("cache %p (%s) does not have "
				    "redzone checking enabled\n", addr,
				    umv.umv_cache.cache_name);
			}
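			/*
			 * Without UMF_REDZONE there is nothing we can
			 * check, so fail this invocation whether or not
			 * we warned above.
			 */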
			return (DCMD_ERR);
		}

		if (flags & DCMD_LOOP) {
			/*
			 * table mode, don't print out every corrupt buffer
			 */
			umv.umv_besilent = 1;
		} else {
			mdb_printf("Summary for cache '%s'\n",
			    umv.umv_cache.cache_name);
			mdb_inc_indent(2);
			umv.umv_besilent = 0;
		}

		if (check_alloc)
			(void) mdb_pwalk("umem", verify_alloc, &umv, addr);
		if (check_free)
			(void) mdb_pwalk("freemem", verify_free, &umv, addr);

		if (flags & DCMD_LOOP) {
			if (umv.umv_corruption == 0) {
				mdb_printf("%-*s %?p clean\n",
				    UMEM_CACHE_NAMELEN,
				    umv.umv_cache.cache_name, addr);
			} else {
				char *s = "";	/* optional s in "buffer[s]" */

				if (umv.umv_corruption > 1)
					s = "s";

				mdb_printf("%-*s %?p %d corrupt buffer%s\n",
				    UMEM_CACHE_NAMELEN,
				    umv.umv_cache.cache_name, addr,
				    umv.umv_corruption, s);
			}
		} else {
			/*
			 * This is the more verbose mode, when the user has
			 * typed addr::umem_verify.  If the cache was clean,
			 * nothing will have been printed yet, so say
			 * something.
			 */
			if (umv.umv_corruption == 0)
				mdb_printf("clean\n");

			mdb_dec_indent(2);
		}
	} else {
		/*
		 * If the user didn't specify a cache to verify, we'll walk all
		 * umem_cache's, specifying ourself as a callback for each...
		 * this is the equivalent of '::walk umem_cache .::umem_verify'
		 */
		mdb_printf("%<u>%-*s %-?s %-20s%</u>\n", UMEM_CACHE_NAMELEN,
		    "Cache Name", "Addr", "Cache Integrity");
		(void) mdb_walk_dcmd("umem_cache", "umem_verify", 0, NULL);
	}

	return (DCMD_OK);
}

typedef struct vmem_node {
	struct vmem_node *vn_next;
	struct vmem_node *vn_parent;
	struct vmem_node *vn_sibling;
	struct vmem_node *vn_children;
	uintptr_t vn_addr;
	int vn_marked;
	vmem_t vn_vmem;
} vmem_node_t;

typedef struct vmem_walk {
	vmem_node_t *vw_root;
	vmem_node_t *vw_current;
} vmem_walk_t;

int
vmem_walk_init(mdb_walk_state_t *wsp)
{
	uintptr_t vaddr, paddr;
	vmem_node_t *head = NULL, *root = NULL, *current = NULL, *parent, *vp;
	vmem_walk_t *vw;

	if (umem_readvar(&vaddr, "vmem_list") == -1) {
		mdb_warn("couldn't read 'vmem_list'");
		return (WALK_ERR);
	}

	while (vaddr != NULL) {
		vp = mdb_zalloc(sizeof (vmem_node_t), UM_SLEEP);
		vp->vn_addr = vaddr;
		vp->vn_next = head;
		head = vp;

		if (vaddr == wsp->walk_addr)
			current = vp;

		if (mdb_vread(&vp->vn_vmem, sizeof (vmem_t), vaddr) == -1) {
			mdb_warn("couldn't read vmem_t at %p", vaddr);
			goto err;
		}

		vaddr = (uintptr_t)vp->vn_vmem.vm_next;
	}

	for (vp = head; vp != NULL; vp = vp->vn_next) {
		if ((paddr = (uintptr_t)vp->vn_vmem.vm_source) == NULL) {
			vp->vn_sibling = root;
			root = vp;
			continue;
		}

		for (parent = head; parent != NULL; parent = parent->vn_next) {
			if (parent->vn_addr != paddr)
				continue;
			vp->vn_sibling = parent->vn_children;
			parent->vn_children = vp;
			vp->vn_parent = parent;
			break;
		}

		if (parent == NULL) {
			mdb_warn("couldn't find %p's parent (%p)\n",
			    vp->vn_addr, paddr);
			goto err;
		}
	}

	vw = mdb_zalloc(sizeof (vmem_walk_t), UM_SLEEP);
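	/*
	 * Start the walk at the arena the caller named, if any; otherwise
	 * begin at the root of the arena forest.
	 */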
	vw->vw_root = root;

	if (current != NULL)
		vw->vw_current = current;
	else
		vw->vw_current = root;

	wsp->walk_data = vw;
	return (WALK_NEXT);
err:
	for (vp = head; head != NULL; vp = head) {
		head = vp->vn_next;
		mdb_free(vp, sizeof (vmem_node_t));
	}

	return (WALK_ERR);
}

int
vmem_walk_step(mdb_walk_state_t *wsp)
{
	vmem_walk_t *vw = wsp->walk_data;
	vmem_node_t *vp;
	int rval;

	if ((vp = vw->vw_current) == NULL)
		return (WALK_DONE);

	rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata);

	if (vp->vn_children != NULL) {
		vw->vw_current = vp->vn_children;
		return (rval);
	}

	do {
		vw->vw_current = vp->vn_sibling;
		vp = vp->vn_parent;
	} while (vw->vw_current == NULL && vp != NULL);

	return (rval);
}

/*
 * The "vmem_postfix" walk walks the vmem arenas in post-fix order; all
 * children are visited before their parent.  We perform the postfix walk
 * iteratively (rather than recursively) to allow mdb to regain control
 * after each callback.
 */
int
vmem_postfix_walk_step(mdb_walk_state_t *wsp)
{
	vmem_walk_t *vw = wsp->walk_data;
	vmem_node_t *vp = vw->vw_current;
	int rval;

	/*
	 * If this node is marked, then we know that we have already visited
	 * all of its children.  If the node has any siblings, they need to
	 * be visited next; otherwise, we need to visit the parent.  Note
	 * that vp->vn_marked will only be zero on the first invocation of
	 * the step function.
	 */
	if (vp->vn_marked) {
		if (vp->vn_sibling != NULL)
			vp = vp->vn_sibling;
		else if (vp->vn_parent != NULL)
			vp = vp->vn_parent;
		else {
			/*
			 * We have neither a parent, nor a sibling, and we
			 * have already been visited; we're done.
			 */
			return (WALK_DONE);
		}
	}

	/*
	 * Before we visit this node, visit its children.
	 */
	while (vp->vn_children != NULL && !vp->vn_children->vn_marked)
		vp = vp->vn_children;

	vp->vn_marked = 1;
	vw->vw_current = vp;
	rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata);

	return (rval);
}

void
vmem_walk_fini(mdb_walk_state_t *wsp)
{
	vmem_walk_t *vw = wsp->walk_data;
	vmem_node_t *root = vw->vw_root;
	int done;

	if (root == NULL)
		return;

	if ((vw->vw_root = root->vn_children) != NULL)
		vmem_walk_fini(wsp);

	vw->vw_root = root->vn_sibling;
	done = (root->vn_sibling == NULL && root->vn_parent == NULL);
	mdb_free(root, sizeof (vmem_node_t));

	if (done) {
		mdb_free(vw, sizeof (vmem_walk_t));
	} else {
		vmem_walk_fini(wsp);
	}
}

typedef struct vmem_seg_walk {
	uint8_t vsw_type;
	uintptr_t vsw_start;
	uintptr_t vsw_current;
} vmem_seg_walk_t;

/*ARGSUSED*/
int
vmem_seg_walk_common_init(mdb_walk_state_t *wsp, uint8_t type, char *name)
{
	vmem_seg_walk_t *vsw;

	if (wsp->walk_addr == NULL) {
		mdb_warn("vmem_%s does not support global walks\n", name);
		return (WALK_ERR);
	}

	wsp->walk_data = vsw = mdb_alloc(sizeof (vmem_seg_walk_t), UM_SLEEP);

	vsw->vsw_type = type;
	vsw->vsw_start = wsp->walk_addr + OFFSETOF(vmem_t, vm_seg0);
	vsw->vsw_current = vsw->vsw_start;

	return (WALK_NEXT);
}

/*
 * vmem segments can't have type 0 (this should be added to vmem_impl.h).
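 * We use 0 as a wildcard here, so that vmem_seg_walk_step() will match
 * segments of every type.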
 */
#define	VMEM_NONE	0

int
vmem_alloc_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_ALLOC, "alloc"));
}

int
vmem_free_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_FREE, "free"));
}

int
vmem_span_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_SPAN, "span"));
}

int
vmem_seg_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_NONE, "seg"));
}

int
vmem_seg_walk_step(mdb_walk_state_t *wsp)
{
	vmem_seg_t seg;
	vmem_seg_walk_t *vsw = wsp->walk_data;
	uintptr_t addr = vsw->vsw_current;
	static size_t seg_size = 0;
	int rval;

	if (!seg_size) {
		if (umem_readvar(&seg_size, "vmem_seg_size") == -1) {
			mdb_warn("failed to read 'vmem_seg_size'");
			seg_size = sizeof (vmem_seg_t);
		}
	}

	if (seg_size < sizeof (seg))
		bzero((caddr_t)&seg + seg_size, sizeof (seg) - seg_size);

	if (mdb_vread(&seg, seg_size, addr) == -1) {
		mdb_warn("couldn't read vmem_seg at %p", addr);
		return (WALK_ERR);
	}

	vsw->vsw_current = (uintptr_t)seg.vs_anext;
	if (vsw->vsw_type != VMEM_NONE && seg.vs_type != vsw->vsw_type) {
		rval = WALK_NEXT;
	} else {
		rval = wsp->walk_callback(addr, &seg, wsp->walk_cbdata);
	}

	if (vsw->vsw_current == vsw->vsw_start)
		return (WALK_DONE);

	return (rval);
}

void
vmem_seg_walk_fini(mdb_walk_state_t *wsp)
{
	vmem_seg_walk_t *vsw = wsp->walk_data;

	mdb_free(vsw, sizeof (vmem_seg_walk_t));
}

#define	VMEM_NAMEWIDTH	22

int
vmem(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	vmem_t v, parent;
	uintptr_t paddr;
	int ident = 0;
	char c[VMEM_NAMEWIDTH];
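
	/*
	 * Usage sketch (output illustrative, not from a live session):
	 * "::vmem" lists every arena, while "addr::vmem" prints one line
	 * for the given arena.  Child arenas are indented two spaces per
	 * level of ancestry:
	 *
	 *	> ::vmem
	 *	ADDR     NAME                  INUSE    TOTAL  SUCCEED  FAIL
	 *	080f0000 sbrk_top            1225728  2715648     2648     0
	 *	080f0200   sbrk_heap         1225728  1225728     2648     0
	 */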
	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_walk_dcmd("vmem", "vmem", argc, argv) == -1) {
			mdb_warn("can't walk vmem");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags))
		mdb_printf("%-?s %-*s %10s %12s %9s %5s\n",
		    "ADDR", VMEM_NAMEWIDTH, "NAME", "INUSE",
		    "TOTAL", "SUCCEED", "FAIL");

	if (mdb_vread(&v, sizeof (v), addr) == -1) {
		mdb_warn("couldn't read vmem at %p", addr);
		return (DCMD_ERR);
	}

	for (paddr = (uintptr_t)v.vm_source; paddr != NULL; ident += 2) {
		if (mdb_vread(&parent, sizeof (parent), paddr) == -1) {
			mdb_warn("couldn't trace %p's ancestry", addr);
			ident = 0;
			break;
		}
		paddr = (uintptr_t)parent.vm_source;
	}

	(void) mdb_snprintf(c, VMEM_NAMEWIDTH, "%*s%s", ident, "", v.vm_name);

	mdb_printf("%0?p %-*s %10llu %12llu %9llu %5llu\n",
	    addr, VMEM_NAMEWIDTH, c,
	    v.vm_kstat.vk_mem_inuse, v.vm_kstat.vk_mem_total,
	    v.vm_kstat.vk_alloc, v.vm_kstat.vk_fail);

	return (DCMD_OK);
}

void
vmem_seg_help(void)
{
	mdb_printf("%s\n",
"Display the contents of vmem_seg_ts, with optional filtering.\n"
"\n"
"A vmem_seg_t represents a range of addresses (or arbitrary numbers),\n"
"representing a single chunk of data.  Only ALLOC segments have debugging\n"
"information.\n");
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s",
"  -v    Display the full content of the vmem_seg, including its stack trace\n"
"  -s    report the size of the segment, instead of the end address\n"
"  -c caller\n"
"        filter out segments without the function/PC in their stack trace\n"
"  -e earliest\n"
"        filter out segments timestamped before earliest\n"
"  -l latest\n"
"        filter out segments timestamped after latest\n"
"  -m minsize\n"
"        filter out segments smaller than minsize\n"
"  -M maxsize\n"
"        filter out segments larger than maxsize\n"
"  -t thread\n"
"        filter out segments not involving thread\n"
"  -T type\n"
"        filter out segments not of type 'type'\n"
"        type is one of: ALLOC/FREE/SPAN/ROTOR/WALKER\n");
}

/*ARGSUSED*/
int
vmem_seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	vmem_seg_t vs;
	uintptr_t *stk = vs.vs_stack;
	uintptr_t sz;
	uint8_t t;
	const char *type = NULL;
	GElf_Sym sym;
	char c[MDB_SYM_NAMLEN];
	int no_debug;
	int i;
	int depth;
	uintptr_t laddr, haddr;

	uintptr_t caller = NULL, thread = NULL;
	uintptr_t minsize = 0, maxsize = 0;

	hrtime_t earliest = 0, latest = 0;

	uint_t size = 0;
	uint_t verbose = 0;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'c', MDB_OPT_UINTPTR, &caller,
	    'e', MDB_OPT_UINT64, &earliest,
	    'l', MDB_OPT_UINT64, &latest,
	    's', MDB_OPT_SETBITS, TRUE, &size,
	    'm', MDB_OPT_UINTPTR, &minsize,
	    'M', MDB_OPT_UINTPTR, &maxsize,
	    't', MDB_OPT_UINTPTR, &thread,
	    'T', MDB_OPT_STR, &type,
	    'v', MDB_OPT_SETBITS, TRUE, &verbose,
	    NULL) != argc)
		return (DCMD_USAGE);

	if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
		if (verbose) {
			mdb_printf("%16s %4s %16s %16s %16s\n"
			    "%<u>%16s %4s %16s %16s %16s%</u>\n",
			    "ADDR", "TYPE", "START", "END", "SIZE",
			    "", "", "THREAD", "TIMESTAMP",
""); 31370Sstevel@tonic-gate } else { 31380Sstevel@tonic-gate mdb_printf("%?s %4s %?s %?s %s\n", "ADDR", "TYPE", 31390Sstevel@tonic-gate "START", size? "SIZE" : "END", "WHO"); 31400Sstevel@tonic-gate } 31410Sstevel@tonic-gate } 31420Sstevel@tonic-gate 31430Sstevel@tonic-gate if (mdb_vread(&vs, sizeof (vs), addr) == -1) { 31440Sstevel@tonic-gate mdb_warn("couldn't read vmem_seg at %p", addr); 31450Sstevel@tonic-gate return (DCMD_ERR); 31460Sstevel@tonic-gate } 31470Sstevel@tonic-gate 31480Sstevel@tonic-gate if (type != NULL) { 31490Sstevel@tonic-gate if (strcmp(type, "ALLC") == 0 || strcmp(type, "ALLOC") == 0) 31500Sstevel@tonic-gate t = VMEM_ALLOC; 31510Sstevel@tonic-gate else if (strcmp(type, "FREE") == 0) 31520Sstevel@tonic-gate t = VMEM_FREE; 31530Sstevel@tonic-gate else if (strcmp(type, "SPAN") == 0) 31540Sstevel@tonic-gate t = VMEM_SPAN; 31550Sstevel@tonic-gate else if (strcmp(type, "ROTR") == 0 || 31560Sstevel@tonic-gate strcmp(type, "ROTOR") == 0) 31570Sstevel@tonic-gate t = VMEM_ROTOR; 31580Sstevel@tonic-gate else if (strcmp(type, "WLKR") == 0 || 31590Sstevel@tonic-gate strcmp(type, "WALKER") == 0) 31600Sstevel@tonic-gate t = VMEM_WALKER; 31610Sstevel@tonic-gate else { 31620Sstevel@tonic-gate mdb_warn("\"%s\" is not a recognized vmem_seg type\n", 31630Sstevel@tonic-gate type); 31640Sstevel@tonic-gate return (DCMD_ERR); 31650Sstevel@tonic-gate } 31660Sstevel@tonic-gate 31670Sstevel@tonic-gate if (vs.vs_type != t) 31680Sstevel@tonic-gate return (DCMD_OK); 31690Sstevel@tonic-gate } 31700Sstevel@tonic-gate 31710Sstevel@tonic-gate sz = vs.vs_end - vs.vs_start; 31720Sstevel@tonic-gate 31730Sstevel@tonic-gate if (minsize != 0 && sz < minsize) 31740Sstevel@tonic-gate return (DCMD_OK); 31750Sstevel@tonic-gate 31760Sstevel@tonic-gate if (maxsize != 0 && sz > maxsize) 31770Sstevel@tonic-gate return (DCMD_OK); 31780Sstevel@tonic-gate 31790Sstevel@tonic-gate t = vs.vs_type; 31800Sstevel@tonic-gate depth = vs.vs_depth; 31810Sstevel@tonic-gate 31820Sstevel@tonic-gate /* 31830Sstevel@tonic-gate * debug info, when present, is only accurate for VMEM_ALLOC segments 31840Sstevel@tonic-gate */ 31850Sstevel@tonic-gate no_debug = (t != VMEM_ALLOC) || 31860Sstevel@tonic-gate (depth == 0 || depth > VMEM_STACK_DEPTH); 31870Sstevel@tonic-gate 31880Sstevel@tonic-gate if (no_debug) { 31890Sstevel@tonic-gate if (caller != NULL || thread != NULL || earliest != 0 || 31900Sstevel@tonic-gate latest != 0) 31910Sstevel@tonic-gate return (DCMD_OK); /* not enough info */ 31920Sstevel@tonic-gate } else { 31930Sstevel@tonic-gate if (caller != NULL) { 31940Sstevel@tonic-gate laddr = caller; 31950Sstevel@tonic-gate haddr = caller + sizeof (caller); 31960Sstevel@tonic-gate 31970Sstevel@tonic-gate if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, 31980Sstevel@tonic-gate sizeof (c), &sym) != -1 && 31990Sstevel@tonic-gate caller == (uintptr_t)sym.st_value) { 32000Sstevel@tonic-gate /* 32010Sstevel@tonic-gate * We were provided an exact symbol value; any 32020Sstevel@tonic-gate * address in the function is valid. 
				 */
				laddr = (uintptr_t)sym.st_value;
				haddr = (uintptr_t)sym.st_value + sym.st_size;
			}

			for (i = 0; i < depth; i++)
				if (vs.vs_stack[i] >= laddr &&
				    vs.vs_stack[i] < haddr)
					break;

			if (i == depth)
				return (DCMD_OK);
		}

		if (thread != NULL && (uintptr_t)vs.vs_thread != thread)
			return (DCMD_OK);

		if (earliest != 0 && vs.vs_timestamp < earliest)
			return (DCMD_OK);

		if (latest != 0 && vs.vs_timestamp > latest)
			return (DCMD_OK);
	}

	type = (t == VMEM_ALLOC ? "ALLC" :
	    t == VMEM_FREE ? "FREE" :
	    t == VMEM_SPAN ? "SPAN" :
	    t == VMEM_ROTOR ? "ROTR" :
	    t == VMEM_WALKER ? "WLKR" :
	    "????");

	if (flags & DCMD_PIPE_OUT) {
		mdb_printf("%#r\n", addr);
		return (DCMD_OK);
	}

	if (verbose) {
		mdb_printf("%<b>%16p%</b> %4s %16p %16p %16d\n",
		    addr, type, vs.vs_start, vs.vs_end, sz);

		if (no_debug)
			return (DCMD_OK);

		mdb_printf("%16s %4s %16d %16llx\n",
		    "", "", vs.vs_thread, vs.vs_timestamp);

		mdb_inc_indent(17);
		for (i = 0; i < depth; i++) {
			mdb_printf("%a\n", stk[i]);
		}
		mdb_dec_indent(17);
		mdb_printf("\n");
	} else {
		mdb_printf("%0?p %4s %0?p %0?p", addr, type,
		    vs.vs_start, size ? sz : vs.vs_end);

		if (no_debug) {
			mdb_printf("\n");
			return (DCMD_OK);
		}

		for (i = 0; i < depth; i++) {
			if (mdb_lookup_by_addr(stk[i], MDB_SYM_FUZZY,
			    c, sizeof (c), &sym) == -1)
				continue;
			if (is_umem_sym(c, "vmem_"))
				continue;
			break;
		}
		mdb_printf(" %a\n", stk[i]);
	}
	return (DCMD_OK);
}

/*ARGSUSED*/
static int
showbc(uintptr_t addr, const umem_bufctl_audit_t *bcp, hrtime_t *newest)
{
	char name[UMEM_CACHE_NAMELEN + 1];
	hrtime_t delta;
	int i, depth;

	if (bcp->bc_timestamp == 0)
		return (WALK_DONE);

	if (*newest == 0)
		*newest = bcp->bc_timestamp;

	delta = *newest - bcp->bc_timestamp;
	depth = MIN(bcp->bc_depth, umem_stack_depth);

	if (mdb_readstr(name, sizeof (name), (uintptr_t)
	    &bcp->bc_cache->cache_name) <= 0)
		(void) mdb_snprintf(name, sizeof (name), "%a", bcp->bc_cache);

	mdb_printf("\nT-%lld.%09lld  addr=%p  %s\n",
	    delta / NANOSEC, delta % NANOSEC, bcp->bc_addr, name);

	for (i = 0; i < depth; i++)
		mdb_printf("\t %a\n", bcp->bc_stack[i]);

	return (WALK_NEXT);
}

int
umalog(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	const char *logname = "umem_transaction_log";
	hrtime_t newest = 0;

	if ((flags & DCMD_ADDRSPEC) || argc > 1)
		return (DCMD_USAGE);

	if (argc > 0) {
		if (argv->a_type != MDB_TYPE_STRING)
			return (DCMD_USAGE);
		if (strcmp(argv->a_un.a_str, "fail") == 0)
			logname = "umem_failure_log";
		else if (strcmp(argv->a_un.a_str, "slab") == 0)
			logname = "umem_slab_log";
		else
			return (DCMD_USAGE);
	}

	if (umem_readvar(&addr, logname) == -1) {
		mdb_warn("failed to read %s log header pointer", logname);
		return (DCMD_ERR);
	}

	if (mdb_pwalk("umem_log", (mdb_walk_cb_t)showbc, &newest, addr) == -1) {
		mdb_warn("failed to walk umem log");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

/*
 * As the final lure for die-hard crash(1M) users, we provide ::umausers here.
 * The first piece is a structure which we use to accumulate umem_cache_t
 * addresses of interest.  The umc_add is used as a callback for the umem_cache
 * walker; we either add all caches, or ones named explicitly as arguments.
 */

typedef struct umclist {
	const char *umc_name;			/* Name to match (or NULL) */
	uintptr_t *umc_caches;			/* List of umem_cache_t addrs */
	int umc_nelems;				/* Num entries in umc_caches */
	int umc_size;				/* Size of umc_caches array */
} umclist_t;

static int
umc_add(uintptr_t addr, const umem_cache_t *cp, umclist_t *umc)
{
	void *p;
	int s;

	if (umc->umc_name == NULL ||
	    strcmp(cp->cache_name, umc->umc_name) == 0) {
		/*
		 * If we have a match, grow our array (if necessary), and then
		 * add the virtual address of the matching cache to our list.
		 */
		if (umc->umc_nelems >= umc->umc_size) {
			s = umc->umc_size ? umc->umc_size * 2 : 256;
			p = mdb_alloc(sizeof (uintptr_t) * s, UM_SLEEP | UM_GC);

			bcopy(umc->umc_caches, p,
			    sizeof (uintptr_t) * umc->umc_size);

			umc->umc_caches = p;
			umc->umc_size = s;
		}

		umc->umc_caches[umc->umc_nelems++] = addr;
		return (umc->umc_name ? WALK_DONE : WALK_NEXT);
	}

	return (WALK_NEXT);
}

/*
 * The second piece of ::umausers is a hash table of allocations.  Each
 * allocation owner is identified by its stack trace and data_size.  We then
 * track the total bytes of all such allocations, and the number of allocations
 * to report at the end.  Once we have a list of caches, we walk through the
 * allocated bufctls of each, and update our hash table accordingly.
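 *
 * The signature is simply data_size plus the sum of the stack PCs, and the
 * table size stays a power of two so that "signature & (size - 1)" picks a
 * bucket; matching signatures are then confirmed field by field.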
 */

typedef struct umowner {
	struct umowner *umo_head;		/* First hash elt in bucket */
	struct umowner *umo_next;		/* Next hash elt in chain */
	size_t umo_signature;			/* Hash table signature */
	uint_t umo_num;				/* Number of allocations */
	size_t umo_data_size;			/* Size of each allocation */
	size_t umo_total_size;			/* Total bytes of allocation */
	int umo_depth;				/* Depth of stack trace */
	uintptr_t *umo_stack;			/* Stack trace */
} umowner_t;

typedef struct umusers {
	const umem_cache_t *umu_cache;		/* Current umem cache */
	umowner_t *umu_hash;			/* Hash table of owners */
	uintptr_t *umu_stacks;			/* stacks for owners */
	int umu_nelems;				/* Number of entries in use */
	int umu_size;				/* Total number of entries */
} umusers_t;

static void
umu_add(umusers_t *umu, const umem_bufctl_audit_t *bcp,
    size_t size, size_t data_size)
{
	int i, depth = MIN(bcp->bc_depth, umem_stack_depth);
	size_t bucket, signature = data_size;
	umowner_t *umo, *umoend;

	/*
	 * If the hash table is full, double its size and rehash everything.
	 */
	if (umu->umu_nelems >= umu->umu_size) {
		int s = umu->umu_size ? umu->umu_size * 2 : 1024;
		size_t umowner_size = sizeof (umowner_t);
		size_t trace_size = umem_stack_depth * sizeof (uintptr_t);
		uintptr_t *new_stacks;

		umo = mdb_alloc(umowner_size * s, UM_SLEEP | UM_GC);
		new_stacks = mdb_alloc(trace_size * s, UM_SLEEP | UM_GC);

		bcopy(umu->umu_hash, umo, umowner_size * umu->umu_size);
		bcopy(umu->umu_stacks, new_stacks, trace_size * umu->umu_size);
		umu->umu_hash = umo;
		umu->umu_stacks = new_stacks;
		umu->umu_size = s;

		umoend = umu->umu_hash + umu->umu_size;
		for (umo = umu->umu_hash; umo < umoend; umo++) {
			umo->umo_head = NULL;
			umo->umo_stack = &umu->umu_stacks[
			    umem_stack_depth * (umo - umu->umu_hash)];
		}

		umoend = umu->umu_hash + umu->umu_nelems;
		for (umo = umu->umu_hash; umo < umoend; umo++) {
			bucket = umo->umo_signature & (umu->umu_size - 1);
			umo->umo_next = umu->umu_hash[bucket].umo_head;
			umu->umu_hash[bucket].umo_head = umo;
		}
	}

	/*
	 * Finish computing the hash signature from the stack trace, and then
	 * see if the owner is in the hash table.  If so, update our stats.
	 */
	for (i = 0; i < depth; i++)
		signature += bcp->bc_stack[i];

	bucket = signature & (umu->umu_size - 1);

	for (umo = umu->umu_hash[bucket].umo_head; umo; umo = umo->umo_next) {
		if (umo->umo_signature == signature) {
			size_t difference = 0;

			difference |= umo->umo_data_size - data_size;
			difference |= umo->umo_depth - depth;

			for (i = 0; i < depth; i++) {
				difference |= umo->umo_stack[i] -
				    bcp->bc_stack[i];
			}

			if (difference == 0) {
				umo->umo_total_size += size;
				umo->umo_num++;
				return;
			}
		}
	}

	/*
	 * If the owner is not yet hashed, grab the next element and fill it
	 * in based on the allocation information.
	 */
	umo = &umu->umu_hash[umu->umu_nelems++];
	umo->umo_next = umu->umu_hash[bucket].umo_head;
	umu->umu_hash[bucket].umo_head = umo;

	umo->umo_signature = signature;
	umo->umo_num = 1;
	umo->umo_data_size = data_size;
	umo->umo_total_size = size;
	umo->umo_depth = depth;

	for (i = 0; i < depth; i++)
		umo->umo_stack[i] = bcp->bc_stack[i];
}

/*
 * When ::umausers is invoked without the -f flag, we simply update our hash
 * table with the information from each allocated bufctl.
 */
/*ARGSUSED*/
static int
umause1(uintptr_t addr, const umem_bufctl_audit_t *bcp, umusers_t *umu)
{
	const umem_cache_t *cp = umu->umu_cache;

	umu_add(umu, bcp, cp->cache_bufsize, cp->cache_bufsize);
	return (WALK_NEXT);
}

/*
 * When ::umausers is invoked with the -f flag, we print out the information
 * for each bufctl as well as updating the hash table.
 */
static int
umause2(uintptr_t addr, const umem_bufctl_audit_t *bcp, umusers_t *umu)
{
	int i, depth = MIN(bcp->bc_depth, umem_stack_depth);
	const umem_cache_t *cp = umu->umu_cache;

	mdb_printf("size %d, addr %p, thread %p, cache %s\n",
	    cp->cache_bufsize, addr, bcp->bc_thread, cp->cache_name);

	for (i = 0; i < depth; i++)
		mdb_printf("\t %a\n", bcp->bc_stack[i]);

	umu_add(umu, bcp, cp->cache_bufsize, cp->cache_bufsize);
	return (WALK_NEXT);
}

/*
 * We sort our results by allocation size before printing them.
 */
static int
umownercmp(const void *lp, const void *rp)
{
	const umowner_t *lhs = lp;
	const umowner_t *rhs = rp;

	return (rhs->umo_total_size - lhs->umo_total_size);
}

/*
 * The main engine of ::umausers is relatively straightforward: First we
 * accumulate our list of umem_cache_t addresses into the umclist_t.  Next we
 * iterate over the allocated bufctls of each cache in the list.  Finally,
 * we sort and print our results.
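 *
 * Typical invocations (illustrative; cache names and figures vary by
 * process): "::umausers" summarizes every UMF_AUDIT cache,
 * "::umausers umem_alloc_256" restricts the report to one cache, "-e"
 * includes small users, and "-f" also prints each allocation's stack
 * trace as it is encountered.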
 */
/*ARGSUSED*/
int
umausers(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	int mem_threshold = 8192;	/* Minimum # bytes for printing */
	int cnt_threshold = 100;	/* Minimum # blocks for printing */
	int audited_caches = 0;		/* Number of UMF_AUDIT caches found */
	int do_all_caches = 1;		/* Do all caches (no arguments) */
	int opt_e = FALSE;		/* Include "small" users */
	int opt_f = FALSE;		/* Print stack traces */

	mdb_walk_cb_t callback = (mdb_walk_cb_t)umause1;
	umowner_t *umo, *umoend;
	int i, oelems;

	umclist_t umc;
	umusers_t umu;

	if (flags & DCMD_ADDRSPEC)
		return (DCMD_USAGE);

	bzero(&umc, sizeof (umc));
	bzero(&umu, sizeof (umu));

	while ((i = mdb_getopts(argc, argv,
	    'e', MDB_OPT_SETBITS, TRUE, &opt_e,
	    'f', MDB_OPT_SETBITS, TRUE, &opt_f, NULL)) != argc) {

		argv += i;	/* skip past options we just processed */
		argc -= i;	/* adjust argc */

		if (argv->a_type != MDB_TYPE_STRING ||
		    *argv->a_un.a_str == '-')
			return (DCMD_USAGE);

		oelems = umc.umc_nelems;
		umc.umc_name = argv->a_un.a_str;
		(void) mdb_walk("umem_cache", (mdb_walk_cb_t)umc_add, &umc);

		if (umc.umc_nelems == oelems) {
			mdb_warn("unknown umem cache: %s\n", umc.umc_name);
			return (DCMD_ERR);
		}

		do_all_caches = 0;
		argv++;
		argc--;
	}

	if (opt_e)
		mem_threshold = cnt_threshold = 0;

	if (opt_f)
		callback = (mdb_walk_cb_t)umause2;

	if (do_all_caches) {
		umc.umc_name = NULL;	/* match all cache names */
		(void) mdb_walk("umem_cache", (mdb_walk_cb_t)umc_add, &umc);
	}

	for (i = 0; i < umc.umc_nelems; i++) {
		uintptr_t cp = umc.umc_caches[i];
		umem_cache_t c;

		if (mdb_vread(&c, sizeof (c), cp) == -1) {
			mdb_warn("failed to read cache at %p", cp);
			continue;
		}

		if (!(c.cache_flags & UMF_AUDIT)) {
			if (!do_all_caches) {
				mdb_warn("UMF_AUDIT is not enabled for %s\n",
				    c.cache_name);
			}
			continue;
		}

		umu.umu_cache = &c;
		(void) mdb_pwalk("bufctl", callback, &umu, cp);
		audited_caches++;
	}

	if (audited_caches == 0 && do_all_caches) {
		mdb_warn("UMF_AUDIT is not enabled for any caches\n");
		return (DCMD_ERR);
	}

	qsort(umu.umu_hash, umu.umu_nelems, sizeof (umowner_t), umownercmp);
	umoend = umu.umu_hash + umu.umu_nelems;

	for (umo = umu.umu_hash; umo < umoend; umo++) {
		if (umo->umo_total_size < mem_threshold &&
		    umo->umo_num < cnt_threshold)
			continue;
		mdb_printf("%lu bytes for %u allocations with data size %lu:\n",
		    umo->umo_total_size, umo->umo_num, umo->umo_data_size);
		for (i = 0; i < umo->umo_depth; i++)
			mdb_printf("\t %a\n", umo->umo_stack[i]);
	}

	return (DCMD_OK);
}

struct malloc_data {
	uint32_t malloc_size;
	uint32_t malloc_stat;	/* == UMEM_MALLOC_ENCODE(state, malloc_size) */
};

#ifdef _LP64
#define	UMI_MAX_BUCKET	(UMEM_MAXBUF - 2 * sizeof (struct malloc_data))
#else
#define	UMI_MAX_BUCKET	(UMEM_MAXBUF - sizeof (struct malloc_data))
#endif

typedef struct umem_malloc_info {
	size_t um_total;		/* total allocated buffers */
	size_t um_malloc;		/* malloc buffers */
	size_t um_malloc_size;		/* sum of malloc buffer sizes */
	size_t um_malloc_overhead;	/* sum of in-chunk overheads */

	umem_cache_t *um_cp;

	uint_t *um_bucket;
} umem_malloc_info_t;

static void
umem_malloc_print_dist(uint_t *um_bucket, size_t minmalloc, size_t maxmalloc,
    size_t maxbuckets, size_t minbucketsize, int geometric)
{
	uint64_t um_malloc;
	int minb = -1;
	int maxb = -1;
	int buckets;
	int nbucks;
	int i;
	int b;
	const int *distarray;

	minb = (int)minmalloc;
	maxb = (int)maxmalloc;

	nbucks = buckets = maxb - minb + 1;

	um_malloc = 0;
	for (b = minb; b <= maxb; b++)
		um_malloc += um_bucket[b];

	if (maxbuckets != 0)
		buckets = MIN(buckets, maxbuckets);

	if (minbucketsize > 1) {
		buckets = MIN(buckets, nbucks / minbucketsize);
		if (buckets == 0) {
			buckets = 1;
			minbucketsize = nbucks;
		}
	}

	if (geometric)
		distarray = dist_geometric(buckets, minb, maxb,
		    minbucketsize);
	else
		distarray = dist_linear(buckets, minb, maxb);

	dist_print_header("malloc size", 11, "count");
	for (i = 0; i < buckets; i++) {
		dist_print_bucket(distarray, i, um_bucket, um_malloc, 11);
	}
	mdb_printf("\n");
}

/*
 * A malloc()ed buffer looks like:
 *
 *	<----------- mi.malloc_size --->
 *	<----------- cp.cache_bufsize ------------------>
 *	<----------- cp.cache_chunksize -------------------------------->
 *	+-------+-----------------------+---------------+---------------+
 *	|/tag///| mallocsz		|/round-off/////|/debug info////|
 *	+-------+---------------------------------------+---------------+
 *		<-- usable space ------>
 *
 * mallocsz is the argument to malloc(3C).
 * mi.malloc_size is the actual size passed to umem_alloc(), which
 * is rounded up to the smallest available cache size, which is
 * cache_bufsize.  If there is debugging or alignment overhead in
 * the cache, that is reflected in a larger cache_chunksize.
 *
 * The tag at the beginning of the buffer is either 8 bytes or 16 bytes,
 * depending upon the ISA's alignment requirements.  For 32-bit allocations,
 * it is always an 8-byte tag.  For 64-bit allocations larger than 8 bytes,
 * the tag has 8 bytes of padding before it.
 *
 * 32-bit buffers, and 64-bit buffers <= 8 bytes:
 *	+-------+-------+--------- ...
 *	|/size//|/stat//| mallocsz ...
 *	+-------+-------+--------- ...
 *			^
 *			pointer returned from malloc(3C)
 *
 * 64-bit buffers > 8 bytes:
 *	+---------------+-------+-------+--------- ...
 *	|/padding///////|/size//|/stat//| mallocsz ...
 *	+---------------+-------+-------+--------- ...
 *					^
 *					pointer returned from malloc(3C)
 *
 * The "size" field is "malloc_size", which is mallocsz + the padding.
 * The "stat" field is derived from malloc_size, and functions as a
 * validation that this buffer is actually from malloc(3C).
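 *
 * Worked example (illustrative; assumes 32-bit, no debug flags, so
 * cache_chunksize == cache_bufsize):  malloc(100) requests 100 + 8 = 108
 * bytes, which umem_alloc() satisfies from a 112-byte cache.  malloc_size
 * is therefore 108, mallocsz is 108 - 8 = 100, and ::umem_malloc_info
 * charges 8 bytes of tag plus 112 - 108 = 4 bytes of round-off as
 * overhead.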
 */
/*ARGSUSED*/
static int
um_umem_buffer_cb(uintptr_t addr, void *buf, umem_malloc_info_t *ump)
{
	struct malloc_data md;
	size_t m_addr = addr;
	size_t overhead = sizeof (md);
	size_t mallocsz;

	ump->um_total++;

#ifdef _LP64
	if (ump->um_cp->cache_bufsize > UMEM_SECOND_ALIGN) {
		m_addr += overhead;
		overhead += sizeof (md);
	}
#endif

	if (mdb_vread(&md, sizeof (md), m_addr) == -1) {
		mdb_warn("unable to read malloc header at %p", m_addr);
		return (WALK_NEXT);
	}

	switch (UMEM_MALLOC_DECODE(md.malloc_stat, md.malloc_size)) {
	case MALLOC_MAGIC:
#ifdef _LP64
	case MALLOC_SECOND_MAGIC:
#endif
		mallocsz = md.malloc_size - overhead;

		ump->um_malloc++;
		ump->um_malloc_size += mallocsz;
		ump->um_malloc_overhead += overhead;

		/* include round-off and debug overhead */
		ump->um_malloc_overhead +=
		    ump->um_cp->cache_chunksize - md.malloc_size;

		if (ump->um_bucket != NULL && mallocsz <= UMI_MAX_BUCKET)
			ump->um_bucket[mallocsz]++;

		break;
	default:
		break;
	}

	return (WALK_NEXT);
}

int
get_umem_alloc_sizes(int **out, size_t *out_num)
{
	GElf_Sym sym;

	if (umem_lookup_by_name("umem_alloc_sizes", &sym) == -1) {
		mdb_warn("unable to look up umem_alloc_sizes");
		return (-1);
	}

	*out = mdb_alloc(sym.st_size, UM_SLEEP | UM_GC);
	*out_num = sym.st_size / sizeof (int);

	if (mdb_vread(*out, sym.st_size, sym.st_value) == -1) {
		mdb_warn("unable to read umem_alloc_sizes (%p)", sym.st_value);
		*out = NULL;
		return (-1);
	}

	return (0);
}

static int
um_umem_cache_cb(uintptr_t addr, umem_cache_t *cp, umem_malloc_info_t *ump)
{
	if (strncmp(cp->cache_name, "umem_alloc_", strlen("umem_alloc_")) != 0)
		return (WALK_NEXT);

	ump->um_cp = cp;

	if (mdb_pwalk("umem", (mdb_walk_cb_t)um_umem_buffer_cb, ump,
	    addr) == -1) {
		mdb_warn("can't walk 'umem' for cache %p", addr);
		return (WALK_ERR);
	}

	return (WALK_NEXT);
}

void
umem_malloc_dist_help(void)
{
	mdb_printf("%s\n",
	    "report distribution of outstanding malloc()s");
	mdb_dec_indent(2);
mdb_printf("%<b>OPTIONS%</b>\n"); 38561528Sjwadams mdb_inc_indent(2); 38571528Sjwadams mdb_printf("%s", 38581528Sjwadams " -b maxbins\n" 38591528Sjwadams " Use at most maxbins bins for the data\n" 38601528Sjwadams " -B minbinsize\n" 38611528Sjwadams " Make the bins at least minbinsize bytes apart\n" 38621528Sjwadams " -d dump the raw data out, without binning\n" 38631528Sjwadams " -g use geometric binning instead of linear binning\n"); 38641528Sjwadams } 38651528Sjwadams 38661528Sjwadams /*ARGSUSED*/ 38671528Sjwadams int 38681528Sjwadams umem_malloc_dist(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 38691528Sjwadams { 38701528Sjwadams umem_malloc_info_t mi; 38711528Sjwadams uint_t geometric = 0; 38721528Sjwadams uint_t dump = 0; 38731528Sjwadams size_t maxbuckets = 0; 38741528Sjwadams size_t minbucketsize = 0; 38751528Sjwadams 38761528Sjwadams size_t minalloc = 0; 38771528Sjwadams size_t maxalloc = UMI_MAX_BUCKET; 38781528Sjwadams 38791528Sjwadams if (flags & DCMD_ADDRSPEC) 38801528Sjwadams return (DCMD_USAGE); 38811528Sjwadams 38821528Sjwadams if (mdb_getopts(argc, argv, 38831528Sjwadams 'd', MDB_OPT_SETBITS, TRUE, &dump, 38841528Sjwadams 'g', MDB_OPT_SETBITS, TRUE, &geometric, 38851528Sjwadams 'b', MDB_OPT_UINTPTR, &maxbuckets, 38861528Sjwadams 'B', MDB_OPT_UINTPTR, &minbucketsize, 38871528Sjwadams 0) != argc) 38881528Sjwadams return (DCMD_USAGE); 38891528Sjwadams 38901528Sjwadams bzero(&mi, sizeof (mi)); 38911528Sjwadams mi.um_bucket = mdb_zalloc((UMI_MAX_BUCKET + 1) * sizeof (*mi.um_bucket), 38921528Sjwadams UM_SLEEP | UM_GC); 38931528Sjwadams 38941528Sjwadams if (mdb_walk("umem_cache", (mdb_walk_cb_t)um_umem_cache_cb, 38951528Sjwadams &mi) == -1) { 38961528Sjwadams mdb_warn("unable to walk 'umem_cache'"); 38971528Sjwadams return (DCMD_ERR); 38981528Sjwadams } 38991528Sjwadams 39001528Sjwadams if (dump) { 39011528Sjwadams int i; 39021528Sjwadams for (i = minalloc; i <= maxalloc; i++) 39031528Sjwadams mdb_printf("%d\t%d\n", i, mi.um_bucket[i]); 39041528Sjwadams 39051528Sjwadams return (DCMD_OK); 39061528Sjwadams } 39071528Sjwadams 39081528Sjwadams umem_malloc_print_dist(mi.um_bucket, minalloc, maxalloc, 39091528Sjwadams maxbuckets, minbucketsize, geometric); 39101528Sjwadams 39111528Sjwadams return (DCMD_OK); 39121528Sjwadams } 39131528Sjwadams 39141528Sjwadams void 39151528Sjwadams umem_malloc_info_help(void) 39161528Sjwadams { 39171528Sjwadams mdb_printf("%s\n", 39181528Sjwadams "report information about malloc()s by cache. 
"); 39191528Sjwadams mdb_dec_indent(2); 39201528Sjwadams mdb_printf("%<b>OPTIONS%</b>\n"); 39211528Sjwadams mdb_inc_indent(2); 39221528Sjwadams mdb_printf("%s", 39231528Sjwadams " -b maxbins\n" 39241528Sjwadams " Use at most maxbins bins for the data\n" 39251528Sjwadams " -B minbinsize\n" 39261528Sjwadams " Make the bins at least minbinsize bytes apart\n" 39271528Sjwadams " -d dump the raw distribution data without binning\n" 39281528Sjwadams #ifndef _KMDB 39291528Sjwadams " -g use geometric binning instead of linear binning\n" 39301528Sjwadams #endif 39311528Sjwadams ""); 39321528Sjwadams } 39331528Sjwadams int 39341528Sjwadams umem_malloc_info(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 39351528Sjwadams { 39361528Sjwadams umem_cache_t c; 39371528Sjwadams umem_malloc_info_t mi; 39381528Sjwadams 39391528Sjwadams int skip = 0; 39401528Sjwadams 39411528Sjwadams size_t maxmalloc; 39421528Sjwadams size_t overhead; 39431528Sjwadams size_t allocated; 39441528Sjwadams size_t avg_malloc; 39451528Sjwadams size_t overhead_pct; /* 1000 * overhead_percent */ 39461528Sjwadams 39471528Sjwadams uint_t verbose = 0; 39481528Sjwadams uint_t dump = 0; 39491528Sjwadams uint_t geometric = 0; 39501528Sjwadams size_t maxbuckets = 0; 39511528Sjwadams size_t minbucketsize = 0; 39521528Sjwadams 39531528Sjwadams int *alloc_sizes; 39541528Sjwadams int idx; 39551528Sjwadams size_t num; 39561528Sjwadams size_t minmalloc; 39571528Sjwadams 39581528Sjwadams if (mdb_getopts(argc, argv, 39591528Sjwadams 'd', MDB_OPT_SETBITS, TRUE, &dump, 39601528Sjwadams 'g', MDB_OPT_SETBITS, TRUE, &geometric, 39611528Sjwadams 'b', MDB_OPT_UINTPTR, &maxbuckets, 39621528Sjwadams 'B', MDB_OPT_UINTPTR, &minbucketsize, 39631528Sjwadams 0) != argc) 39641528Sjwadams return (DCMD_USAGE); 39651528Sjwadams 39661528Sjwadams if (dump || geometric || (maxbuckets != 0) || (minbucketsize != 0)) 39671528Sjwadams verbose = 1; 39681528Sjwadams 39691528Sjwadams if (!(flags & DCMD_ADDRSPEC)) { 39701528Sjwadams if (mdb_walk_dcmd("umem_cache", "umem_malloc_info", 39711528Sjwadams argc, argv) == -1) { 39721528Sjwadams mdb_warn("can't walk umem_cache"); 39731528Sjwadams return (DCMD_ERR); 39741528Sjwadams } 39751528Sjwadams return (DCMD_OK); 39761528Sjwadams } 39771528Sjwadams 39781528Sjwadams if (!mdb_vread(&c, sizeof (c), addr)) { 39791528Sjwadams mdb_warn("unable to read cache at %p", addr); 39801528Sjwadams return (DCMD_ERR); 39811528Sjwadams } 39821528Sjwadams 39831528Sjwadams if (strncmp(c.cache_name, "umem_alloc_", strlen("umem_alloc_")) != 0) { 39841528Sjwadams if (!(flags & DCMD_LOOP)) 39851528Sjwadams mdb_warn("umem_malloc_info: cache \"%s\" is not used " 39861528Sjwadams "by malloc()\n", c.cache_name); 39871528Sjwadams skip = 1; 39881528Sjwadams } 39891528Sjwadams 39901528Sjwadams /* 39911528Sjwadams * normally, print the header only the first time. 
	/*
	 * Normally, print the header only the first time.  In verbose mode,
	 * print the header for every non-skipped cache.
	 */
	if ((!verbose && DCMD_HDRSPEC(flags)) || (verbose && !skip))
		mdb_printf("%<ul>%-?s %6s %6s %8s %8s %10s %10s %6s%</ul>\n",
		    "CACHE", "BUFSZ", "MAXMAL",
		    "BUFMALLC", "AVG_MAL", "MALLOCED", "OVERHEAD", "%OVER");

	if (skip)
		return (DCMD_OK);

	maxmalloc = c.cache_bufsize - sizeof (struct malloc_data);
#ifdef _LP64
	if (c.cache_bufsize > UMEM_SECOND_ALIGN)
		maxmalloc -= sizeof (struct malloc_data);
#endif

	bzero(&mi, sizeof (mi));
	mi.um_cp = &c;
	if (verbose)
		mi.um_bucket =
		    mdb_zalloc((UMI_MAX_BUCKET + 1) * sizeof (*mi.um_bucket),
		    UM_SLEEP | UM_GC);

	if (mdb_pwalk("umem", (mdb_walk_cb_t)um_umem_buffer_cb, &mi, addr) ==
	    -1) {
		mdb_warn("can't walk 'umem'");
		return (DCMD_ERR);
	}

	overhead = mi.um_malloc_overhead;
	allocated = mi.um_malloc_size;

	/* do integer round-off for the average */
	if (mi.um_malloc != 0)
		avg_malloc = (allocated + (mi.um_malloc - 1)/2) / mi.um_malloc;
	else
		avg_malloc = 0;

	/*
	 * include per-slab overhead
	 *
	 * Each slab in a given cache is the same size, and has the same
	 * number of chunks in it; we read in the first slab on the
	 * slab list to get the number of chunks for all slabs.  To
	 * compute the per-slab overhead, we just subtract the chunk usage
	 * from the slabsize:
	 *
	 * +------------+-------+-------+ ... --+-------+-------+-------+
	 * |////////////|       |       | ...   |       |///////|///////|
	 * |////color///| chunk | chunk | ...   | chunk |/color/|/slab//|
	 * |////////////|       |       | ...   |       |///////|///////|
	 * +------------+-------+-------+ ... --+-------+-------+-------+
	 * |            \_______chunksize * chunks_____/                |
	 * \__________________________slabsize__________________________/
	 *
	 * For UMF_HASH caches, there is an additional source of overhead:
	 * the external umem_slab_t and per-chunk bufctl structures.  We
	 * include those in our per-slab overhead.
	 *
	 * Once we have a number for the per-slab overhead, we estimate
	 * the actual overhead by treating the malloc()ed buffers as if
	 * they were densely packed:
	 *
	 *	additional overhead = (# mallocs) * (per-slab) / (chunks);
	 *
	 * carefully ordering the multiply before the divide, to avoid
	 * round-off error.
	 */
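	/*
	 * Editor's worked example (illustrative numbers only): with an
	 * 8192-byte slab holding 51 chunks of 160 bytes, the per-slab
	 * overhead is 8192 - (160 * 51) = 32 bytes.  If 1000 malloc()s
	 * are outstanding in the cache, the additional overhead charged
	 * is (32 * 1000) / 51 = 627 bytes; dividing first would yield
	 * 32 / 51 = 0 and discard the entire contribution.
	 */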
	if (mi.um_malloc != 0) {
		umem_slab_t slab;
		uintptr_t saddr = (uintptr_t)c.cache_nullslab.slab_next;

		if (mdb_vread(&slab, sizeof (slab), saddr) == -1) {
			mdb_warn("unable to read slab at %p\n", saddr);
		} else {
			long chunks = slab.slab_chunks;

			if (chunks != 0 && c.cache_chunksize != 0 &&
			    chunks <= c.cache_slabsize / c.cache_chunksize) {
				uintmax_t perslab =
				    c.cache_slabsize -
				    (c.cache_chunksize * chunks);

				if (c.cache_flags & UMF_HASH) {
					perslab += sizeof (umem_slab_t) +
					    chunks *
					    ((c.cache_flags & UMF_AUDIT) ?
					    sizeof (umem_bufctl_audit_t) :
					    sizeof (umem_bufctl_t));
				}
				overhead +=
				    (perslab * (uintmax_t)mi.um_malloc) /
				    chunks;
			} else {
				mdb_warn("invalid #chunks (%d) in slab %p\n",
				    chunks, saddr);
			}
		}
	}

	if (allocated != 0)
		overhead_pct = (1000ULL * overhead) / allocated;
	else
		overhead_pct = 0;

	mdb_printf("%0?p %6ld %6ld %8ld %8ld %10ld %10ld %3ld.%01ld%%\n",
	    addr, c.cache_bufsize, maxmalloc,
	    mi.um_malloc, avg_malloc, allocated, overhead,
	    overhead_pct / 10, overhead_pct % 10);

	if (!verbose)
		return (DCMD_OK);

	if (!dump)
		mdb_printf("\n");

	if (get_umem_alloc_sizes(&alloc_sizes, &num) == -1)
		return (DCMD_ERR);

	/*
	 * Find this cache's size in umem_alloc_sizes; the previous entry
	 * bounds the smallest malloc(3C) this cache can contain.
	 */
	for (idx = 0; idx < num; idx++) {
		if (alloc_sizes[idx] == c.cache_bufsize)
			break;
		if (alloc_sizes[idx] == 0) {
			idx = num;	/* 0-terminated array */
			break;
		}
	}
	if (idx == num) {
		mdb_warn("cache %p's size (%d) not in umem_alloc_sizes\n",
		    addr, c.cache_bufsize);
		return (DCMD_ERR);
	}

	minmalloc = (idx == 0) ? 0 : alloc_sizes[idx - 1];
	if (minmalloc > 0) {
#ifdef _LP64
		if (minmalloc > UMEM_SECOND_ALIGN)
			minmalloc -= sizeof (struct malloc_data);
#endif
		minmalloc -= sizeof (struct malloc_data);
		minmalloc += 1;
	}

	if (dump) {
		for (idx = minmalloc; idx <= maxmalloc; idx++)
			mdb_printf("%d\t%d\n", idx, mi.um_bucket[idx]);
		mdb_printf("\n");
	} else {
		umem_malloc_print_dist(mi.um_bucket, minmalloc, maxmalloc,
		    maxbuckets, minbucketsize, geometric);
	}

	return (DCMD_OK);
}
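
/*
 * Example session (editor's illustration; output shapes are approximate,
 * derived from the dcmd logic above):
 *
 *	> ::umem_malloc_info
 *		(tabular summary, one row per umem_alloc_<size> cache)
 *	> addr::umem_malloc_info -g
 *		(verbose: the same row for the cache at addr, followed by
 *		a geometrically-binned distribution of its malloc sizes)
 */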