10Sstevel@tonic-gate /* 20Sstevel@tonic-gate * CDDL HEADER START 30Sstevel@tonic-gate * 40Sstevel@tonic-gate * The contents of this file are subject to the terms of the 51528Sjwadams * Common Development and Distribution License (the "License"). 61528Sjwadams * You may not use this file except in compliance with the License. 70Sstevel@tonic-gate * 80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 100Sstevel@tonic-gate * See the License for the specific language governing permissions 110Sstevel@tonic-gate * and limitations under the License. 120Sstevel@tonic-gate * 130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 180Sstevel@tonic-gate * 190Sstevel@tonic-gate * CDDL HEADER END 200Sstevel@tonic-gate */ 210Sstevel@tonic-gate /* 22*6712Stomee * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 230Sstevel@tonic-gate * Use is subject to license terms. 
240Sstevel@tonic-gate */ 250Sstevel@tonic-gate 260Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 270Sstevel@tonic-gate 280Sstevel@tonic-gate #include <mdb/mdb_param.h> 290Sstevel@tonic-gate #include <mdb/mdb_modapi.h> 300Sstevel@tonic-gate #include <mdb/mdb_ctf.h> 310Sstevel@tonic-gate #include <sys/cpuvar.h> 320Sstevel@tonic-gate #include <sys/kmem_impl.h> 330Sstevel@tonic-gate #include <sys/vmem_impl.h> 340Sstevel@tonic-gate #include <sys/machelf.h> 350Sstevel@tonic-gate #include <sys/modctl.h> 360Sstevel@tonic-gate #include <sys/kobj.h> 370Sstevel@tonic-gate #include <sys/panic.h> 380Sstevel@tonic-gate #include <sys/stack.h> 390Sstevel@tonic-gate #include <sys/sysmacros.h> 400Sstevel@tonic-gate #include <vm/page.h> 410Sstevel@tonic-gate 42*6712Stomee #include "avl.h" 43*6712Stomee #include "combined.h" 444798Stomee #include "dist.h" 450Sstevel@tonic-gate #include "kmem.h" 461528Sjwadams #include "leaky.h" 47*6712Stomee #include "list.h" 480Sstevel@tonic-gate 490Sstevel@tonic-gate #define dprintf(x) if (mdb_debug_level) { \ 500Sstevel@tonic-gate mdb_printf("kmem debug: "); \ 510Sstevel@tonic-gate /*CSTYLED*/\ 520Sstevel@tonic-gate mdb_printf x ;\ 530Sstevel@tonic-gate } 540Sstevel@tonic-gate 550Sstevel@tonic-gate #define KM_ALLOCATED 0x01 560Sstevel@tonic-gate #define KM_FREE 0x02 570Sstevel@tonic-gate #define KM_BUFCTL 0x04 580Sstevel@tonic-gate #define KM_CONSTRUCTED 0x08 /* only constructed free buffers */ 590Sstevel@tonic-gate #define KM_HASH 0x10 600Sstevel@tonic-gate 610Sstevel@tonic-gate static int mdb_debug_level = 0; 620Sstevel@tonic-gate 630Sstevel@tonic-gate /*ARGSUSED*/ 640Sstevel@tonic-gate static int 650Sstevel@tonic-gate kmem_init_walkers(uintptr_t addr, const kmem_cache_t *c, void *ignored) 660Sstevel@tonic-gate { 670Sstevel@tonic-gate mdb_walker_t w; 680Sstevel@tonic-gate char descr[64]; 690Sstevel@tonic-gate 700Sstevel@tonic-gate (void) mdb_snprintf(descr, sizeof (descr), 710Sstevel@tonic-gate "walk the %s cache", c->cache_name); 
720Sstevel@tonic-gate 730Sstevel@tonic-gate w.walk_name = c->cache_name; 740Sstevel@tonic-gate w.walk_descr = descr; 750Sstevel@tonic-gate w.walk_init = kmem_walk_init; 760Sstevel@tonic-gate w.walk_step = kmem_walk_step; 770Sstevel@tonic-gate w.walk_fini = kmem_walk_fini; 780Sstevel@tonic-gate w.walk_init_arg = (void *)addr; 790Sstevel@tonic-gate 800Sstevel@tonic-gate if (mdb_add_walker(&w) == -1) 810Sstevel@tonic-gate mdb_warn("failed to add %s walker", c->cache_name); 820Sstevel@tonic-gate 830Sstevel@tonic-gate return (WALK_NEXT); 840Sstevel@tonic-gate } 850Sstevel@tonic-gate 860Sstevel@tonic-gate /*ARGSUSED*/ 870Sstevel@tonic-gate int 880Sstevel@tonic-gate kmem_debug(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 890Sstevel@tonic-gate { 900Sstevel@tonic-gate mdb_debug_level ^= 1; 910Sstevel@tonic-gate 920Sstevel@tonic-gate mdb_printf("kmem: debugging is now %s\n", 930Sstevel@tonic-gate mdb_debug_level ? "on" : "off"); 940Sstevel@tonic-gate 950Sstevel@tonic-gate return (DCMD_OK); 960Sstevel@tonic-gate } 970Sstevel@tonic-gate 980Sstevel@tonic-gate int 990Sstevel@tonic-gate kmem_cache_walk_init(mdb_walk_state_t *wsp) 1000Sstevel@tonic-gate { 1010Sstevel@tonic-gate GElf_Sym sym; 1020Sstevel@tonic-gate 103*6712Stomee if (mdb_lookup_by_name("kmem_caches", &sym) == -1) { 104*6712Stomee mdb_warn("couldn't find kmem_caches"); 1050Sstevel@tonic-gate return (WALK_ERR); 1060Sstevel@tonic-gate } 1070Sstevel@tonic-gate 108*6712Stomee wsp->walk_addr = (uintptr_t)sym.st_value; 109*6712Stomee 110*6712Stomee return (list_walk_init_named(wsp, "cache list", "cache")); 1110Sstevel@tonic-gate } 1120Sstevel@tonic-gate 1130Sstevel@tonic-gate int 1140Sstevel@tonic-gate kmem_cpu_cache_walk_init(mdb_walk_state_t *wsp) 1150Sstevel@tonic-gate { 1160Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 1170Sstevel@tonic-gate mdb_warn("kmem_cpu_cache doesn't support global walks"); 1180Sstevel@tonic-gate return (WALK_ERR); 1190Sstevel@tonic-gate } 1200Sstevel@tonic-gate 
1210Sstevel@tonic-gate if (mdb_layered_walk("cpu", wsp) == -1) { 1220Sstevel@tonic-gate mdb_warn("couldn't walk 'cpu'"); 1230Sstevel@tonic-gate return (WALK_ERR); 1240Sstevel@tonic-gate } 1250Sstevel@tonic-gate 1260Sstevel@tonic-gate wsp->walk_data = (void *)wsp->walk_addr; 1270Sstevel@tonic-gate 1280Sstevel@tonic-gate return (WALK_NEXT); 1290Sstevel@tonic-gate } 1300Sstevel@tonic-gate 1310Sstevel@tonic-gate int 1320Sstevel@tonic-gate kmem_cpu_cache_walk_step(mdb_walk_state_t *wsp) 1330Sstevel@tonic-gate { 1340Sstevel@tonic-gate uintptr_t caddr = (uintptr_t)wsp->walk_data; 1350Sstevel@tonic-gate const cpu_t *cpu = wsp->walk_layer; 1360Sstevel@tonic-gate kmem_cpu_cache_t cc; 1370Sstevel@tonic-gate 1380Sstevel@tonic-gate caddr += cpu->cpu_cache_offset; 1390Sstevel@tonic-gate 1400Sstevel@tonic-gate if (mdb_vread(&cc, sizeof (kmem_cpu_cache_t), caddr) == -1) { 1410Sstevel@tonic-gate mdb_warn("couldn't read kmem_cpu_cache at %p", caddr); 1420Sstevel@tonic-gate return (WALK_ERR); 1430Sstevel@tonic-gate } 1440Sstevel@tonic-gate 1450Sstevel@tonic-gate return (wsp->walk_callback(caddr, &cc, wsp->walk_cbdata)); 1460Sstevel@tonic-gate } 1470Sstevel@tonic-gate 148*6712Stomee static int 149*6712Stomee kmem_slab_check(void *p, uintptr_t saddr, void *arg) 150*6712Stomee { 151*6712Stomee kmem_slab_t *sp = p; 152*6712Stomee uintptr_t caddr = (uintptr_t)arg; 153*6712Stomee if ((uintptr_t)sp->slab_cache != caddr) { 154*6712Stomee mdb_warn("slab %p isn't in cache %p (in cache %p)\n", 155*6712Stomee saddr, caddr, sp->slab_cache); 156*6712Stomee return (-1); 157*6712Stomee } 158*6712Stomee 159*6712Stomee return (0); 160*6712Stomee } 161*6712Stomee 162*6712Stomee static int 163*6712Stomee kmem_partial_slab_check(void *p, uintptr_t saddr, void *arg) 164*6712Stomee { 165*6712Stomee kmem_slab_t *sp = p; 166*6712Stomee 167*6712Stomee int rc = kmem_slab_check(p, saddr, arg); 168*6712Stomee if (rc != 0) { 169*6712Stomee return (rc); 170*6712Stomee } 171*6712Stomee 172*6712Stomee if 
(!KMEM_SLAB_IS_PARTIAL(sp)) { 173*6712Stomee mdb_warn("slab %p is not a partial slab\n", saddr); 174*6712Stomee return (-1); 175*6712Stomee } 176*6712Stomee 177*6712Stomee return (0); 178*6712Stomee } 179*6712Stomee 180*6712Stomee static int 181*6712Stomee kmem_complete_slab_check(void *p, uintptr_t saddr, void *arg) 182*6712Stomee { 183*6712Stomee kmem_slab_t *sp = p; 184*6712Stomee 185*6712Stomee int rc = kmem_slab_check(p, saddr, arg); 186*6712Stomee if (rc != 0) { 187*6712Stomee return (rc); 188*6712Stomee } 189*6712Stomee 190*6712Stomee if (!KMEM_SLAB_IS_ALL_USED(sp)) { 191*6712Stomee mdb_warn("slab %p is not completely allocated\n", saddr); 192*6712Stomee return (-1); 193*6712Stomee } 194*6712Stomee 195*6712Stomee return (0); 196*6712Stomee } 197*6712Stomee 198*6712Stomee typedef struct { 199*6712Stomee uintptr_t kns_cache_addr; 200*6712Stomee int kns_nslabs; 201*6712Stomee } kmem_nth_slab_t; 202*6712Stomee 203*6712Stomee static int 204*6712Stomee kmem_nth_slab_check(void *p, uintptr_t saddr, void *arg) 205*6712Stomee { 206*6712Stomee kmem_nth_slab_t *chkp = arg; 207*6712Stomee 208*6712Stomee int rc = kmem_slab_check(p, saddr, (void *)chkp->kns_cache_addr); 209*6712Stomee if (rc != 0) { 210*6712Stomee return (rc); 211*6712Stomee } 212*6712Stomee 213*6712Stomee return (chkp->kns_nslabs-- == 0 ? 
1 : 0); 214*6712Stomee } 215*6712Stomee 216*6712Stomee static int 217*6712Stomee kmem_complete_slab_walk_init(mdb_walk_state_t *wsp) 218*6712Stomee { 219*6712Stomee uintptr_t caddr = wsp->walk_addr; 220*6712Stomee 221*6712Stomee wsp->walk_addr = (uintptr_t)(caddr + 222*6712Stomee offsetof(kmem_cache_t, cache_complete_slabs)); 223*6712Stomee 224*6712Stomee return (list_walk_init_checked(wsp, "slab list", "slab", 225*6712Stomee kmem_complete_slab_check, (void *)caddr)); 226*6712Stomee } 227*6712Stomee 228*6712Stomee static int 229*6712Stomee kmem_partial_slab_walk_init(mdb_walk_state_t *wsp) 230*6712Stomee { 231*6712Stomee uintptr_t caddr = wsp->walk_addr; 232*6712Stomee 233*6712Stomee wsp->walk_addr = (uintptr_t)(caddr + 234*6712Stomee offsetof(kmem_cache_t, cache_partial_slabs)); 235*6712Stomee 236*6712Stomee return (avl_walk_init_checked(wsp, "slab list", "slab", 237*6712Stomee kmem_partial_slab_check, (void *)caddr)); 238*6712Stomee } 239*6712Stomee 2400Sstevel@tonic-gate int 2410Sstevel@tonic-gate kmem_slab_walk_init(mdb_walk_state_t *wsp) 2420Sstevel@tonic-gate { 2430Sstevel@tonic-gate uintptr_t caddr = wsp->walk_addr; 2440Sstevel@tonic-gate 2450Sstevel@tonic-gate if (caddr == NULL) { 2460Sstevel@tonic-gate mdb_warn("kmem_slab doesn't support global walks\n"); 2470Sstevel@tonic-gate return (WALK_ERR); 2480Sstevel@tonic-gate } 2490Sstevel@tonic-gate 250*6712Stomee combined_walk_init(wsp); 251*6712Stomee combined_walk_add(wsp, 252*6712Stomee kmem_complete_slab_walk_init, list_walk_step, list_walk_fini); 253*6712Stomee combined_walk_add(wsp, 254*6712Stomee kmem_partial_slab_walk_init, avl_walk_step, avl_walk_fini); 2550Sstevel@tonic-gate 2560Sstevel@tonic-gate return (WALK_NEXT); 2570Sstevel@tonic-gate } 2580Sstevel@tonic-gate 259*6712Stomee static int 260*6712Stomee kmem_first_complete_slab_walk_init(mdb_walk_state_t *wsp) 261*6712Stomee { 262*6712Stomee uintptr_t caddr = wsp->walk_addr; 263*6712Stomee kmem_nth_slab_t *chk; 264*6712Stomee 265*6712Stomee chk = 
mdb_alloc(sizeof (kmem_nth_slab_t), 266*6712Stomee UM_SLEEP | UM_GC); 267*6712Stomee chk->kns_cache_addr = caddr; 268*6712Stomee chk->kns_nslabs = 1; 269*6712Stomee wsp->walk_addr = (uintptr_t)(caddr + 270*6712Stomee offsetof(kmem_cache_t, cache_complete_slabs)); 271*6712Stomee 272*6712Stomee return (list_walk_init_checked(wsp, "slab list", "slab", 273*6712Stomee kmem_nth_slab_check, chk)); 274*6712Stomee } 275*6712Stomee 2760Sstevel@tonic-gate int 2770Sstevel@tonic-gate kmem_slab_walk_partial_init(mdb_walk_state_t *wsp) 2780Sstevel@tonic-gate { 2790Sstevel@tonic-gate uintptr_t caddr = wsp->walk_addr; 2800Sstevel@tonic-gate kmem_cache_t c; 2810Sstevel@tonic-gate 2820Sstevel@tonic-gate if (caddr == NULL) { 2830Sstevel@tonic-gate mdb_warn("kmem_slab_partial doesn't support global walks\n"); 2840Sstevel@tonic-gate return (WALK_ERR); 2850Sstevel@tonic-gate } 2860Sstevel@tonic-gate 2870Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), caddr) == -1) { 2880Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache at %p", caddr); 2890Sstevel@tonic-gate return (WALK_ERR); 2900Sstevel@tonic-gate } 2910Sstevel@tonic-gate 292*6712Stomee combined_walk_init(wsp); 2930Sstevel@tonic-gate 2940Sstevel@tonic-gate /* 2950Sstevel@tonic-gate * Some consumers (umem_walk_step(), in particular) require at 2960Sstevel@tonic-gate * least one callback if there are any buffers in the cache. So 297*6712Stomee * if there are *no* partial slabs, report the first full slab, if 2980Sstevel@tonic-gate * any. 2990Sstevel@tonic-gate * 3000Sstevel@tonic-gate * Yes, this is ugly, but it's cleaner than the other possibilities. 
3010Sstevel@tonic-gate */ 302*6712Stomee if (c.cache_partial_slabs.avl_numnodes == 0) { 303*6712Stomee combined_walk_add(wsp, kmem_first_complete_slab_walk_init, 304*6712Stomee list_walk_step, list_walk_fini); 305*6712Stomee } else { 306*6712Stomee combined_walk_add(wsp, kmem_partial_slab_walk_init, 307*6712Stomee avl_walk_step, avl_walk_fini); 308*6712Stomee } 3090Sstevel@tonic-gate 3100Sstevel@tonic-gate return (WALK_NEXT); 3110Sstevel@tonic-gate } 3120Sstevel@tonic-gate 3130Sstevel@tonic-gate int 3140Sstevel@tonic-gate kmem_cache(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv) 3150Sstevel@tonic-gate { 3160Sstevel@tonic-gate kmem_cache_t c; 317*6712Stomee const char *filter = NULL; 318*6712Stomee 319*6712Stomee if (mdb_getopts(ac, argv, 320*6712Stomee 'n', MDB_OPT_STR, &filter, 321*6712Stomee NULL) != ac) { 322*6712Stomee return (DCMD_USAGE); 323*6712Stomee } 3240Sstevel@tonic-gate 3250Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) { 3260Sstevel@tonic-gate if (mdb_walk_dcmd("kmem_cache", "kmem_cache", ac, argv) == -1) { 3270Sstevel@tonic-gate mdb_warn("can't walk kmem_cache"); 3280Sstevel@tonic-gate return (DCMD_ERR); 3290Sstevel@tonic-gate } 3300Sstevel@tonic-gate return (DCMD_OK); 3310Sstevel@tonic-gate } 3320Sstevel@tonic-gate 3330Sstevel@tonic-gate if (DCMD_HDRSPEC(flags)) 3340Sstevel@tonic-gate mdb_printf("%-?s %-25s %4s %6s %8s %8s\n", "ADDR", "NAME", 3350Sstevel@tonic-gate "FLAG", "CFLAG", "BUFSIZE", "BUFTOTL"); 3360Sstevel@tonic-gate 3370Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), addr) == -1) { 3380Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache at %p", addr); 3390Sstevel@tonic-gate return (DCMD_ERR); 3400Sstevel@tonic-gate } 3410Sstevel@tonic-gate 342*6712Stomee if ((filter != NULL) && (strstr(c.cache_name, filter) == NULL)) 343*6712Stomee return (DCMD_OK); 344*6712Stomee 3450Sstevel@tonic-gate mdb_printf("%0?p %-25s %04x %06x %8ld %8lld\n", addr, c.cache_name, 3460Sstevel@tonic-gate c.cache_flags, c.cache_cflags, 
c.cache_bufsize, c.cache_buftotal); 3470Sstevel@tonic-gate 3480Sstevel@tonic-gate return (DCMD_OK); 3490Sstevel@tonic-gate } 3500Sstevel@tonic-gate 351*6712Stomee void 352*6712Stomee kmem_cache_help(void) 353*6712Stomee { 354*6712Stomee mdb_printf("%s", "Print kernel memory caches.\n\n"); 355*6712Stomee mdb_dec_indent(2); 356*6712Stomee mdb_printf("%<b>OPTIONS%</b>\n"); 357*6712Stomee mdb_inc_indent(2); 358*6712Stomee mdb_printf("%s", 359*6712Stomee " -n name\n" 360*6712Stomee " name of kmem cache (or matching partial name)\n" 361*6712Stomee "\n" 362*6712Stomee "Column\tDescription\n" 363*6712Stomee "\n" 364*6712Stomee "ADDR\t\taddress of kmem cache\n" 365*6712Stomee "NAME\t\tname of kmem cache\n" 366*6712Stomee "FLAG\t\tvarious cache state flags\n" 367*6712Stomee "CFLAG\t\tcache creation flags\n" 368*6712Stomee "BUFSIZE\tobject size in bytes\n" 369*6712Stomee "BUFTOTL\tcurrent total buffers in cache (allocated and free)\n"); 370*6712Stomee } 3714688Stomee 3724688Stomee #define LABEL_WIDTH 11 3734688Stomee static void 3744688Stomee kmem_slabs_print_dist(uint_t *ks_bucket, size_t buffers_per_slab, 3754688Stomee size_t maxbuckets, size_t minbucketsize) 3764688Stomee { 3774688Stomee uint64_t total; 3784688Stomee int buckets; 3794688Stomee int i; 3804688Stomee const int *distarray; 3814688Stomee int complete[2]; 3824688Stomee 3834688Stomee buckets = buffers_per_slab; 3844688Stomee 3854688Stomee total = 0; 3864688Stomee for (i = 0; i <= buffers_per_slab; i++) 3874688Stomee total += ks_bucket[i]; 3884688Stomee 3894688Stomee if (maxbuckets > 1) 3904688Stomee buckets = MIN(buckets, maxbuckets); 3914688Stomee 3924688Stomee if (minbucketsize > 1) { 3934688Stomee /* 3944688Stomee * minbucketsize does not apply to the first bucket reserved 3954688Stomee * for completely allocated slabs 3964688Stomee */ 3974688Stomee buckets = MIN(buckets, 1 + ((buffers_per_slab - 1) / 3984688Stomee minbucketsize)); 3994688Stomee if ((buckets < 2) && (buffers_per_slab > 1)) { 4004688Stomee 
buckets = 2; 4014688Stomee minbucketsize = (buffers_per_slab - 1); 4024688Stomee } 4034688Stomee } 4044688Stomee 4054688Stomee /* 4064688Stomee * The first printed bucket is reserved for completely allocated slabs. 4074688Stomee * Passing (buckets - 1) excludes that bucket from the generated 4084688Stomee * distribution, since we're handling it as a special case. 4094688Stomee */ 4104688Stomee complete[0] = buffers_per_slab; 4114688Stomee complete[1] = buffers_per_slab + 1; 4124798Stomee distarray = dist_linear(buckets - 1, 1, buffers_per_slab - 1); 4134688Stomee 4144688Stomee mdb_printf("%*s\n", LABEL_WIDTH, "Allocated"); 4154798Stomee dist_print_header("Buffers", LABEL_WIDTH, "Slabs"); 4164798Stomee 4174798Stomee dist_print_bucket(complete, 0, ks_bucket, total, LABEL_WIDTH); 4184688Stomee /* 4194688Stomee * Print bucket ranges in descending order after the first bucket for 4204688Stomee * completely allocated slabs, so a person can see immediately whether 4214688Stomee * or not there is fragmentation without having to scan possibly 4224688Stomee * multiple screens of output. Starting at (buckets - 2) excludes the 4234688Stomee * extra terminating bucket. 
4244688Stomee */ 4254688Stomee for (i = buckets - 2; i >= 0; i--) { 4264798Stomee dist_print_bucket(distarray, i, ks_bucket, total, LABEL_WIDTH); 4274688Stomee } 4284688Stomee mdb_printf("\n"); 4294688Stomee } 4304688Stomee #undef LABEL_WIDTH 4314688Stomee 4324688Stomee /*ARGSUSED*/ 4334688Stomee static int 4344688Stomee kmem_first_slab(uintptr_t addr, const kmem_slab_t *sp, boolean_t *is_slab) 4354688Stomee { 4364688Stomee *is_slab = B_TRUE; 4374688Stomee return (WALK_DONE); 4384688Stomee } 4394688Stomee 4404688Stomee /*ARGSUSED*/ 4414688Stomee static int 4424688Stomee kmem_first_partial_slab(uintptr_t addr, const kmem_slab_t *sp, 4434688Stomee boolean_t *is_slab) 4444688Stomee { 4454688Stomee /* 446*6712Stomee * The "kmem_partial_slab" walker reports the first full slab if there 4474688Stomee * are no partial slabs (for the sake of consumers that require at least 4484688Stomee * one callback if there are any buffers in the cache). 4494688Stomee */ 450*6712Stomee *is_slab = KMEM_SLAB_IS_PARTIAL(sp); 4514688Stomee return (WALK_DONE); 4524688Stomee } 4534688Stomee 454*6712Stomee typedef struct kmem_slab_usage { 455*6712Stomee int ksu_refcnt; /* count of allocated buffers on slab */ 456*6712Stomee boolean_t ksu_nomove; /* slab marked non-reclaimable */ 457*6712Stomee } kmem_slab_usage_t; 458*6712Stomee 459*6712Stomee typedef struct kmem_slab_stats { 460*6712Stomee const kmem_cache_t *ks_cp; 461*6712Stomee int ks_slabs; /* slabs in cache */ 462*6712Stomee int ks_partial_slabs; /* partially allocated slabs in cache */ 463*6712Stomee uint64_t ks_unused_buffers; /* total unused buffers in cache */ 464*6712Stomee int ks_max_buffers_per_slab; /* max buffers per slab */ 465*6712Stomee int ks_usage_len; /* ks_usage array length */ 466*6712Stomee kmem_slab_usage_t *ks_usage; /* partial slab usage */ 467*6712Stomee uint_t *ks_bucket; /* slab usage distribution */ 468*6712Stomee } kmem_slab_stats_t; 469*6712Stomee 4704688Stomee /*ARGSUSED*/ 4714688Stomee static int 
4724688Stomee kmem_slablist_stat(uintptr_t addr, const kmem_slab_t *sp, 4734688Stomee kmem_slab_stats_t *ks) 4744688Stomee { 4754688Stomee kmem_slab_usage_t *ksu; 4764688Stomee long unused; 4774688Stomee 4784688Stomee ks->ks_slabs++; 4794688Stomee ks->ks_bucket[sp->slab_refcnt]++; 4804688Stomee 4814688Stomee unused = (sp->slab_chunks - sp->slab_refcnt); 4824688Stomee if (unused == 0) { 4834688Stomee return (WALK_NEXT); 4844688Stomee } 4854688Stomee 4864688Stomee ks->ks_partial_slabs++; 4874688Stomee ks->ks_unused_buffers += unused; 4884688Stomee 4894688Stomee if (ks->ks_partial_slabs > ks->ks_usage_len) { 4904688Stomee kmem_slab_usage_t *usage; 4914688Stomee int len = ks->ks_usage_len; 4924688Stomee 4934688Stomee len = (len == 0 ? 16 : len * 2); 4944688Stomee usage = mdb_zalloc(len * sizeof (kmem_slab_usage_t), UM_SLEEP); 4954688Stomee if (ks->ks_usage != NULL) { 4964688Stomee bcopy(ks->ks_usage, usage, 4974688Stomee ks->ks_usage_len * sizeof (kmem_slab_usage_t)); 4984688Stomee mdb_free(ks->ks_usage, 4994688Stomee ks->ks_usage_len * sizeof (kmem_slab_usage_t)); 5004688Stomee } 5014688Stomee ks->ks_usage = usage; 5024688Stomee ks->ks_usage_len = len; 5034688Stomee } 5044688Stomee 5054688Stomee ksu = &ks->ks_usage[ks->ks_partial_slabs - 1]; 5064688Stomee ksu->ksu_refcnt = sp->slab_refcnt; 507*6712Stomee ksu->ksu_nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE); 5084688Stomee return (WALK_NEXT); 5094688Stomee } 5104688Stomee 5114688Stomee static void 5124688Stomee kmem_slabs_header() 5134688Stomee { 5144688Stomee mdb_printf("%-25s %8s %8s %9s %9s %6s\n", 5154688Stomee "", "", "Partial", "", "Unused", ""); 5164688Stomee mdb_printf("%-25s %8s %8s %9s %9s %6s\n", 5174688Stomee "Cache Name", "Slabs", "Slabs", "Buffers", "Buffers", "Waste"); 5184688Stomee mdb_printf("%-25s %8s %8s %9s %9s %6s\n", 5194688Stomee "-------------------------", "--------", "--------", "---------", 5204688Stomee "---------", "------"); 5214688Stomee } 5224688Stomee 5234688Stomee int 5244688Stomee 
kmem_slabs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 5254688Stomee { 5264688Stomee kmem_cache_t c; 5274688Stomee kmem_slab_stats_t stats; 5284688Stomee mdb_walk_cb_t cb; 5294688Stomee int pct; 5304688Stomee int tenths_pct; 5314688Stomee size_t maxbuckets = 1; 5324688Stomee size_t minbucketsize = 0; 5334688Stomee const char *filter = NULL; 534*6712Stomee const char *name = NULL; 5354688Stomee uint_t opt_v = FALSE; 536*6712Stomee boolean_t buckets = B_FALSE; 5374688Stomee boolean_t skip = B_FALSE; 5384688Stomee 5394688Stomee if (mdb_getopts(argc, argv, 5404688Stomee 'B', MDB_OPT_UINTPTR, &minbucketsize, 5414688Stomee 'b', MDB_OPT_UINTPTR, &maxbuckets, 5424688Stomee 'n', MDB_OPT_STR, &filter, 543*6712Stomee 'N', MDB_OPT_STR, &name, 5444688Stomee 'v', MDB_OPT_SETBITS, TRUE, &opt_v, 5454688Stomee NULL) != argc) { 5464688Stomee return (DCMD_USAGE); 5474688Stomee } 5484688Stomee 549*6712Stomee if ((maxbuckets != 1) || (minbucketsize != 0)) { 550*6712Stomee buckets = B_TRUE; 5514688Stomee } 5524688Stomee 5534688Stomee if (!(flags & DCMD_ADDRSPEC)) { 5544688Stomee if (mdb_walk_dcmd("kmem_cache", "kmem_slabs", argc, 5554688Stomee argv) == -1) { 5564688Stomee mdb_warn("can't walk kmem_cache"); 5574688Stomee return (DCMD_ERR); 5584688Stomee } 5594688Stomee return (DCMD_OK); 5604688Stomee } 5614688Stomee 5624688Stomee if (mdb_vread(&c, sizeof (c), addr) == -1) { 5634688Stomee mdb_warn("couldn't read kmem_cache at %p", addr); 5644688Stomee return (DCMD_ERR); 5654688Stomee } 5664688Stomee 567*6712Stomee if (name == NULL) { 568*6712Stomee skip = ((filter != NULL) && 569*6712Stomee (strstr(c.cache_name, filter) == NULL)); 570*6712Stomee } else if (filter == NULL) { 571*6712Stomee skip = (strcmp(c.cache_name, name) != 0); 572*6712Stomee } else { 573*6712Stomee /* match either -n or -N */ 574*6712Stomee skip = ((strcmp(c.cache_name, name) != 0) && 575*6712Stomee (strstr(c.cache_name, filter) == NULL)); 5764688Stomee } 5774688Stomee 578*6712Stomee if (!(opt_v || 
buckets) && DCMD_HDRSPEC(flags)) { 5794688Stomee kmem_slabs_header(); 580*6712Stomee } else if ((opt_v || buckets) && !skip) { 5814688Stomee if (DCMD_HDRSPEC(flags)) { 5824688Stomee kmem_slabs_header(); 5834688Stomee } else { 5844688Stomee boolean_t is_slab = B_FALSE; 5854688Stomee const char *walker_name; 5864688Stomee if (opt_v) { 5874688Stomee cb = (mdb_walk_cb_t)kmem_first_partial_slab; 5884688Stomee walker_name = "kmem_slab_partial"; 5894688Stomee } else { 5904688Stomee cb = (mdb_walk_cb_t)kmem_first_slab; 5914688Stomee walker_name = "kmem_slab"; 5924688Stomee } 5934688Stomee (void) mdb_pwalk(walker_name, cb, &is_slab, addr); 5944688Stomee if (is_slab) { 5954688Stomee kmem_slabs_header(); 5964688Stomee } 5974688Stomee } 5984688Stomee } 5994688Stomee 6004688Stomee if (skip) { 6014688Stomee return (DCMD_OK); 6024688Stomee } 6034688Stomee 6044688Stomee bzero(&stats, sizeof (kmem_slab_stats_t)); 605*6712Stomee stats.ks_cp = &c; 606*6712Stomee stats.ks_max_buffers_per_slab = c.cache_maxchunks; 607*6712Stomee /* +1 to include a zero bucket */ 608*6712Stomee stats.ks_bucket = mdb_zalloc((stats.ks_max_buffers_per_slab + 1) * 609*6712Stomee sizeof (*stats.ks_bucket), UM_SLEEP); 6104688Stomee cb = (mdb_walk_cb_t)kmem_slablist_stat; 6114688Stomee (void) mdb_pwalk("kmem_slab", cb, &stats, addr); 6124688Stomee 6134688Stomee if (c.cache_buftotal == 0) { 6144688Stomee pct = 0; 6154688Stomee tenths_pct = 0; 6164688Stomee } else { 6174688Stomee uint64_t n = stats.ks_unused_buffers * 10000; 6184688Stomee pct = (int)(n / c.cache_buftotal); 6194688Stomee tenths_pct = pct - ((pct / 100) * 100); 6204688Stomee tenths_pct = (tenths_pct + 5) / 10; /* round nearest tenth */ 6214688Stomee if (tenths_pct == 10) { 6224688Stomee pct += 100; 6234688Stomee tenths_pct = 0; 6244688Stomee } 6254688Stomee } 6264688Stomee 6274688Stomee pct /= 100; 6284688Stomee mdb_printf("%-25s %8d %8d %9lld %9lld %3d.%1d%%\n", c.cache_name, 6294688Stomee stats.ks_slabs, stats.ks_partial_slabs, c.cache_buftotal, 
6304688Stomee stats.ks_unused_buffers, pct, tenths_pct); 6314688Stomee 6324688Stomee if (maxbuckets == 0) { 633*6712Stomee maxbuckets = stats.ks_max_buffers_per_slab; 6344688Stomee } 6354688Stomee 6364688Stomee if (((maxbuckets > 1) || (minbucketsize > 0)) && 6374688Stomee (stats.ks_slabs > 0)) { 6384688Stomee mdb_printf("\n"); 6394688Stomee kmem_slabs_print_dist(stats.ks_bucket, 640*6712Stomee stats.ks_max_buffers_per_slab, maxbuckets, minbucketsize); 641*6712Stomee } 642*6712Stomee 643*6712Stomee mdb_free(stats.ks_bucket, (stats.ks_max_buffers_per_slab + 1) * 644*6712Stomee sizeof (*stats.ks_bucket)); 645*6712Stomee 646*6712Stomee if (!opt_v) { 647*6712Stomee return (DCMD_OK); 6484688Stomee } 6494688Stomee 6504688Stomee if (opt_v && (stats.ks_partial_slabs > 0)) { 6514688Stomee int i; 6524688Stomee kmem_slab_usage_t *ksu; 6534688Stomee 6544688Stomee mdb_printf(" %d complete, %d partial", 6554688Stomee (stats.ks_slabs - stats.ks_partial_slabs), 6564688Stomee stats.ks_partial_slabs); 6574688Stomee if (stats.ks_partial_slabs > 0) { 658*6712Stomee mdb_printf(" (%d):", stats.ks_max_buffers_per_slab); 6594688Stomee } 6604688Stomee for (i = 0; i < stats.ks_partial_slabs; i++) { 6614688Stomee ksu = &stats.ks_usage[i]; 662*6712Stomee if (ksu->ksu_nomove) { 663*6712Stomee const char *symbol = "*"; 664*6712Stomee mdb_printf(" %d%s", ksu->ksu_refcnt, symbol); 665*6712Stomee } else { 666*6712Stomee mdb_printf(" %d", ksu->ksu_refcnt); 667*6712Stomee } 6684688Stomee } 6694688Stomee mdb_printf("\n\n"); 6704688Stomee } 6714688Stomee 6724688Stomee if (stats.ks_usage_len > 0) { 6734688Stomee mdb_free(stats.ks_usage, 6744688Stomee stats.ks_usage_len * sizeof (kmem_slab_usage_t)); 6754688Stomee } 6764688Stomee 6774688Stomee return (DCMD_OK); 6784688Stomee } 6794688Stomee 6804688Stomee void 6814688Stomee kmem_slabs_help(void) 6824688Stomee { 683*6712Stomee mdb_printf("%s", 684*6712Stomee "Display slab usage per kmem cache.\n\n"); 6854688Stomee mdb_dec_indent(2); 6864688Stomee 
mdb_printf("%<b>OPTIONS%</b>\n"); 6874688Stomee mdb_inc_indent(2); 6884688Stomee mdb_printf("%s", 6894688Stomee " -n name\n" 6904688Stomee " name of kmem cache (or matching partial name)\n" 691*6712Stomee " -N name\n" 692*6712Stomee " exact name of kmem cache\n" 6934688Stomee " -b maxbins\n" 6944688Stomee " Print a distribution of allocated buffers per slab using at\n" 6954688Stomee " most maxbins bins. The first bin is reserved for completely\n" 6964688Stomee " allocated slabs. Setting maxbins to zero (-b 0) has the same\n" 6974688Stomee " effect as specifying the maximum allocated buffers per slab\n" 6984688Stomee " or setting minbinsize to 1 (-B 1).\n" 6994688Stomee " -B minbinsize\n" 7004688Stomee " Print a distribution of allocated buffers per slab, making\n" 7014688Stomee " all bins (except the first, reserved for completely allocated\n" 7024688Stomee " slabs) at least minbinsize buffers apart.\n" 7034688Stomee " -v verbose output: List the allocated buffer count of each partial\n" 7044688Stomee " slab on the free list in order from front to back to show how\n" 7054688Stomee " closely the slabs are ordered by usage. For example\n" 7064688Stomee "\n" 7074688Stomee " 10 complete, 3 partial (8): 7 3 1\n" 7084688Stomee "\n" 7094688Stomee " means there are thirteen slabs with eight buffers each, including\n" 7104688Stomee " three partially allocated slabs with less than all eight buffers\n" 7114688Stomee " allocated.\n" 7124688Stomee "\n" 7134688Stomee " Buffer allocations are always from the front of the partial slab\n" 7144688Stomee " list. When a buffer is freed from a completely used slab, that\n" 7154688Stomee " slab is added to the front of the partial slab list. 
Assuming\n" 7164688Stomee " that all buffers are equally likely to be freed soon, the\n" 7174688Stomee " desired order of partial slabs is most-used at the front of the\n" 7184688Stomee " list and least-used at the back (as in the example above).\n" 7194688Stomee " However, if a slab contains an allocated buffer that will not\n" 7204688Stomee " soon be freed, it would be better for that slab to be at the\n" 721*6712Stomee " front where all of its buffers can be allocated. Taking a slab\n" 722*6712Stomee " off the partial slab list (either with all buffers freed or all\n" 723*6712Stomee " buffers allocated) reduces cache fragmentation.\n" 724*6712Stomee "\n" 725*6712Stomee " A slab's allocated buffer count representing a partial slab (9 in\n" 726*6712Stomee " the example below) may be marked as follows:\n" 727*6712Stomee "\n" 728*6712Stomee " 9* An asterisk indicates that kmem has marked the slab non-\n" 729*6712Stomee " reclaimable because the kmem client refused to move one of the\n" 730*6712Stomee " slab's buffers. Since kmem does not expect to completely free the\n" 731*6712Stomee " slab, it moves it to the front of the list in the hope of\n" 732*6712Stomee " completely allocating it instead. 
A slab marked with an asterisk\n" 733*6712Stomee " stays marked for as long as it remains on the partial slab list.\n" 7344688Stomee "\n" 7354688Stomee "Column\t\tDescription\n" 7364688Stomee "\n" 7374688Stomee "Cache Name\t\tname of kmem cache\n" 7384688Stomee "Slabs\t\t\ttotal slab count\n" 7394688Stomee "Partial Slabs\t\tcount of partially allocated slabs on the free list\n" 7404688Stomee "Buffers\t\ttotal buffer count (Slabs * (buffers per slab))\n" 7414688Stomee "Unused Buffers\tcount of unallocated buffers across all partial slabs\n" 7424688Stomee "Waste\t\t\t(Unused Buffers / Buffers) does not include space\n" 7434688Stomee "\t\t\t for accounting structures (debug mode), slab\n" 7444688Stomee "\t\t\t coloring (incremental small offsets to stagger\n" 7454688Stomee "\t\t\t buffer alignment), or the per-CPU magazine layer\n"); 7464688Stomee } 7474688Stomee 7480Sstevel@tonic-gate static int 7490Sstevel@tonic-gate addrcmp(const void *lhs, const void *rhs) 7500Sstevel@tonic-gate { 7510Sstevel@tonic-gate uintptr_t p1 = *((uintptr_t *)lhs); 7520Sstevel@tonic-gate uintptr_t p2 = *((uintptr_t *)rhs); 7530Sstevel@tonic-gate 7540Sstevel@tonic-gate if (p1 < p2) 7550Sstevel@tonic-gate return (-1); 7560Sstevel@tonic-gate if (p1 > p2) 7570Sstevel@tonic-gate return (1); 7580Sstevel@tonic-gate return (0); 7590Sstevel@tonic-gate } 7600Sstevel@tonic-gate 7610Sstevel@tonic-gate static int 7620Sstevel@tonic-gate bufctlcmp(const kmem_bufctl_audit_t **lhs, const kmem_bufctl_audit_t **rhs) 7630Sstevel@tonic-gate { 7640Sstevel@tonic-gate const kmem_bufctl_audit_t *bcp1 = *lhs; 7650Sstevel@tonic-gate const kmem_bufctl_audit_t *bcp2 = *rhs; 7660Sstevel@tonic-gate 7670Sstevel@tonic-gate if (bcp1->bc_timestamp > bcp2->bc_timestamp) 7680Sstevel@tonic-gate return (-1); 7690Sstevel@tonic-gate 7700Sstevel@tonic-gate if (bcp1->bc_timestamp < bcp2->bc_timestamp) 7710Sstevel@tonic-gate return (1); 7720Sstevel@tonic-gate 7730Sstevel@tonic-gate return (0); 7740Sstevel@tonic-gate } 
7750Sstevel@tonic-gate 7760Sstevel@tonic-gate typedef struct kmem_hash_walk { 7770Sstevel@tonic-gate uintptr_t *kmhw_table; 7780Sstevel@tonic-gate size_t kmhw_nelems; 7790Sstevel@tonic-gate size_t kmhw_pos; 7800Sstevel@tonic-gate kmem_bufctl_t kmhw_cur; 7810Sstevel@tonic-gate } kmem_hash_walk_t; 7820Sstevel@tonic-gate 7830Sstevel@tonic-gate int 7840Sstevel@tonic-gate kmem_hash_walk_init(mdb_walk_state_t *wsp) 7850Sstevel@tonic-gate { 7860Sstevel@tonic-gate kmem_hash_walk_t *kmhw; 7870Sstevel@tonic-gate uintptr_t *hash; 7880Sstevel@tonic-gate kmem_cache_t c; 7890Sstevel@tonic-gate uintptr_t haddr, addr = wsp->walk_addr; 7900Sstevel@tonic-gate size_t nelems; 7910Sstevel@tonic-gate size_t hsize; 7920Sstevel@tonic-gate 7930Sstevel@tonic-gate if (addr == NULL) { 7940Sstevel@tonic-gate mdb_warn("kmem_hash doesn't support global walks\n"); 7950Sstevel@tonic-gate return (WALK_ERR); 7960Sstevel@tonic-gate } 7970Sstevel@tonic-gate 7980Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), addr) == -1) { 7990Sstevel@tonic-gate mdb_warn("couldn't read cache at addr %p", addr); 8000Sstevel@tonic-gate return (WALK_ERR); 8010Sstevel@tonic-gate } 8020Sstevel@tonic-gate 8030Sstevel@tonic-gate if (!(c.cache_flags & KMF_HASH)) { 8040Sstevel@tonic-gate mdb_warn("cache %p doesn't have a hash table\n", addr); 8050Sstevel@tonic-gate return (WALK_DONE); /* nothing to do */ 8060Sstevel@tonic-gate } 8070Sstevel@tonic-gate 8080Sstevel@tonic-gate kmhw = mdb_zalloc(sizeof (kmem_hash_walk_t), UM_SLEEP); 8090Sstevel@tonic-gate kmhw->kmhw_cur.bc_next = NULL; 8100Sstevel@tonic-gate kmhw->kmhw_pos = 0; 8110Sstevel@tonic-gate 8120Sstevel@tonic-gate kmhw->kmhw_nelems = nelems = c.cache_hash_mask + 1; 8130Sstevel@tonic-gate hsize = nelems * sizeof (uintptr_t); 8140Sstevel@tonic-gate haddr = (uintptr_t)c.cache_hash_table; 8150Sstevel@tonic-gate 8160Sstevel@tonic-gate kmhw->kmhw_table = hash = mdb_alloc(hsize, UM_SLEEP); 8170Sstevel@tonic-gate if (mdb_vread(hash, hsize, haddr) == -1) { 8180Sstevel@tonic-gate 
mdb_warn("failed to read hash table at %p", haddr); 8190Sstevel@tonic-gate mdb_free(hash, hsize); 8200Sstevel@tonic-gate mdb_free(kmhw, sizeof (kmem_hash_walk_t)); 8210Sstevel@tonic-gate return (WALK_ERR); 8220Sstevel@tonic-gate } 8230Sstevel@tonic-gate 8240Sstevel@tonic-gate wsp->walk_data = kmhw; 8250Sstevel@tonic-gate 8260Sstevel@tonic-gate return (WALK_NEXT); 8270Sstevel@tonic-gate } 8280Sstevel@tonic-gate 8290Sstevel@tonic-gate int 8300Sstevel@tonic-gate kmem_hash_walk_step(mdb_walk_state_t *wsp) 8310Sstevel@tonic-gate { 8320Sstevel@tonic-gate kmem_hash_walk_t *kmhw = wsp->walk_data; 8330Sstevel@tonic-gate uintptr_t addr = NULL; 8340Sstevel@tonic-gate 8350Sstevel@tonic-gate if ((addr = (uintptr_t)kmhw->kmhw_cur.bc_next) == NULL) { 8360Sstevel@tonic-gate while (kmhw->kmhw_pos < kmhw->kmhw_nelems) { 8370Sstevel@tonic-gate if ((addr = kmhw->kmhw_table[kmhw->kmhw_pos++]) != NULL) 8380Sstevel@tonic-gate break; 8390Sstevel@tonic-gate } 8400Sstevel@tonic-gate } 8410Sstevel@tonic-gate if (addr == NULL) 8420Sstevel@tonic-gate return (WALK_DONE); 8430Sstevel@tonic-gate 8440Sstevel@tonic-gate if (mdb_vread(&kmhw->kmhw_cur, sizeof (kmem_bufctl_t), addr) == -1) { 8450Sstevel@tonic-gate mdb_warn("couldn't read kmem_bufctl_t at addr %p", addr); 8460Sstevel@tonic-gate return (WALK_ERR); 8470Sstevel@tonic-gate } 8480Sstevel@tonic-gate 8490Sstevel@tonic-gate return (wsp->walk_callback(addr, &kmhw->kmhw_cur, wsp->walk_cbdata)); 8500Sstevel@tonic-gate } 8510Sstevel@tonic-gate 8520Sstevel@tonic-gate void 8530Sstevel@tonic-gate kmem_hash_walk_fini(mdb_walk_state_t *wsp) 8540Sstevel@tonic-gate { 8550Sstevel@tonic-gate kmem_hash_walk_t *kmhw = wsp->walk_data; 8560Sstevel@tonic-gate 8570Sstevel@tonic-gate if (kmhw == NULL) 8580Sstevel@tonic-gate return; 8590Sstevel@tonic-gate 8600Sstevel@tonic-gate mdb_free(kmhw->kmhw_table, kmhw->kmhw_nelems * sizeof (uintptr_t)); 8610Sstevel@tonic-gate mdb_free(kmhw, sizeof (kmem_hash_walk_t)); 8620Sstevel@tonic-gate } 8630Sstevel@tonic-gate 
8640Sstevel@tonic-gate /* 8650Sstevel@tonic-gate * Find the address of the bufctl structure for the address 'buf' in cache 8660Sstevel@tonic-gate * 'cp', which is at address caddr, and place it in *out. 8670Sstevel@tonic-gate */ 8680Sstevel@tonic-gate static int 8690Sstevel@tonic-gate kmem_hash_lookup(kmem_cache_t *cp, uintptr_t caddr, void *buf, uintptr_t *out) 8700Sstevel@tonic-gate { 8710Sstevel@tonic-gate uintptr_t bucket = (uintptr_t)KMEM_HASH(cp, buf); 8720Sstevel@tonic-gate kmem_bufctl_t *bcp; 8730Sstevel@tonic-gate kmem_bufctl_t bc; 8740Sstevel@tonic-gate 8750Sstevel@tonic-gate if (mdb_vread(&bcp, sizeof (kmem_bufctl_t *), bucket) == -1) { 8760Sstevel@tonic-gate mdb_warn("unable to read hash bucket for %p in cache %p", 8770Sstevel@tonic-gate buf, caddr); 8780Sstevel@tonic-gate return (-1); 8790Sstevel@tonic-gate } 8800Sstevel@tonic-gate 8810Sstevel@tonic-gate while (bcp != NULL) { 8820Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (kmem_bufctl_t), 8830Sstevel@tonic-gate (uintptr_t)bcp) == -1) { 8840Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", bcp); 8850Sstevel@tonic-gate return (-1); 8860Sstevel@tonic-gate } 8870Sstevel@tonic-gate if (bc.bc_addr == buf) { 8880Sstevel@tonic-gate *out = (uintptr_t)bcp; 8890Sstevel@tonic-gate return (0); 8900Sstevel@tonic-gate } 8910Sstevel@tonic-gate bcp = bc.bc_next; 8920Sstevel@tonic-gate } 8930Sstevel@tonic-gate 8940Sstevel@tonic-gate mdb_warn("unable to find bufctl for %p in cache %p\n", buf, caddr); 8950Sstevel@tonic-gate return (-1); 8960Sstevel@tonic-gate } 8970Sstevel@tonic-gate 8980Sstevel@tonic-gate int 8990Sstevel@tonic-gate kmem_get_magsize(const kmem_cache_t *cp) 9000Sstevel@tonic-gate { 9010Sstevel@tonic-gate uintptr_t addr = (uintptr_t)cp->cache_magtype; 9020Sstevel@tonic-gate GElf_Sym mt_sym; 9030Sstevel@tonic-gate kmem_magtype_t mt; 9040Sstevel@tonic-gate int res; 9050Sstevel@tonic-gate 9060Sstevel@tonic-gate /* 9070Sstevel@tonic-gate * if cpu 0 has a non-zero magsize, it must be correct. 
caches 9080Sstevel@tonic-gate * with KMF_NOMAGAZINE have disabled their magazine layers, so 9090Sstevel@tonic-gate * it is okay to return 0 for them. 9100Sstevel@tonic-gate */ 9110Sstevel@tonic-gate if ((res = cp->cache_cpu[0].cc_magsize) != 0 || 9120Sstevel@tonic-gate (cp->cache_flags & KMF_NOMAGAZINE)) 9130Sstevel@tonic-gate return (res); 9140Sstevel@tonic-gate 9150Sstevel@tonic-gate if (mdb_lookup_by_name("kmem_magtype", &mt_sym) == -1) { 9160Sstevel@tonic-gate mdb_warn("unable to read 'kmem_magtype'"); 9170Sstevel@tonic-gate } else if (addr < mt_sym.st_value || 9180Sstevel@tonic-gate addr + sizeof (mt) - 1 > mt_sym.st_value + mt_sym.st_size - 1 || 9190Sstevel@tonic-gate ((addr - mt_sym.st_value) % sizeof (mt)) != 0) { 9200Sstevel@tonic-gate mdb_warn("cache '%s' has invalid magtype pointer (%p)\n", 9210Sstevel@tonic-gate cp->cache_name, addr); 9220Sstevel@tonic-gate return (0); 9230Sstevel@tonic-gate } 9240Sstevel@tonic-gate if (mdb_vread(&mt, sizeof (mt), addr) == -1) { 9250Sstevel@tonic-gate mdb_warn("unable to read magtype at %a", addr); 9260Sstevel@tonic-gate return (0); 9270Sstevel@tonic-gate } 9280Sstevel@tonic-gate return (mt.mt_magsize); 9290Sstevel@tonic-gate } 9300Sstevel@tonic-gate 9310Sstevel@tonic-gate /*ARGSUSED*/ 9320Sstevel@tonic-gate static int 9330Sstevel@tonic-gate kmem_estimate_slab(uintptr_t addr, const kmem_slab_t *sp, size_t *est) 9340Sstevel@tonic-gate { 9350Sstevel@tonic-gate *est -= (sp->slab_chunks - sp->slab_refcnt); 9360Sstevel@tonic-gate 9370Sstevel@tonic-gate return (WALK_NEXT); 9380Sstevel@tonic-gate } 9390Sstevel@tonic-gate 9400Sstevel@tonic-gate /* 9410Sstevel@tonic-gate * Returns an upper bound on the number of allocated buffers in a given 9420Sstevel@tonic-gate * cache. 
9430Sstevel@tonic-gate */ 9440Sstevel@tonic-gate size_t 9450Sstevel@tonic-gate kmem_estimate_allocated(uintptr_t addr, const kmem_cache_t *cp) 9460Sstevel@tonic-gate { 9470Sstevel@tonic-gate int magsize; 9480Sstevel@tonic-gate size_t cache_est; 9490Sstevel@tonic-gate 9500Sstevel@tonic-gate cache_est = cp->cache_buftotal; 9510Sstevel@tonic-gate 9520Sstevel@tonic-gate (void) mdb_pwalk("kmem_slab_partial", 9530Sstevel@tonic-gate (mdb_walk_cb_t)kmem_estimate_slab, &cache_est, addr); 9540Sstevel@tonic-gate 9550Sstevel@tonic-gate if ((magsize = kmem_get_magsize(cp)) != 0) { 9560Sstevel@tonic-gate size_t mag_est = cp->cache_full.ml_total * magsize; 9570Sstevel@tonic-gate 9580Sstevel@tonic-gate if (cache_est >= mag_est) { 9590Sstevel@tonic-gate cache_est -= mag_est; 9600Sstevel@tonic-gate } else { 9610Sstevel@tonic-gate mdb_warn("cache %p's magazine layer holds more buffers " 9620Sstevel@tonic-gate "than the slab layer.\n", addr); 9630Sstevel@tonic-gate } 9640Sstevel@tonic-gate } 9650Sstevel@tonic-gate return (cache_est); 9660Sstevel@tonic-gate } 9670Sstevel@tonic-gate 9680Sstevel@tonic-gate #define READMAG_ROUNDS(rounds) { \ 9690Sstevel@tonic-gate if (mdb_vread(mp, magbsize, (uintptr_t)kmp) == -1) { \ 9700Sstevel@tonic-gate mdb_warn("couldn't read magazine at %p", kmp); \ 9710Sstevel@tonic-gate goto fail; \ 9720Sstevel@tonic-gate } \ 9730Sstevel@tonic-gate for (i = 0; i < rounds; i++) { \ 9740Sstevel@tonic-gate maglist[magcnt++] = mp->mag_round[i]; \ 9750Sstevel@tonic-gate if (magcnt == magmax) { \ 9760Sstevel@tonic-gate mdb_warn("%d magazines exceeds fudge factor\n", \ 9770Sstevel@tonic-gate magcnt); \ 9780Sstevel@tonic-gate goto fail; \ 9790Sstevel@tonic-gate } \ 9800Sstevel@tonic-gate } \ 9810Sstevel@tonic-gate } 9820Sstevel@tonic-gate 9830Sstevel@tonic-gate int 9840Sstevel@tonic-gate kmem_read_magazines(kmem_cache_t *cp, uintptr_t addr, int ncpus, 9850Sstevel@tonic-gate void ***maglistp, size_t *magcntp, size_t *magmaxp, int alloc_flags) 9860Sstevel@tonic-gate { 
9870Sstevel@tonic-gate kmem_magazine_t *kmp, *mp; 9880Sstevel@tonic-gate void **maglist = NULL; 9890Sstevel@tonic-gate int i, cpu; 9900Sstevel@tonic-gate size_t magsize, magmax, magbsize; 9910Sstevel@tonic-gate size_t magcnt = 0; 9920Sstevel@tonic-gate 9930Sstevel@tonic-gate /* 9940Sstevel@tonic-gate * Read the magtype out of the cache, after verifying the pointer's 9950Sstevel@tonic-gate * correctness. 9960Sstevel@tonic-gate */ 9970Sstevel@tonic-gate magsize = kmem_get_magsize(cp); 9981528Sjwadams if (magsize == 0) { 9991528Sjwadams *maglistp = NULL; 10001528Sjwadams *magcntp = 0; 10011528Sjwadams *magmaxp = 0; 10021528Sjwadams return (WALK_NEXT); 10031528Sjwadams } 10040Sstevel@tonic-gate 10050Sstevel@tonic-gate /* 10060Sstevel@tonic-gate * There are several places where we need to go buffer hunting: 10070Sstevel@tonic-gate * the per-CPU loaded magazine, the per-CPU spare full magazine, 10080Sstevel@tonic-gate * and the full magazine list in the depot. 10090Sstevel@tonic-gate * 10100Sstevel@tonic-gate * For an upper bound on the number of buffers in the magazine 10110Sstevel@tonic-gate * layer, we have the number of magazines on the cache_full 10120Sstevel@tonic-gate * list plus at most two magazines per CPU (the loaded and the 10130Sstevel@tonic-gate * spare). Toss in 100 magazines as a fudge factor in case this 10140Sstevel@tonic-gate * is live (the number "100" comes from the same fudge factor in 10150Sstevel@tonic-gate * crash(1M)). 
10160Sstevel@tonic-gate */ 10170Sstevel@tonic-gate magmax = (cp->cache_full.ml_total + 2 * ncpus + 100) * magsize; 10180Sstevel@tonic-gate magbsize = offsetof(kmem_magazine_t, mag_round[magsize]); 10190Sstevel@tonic-gate 10200Sstevel@tonic-gate if (magbsize >= PAGESIZE / 2) { 10210Sstevel@tonic-gate mdb_warn("magazine size for cache %p unreasonable (%x)\n", 10220Sstevel@tonic-gate addr, magbsize); 10231528Sjwadams return (WALK_ERR); 10240Sstevel@tonic-gate } 10250Sstevel@tonic-gate 10260Sstevel@tonic-gate maglist = mdb_alloc(magmax * sizeof (void *), alloc_flags); 10270Sstevel@tonic-gate mp = mdb_alloc(magbsize, alloc_flags); 10280Sstevel@tonic-gate if (mp == NULL || maglist == NULL) 10290Sstevel@tonic-gate goto fail; 10300Sstevel@tonic-gate 10310Sstevel@tonic-gate /* 10320Sstevel@tonic-gate * First up: the magazines in the depot (i.e. on the cache_full list). 10330Sstevel@tonic-gate */ 10340Sstevel@tonic-gate for (kmp = cp->cache_full.ml_list; kmp != NULL; ) { 10350Sstevel@tonic-gate READMAG_ROUNDS(magsize); 10360Sstevel@tonic-gate kmp = mp->mag_next; 10370Sstevel@tonic-gate 10380Sstevel@tonic-gate if (kmp == cp->cache_full.ml_list) 10390Sstevel@tonic-gate break; /* cache_full list loop detected */ 10400Sstevel@tonic-gate } 10410Sstevel@tonic-gate 10420Sstevel@tonic-gate dprintf(("cache_full list done\n")); 10430Sstevel@tonic-gate 10440Sstevel@tonic-gate /* 10450Sstevel@tonic-gate * Now whip through the CPUs, snagging the loaded magazines 10460Sstevel@tonic-gate * and full spares. 
10470Sstevel@tonic-gate */ 10480Sstevel@tonic-gate for (cpu = 0; cpu < ncpus; cpu++) { 10490Sstevel@tonic-gate kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu]; 10500Sstevel@tonic-gate 10510Sstevel@tonic-gate dprintf(("reading cpu cache %p\n", 10520Sstevel@tonic-gate (uintptr_t)ccp - (uintptr_t)cp + addr)); 10530Sstevel@tonic-gate 10540Sstevel@tonic-gate if (ccp->cc_rounds > 0 && 10550Sstevel@tonic-gate (kmp = ccp->cc_loaded) != NULL) { 10560Sstevel@tonic-gate dprintf(("reading %d loaded rounds\n", ccp->cc_rounds)); 10570Sstevel@tonic-gate READMAG_ROUNDS(ccp->cc_rounds); 10580Sstevel@tonic-gate } 10590Sstevel@tonic-gate 10600Sstevel@tonic-gate if (ccp->cc_prounds > 0 && 10610Sstevel@tonic-gate (kmp = ccp->cc_ploaded) != NULL) { 10620Sstevel@tonic-gate dprintf(("reading %d previously loaded rounds\n", 10630Sstevel@tonic-gate ccp->cc_prounds)); 10640Sstevel@tonic-gate READMAG_ROUNDS(ccp->cc_prounds); 10650Sstevel@tonic-gate } 10660Sstevel@tonic-gate } 10670Sstevel@tonic-gate 10680Sstevel@tonic-gate dprintf(("magazine layer: %d buffers\n", magcnt)); 10690Sstevel@tonic-gate 10700Sstevel@tonic-gate if (!(alloc_flags & UM_GC)) 10710Sstevel@tonic-gate mdb_free(mp, magbsize); 10720Sstevel@tonic-gate 10730Sstevel@tonic-gate *maglistp = maglist; 10740Sstevel@tonic-gate *magcntp = magcnt; 10750Sstevel@tonic-gate *magmaxp = magmax; 10760Sstevel@tonic-gate 10770Sstevel@tonic-gate return (WALK_NEXT); 10780Sstevel@tonic-gate 10790Sstevel@tonic-gate fail: 10800Sstevel@tonic-gate if (!(alloc_flags & UM_GC)) { 10810Sstevel@tonic-gate if (mp) 10820Sstevel@tonic-gate mdb_free(mp, magbsize); 10830Sstevel@tonic-gate if (maglist) 10840Sstevel@tonic-gate mdb_free(maglist, magmax * sizeof (void *)); 10850Sstevel@tonic-gate } 10860Sstevel@tonic-gate return (WALK_ERR); 10870Sstevel@tonic-gate } 10880Sstevel@tonic-gate 10890Sstevel@tonic-gate static int 10900Sstevel@tonic-gate kmem_walk_callback(mdb_walk_state_t *wsp, uintptr_t buf) 10910Sstevel@tonic-gate { 10920Sstevel@tonic-gate return 
(wsp->walk_callback(buf, NULL, wsp->walk_cbdata)); 10930Sstevel@tonic-gate } 10940Sstevel@tonic-gate 10950Sstevel@tonic-gate static int 10960Sstevel@tonic-gate bufctl_walk_callback(kmem_cache_t *cp, mdb_walk_state_t *wsp, uintptr_t buf) 10970Sstevel@tonic-gate { 10980Sstevel@tonic-gate kmem_bufctl_audit_t b; 10990Sstevel@tonic-gate 11000Sstevel@tonic-gate /* 11010Sstevel@tonic-gate * if KMF_AUDIT is not set, we know that we're looking at a 11020Sstevel@tonic-gate * kmem_bufctl_t. 11030Sstevel@tonic-gate */ 11040Sstevel@tonic-gate if (!(cp->cache_flags & KMF_AUDIT) || 11050Sstevel@tonic-gate mdb_vread(&b, sizeof (kmem_bufctl_audit_t), buf) == -1) { 11060Sstevel@tonic-gate (void) memset(&b, 0, sizeof (b)); 11070Sstevel@tonic-gate if (mdb_vread(&b, sizeof (kmem_bufctl_t), buf) == -1) { 11080Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", buf); 11090Sstevel@tonic-gate return (WALK_ERR); 11100Sstevel@tonic-gate } 11110Sstevel@tonic-gate } 11120Sstevel@tonic-gate 11130Sstevel@tonic-gate return (wsp->walk_callback(buf, &b, wsp->walk_cbdata)); 11140Sstevel@tonic-gate } 11150Sstevel@tonic-gate 11160Sstevel@tonic-gate typedef struct kmem_walk { 11170Sstevel@tonic-gate int kmw_type; 11180Sstevel@tonic-gate 11190Sstevel@tonic-gate int kmw_addr; /* cache address */ 11200Sstevel@tonic-gate kmem_cache_t *kmw_cp; 11210Sstevel@tonic-gate size_t kmw_csize; 11220Sstevel@tonic-gate 11230Sstevel@tonic-gate /* 11240Sstevel@tonic-gate * magazine layer 11250Sstevel@tonic-gate */ 11260Sstevel@tonic-gate void **kmw_maglist; 11270Sstevel@tonic-gate size_t kmw_max; 11280Sstevel@tonic-gate size_t kmw_count; 11290Sstevel@tonic-gate size_t kmw_pos; 11300Sstevel@tonic-gate 11310Sstevel@tonic-gate /* 11320Sstevel@tonic-gate * slab layer 11330Sstevel@tonic-gate */ 11340Sstevel@tonic-gate char *kmw_valid; /* to keep track of freed buffers */ 11350Sstevel@tonic-gate char *kmw_ubase; /* buffer for slab data */ 11360Sstevel@tonic-gate } kmem_walk_t; 11370Sstevel@tonic-gate 
11380Sstevel@tonic-gate static int 11390Sstevel@tonic-gate kmem_walk_init_common(mdb_walk_state_t *wsp, int type) 11400Sstevel@tonic-gate { 11410Sstevel@tonic-gate kmem_walk_t *kmw; 11420Sstevel@tonic-gate int ncpus, csize; 11430Sstevel@tonic-gate kmem_cache_t *cp; 11441528Sjwadams size_t vm_quantum; 11450Sstevel@tonic-gate 11460Sstevel@tonic-gate size_t magmax, magcnt; 11470Sstevel@tonic-gate void **maglist = NULL; 11480Sstevel@tonic-gate uint_t chunksize, slabsize; 11490Sstevel@tonic-gate int status = WALK_ERR; 11500Sstevel@tonic-gate uintptr_t addr = wsp->walk_addr; 11510Sstevel@tonic-gate const char *layered; 11520Sstevel@tonic-gate 11530Sstevel@tonic-gate type &= ~KM_HASH; 11540Sstevel@tonic-gate 11550Sstevel@tonic-gate if (addr == NULL) { 11560Sstevel@tonic-gate mdb_warn("kmem walk doesn't support global walks\n"); 11570Sstevel@tonic-gate return (WALK_ERR); 11580Sstevel@tonic-gate } 11590Sstevel@tonic-gate 11600Sstevel@tonic-gate dprintf(("walking %p\n", addr)); 11610Sstevel@tonic-gate 11620Sstevel@tonic-gate /* 11630Sstevel@tonic-gate * First we need to figure out how many CPUs are configured in the 11640Sstevel@tonic-gate * system to know how much to slurp out. 11650Sstevel@tonic-gate */ 11660Sstevel@tonic-gate mdb_readvar(&ncpus, "max_ncpus"); 11670Sstevel@tonic-gate 11680Sstevel@tonic-gate csize = KMEM_CACHE_SIZE(ncpus); 11690Sstevel@tonic-gate cp = mdb_alloc(csize, UM_SLEEP); 11700Sstevel@tonic-gate 11710Sstevel@tonic-gate if (mdb_vread(cp, csize, addr) == -1) { 11720Sstevel@tonic-gate mdb_warn("couldn't read cache at addr %p", addr); 11730Sstevel@tonic-gate goto out2; 11740Sstevel@tonic-gate } 11750Sstevel@tonic-gate 11761528Sjwadams /* 11771528Sjwadams * It's easy for someone to hand us an invalid cache address. 11781528Sjwadams * Unfortunately, it is hard for this walker to survive an 11791528Sjwadams * invalid cache cleanly. So we make sure that: 11801528Sjwadams * 11811528Sjwadams * 1. the vmem arena for the cache is readable, 11821528Sjwadams * 2. 
the vmem arena's quantum is a power of 2, 11831528Sjwadams * 3. our slabsize is a multiple of the quantum, and 11841528Sjwadams * 4. our chunksize is >0 and less than our slabsize. 11851528Sjwadams */ 11861528Sjwadams if (mdb_vread(&vm_quantum, sizeof (vm_quantum), 11871528Sjwadams (uintptr_t)&cp->cache_arena->vm_quantum) == -1 || 11881528Sjwadams vm_quantum == 0 || 11891528Sjwadams (vm_quantum & (vm_quantum - 1)) != 0 || 11901528Sjwadams cp->cache_slabsize < vm_quantum || 11911528Sjwadams P2PHASE(cp->cache_slabsize, vm_quantum) != 0 || 11921528Sjwadams cp->cache_chunksize == 0 || 11931528Sjwadams cp->cache_chunksize > cp->cache_slabsize) { 11941528Sjwadams mdb_warn("%p is not a valid kmem_cache_t\n", addr); 11951528Sjwadams goto out2; 11961528Sjwadams } 11971528Sjwadams 11980Sstevel@tonic-gate dprintf(("buf total is %d\n", cp->cache_buftotal)); 11990Sstevel@tonic-gate 12000Sstevel@tonic-gate if (cp->cache_buftotal == 0) { 12010Sstevel@tonic-gate mdb_free(cp, csize); 12020Sstevel@tonic-gate return (WALK_DONE); 12030Sstevel@tonic-gate } 12040Sstevel@tonic-gate 12050Sstevel@tonic-gate /* 12060Sstevel@tonic-gate * If they ask for bufctls, but it's a small-slab cache, 12070Sstevel@tonic-gate * there is nothing to report. 12080Sstevel@tonic-gate */ 12090Sstevel@tonic-gate if ((type & KM_BUFCTL) && !(cp->cache_flags & KMF_HASH)) { 12100Sstevel@tonic-gate dprintf(("bufctl requested, not KMF_HASH (flags: %p)\n", 12110Sstevel@tonic-gate cp->cache_flags)); 12120Sstevel@tonic-gate mdb_free(cp, csize); 12130Sstevel@tonic-gate return (WALK_DONE); 12140Sstevel@tonic-gate } 12150Sstevel@tonic-gate 12160Sstevel@tonic-gate /* 12170Sstevel@tonic-gate * If they want constructed buffers, but there's no constructor or 12180Sstevel@tonic-gate * the cache has DEADBEEF checking enabled, there is nothing to report. 
12190Sstevel@tonic-gate */ 12200Sstevel@tonic-gate if ((type & KM_CONSTRUCTED) && (!(type & KM_FREE) || 12210Sstevel@tonic-gate cp->cache_constructor == NULL || 12220Sstevel@tonic-gate (cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) == KMF_DEADBEEF)) { 12230Sstevel@tonic-gate mdb_free(cp, csize); 12240Sstevel@tonic-gate return (WALK_DONE); 12250Sstevel@tonic-gate } 12260Sstevel@tonic-gate 12270Sstevel@tonic-gate /* 12280Sstevel@tonic-gate * Read in the contents of the magazine layer 12290Sstevel@tonic-gate */ 12300Sstevel@tonic-gate if (kmem_read_magazines(cp, addr, ncpus, &maglist, &magcnt, 12310Sstevel@tonic-gate &magmax, UM_SLEEP) == WALK_ERR) 12320Sstevel@tonic-gate goto out2; 12330Sstevel@tonic-gate 12340Sstevel@tonic-gate /* 12350Sstevel@tonic-gate * We have all of the buffers from the magazines; if we are walking 12360Sstevel@tonic-gate * allocated buffers, sort them so we can bsearch them later. 12370Sstevel@tonic-gate */ 12380Sstevel@tonic-gate if (type & KM_ALLOCATED) 12390Sstevel@tonic-gate qsort(maglist, magcnt, sizeof (void *), addrcmp); 12400Sstevel@tonic-gate 12410Sstevel@tonic-gate wsp->walk_data = kmw = mdb_zalloc(sizeof (kmem_walk_t), UM_SLEEP); 12420Sstevel@tonic-gate 12430Sstevel@tonic-gate kmw->kmw_type = type; 12440Sstevel@tonic-gate kmw->kmw_addr = addr; 12450Sstevel@tonic-gate kmw->kmw_cp = cp; 12460Sstevel@tonic-gate kmw->kmw_csize = csize; 12470Sstevel@tonic-gate kmw->kmw_maglist = maglist; 12480Sstevel@tonic-gate kmw->kmw_max = magmax; 12490Sstevel@tonic-gate kmw->kmw_count = magcnt; 12500Sstevel@tonic-gate kmw->kmw_pos = 0; 12510Sstevel@tonic-gate 12520Sstevel@tonic-gate /* 12530Sstevel@tonic-gate * When walking allocated buffers in a KMF_HASH cache, we walk the 12540Sstevel@tonic-gate * hash table instead of the slab layer. 
12550Sstevel@tonic-gate */ 12560Sstevel@tonic-gate if ((cp->cache_flags & KMF_HASH) && (type & KM_ALLOCATED)) { 12570Sstevel@tonic-gate layered = "kmem_hash"; 12580Sstevel@tonic-gate 12590Sstevel@tonic-gate kmw->kmw_type |= KM_HASH; 12600Sstevel@tonic-gate } else { 12610Sstevel@tonic-gate /* 12620Sstevel@tonic-gate * If we are walking freed buffers, we only need the 12630Sstevel@tonic-gate * magazine layer plus the partially allocated slabs. 12640Sstevel@tonic-gate * To walk allocated buffers, we need all of the slabs. 12650Sstevel@tonic-gate */ 12660Sstevel@tonic-gate if (type & KM_ALLOCATED) 12670Sstevel@tonic-gate layered = "kmem_slab"; 12680Sstevel@tonic-gate else 12690Sstevel@tonic-gate layered = "kmem_slab_partial"; 12700Sstevel@tonic-gate 12710Sstevel@tonic-gate /* 12720Sstevel@tonic-gate * for small-slab caches, we read in the entire slab. For 12730Sstevel@tonic-gate * freed buffers, we can just walk the freelist. For 12740Sstevel@tonic-gate * allocated buffers, we use a 'valid' array to track 12750Sstevel@tonic-gate * the freed buffers. 
12760Sstevel@tonic-gate */ 12770Sstevel@tonic-gate if (!(cp->cache_flags & KMF_HASH)) { 12780Sstevel@tonic-gate chunksize = cp->cache_chunksize; 12790Sstevel@tonic-gate slabsize = cp->cache_slabsize; 12800Sstevel@tonic-gate 12810Sstevel@tonic-gate kmw->kmw_ubase = mdb_alloc(slabsize + 12820Sstevel@tonic-gate sizeof (kmem_bufctl_t), UM_SLEEP); 12830Sstevel@tonic-gate 12840Sstevel@tonic-gate if (type & KM_ALLOCATED) 12850Sstevel@tonic-gate kmw->kmw_valid = 12860Sstevel@tonic-gate mdb_alloc(slabsize / chunksize, UM_SLEEP); 12870Sstevel@tonic-gate } 12880Sstevel@tonic-gate } 12890Sstevel@tonic-gate 12900Sstevel@tonic-gate status = WALK_NEXT; 12910Sstevel@tonic-gate 12920Sstevel@tonic-gate if (mdb_layered_walk(layered, wsp) == -1) { 12930Sstevel@tonic-gate mdb_warn("unable to start layered '%s' walk", layered); 12940Sstevel@tonic-gate status = WALK_ERR; 12950Sstevel@tonic-gate } 12960Sstevel@tonic-gate 12970Sstevel@tonic-gate out1: 12980Sstevel@tonic-gate if (status == WALK_ERR) { 12990Sstevel@tonic-gate if (kmw->kmw_valid) 13000Sstevel@tonic-gate mdb_free(kmw->kmw_valid, slabsize / chunksize); 13010Sstevel@tonic-gate 13020Sstevel@tonic-gate if (kmw->kmw_ubase) 13030Sstevel@tonic-gate mdb_free(kmw->kmw_ubase, slabsize + 13040Sstevel@tonic-gate sizeof (kmem_bufctl_t)); 13050Sstevel@tonic-gate 13061528Sjwadams if (kmw->kmw_maglist) 13071528Sjwadams mdb_free(kmw->kmw_maglist, 13081528Sjwadams kmw->kmw_max * sizeof (uintptr_t)); 13091528Sjwadams 13100Sstevel@tonic-gate mdb_free(kmw, sizeof (kmem_walk_t)); 13110Sstevel@tonic-gate wsp->walk_data = NULL; 13120Sstevel@tonic-gate } 13130Sstevel@tonic-gate 13140Sstevel@tonic-gate out2: 13150Sstevel@tonic-gate if (status == WALK_ERR) 13160Sstevel@tonic-gate mdb_free(cp, csize); 13170Sstevel@tonic-gate 13180Sstevel@tonic-gate return (status); 13190Sstevel@tonic-gate } 13200Sstevel@tonic-gate 13210Sstevel@tonic-gate int 13220Sstevel@tonic-gate kmem_walk_step(mdb_walk_state_t *wsp) 13230Sstevel@tonic-gate { 13240Sstevel@tonic-gate 
kmem_walk_t *kmw = wsp->walk_data; 13250Sstevel@tonic-gate int type = kmw->kmw_type; 13260Sstevel@tonic-gate kmem_cache_t *cp = kmw->kmw_cp; 13270Sstevel@tonic-gate 13280Sstevel@tonic-gate void **maglist = kmw->kmw_maglist; 13290Sstevel@tonic-gate int magcnt = kmw->kmw_count; 13300Sstevel@tonic-gate 13310Sstevel@tonic-gate uintptr_t chunksize, slabsize; 13320Sstevel@tonic-gate uintptr_t addr; 13330Sstevel@tonic-gate const kmem_slab_t *sp; 13340Sstevel@tonic-gate const kmem_bufctl_t *bcp; 13350Sstevel@tonic-gate kmem_bufctl_t bc; 13360Sstevel@tonic-gate 13370Sstevel@tonic-gate int chunks; 13380Sstevel@tonic-gate char *kbase; 13390Sstevel@tonic-gate void *buf; 13400Sstevel@tonic-gate int i, ret; 13410Sstevel@tonic-gate 13420Sstevel@tonic-gate char *valid, *ubase; 13430Sstevel@tonic-gate 13440Sstevel@tonic-gate /* 13450Sstevel@tonic-gate * first, handle the 'kmem_hash' layered walk case 13460Sstevel@tonic-gate */ 13470Sstevel@tonic-gate if (type & KM_HASH) { 13480Sstevel@tonic-gate /* 13490Sstevel@tonic-gate * We have a buffer which has been allocated out of the 13500Sstevel@tonic-gate * global layer. We need to make sure that it's not 13510Sstevel@tonic-gate * actually sitting in a magazine before we report it as 13520Sstevel@tonic-gate * an allocated buffer. 
13530Sstevel@tonic-gate */ 13540Sstevel@tonic-gate buf = ((const kmem_bufctl_t *)wsp->walk_layer)->bc_addr; 13550Sstevel@tonic-gate 13560Sstevel@tonic-gate if (magcnt > 0 && 13570Sstevel@tonic-gate bsearch(&buf, maglist, magcnt, sizeof (void *), 13580Sstevel@tonic-gate addrcmp) != NULL) 13590Sstevel@tonic-gate return (WALK_NEXT); 13600Sstevel@tonic-gate 13610Sstevel@tonic-gate if (type & KM_BUFCTL) 13620Sstevel@tonic-gate return (bufctl_walk_callback(cp, wsp, wsp->walk_addr)); 13630Sstevel@tonic-gate 13640Sstevel@tonic-gate return (kmem_walk_callback(wsp, (uintptr_t)buf)); 13650Sstevel@tonic-gate } 13660Sstevel@tonic-gate 13670Sstevel@tonic-gate ret = WALK_NEXT; 13680Sstevel@tonic-gate 13690Sstevel@tonic-gate addr = kmw->kmw_addr; 13700Sstevel@tonic-gate 13710Sstevel@tonic-gate /* 13720Sstevel@tonic-gate * If we're walking freed buffers, report everything in the 13730Sstevel@tonic-gate * magazine layer before processing the first slab. 13740Sstevel@tonic-gate */ 13750Sstevel@tonic-gate if ((type & KM_FREE) && magcnt != 0) { 13760Sstevel@tonic-gate kmw->kmw_count = 0; /* only do this once */ 13770Sstevel@tonic-gate for (i = 0; i < magcnt; i++) { 13780Sstevel@tonic-gate buf = maglist[i]; 13790Sstevel@tonic-gate 13800Sstevel@tonic-gate if (type & KM_BUFCTL) { 13810Sstevel@tonic-gate uintptr_t out; 13820Sstevel@tonic-gate 13830Sstevel@tonic-gate if (cp->cache_flags & KMF_BUFTAG) { 13840Sstevel@tonic-gate kmem_buftag_t *btp; 13850Sstevel@tonic-gate kmem_buftag_t tag; 13860Sstevel@tonic-gate 13870Sstevel@tonic-gate /* LINTED - alignment */ 13880Sstevel@tonic-gate btp = KMEM_BUFTAG(cp, buf); 13890Sstevel@tonic-gate if (mdb_vread(&tag, sizeof (tag), 13900Sstevel@tonic-gate (uintptr_t)btp) == -1) { 13910Sstevel@tonic-gate mdb_warn("reading buftag for " 13920Sstevel@tonic-gate "%p at %p", buf, btp); 13930Sstevel@tonic-gate continue; 13940Sstevel@tonic-gate } 13950Sstevel@tonic-gate out = (uintptr_t)tag.bt_bufctl; 13960Sstevel@tonic-gate } else { 13970Sstevel@tonic-gate if 
(kmem_hash_lookup(cp, addr, buf, 13980Sstevel@tonic-gate &out) == -1) 13990Sstevel@tonic-gate continue; 14000Sstevel@tonic-gate } 14010Sstevel@tonic-gate ret = bufctl_walk_callback(cp, wsp, out); 14020Sstevel@tonic-gate } else { 14030Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf); 14040Sstevel@tonic-gate } 14050Sstevel@tonic-gate 14060Sstevel@tonic-gate if (ret != WALK_NEXT) 14070Sstevel@tonic-gate return (ret); 14080Sstevel@tonic-gate } 14090Sstevel@tonic-gate } 14100Sstevel@tonic-gate 14110Sstevel@tonic-gate /* 14120Sstevel@tonic-gate * If they want constructed buffers, we're finished, since the 14130Sstevel@tonic-gate * magazine layer holds them all. 14140Sstevel@tonic-gate */ 14150Sstevel@tonic-gate if (type & KM_CONSTRUCTED) 14160Sstevel@tonic-gate return (WALK_DONE); 14170Sstevel@tonic-gate 14180Sstevel@tonic-gate /* 14190Sstevel@tonic-gate * Handle the buffers in the current slab 14200Sstevel@tonic-gate */ 14210Sstevel@tonic-gate chunksize = cp->cache_chunksize; 14220Sstevel@tonic-gate slabsize = cp->cache_slabsize; 14230Sstevel@tonic-gate 14240Sstevel@tonic-gate sp = wsp->walk_layer; 14250Sstevel@tonic-gate chunks = sp->slab_chunks; 14260Sstevel@tonic-gate kbase = sp->slab_base; 14270Sstevel@tonic-gate 14280Sstevel@tonic-gate dprintf(("kbase is %p\n", kbase)); 14290Sstevel@tonic-gate 14300Sstevel@tonic-gate if (!(cp->cache_flags & KMF_HASH)) { 14310Sstevel@tonic-gate valid = kmw->kmw_valid; 14320Sstevel@tonic-gate ubase = kmw->kmw_ubase; 14330Sstevel@tonic-gate 14340Sstevel@tonic-gate if (mdb_vread(ubase, chunks * chunksize, 14350Sstevel@tonic-gate (uintptr_t)kbase) == -1) { 14360Sstevel@tonic-gate mdb_warn("failed to read slab contents at %p", kbase); 14370Sstevel@tonic-gate return (WALK_ERR); 14380Sstevel@tonic-gate } 14390Sstevel@tonic-gate 14400Sstevel@tonic-gate /* 14410Sstevel@tonic-gate * Set up the valid map as fully allocated -- we'll punch 14420Sstevel@tonic-gate * out the freelist. 
14430Sstevel@tonic-gate */ 14440Sstevel@tonic-gate if (type & KM_ALLOCATED) 14450Sstevel@tonic-gate (void) memset(valid, 1, chunks); 14460Sstevel@tonic-gate } else { 14470Sstevel@tonic-gate valid = NULL; 14480Sstevel@tonic-gate ubase = NULL; 14490Sstevel@tonic-gate } 14500Sstevel@tonic-gate 14510Sstevel@tonic-gate /* 14520Sstevel@tonic-gate * walk the slab's freelist 14530Sstevel@tonic-gate */ 14540Sstevel@tonic-gate bcp = sp->slab_head; 14550Sstevel@tonic-gate 14560Sstevel@tonic-gate dprintf(("refcnt is %d; chunks is %d\n", sp->slab_refcnt, chunks)); 14570Sstevel@tonic-gate 14580Sstevel@tonic-gate /* 14590Sstevel@tonic-gate * since we could be in the middle of allocating a buffer, 14600Sstevel@tonic-gate * our refcnt could be one higher than it aught. So we 14610Sstevel@tonic-gate * check one further on the freelist than the count allows. 14620Sstevel@tonic-gate */ 14630Sstevel@tonic-gate for (i = sp->slab_refcnt; i <= chunks; i++) { 14640Sstevel@tonic-gate uint_t ndx; 14650Sstevel@tonic-gate 14660Sstevel@tonic-gate dprintf(("bcp is %p\n", bcp)); 14670Sstevel@tonic-gate 14680Sstevel@tonic-gate if (bcp == NULL) { 14690Sstevel@tonic-gate if (i == chunks) 14700Sstevel@tonic-gate break; 14710Sstevel@tonic-gate mdb_warn( 14720Sstevel@tonic-gate "slab %p in cache %p freelist too short by %d\n", 14730Sstevel@tonic-gate sp, addr, chunks - i); 14740Sstevel@tonic-gate break; 14750Sstevel@tonic-gate } 14760Sstevel@tonic-gate 14770Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) { 14780Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), (uintptr_t)bcp) == -1) { 14790Sstevel@tonic-gate mdb_warn("failed to read bufctl ptr at %p", 14800Sstevel@tonic-gate bcp); 14810Sstevel@tonic-gate break; 14820Sstevel@tonic-gate } 14830Sstevel@tonic-gate buf = bc.bc_addr; 14840Sstevel@tonic-gate } else { 14850Sstevel@tonic-gate /* 14860Sstevel@tonic-gate * Otherwise the buffer is in the slab which 14870Sstevel@tonic-gate * we've read in; we just need to determine 14880Sstevel@tonic-gate * its 
offset in the slab to find the 14890Sstevel@tonic-gate * kmem_bufctl_t. 14900Sstevel@tonic-gate */ 14910Sstevel@tonic-gate bc = *((kmem_bufctl_t *) 14920Sstevel@tonic-gate ((uintptr_t)bcp - (uintptr_t)kbase + 14930Sstevel@tonic-gate (uintptr_t)ubase)); 14940Sstevel@tonic-gate 14950Sstevel@tonic-gate buf = KMEM_BUF(cp, bcp); 14960Sstevel@tonic-gate } 14970Sstevel@tonic-gate 14980Sstevel@tonic-gate ndx = ((uintptr_t)buf - (uintptr_t)kbase) / chunksize; 14990Sstevel@tonic-gate 15000Sstevel@tonic-gate if (ndx > slabsize / cp->cache_bufsize) { 15010Sstevel@tonic-gate /* 15020Sstevel@tonic-gate * This is very wrong; we have managed to find 15030Sstevel@tonic-gate * a buffer in the slab which shouldn't 15040Sstevel@tonic-gate * actually be here. Emit a warning, and 15050Sstevel@tonic-gate * try to continue. 15060Sstevel@tonic-gate */ 15070Sstevel@tonic-gate mdb_warn("buf %p is out of range for " 15080Sstevel@tonic-gate "slab %p, cache %p\n", buf, sp, addr); 15090Sstevel@tonic-gate } else if (type & KM_ALLOCATED) { 15100Sstevel@tonic-gate /* 15110Sstevel@tonic-gate * we have found a buffer on the slab's freelist; 15120Sstevel@tonic-gate * clear its entry 15130Sstevel@tonic-gate */ 15140Sstevel@tonic-gate valid[ndx] = 0; 15150Sstevel@tonic-gate } else { 15160Sstevel@tonic-gate /* 15170Sstevel@tonic-gate * Report this freed buffer 15180Sstevel@tonic-gate */ 15190Sstevel@tonic-gate if (type & KM_BUFCTL) { 15200Sstevel@tonic-gate ret = bufctl_walk_callback(cp, wsp, 15210Sstevel@tonic-gate (uintptr_t)bcp); 15220Sstevel@tonic-gate } else { 15230Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf); 15240Sstevel@tonic-gate } 15250Sstevel@tonic-gate if (ret != WALK_NEXT) 15260Sstevel@tonic-gate return (ret); 15270Sstevel@tonic-gate } 15280Sstevel@tonic-gate 15290Sstevel@tonic-gate bcp = bc.bc_next; 15300Sstevel@tonic-gate } 15310Sstevel@tonic-gate 15320Sstevel@tonic-gate if (bcp != NULL) { 15330Sstevel@tonic-gate dprintf(("slab %p in cache %p freelist too long (%p)\n", 
15340Sstevel@tonic-gate sp, addr, bcp)); 15350Sstevel@tonic-gate } 15360Sstevel@tonic-gate 15370Sstevel@tonic-gate /* 15380Sstevel@tonic-gate * If we are walking freed buffers, the loop above handled reporting 15390Sstevel@tonic-gate * them. 15400Sstevel@tonic-gate */ 15410Sstevel@tonic-gate if (type & KM_FREE) 15420Sstevel@tonic-gate return (WALK_NEXT); 15430Sstevel@tonic-gate 15440Sstevel@tonic-gate if (type & KM_BUFCTL) { 15450Sstevel@tonic-gate mdb_warn("impossible situation: small-slab KM_BUFCTL walk for " 15460Sstevel@tonic-gate "cache %p\n", addr); 15470Sstevel@tonic-gate return (WALK_ERR); 15480Sstevel@tonic-gate } 15490Sstevel@tonic-gate 15500Sstevel@tonic-gate /* 15510Sstevel@tonic-gate * Report allocated buffers, skipping buffers in the magazine layer. 15520Sstevel@tonic-gate * We only get this far for small-slab caches. 15530Sstevel@tonic-gate */ 15540Sstevel@tonic-gate for (i = 0; ret == WALK_NEXT && i < chunks; i++) { 15550Sstevel@tonic-gate buf = (char *)kbase + i * chunksize; 15560Sstevel@tonic-gate 15570Sstevel@tonic-gate if (!valid[i]) 15580Sstevel@tonic-gate continue; /* on slab freelist */ 15590Sstevel@tonic-gate 15600Sstevel@tonic-gate if (magcnt > 0 && 15610Sstevel@tonic-gate bsearch(&buf, maglist, magcnt, sizeof (void *), 15620Sstevel@tonic-gate addrcmp) != NULL) 15630Sstevel@tonic-gate continue; /* in magazine layer */ 15640Sstevel@tonic-gate 15650Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf); 15660Sstevel@tonic-gate } 15670Sstevel@tonic-gate return (ret); 15680Sstevel@tonic-gate } 15690Sstevel@tonic-gate 15700Sstevel@tonic-gate void 15710Sstevel@tonic-gate kmem_walk_fini(mdb_walk_state_t *wsp) 15720Sstevel@tonic-gate { 15730Sstevel@tonic-gate kmem_walk_t *kmw = wsp->walk_data; 15740Sstevel@tonic-gate uintptr_t chunksize; 15750Sstevel@tonic-gate uintptr_t slabsize; 15760Sstevel@tonic-gate 15770Sstevel@tonic-gate if (kmw == NULL) 15780Sstevel@tonic-gate return; 15790Sstevel@tonic-gate 15800Sstevel@tonic-gate if 
(kmw->kmw_maglist != NULL) 15810Sstevel@tonic-gate mdb_free(kmw->kmw_maglist, kmw->kmw_max * sizeof (void *)); 15820Sstevel@tonic-gate 15830Sstevel@tonic-gate chunksize = kmw->kmw_cp->cache_chunksize; 15840Sstevel@tonic-gate slabsize = kmw->kmw_cp->cache_slabsize; 15850Sstevel@tonic-gate 15860Sstevel@tonic-gate if (kmw->kmw_valid != NULL) 15870Sstevel@tonic-gate mdb_free(kmw->kmw_valid, slabsize / chunksize); 15880Sstevel@tonic-gate if (kmw->kmw_ubase != NULL) 15890Sstevel@tonic-gate mdb_free(kmw->kmw_ubase, slabsize + sizeof (kmem_bufctl_t)); 15900Sstevel@tonic-gate 15910Sstevel@tonic-gate mdb_free(kmw->kmw_cp, kmw->kmw_csize); 15920Sstevel@tonic-gate mdb_free(kmw, sizeof (kmem_walk_t)); 15930Sstevel@tonic-gate } 15940Sstevel@tonic-gate 15950Sstevel@tonic-gate /*ARGSUSED*/ 15960Sstevel@tonic-gate static int 15970Sstevel@tonic-gate kmem_walk_all(uintptr_t addr, const kmem_cache_t *c, mdb_walk_state_t *wsp) 15980Sstevel@tonic-gate { 15990Sstevel@tonic-gate /* 16000Sstevel@tonic-gate * Buffers allocated from NOTOUCH caches can also show up as freed 16010Sstevel@tonic-gate * memory in other caches. This can be a little confusing, so we 16020Sstevel@tonic-gate * don't walk NOTOUCH caches when walking all caches (thereby assuring 16030Sstevel@tonic-gate * that "::walk kmem" and "::walk freemem" yield disjoint output). 
16040Sstevel@tonic-gate */ 16050Sstevel@tonic-gate if (c->cache_cflags & KMC_NOTOUCH) 16060Sstevel@tonic-gate return (WALK_NEXT); 16070Sstevel@tonic-gate 16080Sstevel@tonic-gate if (mdb_pwalk(wsp->walk_data, wsp->walk_callback, 16090Sstevel@tonic-gate wsp->walk_cbdata, addr) == -1) 16100Sstevel@tonic-gate return (WALK_DONE); 16110Sstevel@tonic-gate 16120Sstevel@tonic-gate return (WALK_NEXT); 16130Sstevel@tonic-gate } 16140Sstevel@tonic-gate 16150Sstevel@tonic-gate #define KMEM_WALK_ALL(name, wsp) { \ 16160Sstevel@tonic-gate wsp->walk_data = (name); \ 16170Sstevel@tonic-gate if (mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_walk_all, wsp) == -1) \ 16180Sstevel@tonic-gate return (WALK_ERR); \ 16190Sstevel@tonic-gate return (WALK_DONE); \ 16200Sstevel@tonic-gate } 16210Sstevel@tonic-gate 16220Sstevel@tonic-gate int 16230Sstevel@tonic-gate kmem_walk_init(mdb_walk_state_t *wsp) 16240Sstevel@tonic-gate { 16250Sstevel@tonic-gate if (wsp->walk_arg != NULL) 16260Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)wsp->walk_arg; 16270Sstevel@tonic-gate 16280Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16290Sstevel@tonic-gate KMEM_WALK_ALL("kmem", wsp); 16300Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_ALLOCATED)); 16310Sstevel@tonic-gate } 16320Sstevel@tonic-gate 16330Sstevel@tonic-gate int 16340Sstevel@tonic-gate bufctl_walk_init(mdb_walk_state_t *wsp) 16350Sstevel@tonic-gate { 16360Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16370Sstevel@tonic-gate KMEM_WALK_ALL("bufctl", wsp); 16380Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_ALLOCATED | KM_BUFCTL)); 16390Sstevel@tonic-gate } 16400Sstevel@tonic-gate 16410Sstevel@tonic-gate int 16420Sstevel@tonic-gate freemem_walk_init(mdb_walk_state_t *wsp) 16430Sstevel@tonic-gate { 16440Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16450Sstevel@tonic-gate KMEM_WALK_ALL("freemem", wsp); 16460Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_FREE)); 16470Sstevel@tonic-gate } 16480Sstevel@tonic-gate 
16490Sstevel@tonic-gate int 16500Sstevel@tonic-gate freemem_constructed_walk_init(mdb_walk_state_t *wsp) 16510Sstevel@tonic-gate { 16520Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16530Sstevel@tonic-gate KMEM_WALK_ALL("freemem_constructed", wsp); 16540Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_FREE | KM_CONSTRUCTED)); 16550Sstevel@tonic-gate } 16560Sstevel@tonic-gate 16570Sstevel@tonic-gate int 16580Sstevel@tonic-gate freectl_walk_init(mdb_walk_state_t *wsp) 16590Sstevel@tonic-gate { 16600Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16610Sstevel@tonic-gate KMEM_WALK_ALL("freectl", wsp); 16620Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_FREE | KM_BUFCTL)); 16630Sstevel@tonic-gate } 16640Sstevel@tonic-gate 16650Sstevel@tonic-gate int 16660Sstevel@tonic-gate freectl_constructed_walk_init(mdb_walk_state_t *wsp) 16670Sstevel@tonic-gate { 16680Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16690Sstevel@tonic-gate KMEM_WALK_ALL("freectl_constructed", wsp); 16700Sstevel@tonic-gate return (kmem_walk_init_common(wsp, 16710Sstevel@tonic-gate KM_FREE | KM_BUFCTL | KM_CONSTRUCTED)); 16720Sstevel@tonic-gate } 16730Sstevel@tonic-gate 16740Sstevel@tonic-gate typedef struct bufctl_history_walk { 16750Sstevel@tonic-gate void *bhw_next; 16760Sstevel@tonic-gate kmem_cache_t *bhw_cache; 16770Sstevel@tonic-gate kmem_slab_t *bhw_slab; 16780Sstevel@tonic-gate hrtime_t bhw_timestamp; 16790Sstevel@tonic-gate } bufctl_history_walk_t; 16800Sstevel@tonic-gate 16810Sstevel@tonic-gate int 16820Sstevel@tonic-gate bufctl_history_walk_init(mdb_walk_state_t *wsp) 16830Sstevel@tonic-gate { 16840Sstevel@tonic-gate bufctl_history_walk_t *bhw; 16850Sstevel@tonic-gate kmem_bufctl_audit_t bc; 16860Sstevel@tonic-gate kmem_bufctl_audit_t bcn; 16870Sstevel@tonic-gate 16880Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 16890Sstevel@tonic-gate mdb_warn("bufctl_history walk doesn't support global walks\n"); 16900Sstevel@tonic-gate return (WALK_ERR); 16910Sstevel@tonic-gate } 
16920Sstevel@tonic-gate 16930Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), wsp->walk_addr) == -1) { 16940Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", wsp->walk_addr); 16950Sstevel@tonic-gate return (WALK_ERR); 16960Sstevel@tonic-gate } 16970Sstevel@tonic-gate 16980Sstevel@tonic-gate bhw = mdb_zalloc(sizeof (*bhw), UM_SLEEP); 16990Sstevel@tonic-gate bhw->bhw_timestamp = 0; 17000Sstevel@tonic-gate bhw->bhw_cache = bc.bc_cache; 17010Sstevel@tonic-gate bhw->bhw_slab = bc.bc_slab; 17020Sstevel@tonic-gate 17030Sstevel@tonic-gate /* 17040Sstevel@tonic-gate * sometimes the first log entry matches the base bufctl; in that 17050Sstevel@tonic-gate * case, skip the base bufctl. 17060Sstevel@tonic-gate */ 17070Sstevel@tonic-gate if (bc.bc_lastlog != NULL && 17080Sstevel@tonic-gate mdb_vread(&bcn, sizeof (bcn), (uintptr_t)bc.bc_lastlog) != -1 && 17090Sstevel@tonic-gate bc.bc_addr == bcn.bc_addr && 17100Sstevel@tonic-gate bc.bc_cache == bcn.bc_cache && 17110Sstevel@tonic-gate bc.bc_slab == bcn.bc_slab && 17120Sstevel@tonic-gate bc.bc_timestamp == bcn.bc_timestamp && 17130Sstevel@tonic-gate bc.bc_thread == bcn.bc_thread) 17140Sstevel@tonic-gate bhw->bhw_next = bc.bc_lastlog; 17150Sstevel@tonic-gate else 17160Sstevel@tonic-gate bhw->bhw_next = (void *)wsp->walk_addr; 17170Sstevel@tonic-gate 17180Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)bc.bc_addr; 17190Sstevel@tonic-gate wsp->walk_data = bhw; 17200Sstevel@tonic-gate 17210Sstevel@tonic-gate return (WALK_NEXT); 17220Sstevel@tonic-gate } 17230Sstevel@tonic-gate 17240Sstevel@tonic-gate int 17250Sstevel@tonic-gate bufctl_history_walk_step(mdb_walk_state_t *wsp) 17260Sstevel@tonic-gate { 17270Sstevel@tonic-gate bufctl_history_walk_t *bhw = wsp->walk_data; 17280Sstevel@tonic-gate uintptr_t addr = (uintptr_t)bhw->bhw_next; 17290Sstevel@tonic-gate uintptr_t baseaddr = wsp->walk_addr; 17300Sstevel@tonic-gate kmem_bufctl_audit_t bc; 17310Sstevel@tonic-gate 17320Sstevel@tonic-gate if (addr == NULL) 
17330Sstevel@tonic-gate return (WALK_DONE); 17340Sstevel@tonic-gate 17350Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), addr) == -1) { 17360Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", bhw->bhw_next); 17370Sstevel@tonic-gate return (WALK_ERR); 17380Sstevel@tonic-gate } 17390Sstevel@tonic-gate 17400Sstevel@tonic-gate /* 17410Sstevel@tonic-gate * The bufctl is only valid if the address, cache, and slab are 17420Sstevel@tonic-gate * correct. We also check that the timestamp is decreasing, to 17430Sstevel@tonic-gate * prevent infinite loops. 17440Sstevel@tonic-gate */ 17450Sstevel@tonic-gate if ((uintptr_t)bc.bc_addr != baseaddr || 17460Sstevel@tonic-gate bc.bc_cache != bhw->bhw_cache || 17470Sstevel@tonic-gate bc.bc_slab != bhw->bhw_slab || 17480Sstevel@tonic-gate (bhw->bhw_timestamp != 0 && bc.bc_timestamp >= bhw->bhw_timestamp)) 17490Sstevel@tonic-gate return (WALK_DONE); 17500Sstevel@tonic-gate 17510Sstevel@tonic-gate bhw->bhw_next = bc.bc_lastlog; 17520Sstevel@tonic-gate bhw->bhw_timestamp = bc.bc_timestamp; 17530Sstevel@tonic-gate 17540Sstevel@tonic-gate return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata)); 17550Sstevel@tonic-gate } 17560Sstevel@tonic-gate 17570Sstevel@tonic-gate void 17580Sstevel@tonic-gate bufctl_history_walk_fini(mdb_walk_state_t *wsp) 17590Sstevel@tonic-gate { 17600Sstevel@tonic-gate bufctl_history_walk_t *bhw = wsp->walk_data; 17610Sstevel@tonic-gate 17620Sstevel@tonic-gate mdb_free(bhw, sizeof (*bhw)); 17630Sstevel@tonic-gate } 17640Sstevel@tonic-gate 17650Sstevel@tonic-gate typedef struct kmem_log_walk { 17660Sstevel@tonic-gate kmem_bufctl_audit_t *klw_base; 17670Sstevel@tonic-gate kmem_bufctl_audit_t **klw_sorted; 17680Sstevel@tonic-gate kmem_log_header_t klw_lh; 17690Sstevel@tonic-gate size_t klw_size; 17700Sstevel@tonic-gate size_t klw_maxndx; 17710Sstevel@tonic-gate size_t klw_ndx; 17720Sstevel@tonic-gate } kmem_log_walk_t; 17730Sstevel@tonic-gate 17740Sstevel@tonic-gate int 17750Sstevel@tonic-gate 
kmem_log_walk_init(mdb_walk_state_t *wsp) 17760Sstevel@tonic-gate { 17770Sstevel@tonic-gate uintptr_t lp = wsp->walk_addr; 17780Sstevel@tonic-gate kmem_log_walk_t *klw; 17790Sstevel@tonic-gate kmem_log_header_t *lhp; 17800Sstevel@tonic-gate int maxndx, i, j, k; 17810Sstevel@tonic-gate 17820Sstevel@tonic-gate /* 17830Sstevel@tonic-gate * By default (global walk), walk the kmem_transaction_log. Otherwise 17840Sstevel@tonic-gate * read the log whose kmem_log_header_t is stored at walk_addr. 17850Sstevel@tonic-gate */ 17860Sstevel@tonic-gate if (lp == NULL && mdb_readvar(&lp, "kmem_transaction_log") == -1) { 17870Sstevel@tonic-gate mdb_warn("failed to read 'kmem_transaction_log'"); 17880Sstevel@tonic-gate return (WALK_ERR); 17890Sstevel@tonic-gate } 17900Sstevel@tonic-gate 17910Sstevel@tonic-gate if (lp == NULL) { 17920Sstevel@tonic-gate mdb_warn("log is disabled\n"); 17930Sstevel@tonic-gate return (WALK_ERR); 17940Sstevel@tonic-gate } 17950Sstevel@tonic-gate 17960Sstevel@tonic-gate klw = mdb_zalloc(sizeof (kmem_log_walk_t), UM_SLEEP); 17970Sstevel@tonic-gate lhp = &klw->klw_lh; 17980Sstevel@tonic-gate 17990Sstevel@tonic-gate if (mdb_vread(lhp, sizeof (kmem_log_header_t), lp) == -1) { 18000Sstevel@tonic-gate mdb_warn("failed to read log header at %p", lp); 18010Sstevel@tonic-gate mdb_free(klw, sizeof (kmem_log_walk_t)); 18020Sstevel@tonic-gate return (WALK_ERR); 18030Sstevel@tonic-gate } 18040Sstevel@tonic-gate 18050Sstevel@tonic-gate klw->klw_size = lhp->lh_chunksize * lhp->lh_nchunks; 18060Sstevel@tonic-gate klw->klw_base = mdb_alloc(klw->klw_size, UM_SLEEP); 18070Sstevel@tonic-gate maxndx = lhp->lh_chunksize / sizeof (kmem_bufctl_audit_t) - 1; 18080Sstevel@tonic-gate 18090Sstevel@tonic-gate if (mdb_vread(klw->klw_base, klw->klw_size, 18100Sstevel@tonic-gate (uintptr_t)lhp->lh_base) == -1) { 18110Sstevel@tonic-gate mdb_warn("failed to read log at base %p", lhp->lh_base); 18120Sstevel@tonic-gate mdb_free(klw->klw_base, klw->klw_size); 18130Sstevel@tonic-gate 
mdb_free(klw, sizeof (kmem_log_walk_t)); 18140Sstevel@tonic-gate return (WALK_ERR); 18150Sstevel@tonic-gate } 18160Sstevel@tonic-gate 18170Sstevel@tonic-gate klw->klw_sorted = mdb_alloc(maxndx * lhp->lh_nchunks * 18180Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t *), UM_SLEEP); 18190Sstevel@tonic-gate 18200Sstevel@tonic-gate for (i = 0, k = 0; i < lhp->lh_nchunks; i++) { 18210Sstevel@tonic-gate kmem_bufctl_audit_t *chunk = (kmem_bufctl_audit_t *) 18220Sstevel@tonic-gate ((uintptr_t)klw->klw_base + i * lhp->lh_chunksize); 18230Sstevel@tonic-gate 18240Sstevel@tonic-gate for (j = 0; j < maxndx; j++) 18250Sstevel@tonic-gate klw->klw_sorted[k++] = &chunk[j]; 18260Sstevel@tonic-gate } 18270Sstevel@tonic-gate 18280Sstevel@tonic-gate qsort(klw->klw_sorted, k, sizeof (kmem_bufctl_audit_t *), 18290Sstevel@tonic-gate (int(*)(const void *, const void *))bufctlcmp); 18300Sstevel@tonic-gate 18310Sstevel@tonic-gate klw->klw_maxndx = k; 18320Sstevel@tonic-gate wsp->walk_data = klw; 18330Sstevel@tonic-gate 18340Sstevel@tonic-gate return (WALK_NEXT); 18350Sstevel@tonic-gate } 18360Sstevel@tonic-gate 18370Sstevel@tonic-gate int 18380Sstevel@tonic-gate kmem_log_walk_step(mdb_walk_state_t *wsp) 18390Sstevel@tonic-gate { 18400Sstevel@tonic-gate kmem_log_walk_t *klw = wsp->walk_data; 18410Sstevel@tonic-gate kmem_bufctl_audit_t *bcp; 18420Sstevel@tonic-gate 18430Sstevel@tonic-gate if (klw->klw_ndx == klw->klw_maxndx) 18440Sstevel@tonic-gate return (WALK_DONE); 18450Sstevel@tonic-gate 18460Sstevel@tonic-gate bcp = klw->klw_sorted[klw->klw_ndx++]; 18470Sstevel@tonic-gate 18480Sstevel@tonic-gate return (wsp->walk_callback((uintptr_t)bcp - (uintptr_t)klw->klw_base + 18490Sstevel@tonic-gate (uintptr_t)klw->klw_lh.lh_base, bcp, wsp->walk_cbdata)); 18500Sstevel@tonic-gate } 18510Sstevel@tonic-gate 18520Sstevel@tonic-gate void 18530Sstevel@tonic-gate kmem_log_walk_fini(mdb_walk_state_t *wsp) 18540Sstevel@tonic-gate { 18550Sstevel@tonic-gate kmem_log_walk_t *klw = wsp->walk_data; 
18560Sstevel@tonic-gate 18570Sstevel@tonic-gate mdb_free(klw->klw_base, klw->klw_size); 18580Sstevel@tonic-gate mdb_free(klw->klw_sorted, klw->klw_maxndx * 18590Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t *)); 18600Sstevel@tonic-gate mdb_free(klw, sizeof (kmem_log_walk_t)); 18610Sstevel@tonic-gate } 18620Sstevel@tonic-gate 18630Sstevel@tonic-gate typedef struct allocdby_bufctl { 18640Sstevel@tonic-gate uintptr_t abb_addr; 18650Sstevel@tonic-gate hrtime_t abb_ts; 18660Sstevel@tonic-gate } allocdby_bufctl_t; 18670Sstevel@tonic-gate 18680Sstevel@tonic-gate typedef struct allocdby_walk { 18690Sstevel@tonic-gate const char *abw_walk; 18700Sstevel@tonic-gate uintptr_t abw_thread; 18710Sstevel@tonic-gate size_t abw_nbufs; 18720Sstevel@tonic-gate size_t abw_size; 18730Sstevel@tonic-gate allocdby_bufctl_t *abw_buf; 18740Sstevel@tonic-gate size_t abw_ndx; 18750Sstevel@tonic-gate } allocdby_walk_t; 18760Sstevel@tonic-gate 18770Sstevel@tonic-gate int 18780Sstevel@tonic-gate allocdby_walk_bufctl(uintptr_t addr, const kmem_bufctl_audit_t *bcp, 18790Sstevel@tonic-gate allocdby_walk_t *abw) 18800Sstevel@tonic-gate { 18810Sstevel@tonic-gate if ((uintptr_t)bcp->bc_thread != abw->abw_thread) 18820Sstevel@tonic-gate return (WALK_NEXT); 18830Sstevel@tonic-gate 18840Sstevel@tonic-gate if (abw->abw_nbufs == abw->abw_size) { 18850Sstevel@tonic-gate allocdby_bufctl_t *buf; 18860Sstevel@tonic-gate size_t oldsize = sizeof (allocdby_bufctl_t) * abw->abw_size; 18870Sstevel@tonic-gate 18880Sstevel@tonic-gate buf = mdb_zalloc(oldsize << 1, UM_SLEEP); 18890Sstevel@tonic-gate 18900Sstevel@tonic-gate bcopy(abw->abw_buf, buf, oldsize); 18910Sstevel@tonic-gate mdb_free(abw->abw_buf, oldsize); 18920Sstevel@tonic-gate 18930Sstevel@tonic-gate abw->abw_size <<= 1; 18940Sstevel@tonic-gate abw->abw_buf = buf; 18950Sstevel@tonic-gate } 18960Sstevel@tonic-gate 18970Sstevel@tonic-gate abw->abw_buf[abw->abw_nbufs].abb_addr = addr; 18980Sstevel@tonic-gate abw->abw_buf[abw->abw_nbufs].abb_ts = 
bcp->bc_timestamp; 18990Sstevel@tonic-gate abw->abw_nbufs++; 19000Sstevel@tonic-gate 19010Sstevel@tonic-gate return (WALK_NEXT); 19020Sstevel@tonic-gate } 19030Sstevel@tonic-gate 19040Sstevel@tonic-gate /*ARGSUSED*/ 19050Sstevel@tonic-gate int 19060Sstevel@tonic-gate allocdby_walk_cache(uintptr_t addr, const kmem_cache_t *c, allocdby_walk_t *abw) 19070Sstevel@tonic-gate { 19080Sstevel@tonic-gate if (mdb_pwalk(abw->abw_walk, (mdb_walk_cb_t)allocdby_walk_bufctl, 19090Sstevel@tonic-gate abw, addr) == -1) { 19100Sstevel@tonic-gate mdb_warn("couldn't walk bufctl for cache %p", addr); 19110Sstevel@tonic-gate return (WALK_DONE); 19120Sstevel@tonic-gate } 19130Sstevel@tonic-gate 19140Sstevel@tonic-gate return (WALK_NEXT); 19150Sstevel@tonic-gate } 19160Sstevel@tonic-gate 19170Sstevel@tonic-gate static int 19180Sstevel@tonic-gate allocdby_cmp(const allocdby_bufctl_t *lhs, const allocdby_bufctl_t *rhs) 19190Sstevel@tonic-gate { 19200Sstevel@tonic-gate if (lhs->abb_ts < rhs->abb_ts) 19210Sstevel@tonic-gate return (1); 19220Sstevel@tonic-gate if (lhs->abb_ts > rhs->abb_ts) 19230Sstevel@tonic-gate return (-1); 19240Sstevel@tonic-gate return (0); 19250Sstevel@tonic-gate } 19260Sstevel@tonic-gate 19270Sstevel@tonic-gate static int 19280Sstevel@tonic-gate allocdby_walk_init_common(mdb_walk_state_t *wsp, const char *walk) 19290Sstevel@tonic-gate { 19300Sstevel@tonic-gate allocdby_walk_t *abw; 19310Sstevel@tonic-gate 19320Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 19330Sstevel@tonic-gate mdb_warn("allocdby walk doesn't support global walks\n"); 19340Sstevel@tonic-gate return (WALK_ERR); 19350Sstevel@tonic-gate } 19360Sstevel@tonic-gate 19370Sstevel@tonic-gate abw = mdb_zalloc(sizeof (allocdby_walk_t), UM_SLEEP); 19380Sstevel@tonic-gate 19390Sstevel@tonic-gate abw->abw_thread = wsp->walk_addr; 19400Sstevel@tonic-gate abw->abw_walk = walk; 19410Sstevel@tonic-gate abw->abw_size = 128; /* something reasonable */ 19420Sstevel@tonic-gate abw->abw_buf = 19430Sstevel@tonic-gate 
mdb_zalloc(abw->abw_size * sizeof (allocdby_bufctl_t), UM_SLEEP); 19440Sstevel@tonic-gate 19450Sstevel@tonic-gate wsp->walk_data = abw; 19460Sstevel@tonic-gate 19470Sstevel@tonic-gate if (mdb_walk("kmem_cache", 19480Sstevel@tonic-gate (mdb_walk_cb_t)allocdby_walk_cache, abw) == -1) { 19490Sstevel@tonic-gate mdb_warn("couldn't walk kmem_cache"); 19500Sstevel@tonic-gate allocdby_walk_fini(wsp); 19510Sstevel@tonic-gate return (WALK_ERR); 19520Sstevel@tonic-gate } 19530Sstevel@tonic-gate 19540Sstevel@tonic-gate qsort(abw->abw_buf, abw->abw_nbufs, sizeof (allocdby_bufctl_t), 19550Sstevel@tonic-gate (int(*)(const void *, const void *))allocdby_cmp); 19560Sstevel@tonic-gate 19570Sstevel@tonic-gate return (WALK_NEXT); 19580Sstevel@tonic-gate } 19590Sstevel@tonic-gate 19600Sstevel@tonic-gate int 19610Sstevel@tonic-gate allocdby_walk_init(mdb_walk_state_t *wsp) 19620Sstevel@tonic-gate { 19630Sstevel@tonic-gate return (allocdby_walk_init_common(wsp, "bufctl")); 19640Sstevel@tonic-gate } 19650Sstevel@tonic-gate 19660Sstevel@tonic-gate int 19670Sstevel@tonic-gate freedby_walk_init(mdb_walk_state_t *wsp) 19680Sstevel@tonic-gate { 19690Sstevel@tonic-gate return (allocdby_walk_init_common(wsp, "freectl")); 19700Sstevel@tonic-gate } 19710Sstevel@tonic-gate 19720Sstevel@tonic-gate int 19730Sstevel@tonic-gate allocdby_walk_step(mdb_walk_state_t *wsp) 19740Sstevel@tonic-gate { 19750Sstevel@tonic-gate allocdby_walk_t *abw = wsp->walk_data; 19760Sstevel@tonic-gate kmem_bufctl_audit_t bc; 19770Sstevel@tonic-gate uintptr_t addr; 19780Sstevel@tonic-gate 19790Sstevel@tonic-gate if (abw->abw_ndx == abw->abw_nbufs) 19800Sstevel@tonic-gate return (WALK_DONE); 19810Sstevel@tonic-gate 19820Sstevel@tonic-gate addr = abw->abw_buf[abw->abw_ndx++].abb_addr; 19830Sstevel@tonic-gate 19840Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), addr) == -1) { 19850Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr); 19860Sstevel@tonic-gate return (WALK_DONE); 19870Sstevel@tonic-gate } 
19880Sstevel@tonic-gate 19890Sstevel@tonic-gate return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata)); 19900Sstevel@tonic-gate } 19910Sstevel@tonic-gate 19920Sstevel@tonic-gate void 19930Sstevel@tonic-gate allocdby_walk_fini(mdb_walk_state_t *wsp) 19940Sstevel@tonic-gate { 19950Sstevel@tonic-gate allocdby_walk_t *abw = wsp->walk_data; 19960Sstevel@tonic-gate 19970Sstevel@tonic-gate mdb_free(abw->abw_buf, sizeof (allocdby_bufctl_t) * abw->abw_size); 19980Sstevel@tonic-gate mdb_free(abw, sizeof (allocdby_walk_t)); 19990Sstevel@tonic-gate } 20000Sstevel@tonic-gate 20010Sstevel@tonic-gate /*ARGSUSED*/ 20020Sstevel@tonic-gate int 20030Sstevel@tonic-gate allocdby_walk(uintptr_t addr, const kmem_bufctl_audit_t *bcp, void *ignored) 20040Sstevel@tonic-gate { 20050Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 20060Sstevel@tonic-gate GElf_Sym sym; 20070Sstevel@tonic-gate int i; 20080Sstevel@tonic-gate 20090Sstevel@tonic-gate mdb_printf("%0?p %12llx ", addr, bcp->bc_timestamp); 20100Sstevel@tonic-gate for (i = 0; i < bcp->bc_depth; i++) { 20110Sstevel@tonic-gate if (mdb_lookup_by_addr(bcp->bc_stack[i], 20120Sstevel@tonic-gate MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1) 20130Sstevel@tonic-gate continue; 20140Sstevel@tonic-gate if (strncmp(c, "kmem_", 5) == 0) 20150Sstevel@tonic-gate continue; 20160Sstevel@tonic-gate mdb_printf("%s+0x%lx", 20170Sstevel@tonic-gate c, bcp->bc_stack[i] - (uintptr_t)sym.st_value); 20180Sstevel@tonic-gate break; 20190Sstevel@tonic-gate } 20200Sstevel@tonic-gate mdb_printf("\n"); 20210Sstevel@tonic-gate 20220Sstevel@tonic-gate return (WALK_NEXT); 20230Sstevel@tonic-gate } 20240Sstevel@tonic-gate 20250Sstevel@tonic-gate static int 20260Sstevel@tonic-gate allocdby_common(uintptr_t addr, uint_t flags, const char *w) 20270Sstevel@tonic-gate { 20280Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 20290Sstevel@tonic-gate return (DCMD_USAGE); 20300Sstevel@tonic-gate 20310Sstevel@tonic-gate mdb_printf("%-?s %12s %s\n", "BUFCTL", "TIMESTAMP", "CALLER"); 
20320Sstevel@tonic-gate 20330Sstevel@tonic-gate if (mdb_pwalk(w, (mdb_walk_cb_t)allocdby_walk, NULL, addr) == -1) { 20340Sstevel@tonic-gate mdb_warn("can't walk '%s' for %p", w, addr); 20350Sstevel@tonic-gate return (DCMD_ERR); 20360Sstevel@tonic-gate } 20370Sstevel@tonic-gate 20380Sstevel@tonic-gate return (DCMD_OK); 20390Sstevel@tonic-gate } 20400Sstevel@tonic-gate 20410Sstevel@tonic-gate /*ARGSUSED*/ 20420Sstevel@tonic-gate int 20430Sstevel@tonic-gate allocdby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 20440Sstevel@tonic-gate { 20450Sstevel@tonic-gate return (allocdby_common(addr, flags, "allocdby")); 20460Sstevel@tonic-gate } 20470Sstevel@tonic-gate 20480Sstevel@tonic-gate /*ARGSUSED*/ 20490Sstevel@tonic-gate int 20500Sstevel@tonic-gate freedby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 20510Sstevel@tonic-gate { 20520Sstevel@tonic-gate return (allocdby_common(addr, flags, "freedby")); 20530Sstevel@tonic-gate } 20540Sstevel@tonic-gate 20550Sstevel@tonic-gate /* 20560Sstevel@tonic-gate * Return a string describing the address in relation to the given thread's 20570Sstevel@tonic-gate * stack. 20580Sstevel@tonic-gate * 20590Sstevel@tonic-gate * - If the thread state is TS_FREE, return " (inactive interrupt thread)". 20600Sstevel@tonic-gate * 20610Sstevel@tonic-gate * - If the address is above the stack pointer, return an empty string 20620Sstevel@tonic-gate * signifying that the address is active. 20630Sstevel@tonic-gate * 20640Sstevel@tonic-gate * - If the address is below the stack pointer, and the thread is not on proc, 20650Sstevel@tonic-gate * return " (below sp)". 20660Sstevel@tonic-gate * 20670Sstevel@tonic-gate * - If the address is below the stack pointer, and the thread is on proc, 20680Sstevel@tonic-gate * return " (possibly below sp)". Depending on context, we may or may not 20690Sstevel@tonic-gate * have an accurate t_sp. 
20700Sstevel@tonic-gate */ 20710Sstevel@tonic-gate static const char * 20720Sstevel@tonic-gate stack_active(const kthread_t *t, uintptr_t addr) 20730Sstevel@tonic-gate { 20740Sstevel@tonic-gate uintptr_t panicstk; 20750Sstevel@tonic-gate GElf_Sym sym; 20760Sstevel@tonic-gate 20770Sstevel@tonic-gate if (t->t_state == TS_FREE) 20780Sstevel@tonic-gate return (" (inactive interrupt thread)"); 20790Sstevel@tonic-gate 20800Sstevel@tonic-gate /* 20810Sstevel@tonic-gate * Check to see if we're on the panic stack. If so, ignore t_sp, as it 20820Sstevel@tonic-gate * no longer relates to the thread's real stack. 20830Sstevel@tonic-gate */ 20840Sstevel@tonic-gate if (mdb_lookup_by_name("panic_stack", &sym) == 0) { 20850Sstevel@tonic-gate panicstk = (uintptr_t)sym.st_value; 20860Sstevel@tonic-gate 20870Sstevel@tonic-gate if (t->t_sp >= panicstk && t->t_sp < panicstk + PANICSTKSIZE) 20880Sstevel@tonic-gate return (""); 20890Sstevel@tonic-gate } 20900Sstevel@tonic-gate 20910Sstevel@tonic-gate if (addr >= t->t_sp + STACK_BIAS) 20920Sstevel@tonic-gate return (""); 20930Sstevel@tonic-gate 20940Sstevel@tonic-gate if (t->t_state == TS_ONPROC) 20950Sstevel@tonic-gate return (" (possibly below sp)"); 20960Sstevel@tonic-gate 20970Sstevel@tonic-gate return (" (below sp)"); 20980Sstevel@tonic-gate } 20990Sstevel@tonic-gate 21000Sstevel@tonic-gate typedef struct whatis { 21010Sstevel@tonic-gate uintptr_t w_addr; 21020Sstevel@tonic-gate const kmem_cache_t *w_cache; 21030Sstevel@tonic-gate const vmem_t *w_vmem; 21040Sstevel@tonic-gate size_t w_slab_align; 21050Sstevel@tonic-gate int w_slab_found; 21060Sstevel@tonic-gate int w_found; 21070Sstevel@tonic-gate int w_kmem_lite_count; 21080Sstevel@tonic-gate uint_t w_verbose; 21090Sstevel@tonic-gate uint_t w_freemem; 21100Sstevel@tonic-gate uint_t w_all; 21110Sstevel@tonic-gate uint_t w_bufctl; 21120Sstevel@tonic-gate uint_t w_idspace; 21130Sstevel@tonic-gate } whatis_t; 21140Sstevel@tonic-gate 21150Sstevel@tonic-gate static void 
whatis_print_kmem(uintptr_t addr, uintptr_t baddr, whatis_t *w)
{
	/* LINTED pointer cast may result in improper alignment */
	uintptr_t btaddr = (uintptr_t)KMEM_BUFTAG(w->w_cache, addr);
	intptr_t stat;
	int count = 0;
	int i;
	pc_t callers[16];

	/*
	 * If the cache keeps a redzone buftag, use it to (a) sanity-check
	 * that the buffer is in a recognizable alloc/free state, (b) recover
	 * the bufctl pointer for audited caches, and (c) pull in the
	 * KMF_LITE caller history.  Any failure simply falls through to the
	 * basic one-line report at "done".
	 */
	if (w->w_cache->cache_flags & KMF_REDZONE) {
		kmem_buftag_t bt;

		if (mdb_vread(&bt, sizeof (bt), btaddr) == -1)
			goto done;

		/* bt_bxstat is the bufctl pointer XORed with a state token */
		stat = (intptr_t)bt.bt_bufctl ^ bt.bt_bxstat;

		if (stat != KMEM_BUFTAG_ALLOC && stat != KMEM_BUFTAG_FREE)
			goto done;

		/*
		 * provide the bufctl ptr if it has useful information
		 */
		if (baddr == 0 && (w->w_cache->cache_flags & KMF_AUDIT))
			baddr = (uintptr_t)bt.bt_bufctl;

		if (w->w_cache->cache_flags & KMF_LITE) {
			count = w->w_kmem_lite_count;

			/* clamp to our local callers[] capacity */
			if (count * sizeof (pc_t) > sizeof (callers))
				count = 0;

			if (count > 0 &&
			    mdb_vread(callers, count * sizeof (pc_t),
			    btaddr +
			    offsetof(kmem_buftag_lite_t, bt_history)) == -1)
				count = 0;

			/*
			 * skip unused callers
			 */
			while (count > 0 && callers[count - 1] ==
			    (pc_t)KMEM_UNINITIALIZED_PATTERN)
				count--;
		}
	}

done:
	if (baddr == 0)
		mdb_printf("%p is %p+%p, %s from %s\n",
		    w->w_addr, addr, w->w_addr - addr,
		    w->w_freemem == FALSE ? "allocated" : "freed",
		    w->w_cache->cache_name);
	else
		mdb_printf("%p is %p+%p, bufctl %p %s from %s\n",
		    w->w_addr, addr, w->w_addr - addr, baddr,
		    w->w_freemem == FALSE ? "allocated" : "freed",
		    w->w_cache->cache_name);

	/* KMF_LITE caller history, comma-separated on an indented line */
	if (count > 0) {
		mdb_inc_indent(8);
		mdb_printf("recent caller%s: %a%s", (count != 1)? "s":"",
		    callers[0], (count != 1)? ", ":"\n");
		for (i = 1; i < count; i++)
			mdb_printf("%a%s", callers[i],
			    (i + 1 < count)? ", ":"\n");
		mdb_dec_indent(8);
	}
}

/*
 * ::whatis callback for the by-buffer ("kmem"/"freemem") cache walks:
 * report a hit if w_addr falls inside this buffer and stop unless -a.
 */
/*ARGSUSED*/
static int
whatis_walk_kmem(uintptr_t addr, void *ignored, whatis_t *w)
{
	if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize)
		return (WALK_NEXT);

	whatis_print_kmem(addr, 0, w);
	w->w_found++;
	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
}

/*
 * ::whatis callback for vmem_seg walks: report a hit if w_addr lies in
 * [vs_start, vs_end) of this segment of the current arena (w_vmem).
 */
static int
whatis_walk_seg(uintptr_t addr, const vmem_seg_t *vs, whatis_t *w)
{
	if (w->w_addr < vs->vs_start || w->w_addr >= vs->vs_end)
		return (WALK_NEXT);

	mdb_printf("%p is %p+%p ", w->w_addr,
	    vs->vs_start, w->w_addr - vs->vs_start);

	/*
	 * Always provide the vmem_seg pointer if it has a stack trace.
	 */
	if (w->w_bufctl == TRUE ||
	    (vs->vs_type == VMEM_ALLOC && vs->vs_depth != 0)) {
		mdb_printf("(vmem_seg %p) ", addr);
	}

	mdb_printf("%sfrom %s vmem arena\n", w->w_freemem == TRUE ?
	    "freed " : "", w->w_vmem->vm_name);

	w->w_found++;
	return (w->w_all == TRUE ?
	    WALK_NEXT : WALK_DONE);
}

/*
 * ::whatis callback for the vmem-arena walk: search this arena's allocated
 * segments, then (if nothing found or -a was given) its free segments,
 * skipping arenas on the wrong side of the -i (identifier-space) filter.
 */
static int
whatis_walk_vmem(uintptr_t addr, const vmem_t *vmem, whatis_t *w)
{
	const char *nm = vmem->vm_name;
	w->w_vmem = vmem;
	w->w_freemem = FALSE;

	/* XOR: visit identifier arenas iff -i was given, others iff not */
	if (((vmem->vm_cflags & VMC_IDENTIFIER) != 0) ^ w->w_idspace)
		return (WALK_NEXT);

	if (w->w_verbose)
		mdb_printf("Searching vmem arena %s...\n", nm);

	if (mdb_pwalk("vmem_alloc",
	    (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) {
		mdb_warn("can't walk vmem seg for %p", addr);
		return (WALK_NEXT);
	}

	if (w->w_found && w->w_all == FALSE)
		return (WALK_DONE);

	if (w->w_verbose)
		mdb_printf("Searching vmem arena %s for free virtual...\n", nm);

	w->w_freemem = TRUE;

	if (mdb_pwalk("vmem_free",
	    (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) {
		mdb_warn("can't walk vmem seg for %p", addr);
		return (WALK_NEXT);
	}

	return (w->w_found && w->w_all == FALSE ? WALK_DONE : WALK_NEXT);
}

/*
 * ::whatis callback for the by-bufctl ("bufctl"/"freectl") cache walks:
 * like whatis_walk_kmem, but the buffer address comes from the bufctl.
 */
/*ARGSUSED*/
static int
whatis_walk_bufctl(uintptr_t baddr, const kmem_bufctl_t *bcp, whatis_t *w)
{
	uintptr_t addr;

	if (bcp == NULL)
		return (WALK_NEXT);

	addr = (uintptr_t)bcp->bc_addr;

	if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize)
		return (WALK_NEXT);

	whatis_print_kmem(addr, baddr, w);
	w->w_found++;
	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
}

/*
 * Cheap pre-filter: note (in w_slab_found) whether w_addr falls within
 * this slab, so whatis_walk_cache can skip caches that can't contain it.
 */
/*ARGSUSED*/
static int
whatis_walk_slab(uintptr_t saddr, const kmem_slab_t *sp, whatis_t *w)
{
	uintptr_t base = P2ALIGN((uintptr_t)sp->slab_base, w->w_slab_align);

	if ((w->w_addr - base) >= w->w_cache->cache_slabsize)
		return (WALK_NEXT);

	w->w_slab_found++;
	return (WALK_DONE);
}

/*
 * ::whatis callback for the kmem_cache walk: search one cache's buffers
 * (allocated, then freed) for w_addr.
 */
static int
whatis_walk_cache(uintptr_t addr, const kmem_cache_t *c, whatis_t *w)
{
	char *walk, *freewalk;
	mdb_walk_cb_t func;
	vmem_t *vmp = c->cache_arena;

	/*
	 * NOTE(review): this tests VMC_IDENTIFIER (a vmem flag) against
	 * cache_flags; the analogous arena check in whatis_walk_vmem uses
	 * vm_cflags.  Presumably intentional here (the flag value is shared)
	 * -- confirm against sys/kmem_impl.h before changing.
	 */
	if (((c->cache_flags & VMC_IDENTIFIER) != 0) ^ w->w_idspace)
		return (WALK_NEXT);
	/* -b selects the bufctl-based walkers over the raw-buffer walkers */
	if (w->w_bufctl == FALSE) {
		walk = "kmem";
		freewalk = "freemem";
		func = (mdb_walk_cb_t)whatis_walk_kmem;
	} else {
		walk = "bufctl";
		freewalk = "freectl";
		func = (mdb_walk_cb_t)whatis_walk_bufctl;
	}

	w->w_cache = c;

	if (w->w_verbose)
		mdb_printf("Searching %s's slabs...\n", c->cache_name);

	/*
	 * Verify that the address is in one of the cache's slabs.  If not,
	 * we can skip the more expensive walkers.  (this is purely a
	 * heuristic -- as long as there are no false-negatives, we'll be fine)
	 *
	 * We try to get the cache's arena's quantum, since to accurately
	 * get the base of a slab, you have to align it to the quantum.  If
	 * it doesn't look sensible, we fall back to not aligning.
	 */
	if (mdb_vread(&w->w_slab_align, sizeof (w->w_slab_align),
	    (uintptr_t)&vmp->vm_quantum) == -1) {
		mdb_warn("unable to read %p->cache_arena->vm_quantum", c);
		w->w_slab_align = 1;
	}

	/* quantum must be a nonzero power of two no larger than the slab */
	if ((c->cache_slabsize < w->w_slab_align) || w->w_slab_align == 0 ||
	    (w->w_slab_align & (w->w_slab_align - 1))) {
		mdb_warn("%p's arena has invalid quantum (0x%p)\n", c,
		    w->w_slab_align);
		w->w_slab_align = 1;
	}

	w->w_slab_found = 0;
	if (mdb_pwalk("kmem_slab", (mdb_walk_cb_t)whatis_walk_slab, w,
	    addr) == -1) {
		mdb_warn("can't find kmem_slab walker");
		return (WALK_DONE);
	}
	if (w->w_slab_found == 0)
		return (WALK_NEXT);

	/* cap the per-buffer caller history at its buftag capacity */
	if (c->cache_flags & KMF_LITE) {
		if (mdb_readvar(&w->w_kmem_lite_count,
		    "kmem_lite_count") == -1 || w->w_kmem_lite_count > 16)
			w->w_kmem_lite_count = 0;
	}

	if (w->w_verbose)
		mdb_printf("Searching %s...\n", c->cache_name);

	w->w_freemem = FALSE;

	if (mdb_pwalk(walk, func, w, addr) == -1) {
		mdb_warn("can't find %s walker", walk);
		return (WALK_DONE);
	}

	if (w->w_found && w->w_all == FALSE)
		return (WALK_DONE);

	/*
	 * We have searched for allocated memory; now search for freed memory.
	 */
	if (w->w_verbose)
		mdb_printf("Searching %s for free memory...\n", c->cache_name);

	w->w_freemem = TRUE;

	if (mdb_pwalk(freewalk, func, w, addr) == -1) {
		mdb_warn("can't find %s walker", freewalk);
		return (WALK_DONE);
	}

	return (w->w_found && w->w_all == FALSE ? WALK_DONE : WALK_NEXT);
}

/* First pass over kmem caches: only those the CPU touches (not KMC_NOTOUCH) */
static int
whatis_walk_touch(uintptr_t addr, const kmem_cache_t *c, whatis_t *w)
{
	if (c->cache_cflags & KMC_NOTOUCH)
		return (WALK_NEXT);

	return (whatis_walk_cache(addr, c, w));
}

/* Second pass over kmem caches: the complementary KMC_NOTOUCH set */
static int
whatis_walk_notouch(uintptr_t addr, const kmem_cache_t *c, whatis_t *w)
{
	if (!(c->cache_cflags & KMC_NOTOUCH))
		return (WALK_NEXT);

	return (whatis_walk_cache(addr, c, w));
}

/*
 * ::whatis callback for the thread walk: match addresses inside the
 * kthread_t itself, or inside the thread's stack.
 */
static int
whatis_walk_thread(uintptr_t addr, const kthread_t *t, whatis_t *w)
{
	/*
	 * Often, one calls ::whatis on an address from a thread structure.
	 * We use this opportunity to short circuit this case...
	 */
	if (w->w_addr >= addr && w->w_addr < addr + sizeof (kthread_t)) {
		mdb_printf("%p is %p+%p, allocated as a thread structure\n",
		    w->w_addr, addr, w->w_addr - addr);
		w->w_found++;
		return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
	}

	/* stack range check: [t_stkbase, t_stk] */
	if (w->w_addr < (uintptr_t)t->t_stkbase ||
	    w->w_addr > (uintptr_t)t->t_stk)
		return (WALK_NEXT);

	if (t->t_stkbase == NULL)
		return (WALK_NEXT);

	mdb_printf("%p is in thread %p's stack%s\n", w->w_addr, addr,
	    stack_active(t, w->w_addr));

	w->w_found++;
	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
}

/*
 * ::whatis callback for the modctl walk: check each loaded module's text,
 * data, bss, symtab, and symspace ranges for w_addr, and name the symbol
 * if the address falls inside one.
 */
static int
whatis_walk_modctl(uintptr_t addr, const struct modctl *m, whatis_t *w)
{
	struct module mod;
	char name[MODMAXNAMELEN], *where;
	char c[MDB_SYM_NAMLEN];
	Shdr shdr;
	GElf_Sym sym;

	if (m->mod_mp == NULL)
		return (WALK_NEXT);

	if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
		mdb_warn("couldn't read modctl %p's module", addr);
		return (WALK_NEXT);
	}

	if (w->w_addr >= (uintptr_t)mod.text &&
	    w->w_addr < (uintptr_t)mod.text + mod.text_size) {
		where = "text segment";
		goto found;
	}

	if (w->w_addr >= (uintptr_t)mod.data &&
	    w->w_addr < (uintptr_t)mod.data + mod.data_size) {
		where = "data segment";
		goto found;
	}

	if (w->w_addr >= (uintptr_t)mod.bss &&
	    w->w_addr < (uintptr_t)mod.bss + mod.bss_size) {
		where = "bss";
		goto found;
	}

	if (mdb_vread(&shdr, sizeof (shdr), (uintptr_t)mod.symhdr) == -1) {
		mdb_warn("couldn't read symbol header for %p's module", addr);
		return (WALK_NEXT);
	}

	if (w->w_addr >= (uintptr_t)mod.symtbl && w->w_addr <
	    (uintptr_t)mod.symtbl + (uintptr_t)mod.nsyms * shdr.sh_entsize) {
		where = "symtab";
		goto found;
	}

	if (w->w_addr >= (uintptr_t)mod.symspace &&
	    w->w_addr < (uintptr_t)mod.symspace + (uintptr_t)mod.symsize) {
		where = "symspace";
		goto found;
	}

	return (WALK_NEXT);

found:
	if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1)
		(void) mdb_snprintf(name, sizeof (name), "0x%p", addr);

	mdb_printf("%p is ", w->w_addr);

	/*
	 * If we found this address in a module, then there's a chance that
	 * it's actually a named symbol.  Try the symbol lookup.
	 */
	if (mdb_lookup_by_addr(w->w_addr, MDB_SYM_FUZZY, c, sizeof (c),
	    &sym) != -1 && w->w_addr >= (uintptr_t)sym.st_value &&
	    w->w_addr < (uintptr_t)sym.st_value + sym.st_size) {
		mdb_printf("%s+%lx ", c, w->w_addr - (uintptr_t)sym.st_value);
	}

	mdb_printf("in %s's %s\n", name, where);

	w->w_found++;
	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
}

/*
 * ::whatis callback for the page walk: match addresses inside a page_t.
 * The page_t size is platform-dependent, so prefer the CTF-derived size
 * from the unix module over our compile-time sizeof.
 */
/*ARGSUSED*/
static int
whatis_walk_page(uintptr_t addr, const void *ignored, whatis_t *w)
{
	static int machsize = 0;	/* cached across callbacks */
	mdb_ctf_id_t id;

	if (machsize == 0) {
		if (mdb_ctf_lookup_by_name("unix`page_t", &id) == 0)
			machsize = mdb_ctf_type_size(id);
		else {
			mdb_warn("could not get size of page_t");
			machsize = sizeof (page_t);
		}
	}

	if (w->w_addr < addr || w->w_addr >= addr + machsize)
		return (WALK_NEXT);

	mdb_printf("%p is %p+%p, allocated as a page structure\n",
	    w->w_addr, addr, w->w_addr - addr);

	w->w_found++;
	return (w->w_all == TRUE ?
	    WALK_NEXT : WALK_DONE);
}

/*
 * ::whatis dcmd: given a virtual address, report where it came from by
 * searching, in order: modules, thread stacks, page structures (all
 * skipped under -i), touched kmem caches, untouched kmem caches, and
 * finally vmem arenas.  Stops at the first hit unless -a is given.
 */
int
whatis(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	whatis_t w;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	w.w_verbose = FALSE;
	w.w_bufctl = FALSE;
	w.w_all = FALSE;
	w.w_idspace = FALSE;

	if (mdb_getopts(argc, argv,
	    'v', MDB_OPT_SETBITS, TRUE, &w.w_verbose,
	    'a', MDB_OPT_SETBITS, TRUE, &w.w_all,
	    'i', MDB_OPT_SETBITS, TRUE, &w.w_idspace,
	    'b', MDB_OPT_SETBITS, TRUE, &w.w_bufctl, NULL) != argc)
		return (DCMD_USAGE);

	w.w_addr = addr;
	w.w_found = 0;

	if (w.w_verbose)
		mdb_printf("Searching modules...\n");

	/* identifier arenas can't contain module/thread/page addresses */
	if (!w.w_idspace) {
		if (mdb_walk("modctl", (mdb_walk_cb_t)whatis_walk_modctl, &w)
		    == -1) {
			mdb_warn("couldn't find modctl walker");
			return (DCMD_ERR);
		}

		if (w.w_found && w.w_all == FALSE)
			return (DCMD_OK);

		/*
		 * Now search all thread stacks.  Yes, this is a little weak; we
		 * can save a lot of work by first checking to see if the
		 * address is in segkp vs. segkmem.  But hey, computers are
		 * fast.
		 */
		if (w.w_verbose)
			mdb_printf("Searching threads...\n");

		if (mdb_walk("thread", (mdb_walk_cb_t)whatis_walk_thread, &w)
		    == -1) {
			mdb_warn("couldn't find thread walker");
			return (DCMD_ERR);
		}

		if (w.w_found && w.w_all == FALSE)
			return (DCMD_OK);

		if (w.w_verbose)
			mdb_printf("Searching page structures...\n");

		if (mdb_walk("page", (mdb_walk_cb_t)whatis_walk_page, &w)
		    == -1) {
			mdb_warn("couldn't find page walker");
			return (DCMD_ERR);
		}

		if (w.w_found && w.w_all == FALSE)
			return (DCMD_OK);
	}

	if (mdb_walk("kmem_cache",
	    (mdb_walk_cb_t)whatis_walk_touch, &w) == -1) {
		mdb_warn("couldn't find kmem_cache walker");
		return (DCMD_ERR);
	}

	if (w.w_found && w.w_all == FALSE)
		return (DCMD_OK);

	if (mdb_walk("kmem_cache",
	    (mdb_walk_cb_t)whatis_walk_notouch, &w) == -1) {
		mdb_warn("couldn't find kmem_cache walker");
		return (DCMD_ERR);
	}

	if (w.w_found && w.w_all == FALSE)
		return (DCMD_OK);

	if (mdb_walk("vmem_postfix",
	    (mdb_walk_cb_t)whatis_walk_vmem, &w) == -1) {
		mdb_warn("couldn't find vmem_postfix walker");
		return (DCMD_ERR);
	}

	if (w.w_found == 0)
		mdb_printf("%p is unknown\n", addr);

	return (DCMD_OK);
}

/* ::help whatis output */
void
whatis_help(void)
{
	mdb_printf(
	    "Given a virtual address, attempt to determine where it came\n"
	    "from.\n"
	    "\n"
	    "\t-v\tVerbose output; display caches/arenas/etc as they are\n"
	    "\t\tsearched\n"
	    "\t-a\tFind all possible sources.  Default behavior is to stop at\n"
	    "\t\tthe first (most specific) source.\n"
	    "\t-i\tSearch only identifier arenas and caches.  By default\n"
	    "\t\tthese are ignored.\n"
	    "\t-b\tReport bufctls and vmem_segs for matches in kmem and vmem,\n"
	    "\t\trespectively.  Warning: if the buffer exists, but does not\n"
	    "\t\thave a bufctl, it will not be reported.\n");
}

/* per-CPU slice [kmc_low, kmc_high) of the kmem transaction log */
typedef struct kmem_log_cpu {
	uintptr_t kmc_low;
	uintptr_t kmc_high;
} kmem_log_cpu_t;

/* walk state for ::kmem_log: optional filter address + per-CPU slices */
typedef struct kmem_log_data {
	uintptr_t kmd_addr;		/* 0 means "no filter" */
	kmem_log_cpu_t *kmd_cpu;
} kmem_log_data_t;

/*
 * Print one kmem transaction log entry: the CPU whose log slice contains
 * it (blank if none), plus the bufctl address, buffer address, timestamp,
 * and thread.  If kmd_addr is set, entries whose buffer does not contain
 * that address are filtered out.
 */
int
kmem_log_walk(uintptr_t addr, const kmem_bufctl_audit_t *b,
    kmem_log_data_t *kmd)
{
	int i;
	kmem_log_cpu_t *kmc = kmd->kmd_cpu;
	size_t bufsize;

	for (i = 0; i < NCPU; i++) {
		if (addr >= kmc[i].kmc_low && addr < kmc[i].kmc_high)
			break;
	}

	if (kmd->kmd_addr) {
		if (b->bc_cache == NULL)
			return (WALK_NEXT);

		if (mdb_vread(&bufsize, sizeof (bufsize),
		    (uintptr_t)&b->bc_cache->cache_bufsize) == -1) {
			mdb_warn(
			    "failed to read cache_bufsize for cache at %p",
			    b->bc_cache);
			return (WALK_ERR);
		}

		if (kmd->kmd_addr < (uintptr_t)b->bc_addr ||
		    kmd->kmd_addr >= (uintptr_t)b->bc_addr + bufsize)
			return (WALK_NEXT);
	}

	/* blank filler matching the %3d CPU column */
	if (i == NCPU)
		mdb_printf("   ");
	else
		mdb_printf("%3d", i);

	mdb_printf(" %0?p %0?p %16llx %0?p\n", addr, b->bc_addr,
	    b->bc_timestamp, b->bc_thread);

	return (WALK_NEXT);
}

/*
 * ::kmem_log dcmd: display the kmem transaction log.  With an address,
 * show only entries for that bufctl (or, with -b, treat the address as a
 * buffer and show all entries touching it).
 */
/*ARGSUSED*/
int
kmem_log(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kmem_log_header_t lh;
	kmem_cpu_log_header_t clh;
	uintptr_t lhp, clhp;
	int ncpus;
	uintptr_t *cpu;
	GElf_Sym sym;
	kmem_log_cpu_t *kmc;
	int i;
	kmem_log_data_t kmd;
	uint_t opt_b = FALSE;

	if (mdb_getopts(argc, argv,
	    'b', MDB_OPT_SETBITS, TRUE, &opt_b, NULL) != argc)
		return (DCMD_USAGE);

	if (mdb_readvar(&lhp, "kmem_transaction_log") == -1) {
		mdb_warn("failed to read 'kmem_transaction_log'");
		return (DCMD_ERR);
	}

	/* NULL log pointer: kernel built/booted without transaction logging */
	if (lhp == NULL) {
		mdb_warn("no kmem transaction log\n");
		return (DCMD_ERR);
	}

	/* NOTE(review): return value unchecked and ncpus is never read */
	mdb_readvar(&ncpus, "ncpus");

	if (mdb_vread(&lh, sizeof (kmem_log_header_t), lhp) == -1) {
		mdb_warn("failed to read log header at %p", lhp);
		return (DCMD_ERR);
	}

	/* address of the embedded per-CPU header array */
	clhp = lhp +
	    ((uintptr_t)&lh.lh_cpu[0] - (uintptr_t)&lh);

	cpu = mdb_alloc(sizeof (uintptr_t) * NCPU, UM_SLEEP | UM_GC);

	if (mdb_lookup_by_name("cpu", &sym) == -1) {
		mdb_warn("couldn't find 'cpu' array");
		return (DCMD_ERR);
	}

	if (sym.st_size != NCPU * sizeof (uintptr_t)) {
		mdb_warn("expected 'cpu' to be of size %d; found %d\n",
		    NCPU * sizeof (uintptr_t), sym.st_size);
		return (DCMD_ERR);
	}

	if (mdb_vread(cpu, sym.st_size, (uintptr_t)sym.st_value) == -1) {
		mdb_warn("failed to read cpu array at %p", sym.st_value);
		return (DCMD_ERR);
	}

	kmc = mdb_zalloc(sizeof (kmem_log_cpu_t) * NCPU, UM_SLEEP | UM_GC);
	kmd.kmd_addr = NULL;
	kmd.kmd_cpu = kmc;

	/*
	 * Compute each present CPU's [low, high) slice of the log so that
	 * kmem_log_walk can attribute entries to CPUs.  Absent CPUs keep
	 * the zeroed slice and so never match.
	 */
	for (i = 0; i < NCPU; i++) {

		if (cpu[i] == NULL)
			continue;

		if (mdb_vread(&clh, sizeof (clh), clhp) == -1) {
			mdb_warn("cannot read cpu %d's log header at %p",
			    i, clhp);
			return (DCMD_ERR);
		}

		kmc[i].kmc_low = clh.clh_chunk * lh.lh_chunksize +
		    (uintptr_t)lh.lh_base;
		kmc[i].kmc_high = (uintptr_t)clh.clh_current;

		clhp += sizeof (kmem_cpu_log_header_t);
	}

	mdb_printf("%3s %-?s %-?s %16s %-?s\n", "CPU", "ADDR", "BUFADDR",
	    "TIMESTAMP", "THREAD");

	/*
	 * If we have been passed an address, print out only log entries
	 * corresponding to that address.  If opt_b is specified, then interpret
	 * the address as a bufctl.
	 */
	if (flags & DCMD_ADDRSPEC) {
		kmem_bufctl_audit_t b;

		if (opt_b) {
			kmd.kmd_addr = addr;
		} else {
			if (mdb_vread(&b,
			    sizeof (kmem_bufctl_audit_t), addr) == -1) {
				mdb_warn("failed to read bufctl at %p", addr);
				return (DCMD_ERR);
			}

			/* print this single bufctl's entry and stop */
			(void) kmem_log_walk(addr, &b, &kmd);

			return (DCMD_OK);
		}
	}

	if (mdb_walk("kmem_log", (mdb_walk_cb_t)kmem_log_walk, &kmd) == -1) {
		mdb_warn("can't find kmem log walker");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

/* arguments threaded through the bufctl_history walk into ::bufctl */
typedef struct bufctl_history_cb {
	int bhc_flags;
	int bhc_argc;
	const mdb_arg_t *bhc_argv;
	int bhc_ret;
} bufctl_history_cb_t;

/*
 * bufctl_history walk callback: re-invoke ::bufctl on each historical
 * bufctl, stopping on the first failure.
 */
/*ARGSUSED*/
static int
bufctl_history_callback(uintptr_t addr, const void *ign, void *arg)
{
	bufctl_history_cb_t *bhc = arg;

	bhc->bhc_ret =
	    bufctl(addr, bhc->bhc_flags, bhc->bhc_argc, bhc->bhc_argv);

	/* only the first element of the loop gets DCMD_LOOPFIRST */
	bhc->bhc_flags &= ~DCMD_LOOPFIRST;

	return ((bhc->bhc_ret == DCMD_OK)? WALK_NEXT : WALK_DONE);
}

/* ::help bufctl output */
void
bufctl_help(void)
{
	mdb_printf("%s",
"Display the contents of kmem_bufctl_audit_ts, with optional filtering.\n\n");
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s",
"  -v    Display the full content of the bufctl, including its stack trace\n"
"  -h    retrieve the bufctl's transaction history, if available\n"
"  -a addr\n"
"        filter out bufctls not involving the buffer at addr\n"
"  -c caller\n"
"        filter out bufctls without the function/PC in their stack trace\n"
"  -e earliest\n"
"        filter out bufctls timestamped before earliest\n"
"  -l latest\n"
"        filter out bufctls timestamped after latest\n"
"  -t thread\n"
"        filter out bufctls not involving thread\n");
}

/*
 * ::bufctl dcmd: display (and optionally filter) a kmem_bufctl_audit_t.
 * With -h, walk the bufctl's transaction history and run ::bufctl on each
 * element; the internal -H flag marks those recursive invocations.
 */
int
bufctl(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kmem_bufctl_audit_t bc;
	uint_t verbose = FALSE;
	uint_t history = FALSE;
	uint_t in_history = FALSE;
	uintptr_t caller = NULL, thread = NULL;
	uintptr_t laddr, haddr, baddr = NULL;
	hrtime_t earliest = 0, latest = 0;
	int i, depth;
	char c[MDB_SYM_NAMLEN];
	GElf_Sym sym;

	if (mdb_getopts(argc, argv,
	    'v', MDB_OPT_SETBITS, TRUE, &verbose,
	    'h', MDB_OPT_SETBITS, TRUE, &history,
	    'H', MDB_OPT_SETBITS, TRUE, &in_history,	/* internal */
	    'c', MDB_OPT_UINTPTR, &caller,
	    't', MDB_OPT_UINTPTR, &thread,
	    'e', MDB_OPT_UINT64, &earliest,
	    'l', MDB_OPT_UINT64, &latest,
	    'a', MDB_OPT_UINTPTR, &baddr, NULL) != argc)
		return (DCMD_USAGE);

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	/* -H is only valid inside a -h walk */
	if (in_history && !history)
		return (DCMD_USAGE);

	if (history && !in_history) {
		mdb_arg_t *nargv = mdb_zalloc(sizeof (*nargv) * (argc + 1),
		    UM_SLEEP | UM_GC);
		bufctl_history_cb_t bhc;

		nargv[0].a_type = MDB_TYPE_STRING;
		nargv[0].a_un.a_str = "-H";	/* prevent recursion */

		for (i = 0; i < argc; i++)
			nargv[i + 1] = argv[i];

		/*
		 * When in history mode, we treat each element as if it
		 * were in a seperate loop, so that the headers group
		 * bufctls with similar histories.
		 */
		bhc.bhc_flags = flags | DCMD_LOOP | DCMD_LOOPFIRST;
		bhc.bhc_argc = argc + 1;
		bhc.bhc_argv = nargv;
		bhc.bhc_ret = DCMD_OK;

		if (mdb_pwalk("bufctl_history", bufctl_history_callback, &bhc,
		    addr) == -1) {
			mdb_warn("unable to walk bufctl_history");
			return (DCMD_ERR);
		}

		if (bhc.bhc_ret == DCMD_OK && !(flags & DCMD_PIPE_OUT))
			mdb_printf("\n");

		return (bhc.bhc_ret);
	}

	if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
		if (verbose) {
			mdb_printf("%16s %16s %16s %16s\n"
			    "%<u>%16s %16s %16s %16s%</u>\n",
			    "ADDR", "BUFADDR", "TIMESTAMP", "THREAD",
			    "", "CACHE", "LASTLOG", "CONTENTS");
		} else {
			mdb_printf("%<u>%-?s %-?s %-12s %-?s %s%</u>\n",
			    "ADDR", "BUFADDR", "TIMESTAMP", "THREAD", "CALLER");
		}
	}

	if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
		mdb_warn("couldn't read bufctl at %p", addr);
		return (DCMD_ERR);
	}

	/*
	 * Guard against bogus bc_depth in case the bufctl is corrupt or
	 * the address does not really refer to a bufctl.
	 */
	depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH);

	if (caller != NULL) {
		laddr = caller;
		haddr = caller + sizeof (caller);

		if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, sizeof (c),
		    &sym) != -1 && caller == (uintptr_t)sym.st_value) {
			/*
			 * We were provided an exact symbol value; any
			 * address in the function is valid.
			 */
			laddr = (uintptr_t)sym.st_value;
			haddr = (uintptr_t)sym.st_value + sym.st_size;
		}

		for (i = 0; i < depth; i++)
			if (bc.bc_stack[i] >= laddr && bc.bc_stack[i] < haddr)
				break;

		if (i == depth)
			return (DCMD_OK);
	}

	if (thread != NULL && (uintptr_t)bc.bc_thread != thread)
		return (DCMD_OK);

	if (earliest != 0 && bc.bc_timestamp < earliest)
		return (DCMD_OK);

	if (latest != 0 && bc.bc_timestamp > latest)
		return (DCMD_OK);

	if (baddr != 0 && (uintptr_t)bc.bc_addr != baddr)
		return (DCMD_OK);

	/* in a pipeline, emit only the address for downstream dcmds */
	if (flags & DCMD_PIPE_OUT) {
		mdb_printf("%#lr\n", addr);
		return (DCMD_OK);
	}

	if (verbose) {
		mdb_printf(
		    "%<b>%16p%</b>
%16p %16llx %16p\n" 29800Sstevel@tonic-gate "%16s %16p %16p %16p\n", 29810Sstevel@tonic-gate addr, bc.bc_addr, bc.bc_timestamp, bc.bc_thread, 29820Sstevel@tonic-gate "", bc.bc_cache, bc.bc_lastlog, bc.bc_contents); 29830Sstevel@tonic-gate 29840Sstevel@tonic-gate mdb_inc_indent(17); 29850Sstevel@tonic-gate for (i = 0; i < depth; i++) 29860Sstevel@tonic-gate mdb_printf("%a\n", bc.bc_stack[i]); 29870Sstevel@tonic-gate mdb_dec_indent(17); 29880Sstevel@tonic-gate mdb_printf("\n"); 29890Sstevel@tonic-gate } else { 29900Sstevel@tonic-gate mdb_printf("%0?p %0?p %12llx %0?p", addr, bc.bc_addr, 29910Sstevel@tonic-gate bc.bc_timestamp, bc.bc_thread); 29920Sstevel@tonic-gate 29930Sstevel@tonic-gate for (i = 0; i < depth; i++) { 29940Sstevel@tonic-gate if (mdb_lookup_by_addr(bc.bc_stack[i], 29950Sstevel@tonic-gate MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1) 29960Sstevel@tonic-gate continue; 29970Sstevel@tonic-gate if (strncmp(c, "kmem_", 5) == 0) 29980Sstevel@tonic-gate continue; 29990Sstevel@tonic-gate mdb_printf(" %a\n", bc.bc_stack[i]); 30000Sstevel@tonic-gate break; 30010Sstevel@tonic-gate } 30020Sstevel@tonic-gate 30030Sstevel@tonic-gate if (i >= depth) 30040Sstevel@tonic-gate mdb_printf("\n"); 30050Sstevel@tonic-gate } 30060Sstevel@tonic-gate 30070Sstevel@tonic-gate return (DCMD_OK); 30080Sstevel@tonic-gate } 30090Sstevel@tonic-gate 30100Sstevel@tonic-gate typedef struct kmem_verify { 30110Sstevel@tonic-gate uint64_t *kmv_buf; /* buffer to read cache contents into */ 30120Sstevel@tonic-gate size_t kmv_size; /* number of bytes in kmv_buf */ 30130Sstevel@tonic-gate int kmv_corruption; /* > 0 if corruption found. 
*/ 30140Sstevel@tonic-gate int kmv_besilent; /* report actual corruption sites */ 30150Sstevel@tonic-gate struct kmem_cache kmv_cache; /* the cache we're operating on */ 30160Sstevel@tonic-gate } kmem_verify_t; 30170Sstevel@tonic-gate 30180Sstevel@tonic-gate /* 30190Sstevel@tonic-gate * verify_pattern() 30200Sstevel@tonic-gate * verify that buf is filled with the pattern pat. 30210Sstevel@tonic-gate */ 30220Sstevel@tonic-gate static int64_t 30230Sstevel@tonic-gate verify_pattern(uint64_t *buf_arg, size_t size, uint64_t pat) 30240Sstevel@tonic-gate { 30250Sstevel@tonic-gate /*LINTED*/ 30260Sstevel@tonic-gate uint64_t *bufend = (uint64_t *)((char *)buf_arg + size); 30270Sstevel@tonic-gate uint64_t *buf; 30280Sstevel@tonic-gate 30290Sstevel@tonic-gate for (buf = buf_arg; buf < bufend; buf++) 30300Sstevel@tonic-gate if (*buf != pat) 30310Sstevel@tonic-gate return ((uintptr_t)buf - (uintptr_t)buf_arg); 30320Sstevel@tonic-gate return (-1); 30330Sstevel@tonic-gate } 30340Sstevel@tonic-gate 30350Sstevel@tonic-gate /* 30360Sstevel@tonic-gate * verify_buftag() 30370Sstevel@tonic-gate * verify that btp->bt_bxstat == (bcp ^ pat) 30380Sstevel@tonic-gate */ 30390Sstevel@tonic-gate static int 30400Sstevel@tonic-gate verify_buftag(kmem_buftag_t *btp, uintptr_t pat) 30410Sstevel@tonic-gate { 30420Sstevel@tonic-gate return (btp->bt_bxstat == ((intptr_t)btp->bt_bufctl ^ pat) ? 0 : -1); 30430Sstevel@tonic-gate } 30440Sstevel@tonic-gate 30450Sstevel@tonic-gate /* 30460Sstevel@tonic-gate * verify_free() 30470Sstevel@tonic-gate * verify the integrity of a free block of memory by checking 30480Sstevel@tonic-gate * that it is filled with 0xdeadbeef and that its buftag is sane. 
30490Sstevel@tonic-gate */ 30500Sstevel@tonic-gate /*ARGSUSED1*/ 30510Sstevel@tonic-gate static int 30520Sstevel@tonic-gate verify_free(uintptr_t addr, const void *data, void *private) 30530Sstevel@tonic-gate { 30540Sstevel@tonic-gate kmem_verify_t *kmv = (kmem_verify_t *)private; 30550Sstevel@tonic-gate uint64_t *buf = kmv->kmv_buf; /* buf to validate */ 30560Sstevel@tonic-gate int64_t corrupt; /* corruption offset */ 30570Sstevel@tonic-gate kmem_buftag_t *buftagp; /* ptr to buftag */ 30580Sstevel@tonic-gate kmem_cache_t *cp = &kmv->kmv_cache; 30590Sstevel@tonic-gate int besilent = kmv->kmv_besilent; 30600Sstevel@tonic-gate 30610Sstevel@tonic-gate /*LINTED*/ 30620Sstevel@tonic-gate buftagp = KMEM_BUFTAG(cp, buf); 30630Sstevel@tonic-gate 30640Sstevel@tonic-gate /* 30650Sstevel@tonic-gate * Read the buffer to check. 30660Sstevel@tonic-gate */ 30670Sstevel@tonic-gate if (mdb_vread(buf, kmv->kmv_size, addr) == -1) { 30680Sstevel@tonic-gate if (!besilent) 30690Sstevel@tonic-gate mdb_warn("couldn't read %p", addr); 30700Sstevel@tonic-gate return (WALK_NEXT); 30710Sstevel@tonic-gate } 30720Sstevel@tonic-gate 30730Sstevel@tonic-gate if ((corrupt = verify_pattern(buf, cp->cache_verify, 30740Sstevel@tonic-gate KMEM_FREE_PATTERN)) >= 0) { 30750Sstevel@tonic-gate if (!besilent) 30760Sstevel@tonic-gate mdb_printf("buffer %p (free) seems corrupted, at %p\n", 30770Sstevel@tonic-gate addr, (uintptr_t)addr + corrupt); 30780Sstevel@tonic-gate goto corrupt; 30790Sstevel@tonic-gate } 30800Sstevel@tonic-gate /* 30810Sstevel@tonic-gate * When KMF_LITE is set, buftagp->bt_redzone is used to hold 30820Sstevel@tonic-gate * the first bytes of the buffer, hence we cannot check for red 30830Sstevel@tonic-gate * zone corruption. 
30840Sstevel@tonic-gate */ 30850Sstevel@tonic-gate if ((cp->cache_flags & (KMF_HASH | KMF_LITE)) == KMF_HASH && 30860Sstevel@tonic-gate buftagp->bt_redzone != KMEM_REDZONE_PATTERN) { 30870Sstevel@tonic-gate if (!besilent) 30880Sstevel@tonic-gate mdb_printf("buffer %p (free) seems to " 30890Sstevel@tonic-gate "have a corrupt redzone pattern\n", addr); 30900Sstevel@tonic-gate goto corrupt; 30910Sstevel@tonic-gate } 30920Sstevel@tonic-gate 30930Sstevel@tonic-gate /* 30940Sstevel@tonic-gate * confirm bufctl pointer integrity. 30950Sstevel@tonic-gate */ 30960Sstevel@tonic-gate if (verify_buftag(buftagp, KMEM_BUFTAG_FREE) == -1) { 30970Sstevel@tonic-gate if (!besilent) 30980Sstevel@tonic-gate mdb_printf("buffer %p (free) has a corrupt " 30990Sstevel@tonic-gate "buftag\n", addr); 31000Sstevel@tonic-gate goto corrupt; 31010Sstevel@tonic-gate } 31020Sstevel@tonic-gate 31030Sstevel@tonic-gate return (WALK_NEXT); 31040Sstevel@tonic-gate corrupt: 31050Sstevel@tonic-gate kmv->kmv_corruption++; 31060Sstevel@tonic-gate return (WALK_NEXT); 31070Sstevel@tonic-gate } 31080Sstevel@tonic-gate 31090Sstevel@tonic-gate /* 31100Sstevel@tonic-gate * verify_alloc() 31110Sstevel@tonic-gate * Verify that the buftag of an allocated buffer makes sense with respect 31120Sstevel@tonic-gate * to the buffer. 
31130Sstevel@tonic-gate */ 31140Sstevel@tonic-gate /*ARGSUSED1*/ 31150Sstevel@tonic-gate static int 31160Sstevel@tonic-gate verify_alloc(uintptr_t addr, const void *data, void *private) 31170Sstevel@tonic-gate { 31180Sstevel@tonic-gate kmem_verify_t *kmv = (kmem_verify_t *)private; 31190Sstevel@tonic-gate kmem_cache_t *cp = &kmv->kmv_cache; 31200Sstevel@tonic-gate uint64_t *buf = kmv->kmv_buf; /* buf to validate */ 31210Sstevel@tonic-gate /*LINTED*/ 31220Sstevel@tonic-gate kmem_buftag_t *buftagp = KMEM_BUFTAG(cp, buf); 31230Sstevel@tonic-gate uint32_t *ip = (uint32_t *)buftagp; 31240Sstevel@tonic-gate uint8_t *bp = (uint8_t *)buf; 31250Sstevel@tonic-gate int looks_ok = 0, size_ok = 1; /* flags for finding corruption */ 31260Sstevel@tonic-gate int besilent = kmv->kmv_besilent; 31270Sstevel@tonic-gate 31280Sstevel@tonic-gate /* 31290Sstevel@tonic-gate * Read the buffer to check. 31300Sstevel@tonic-gate */ 31310Sstevel@tonic-gate if (mdb_vread(buf, kmv->kmv_size, addr) == -1) { 31320Sstevel@tonic-gate if (!besilent) 31330Sstevel@tonic-gate mdb_warn("couldn't read %p", addr); 31340Sstevel@tonic-gate return (WALK_NEXT); 31350Sstevel@tonic-gate } 31360Sstevel@tonic-gate 31370Sstevel@tonic-gate /* 31380Sstevel@tonic-gate * There are two cases to handle: 31390Sstevel@tonic-gate * 1. If the buf was alloc'd using kmem_cache_alloc, it will have 31400Sstevel@tonic-gate * 0xfeedfacefeedface at the end of it 31410Sstevel@tonic-gate * 2. If the buf was alloc'd using kmem_alloc, it will have 31420Sstevel@tonic-gate * 0xbb just past the end of the region in use. At the buftag, 31430Sstevel@tonic-gate * it will have 0xfeedface (or, if the whole buffer is in use, 31440Sstevel@tonic-gate * 0xfeedface & bb000000 or 0xfeedfacf & 000000bb depending on 31450Sstevel@tonic-gate * endianness), followed by 32 bits containing the offset of the 31460Sstevel@tonic-gate * 0xbb byte in the buffer. 
31470Sstevel@tonic-gate * 31480Sstevel@tonic-gate * Finally, the two 32-bit words that comprise the second half of the 31490Sstevel@tonic-gate * buftag should xor to KMEM_BUFTAG_ALLOC 31500Sstevel@tonic-gate */ 31510Sstevel@tonic-gate 31520Sstevel@tonic-gate if (buftagp->bt_redzone == KMEM_REDZONE_PATTERN) 31530Sstevel@tonic-gate looks_ok = 1; 31540Sstevel@tonic-gate else if (!KMEM_SIZE_VALID(ip[1])) 31550Sstevel@tonic-gate size_ok = 0; 31560Sstevel@tonic-gate else if (bp[KMEM_SIZE_DECODE(ip[1])] == KMEM_REDZONE_BYTE) 31570Sstevel@tonic-gate looks_ok = 1; 31580Sstevel@tonic-gate else 31590Sstevel@tonic-gate size_ok = 0; 31600Sstevel@tonic-gate 31610Sstevel@tonic-gate if (!size_ok) { 31620Sstevel@tonic-gate if (!besilent) 31630Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a corrupt " 31640Sstevel@tonic-gate "redzone size encoding\n", addr); 31650Sstevel@tonic-gate goto corrupt; 31660Sstevel@tonic-gate } 31670Sstevel@tonic-gate 31680Sstevel@tonic-gate if (!looks_ok) { 31690Sstevel@tonic-gate if (!besilent) 31700Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a corrupt " 31710Sstevel@tonic-gate "redzone signature\n", addr); 31720Sstevel@tonic-gate goto corrupt; 31730Sstevel@tonic-gate } 31740Sstevel@tonic-gate 31750Sstevel@tonic-gate if (verify_buftag(buftagp, KMEM_BUFTAG_ALLOC) == -1) { 31760Sstevel@tonic-gate if (!besilent) 31770Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a " 31780Sstevel@tonic-gate "corrupt buftag\n", addr); 31790Sstevel@tonic-gate goto corrupt; 31800Sstevel@tonic-gate } 31810Sstevel@tonic-gate 31820Sstevel@tonic-gate return (WALK_NEXT); 31830Sstevel@tonic-gate corrupt: 31840Sstevel@tonic-gate kmv->kmv_corruption++; 31850Sstevel@tonic-gate return (WALK_NEXT); 31860Sstevel@tonic-gate } 31870Sstevel@tonic-gate 31880Sstevel@tonic-gate /*ARGSUSED2*/ 31890Sstevel@tonic-gate int 31900Sstevel@tonic-gate kmem_verify(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 31910Sstevel@tonic-gate { 
31920Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 31930Sstevel@tonic-gate int check_alloc = 0, check_free = 0; 31940Sstevel@tonic-gate kmem_verify_t kmv; 31950Sstevel@tonic-gate 31960Sstevel@tonic-gate if (mdb_vread(&kmv.kmv_cache, sizeof (kmv.kmv_cache), 31970Sstevel@tonic-gate addr) == -1) { 31980Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache %p", addr); 31990Sstevel@tonic-gate return (DCMD_ERR); 32000Sstevel@tonic-gate } 32010Sstevel@tonic-gate 32020Sstevel@tonic-gate kmv.kmv_size = kmv.kmv_cache.cache_buftag + 32030Sstevel@tonic-gate sizeof (kmem_buftag_t); 32040Sstevel@tonic-gate kmv.kmv_buf = mdb_alloc(kmv.kmv_size, UM_SLEEP | UM_GC); 32050Sstevel@tonic-gate kmv.kmv_corruption = 0; 32060Sstevel@tonic-gate 32070Sstevel@tonic-gate if ((kmv.kmv_cache.cache_flags & KMF_REDZONE)) { 32080Sstevel@tonic-gate check_alloc = 1; 32090Sstevel@tonic-gate if (kmv.kmv_cache.cache_flags & KMF_DEADBEEF) 32100Sstevel@tonic-gate check_free = 1; 32110Sstevel@tonic-gate } else { 32120Sstevel@tonic-gate if (!(flags & DCMD_LOOP)) { 32130Sstevel@tonic-gate mdb_warn("cache %p (%s) does not have " 32140Sstevel@tonic-gate "redzone checking enabled\n", addr, 32150Sstevel@tonic-gate kmv.kmv_cache.cache_name); 32160Sstevel@tonic-gate } 32170Sstevel@tonic-gate return (DCMD_ERR); 32180Sstevel@tonic-gate } 32190Sstevel@tonic-gate 32200Sstevel@tonic-gate if (flags & DCMD_LOOP) { 32210Sstevel@tonic-gate /* 32220Sstevel@tonic-gate * table mode, don't print out every corrupt buffer 32230Sstevel@tonic-gate */ 32240Sstevel@tonic-gate kmv.kmv_besilent = 1; 32250Sstevel@tonic-gate } else { 32260Sstevel@tonic-gate mdb_printf("Summary for cache '%s'\n", 32270Sstevel@tonic-gate kmv.kmv_cache.cache_name); 32280Sstevel@tonic-gate mdb_inc_indent(2); 32290Sstevel@tonic-gate kmv.kmv_besilent = 0; 32300Sstevel@tonic-gate } 32310Sstevel@tonic-gate 32320Sstevel@tonic-gate if (check_alloc) 32330Sstevel@tonic-gate (void) mdb_pwalk("kmem", verify_alloc, &kmv, addr); 32340Sstevel@tonic-gate if 
(check_free) 32350Sstevel@tonic-gate (void) mdb_pwalk("freemem", verify_free, &kmv, addr); 32360Sstevel@tonic-gate 32370Sstevel@tonic-gate if (flags & DCMD_LOOP) { 32380Sstevel@tonic-gate if (kmv.kmv_corruption == 0) { 32390Sstevel@tonic-gate mdb_printf("%-*s %?p clean\n", 32400Sstevel@tonic-gate KMEM_CACHE_NAMELEN, 32410Sstevel@tonic-gate kmv.kmv_cache.cache_name, addr); 32420Sstevel@tonic-gate } else { 32430Sstevel@tonic-gate char *s = ""; /* optional s in "buffer[s]" */ 32440Sstevel@tonic-gate if (kmv.kmv_corruption > 1) 32450Sstevel@tonic-gate s = "s"; 32460Sstevel@tonic-gate 32470Sstevel@tonic-gate mdb_printf("%-*s %?p %d corrupt buffer%s\n", 32480Sstevel@tonic-gate KMEM_CACHE_NAMELEN, 32490Sstevel@tonic-gate kmv.kmv_cache.cache_name, addr, 32500Sstevel@tonic-gate kmv.kmv_corruption, s); 32510Sstevel@tonic-gate } 32520Sstevel@tonic-gate } else { 32530Sstevel@tonic-gate /* 32540Sstevel@tonic-gate * This is the more verbose mode, when the user has 32550Sstevel@tonic-gate * type addr::kmem_verify. If the cache was clean, 32560Sstevel@tonic-gate * nothing will have yet been printed. So say something. 32570Sstevel@tonic-gate */ 32580Sstevel@tonic-gate if (kmv.kmv_corruption == 0) 32590Sstevel@tonic-gate mdb_printf("clean\n"); 32600Sstevel@tonic-gate 32610Sstevel@tonic-gate mdb_dec_indent(2); 32620Sstevel@tonic-gate } 32630Sstevel@tonic-gate } else { 32640Sstevel@tonic-gate /* 32650Sstevel@tonic-gate * If the user didn't specify a cache to verify, we'll walk all 32660Sstevel@tonic-gate * kmem_cache's, specifying ourself as a callback for each... 
32670Sstevel@tonic-gate * this is the equivalent of '::walk kmem_cache .::kmem_verify' 32680Sstevel@tonic-gate */ 32690Sstevel@tonic-gate mdb_printf("%<u>%-*s %-?s %-20s%</b>\n", KMEM_CACHE_NAMELEN, 32700Sstevel@tonic-gate "Cache Name", "Addr", "Cache Integrity"); 32710Sstevel@tonic-gate (void) (mdb_walk_dcmd("kmem_cache", "kmem_verify", 0, NULL)); 32720Sstevel@tonic-gate } 32730Sstevel@tonic-gate 32740Sstevel@tonic-gate return (DCMD_OK); 32750Sstevel@tonic-gate } 32760Sstevel@tonic-gate 32770Sstevel@tonic-gate typedef struct vmem_node { 32780Sstevel@tonic-gate struct vmem_node *vn_next; 32790Sstevel@tonic-gate struct vmem_node *vn_parent; 32800Sstevel@tonic-gate struct vmem_node *vn_sibling; 32810Sstevel@tonic-gate struct vmem_node *vn_children; 32820Sstevel@tonic-gate uintptr_t vn_addr; 32830Sstevel@tonic-gate int vn_marked; 32840Sstevel@tonic-gate vmem_t vn_vmem; 32850Sstevel@tonic-gate } vmem_node_t; 32860Sstevel@tonic-gate 32870Sstevel@tonic-gate typedef struct vmem_walk { 32880Sstevel@tonic-gate vmem_node_t *vw_root; 32890Sstevel@tonic-gate vmem_node_t *vw_current; 32900Sstevel@tonic-gate } vmem_walk_t; 32910Sstevel@tonic-gate 32920Sstevel@tonic-gate int 32930Sstevel@tonic-gate vmem_walk_init(mdb_walk_state_t *wsp) 32940Sstevel@tonic-gate { 32950Sstevel@tonic-gate uintptr_t vaddr, paddr; 32960Sstevel@tonic-gate vmem_node_t *head = NULL, *root = NULL, *current = NULL, *parent, *vp; 32970Sstevel@tonic-gate vmem_walk_t *vw; 32980Sstevel@tonic-gate 32990Sstevel@tonic-gate if (mdb_readvar(&vaddr, "vmem_list") == -1) { 33000Sstevel@tonic-gate mdb_warn("couldn't read 'vmem_list'"); 33010Sstevel@tonic-gate return (WALK_ERR); 33020Sstevel@tonic-gate } 33030Sstevel@tonic-gate 33040Sstevel@tonic-gate while (vaddr != NULL) { 33050Sstevel@tonic-gate vp = mdb_zalloc(sizeof (vmem_node_t), UM_SLEEP); 33060Sstevel@tonic-gate vp->vn_addr = vaddr; 33070Sstevel@tonic-gate vp->vn_next = head; 33080Sstevel@tonic-gate head = vp; 33090Sstevel@tonic-gate 33100Sstevel@tonic-gate if 
(vaddr == wsp->walk_addr) 33110Sstevel@tonic-gate current = vp; 33120Sstevel@tonic-gate 33130Sstevel@tonic-gate if (mdb_vread(&vp->vn_vmem, sizeof (vmem_t), vaddr) == -1) { 33140Sstevel@tonic-gate mdb_warn("couldn't read vmem_t at %p", vaddr); 33150Sstevel@tonic-gate goto err; 33160Sstevel@tonic-gate } 33170Sstevel@tonic-gate 33180Sstevel@tonic-gate vaddr = (uintptr_t)vp->vn_vmem.vm_next; 33190Sstevel@tonic-gate } 33200Sstevel@tonic-gate 33210Sstevel@tonic-gate for (vp = head; vp != NULL; vp = vp->vn_next) { 33220Sstevel@tonic-gate 33230Sstevel@tonic-gate if ((paddr = (uintptr_t)vp->vn_vmem.vm_source) == NULL) { 33240Sstevel@tonic-gate vp->vn_sibling = root; 33250Sstevel@tonic-gate root = vp; 33260Sstevel@tonic-gate continue; 33270Sstevel@tonic-gate } 33280Sstevel@tonic-gate 33290Sstevel@tonic-gate for (parent = head; parent != NULL; parent = parent->vn_next) { 33300Sstevel@tonic-gate if (parent->vn_addr != paddr) 33310Sstevel@tonic-gate continue; 33320Sstevel@tonic-gate vp->vn_sibling = parent->vn_children; 33330Sstevel@tonic-gate parent->vn_children = vp; 33340Sstevel@tonic-gate vp->vn_parent = parent; 33350Sstevel@tonic-gate break; 33360Sstevel@tonic-gate } 33370Sstevel@tonic-gate 33380Sstevel@tonic-gate if (parent == NULL) { 33390Sstevel@tonic-gate mdb_warn("couldn't find %p's parent (%p)\n", 33400Sstevel@tonic-gate vp->vn_addr, paddr); 33410Sstevel@tonic-gate goto err; 33420Sstevel@tonic-gate } 33430Sstevel@tonic-gate } 33440Sstevel@tonic-gate 33450Sstevel@tonic-gate vw = mdb_zalloc(sizeof (vmem_walk_t), UM_SLEEP); 33460Sstevel@tonic-gate vw->vw_root = root; 33470Sstevel@tonic-gate 33480Sstevel@tonic-gate if (current != NULL) 33490Sstevel@tonic-gate vw->vw_current = current; 33500Sstevel@tonic-gate else 33510Sstevel@tonic-gate vw->vw_current = root; 33520Sstevel@tonic-gate 33530Sstevel@tonic-gate wsp->walk_data = vw; 33540Sstevel@tonic-gate return (WALK_NEXT); 33550Sstevel@tonic-gate err: 33560Sstevel@tonic-gate for (vp = head; head != NULL; vp = head) { 
33570Sstevel@tonic-gate head = vp->vn_next; 33580Sstevel@tonic-gate mdb_free(vp, sizeof (vmem_node_t)); 33590Sstevel@tonic-gate } 33600Sstevel@tonic-gate 33610Sstevel@tonic-gate return (WALK_ERR); 33620Sstevel@tonic-gate } 33630Sstevel@tonic-gate 33640Sstevel@tonic-gate int 33650Sstevel@tonic-gate vmem_walk_step(mdb_walk_state_t *wsp) 33660Sstevel@tonic-gate { 33670Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data; 33680Sstevel@tonic-gate vmem_node_t *vp; 33690Sstevel@tonic-gate int rval; 33700Sstevel@tonic-gate 33710Sstevel@tonic-gate if ((vp = vw->vw_current) == NULL) 33720Sstevel@tonic-gate return (WALK_DONE); 33730Sstevel@tonic-gate 33740Sstevel@tonic-gate rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata); 33750Sstevel@tonic-gate 33760Sstevel@tonic-gate if (vp->vn_children != NULL) { 33770Sstevel@tonic-gate vw->vw_current = vp->vn_children; 33780Sstevel@tonic-gate return (rval); 33790Sstevel@tonic-gate } 33800Sstevel@tonic-gate 33810Sstevel@tonic-gate do { 33820Sstevel@tonic-gate vw->vw_current = vp->vn_sibling; 33830Sstevel@tonic-gate vp = vp->vn_parent; 33840Sstevel@tonic-gate } while (vw->vw_current == NULL && vp != NULL); 33850Sstevel@tonic-gate 33860Sstevel@tonic-gate return (rval); 33870Sstevel@tonic-gate } 33880Sstevel@tonic-gate 33890Sstevel@tonic-gate /* 33900Sstevel@tonic-gate * The "vmem_postfix" walk walks the vmem arenas in post-fix order; all 33910Sstevel@tonic-gate * children are visited before their parent. We perform the postfix walk 33920Sstevel@tonic-gate * iteratively (rather than recursively) to allow mdb to regain control 33930Sstevel@tonic-gate * after each callback. 
33940Sstevel@tonic-gate */ 33950Sstevel@tonic-gate int 33960Sstevel@tonic-gate vmem_postfix_walk_step(mdb_walk_state_t *wsp) 33970Sstevel@tonic-gate { 33980Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data; 33990Sstevel@tonic-gate vmem_node_t *vp = vw->vw_current; 34000Sstevel@tonic-gate int rval; 34010Sstevel@tonic-gate 34020Sstevel@tonic-gate /* 34030Sstevel@tonic-gate * If this node is marked, then we know that we have already visited 34040Sstevel@tonic-gate * all of its children. If the node has any siblings, they need to 34050Sstevel@tonic-gate * be visited next; otherwise, we need to visit the parent. Note 34060Sstevel@tonic-gate * that vp->vn_marked will only be zero on the first invocation of 34070Sstevel@tonic-gate * the step function. 34080Sstevel@tonic-gate */ 34090Sstevel@tonic-gate if (vp->vn_marked) { 34100Sstevel@tonic-gate if (vp->vn_sibling != NULL) 34110Sstevel@tonic-gate vp = vp->vn_sibling; 34120Sstevel@tonic-gate else if (vp->vn_parent != NULL) 34130Sstevel@tonic-gate vp = vp->vn_parent; 34140Sstevel@tonic-gate else { 34150Sstevel@tonic-gate /* 34160Sstevel@tonic-gate * We have neither a parent, nor a sibling, and we 34170Sstevel@tonic-gate * have already been visited; we're done. 34180Sstevel@tonic-gate */ 34190Sstevel@tonic-gate return (WALK_DONE); 34200Sstevel@tonic-gate } 34210Sstevel@tonic-gate } 34220Sstevel@tonic-gate 34230Sstevel@tonic-gate /* 34240Sstevel@tonic-gate * Before we visit this node, visit its children. 
34250Sstevel@tonic-gate */ 34260Sstevel@tonic-gate while (vp->vn_children != NULL && !vp->vn_children->vn_marked) 34270Sstevel@tonic-gate vp = vp->vn_children; 34280Sstevel@tonic-gate 34290Sstevel@tonic-gate vp->vn_marked = 1; 34300Sstevel@tonic-gate vw->vw_current = vp; 34310Sstevel@tonic-gate rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata); 34320Sstevel@tonic-gate 34330Sstevel@tonic-gate return (rval); 34340Sstevel@tonic-gate } 34350Sstevel@tonic-gate 34360Sstevel@tonic-gate void 34370Sstevel@tonic-gate vmem_walk_fini(mdb_walk_state_t *wsp) 34380Sstevel@tonic-gate { 34390Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data; 34400Sstevel@tonic-gate vmem_node_t *root = vw->vw_root; 34410Sstevel@tonic-gate int done; 34420Sstevel@tonic-gate 34430Sstevel@tonic-gate if (root == NULL) 34440Sstevel@tonic-gate return; 34450Sstevel@tonic-gate 34460Sstevel@tonic-gate if ((vw->vw_root = root->vn_children) != NULL) 34470Sstevel@tonic-gate vmem_walk_fini(wsp); 34480Sstevel@tonic-gate 34490Sstevel@tonic-gate vw->vw_root = root->vn_sibling; 34500Sstevel@tonic-gate done = (root->vn_sibling == NULL && root->vn_parent == NULL); 34510Sstevel@tonic-gate mdb_free(root, sizeof (vmem_node_t)); 34520Sstevel@tonic-gate 34530Sstevel@tonic-gate if (done) { 34540Sstevel@tonic-gate mdb_free(vw, sizeof (vmem_walk_t)); 34550Sstevel@tonic-gate } else { 34560Sstevel@tonic-gate vmem_walk_fini(wsp); 34570Sstevel@tonic-gate } 34580Sstevel@tonic-gate } 34590Sstevel@tonic-gate 34600Sstevel@tonic-gate typedef struct vmem_seg_walk { 34610Sstevel@tonic-gate uint8_t vsw_type; 34620Sstevel@tonic-gate uintptr_t vsw_start; 34630Sstevel@tonic-gate uintptr_t vsw_current; 34640Sstevel@tonic-gate } vmem_seg_walk_t; 34650Sstevel@tonic-gate 34660Sstevel@tonic-gate /*ARGSUSED*/ 34670Sstevel@tonic-gate int 34680Sstevel@tonic-gate vmem_seg_walk_common_init(mdb_walk_state_t *wsp, uint8_t type, char *name) 34690Sstevel@tonic-gate { 34700Sstevel@tonic-gate vmem_seg_walk_t *vsw; 
34710Sstevel@tonic-gate 34720Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 34730Sstevel@tonic-gate mdb_warn("vmem_%s does not support global walks\n", name); 34740Sstevel@tonic-gate return (WALK_ERR); 34750Sstevel@tonic-gate } 34760Sstevel@tonic-gate 34770Sstevel@tonic-gate wsp->walk_data = vsw = mdb_alloc(sizeof (vmem_seg_walk_t), UM_SLEEP); 34780Sstevel@tonic-gate 34790Sstevel@tonic-gate vsw->vsw_type = type; 34800Sstevel@tonic-gate vsw->vsw_start = wsp->walk_addr + offsetof(vmem_t, vm_seg0); 34810Sstevel@tonic-gate vsw->vsw_current = vsw->vsw_start; 34820Sstevel@tonic-gate 34830Sstevel@tonic-gate return (WALK_NEXT); 34840Sstevel@tonic-gate } 34850Sstevel@tonic-gate 34860Sstevel@tonic-gate /* 34870Sstevel@tonic-gate * vmem segments can't have type 0 (this should be added to vmem_impl.h). 34880Sstevel@tonic-gate */ 34890Sstevel@tonic-gate #define VMEM_NONE 0 34900Sstevel@tonic-gate 34910Sstevel@tonic-gate int 34920Sstevel@tonic-gate vmem_alloc_walk_init(mdb_walk_state_t *wsp) 34930Sstevel@tonic-gate { 34940Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_ALLOC, "alloc")); 34950Sstevel@tonic-gate } 34960Sstevel@tonic-gate 34970Sstevel@tonic-gate int 34980Sstevel@tonic-gate vmem_free_walk_init(mdb_walk_state_t *wsp) 34990Sstevel@tonic-gate { 35000Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_FREE, "free")); 35010Sstevel@tonic-gate } 35020Sstevel@tonic-gate 35030Sstevel@tonic-gate int 35040Sstevel@tonic-gate vmem_span_walk_init(mdb_walk_state_t *wsp) 35050Sstevel@tonic-gate { 35060Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_SPAN, "span")); 35070Sstevel@tonic-gate } 35080Sstevel@tonic-gate 35090Sstevel@tonic-gate int 35100Sstevel@tonic-gate vmem_seg_walk_init(mdb_walk_state_t *wsp) 35110Sstevel@tonic-gate { 35120Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_NONE, "seg")); 35130Sstevel@tonic-gate } 35140Sstevel@tonic-gate 35150Sstevel@tonic-gate int 35160Sstevel@tonic-gate 
vmem_seg_walk_step(mdb_walk_state_t *wsp) 35170Sstevel@tonic-gate { 35180Sstevel@tonic-gate vmem_seg_t seg; 35190Sstevel@tonic-gate vmem_seg_walk_t *vsw = wsp->walk_data; 35200Sstevel@tonic-gate uintptr_t addr = vsw->vsw_current; 35210Sstevel@tonic-gate static size_t seg_size = 0; 35220Sstevel@tonic-gate int rval; 35230Sstevel@tonic-gate 35240Sstevel@tonic-gate if (!seg_size) { 35250Sstevel@tonic-gate if (mdb_readvar(&seg_size, "vmem_seg_size") == -1) { 35260Sstevel@tonic-gate mdb_warn("failed to read 'vmem_seg_size'"); 35270Sstevel@tonic-gate seg_size = sizeof (vmem_seg_t); 35280Sstevel@tonic-gate } 35290Sstevel@tonic-gate } 35300Sstevel@tonic-gate 35310Sstevel@tonic-gate if (seg_size < sizeof (seg)) 35320Sstevel@tonic-gate bzero((caddr_t)&seg + seg_size, sizeof (seg) - seg_size); 35330Sstevel@tonic-gate 35340Sstevel@tonic-gate if (mdb_vread(&seg, seg_size, addr) == -1) { 35350Sstevel@tonic-gate mdb_warn("couldn't read vmem_seg at %p", addr); 35360Sstevel@tonic-gate return (WALK_ERR); 35370Sstevel@tonic-gate } 35380Sstevel@tonic-gate 35390Sstevel@tonic-gate vsw->vsw_current = (uintptr_t)seg.vs_anext; 35400Sstevel@tonic-gate if (vsw->vsw_type != VMEM_NONE && seg.vs_type != vsw->vsw_type) { 35410Sstevel@tonic-gate rval = WALK_NEXT; 35420Sstevel@tonic-gate } else { 35430Sstevel@tonic-gate rval = wsp->walk_callback(addr, &seg, wsp->walk_cbdata); 35440Sstevel@tonic-gate } 35450Sstevel@tonic-gate 35460Sstevel@tonic-gate if (vsw->vsw_current == vsw->vsw_start) 35470Sstevel@tonic-gate return (WALK_DONE); 35480Sstevel@tonic-gate 35490Sstevel@tonic-gate return (rval); 35500Sstevel@tonic-gate } 35510Sstevel@tonic-gate 35520Sstevel@tonic-gate void 35530Sstevel@tonic-gate vmem_seg_walk_fini(mdb_walk_state_t *wsp) 35540Sstevel@tonic-gate { 35550Sstevel@tonic-gate vmem_seg_walk_t *vsw = wsp->walk_data; 35560Sstevel@tonic-gate 35570Sstevel@tonic-gate mdb_free(vsw, sizeof (vmem_seg_walk_t)); 35580Sstevel@tonic-gate } 35590Sstevel@tonic-gate 35600Sstevel@tonic-gate #define 
VMEM_NAMEWIDTH 22 35610Sstevel@tonic-gate 35620Sstevel@tonic-gate int 35630Sstevel@tonic-gate vmem(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 35640Sstevel@tonic-gate { 35650Sstevel@tonic-gate vmem_t v, parent; 35660Sstevel@tonic-gate vmem_kstat_t *vkp = &v.vm_kstat; 35670Sstevel@tonic-gate uintptr_t paddr; 35680Sstevel@tonic-gate int ident = 0; 35690Sstevel@tonic-gate char c[VMEM_NAMEWIDTH]; 35700Sstevel@tonic-gate 35710Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) { 35720Sstevel@tonic-gate if (mdb_walk_dcmd("vmem", "vmem", argc, argv) == -1) { 35730Sstevel@tonic-gate mdb_warn("can't walk vmem"); 35740Sstevel@tonic-gate return (DCMD_ERR); 35750Sstevel@tonic-gate } 35760Sstevel@tonic-gate return (DCMD_OK); 35770Sstevel@tonic-gate } 35780Sstevel@tonic-gate 35790Sstevel@tonic-gate if (DCMD_HDRSPEC(flags)) 35800Sstevel@tonic-gate mdb_printf("%-?s %-*s %10s %12s %9s %5s\n", 35810Sstevel@tonic-gate "ADDR", VMEM_NAMEWIDTH, "NAME", "INUSE", 35820Sstevel@tonic-gate "TOTAL", "SUCCEED", "FAIL"); 35830Sstevel@tonic-gate 35840Sstevel@tonic-gate if (mdb_vread(&v, sizeof (v), addr) == -1) { 35850Sstevel@tonic-gate mdb_warn("couldn't read vmem at %p", addr); 35860Sstevel@tonic-gate return (DCMD_ERR); 35870Sstevel@tonic-gate } 35880Sstevel@tonic-gate 35890Sstevel@tonic-gate for (paddr = (uintptr_t)v.vm_source; paddr != NULL; ident += 2) { 35900Sstevel@tonic-gate if (mdb_vread(&parent, sizeof (parent), paddr) == -1) { 35910Sstevel@tonic-gate mdb_warn("couldn't trace %p's ancestry", addr); 35920Sstevel@tonic-gate ident = 0; 35930Sstevel@tonic-gate break; 35940Sstevel@tonic-gate } 35950Sstevel@tonic-gate paddr = (uintptr_t)parent.vm_source; 35960Sstevel@tonic-gate } 35970Sstevel@tonic-gate 35980Sstevel@tonic-gate (void) mdb_snprintf(c, VMEM_NAMEWIDTH, "%*s%s", ident, "", v.vm_name); 35990Sstevel@tonic-gate 36000Sstevel@tonic-gate mdb_printf("%0?p %-*s %10llu %12llu %9llu %5llu\n", 36010Sstevel@tonic-gate addr, VMEM_NAMEWIDTH, c, 36020Sstevel@tonic-gate 
vkp->vk_mem_inuse.value.ui64, vkp->vk_mem_total.value.ui64, 36030Sstevel@tonic-gate vkp->vk_alloc.value.ui64, vkp->vk_fail.value.ui64); 36040Sstevel@tonic-gate 36050Sstevel@tonic-gate return (DCMD_OK); 36060Sstevel@tonic-gate } 36070Sstevel@tonic-gate 36080Sstevel@tonic-gate void 36090Sstevel@tonic-gate vmem_seg_help(void) 36100Sstevel@tonic-gate { 3611*6712Stomee mdb_printf("%s", 3612*6712Stomee "Display the contents of vmem_seg_ts, with optional filtering.\n\n" 36130Sstevel@tonic-gate "\n" 36140Sstevel@tonic-gate "A vmem_seg_t represents a range of addresses (or arbitrary numbers),\n" 36150Sstevel@tonic-gate "representing a single chunk of data. Only ALLOC segments have debugging\n" 36160Sstevel@tonic-gate "information.\n"); 36170Sstevel@tonic-gate mdb_dec_indent(2); 36180Sstevel@tonic-gate mdb_printf("%<b>OPTIONS%</b>\n"); 36190Sstevel@tonic-gate mdb_inc_indent(2); 36200Sstevel@tonic-gate mdb_printf("%s", 36210Sstevel@tonic-gate " -v Display the full content of the vmem_seg, including its stack trace\n" 36220Sstevel@tonic-gate " -s report the size of the segment, instead of the end address\n" 36230Sstevel@tonic-gate " -c caller\n" 36240Sstevel@tonic-gate " filter out segments without the function/PC in their stack trace\n" 36250Sstevel@tonic-gate " -e earliest\n" 36260Sstevel@tonic-gate " filter out segments timestamped before earliest\n" 36270Sstevel@tonic-gate " -l latest\n" 36280Sstevel@tonic-gate " filter out segments timestamped after latest\n" 36290Sstevel@tonic-gate " -m minsize\n" 36300Sstevel@tonic-gate " filer out segments smaller than minsize\n" 36310Sstevel@tonic-gate " -M maxsize\n" 36320Sstevel@tonic-gate " filer out segments larger than maxsize\n" 36330Sstevel@tonic-gate " -t thread\n" 36340Sstevel@tonic-gate " filter out segments not involving thread\n" 36350Sstevel@tonic-gate " -T type\n" 36360Sstevel@tonic-gate " filter out segments not of type 'type'\n" 36370Sstevel@tonic-gate " type is one of: ALLOC/FREE/SPAN/ROTOR/WALKER\n"); 
36380Sstevel@tonic-gate } 36390Sstevel@tonic-gate 36400Sstevel@tonic-gate /*ARGSUSED*/ 36410Sstevel@tonic-gate int 36420Sstevel@tonic-gate vmem_seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 36430Sstevel@tonic-gate { 36440Sstevel@tonic-gate vmem_seg_t vs; 36450Sstevel@tonic-gate pc_t *stk = vs.vs_stack; 36460Sstevel@tonic-gate uintptr_t sz; 36470Sstevel@tonic-gate uint8_t t; 36480Sstevel@tonic-gate const char *type = NULL; 36490Sstevel@tonic-gate GElf_Sym sym; 36500Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 36510Sstevel@tonic-gate int no_debug; 36520Sstevel@tonic-gate int i; 36530Sstevel@tonic-gate int depth; 36540Sstevel@tonic-gate uintptr_t laddr, haddr; 36550Sstevel@tonic-gate 36560Sstevel@tonic-gate uintptr_t caller = NULL, thread = NULL; 36570Sstevel@tonic-gate uintptr_t minsize = 0, maxsize = 0; 36580Sstevel@tonic-gate 36590Sstevel@tonic-gate hrtime_t earliest = 0, latest = 0; 36600Sstevel@tonic-gate 36610Sstevel@tonic-gate uint_t size = 0; 36620Sstevel@tonic-gate uint_t verbose = 0; 36630Sstevel@tonic-gate 36640Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 36650Sstevel@tonic-gate return (DCMD_USAGE); 36660Sstevel@tonic-gate 36670Sstevel@tonic-gate if (mdb_getopts(argc, argv, 36680Sstevel@tonic-gate 'c', MDB_OPT_UINTPTR, &caller, 36690Sstevel@tonic-gate 'e', MDB_OPT_UINT64, &earliest, 36700Sstevel@tonic-gate 'l', MDB_OPT_UINT64, &latest, 36710Sstevel@tonic-gate 's', MDB_OPT_SETBITS, TRUE, &size, 36720Sstevel@tonic-gate 'm', MDB_OPT_UINTPTR, &minsize, 36730Sstevel@tonic-gate 'M', MDB_OPT_UINTPTR, &maxsize, 36740Sstevel@tonic-gate 't', MDB_OPT_UINTPTR, &thread, 36750Sstevel@tonic-gate 'T', MDB_OPT_STR, &type, 36760Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &verbose, 36770Sstevel@tonic-gate NULL) != argc) 36780Sstevel@tonic-gate return (DCMD_USAGE); 36790Sstevel@tonic-gate 36800Sstevel@tonic-gate if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) { 36810Sstevel@tonic-gate if (verbose) { 36820Sstevel@tonic-gate mdb_printf("%16s %4s 
%16s %16s %16s\n" 36830Sstevel@tonic-gate "%<u>%16s %4s %16s %16s %16s%</u>\n", 36840Sstevel@tonic-gate "ADDR", "TYPE", "START", "END", "SIZE", 36850Sstevel@tonic-gate "", "", "THREAD", "TIMESTAMP", ""); 36860Sstevel@tonic-gate } else { 36870Sstevel@tonic-gate mdb_printf("%?s %4s %?s %?s %s\n", "ADDR", "TYPE", 36880Sstevel@tonic-gate "START", size? "SIZE" : "END", "WHO"); 36890Sstevel@tonic-gate } 36900Sstevel@tonic-gate } 36910Sstevel@tonic-gate 36920Sstevel@tonic-gate if (mdb_vread(&vs, sizeof (vs), addr) == -1) { 36930Sstevel@tonic-gate mdb_warn("couldn't read vmem_seg at %p", addr); 36940Sstevel@tonic-gate return (DCMD_ERR); 36950Sstevel@tonic-gate } 36960Sstevel@tonic-gate 36970Sstevel@tonic-gate if (type != NULL) { 36980Sstevel@tonic-gate if (strcmp(type, "ALLC") == 0 || strcmp(type, "ALLOC") == 0) 36990Sstevel@tonic-gate t = VMEM_ALLOC; 37000Sstevel@tonic-gate else if (strcmp(type, "FREE") == 0) 37010Sstevel@tonic-gate t = VMEM_FREE; 37020Sstevel@tonic-gate else if (strcmp(type, "SPAN") == 0) 37030Sstevel@tonic-gate t = VMEM_SPAN; 37040Sstevel@tonic-gate else if (strcmp(type, "ROTR") == 0 || 37050Sstevel@tonic-gate strcmp(type, "ROTOR") == 0) 37060Sstevel@tonic-gate t = VMEM_ROTOR; 37070Sstevel@tonic-gate else if (strcmp(type, "WLKR") == 0 || 37080Sstevel@tonic-gate strcmp(type, "WALKER") == 0) 37090Sstevel@tonic-gate t = VMEM_WALKER; 37100Sstevel@tonic-gate else { 37110Sstevel@tonic-gate mdb_warn("\"%s\" is not a recognized vmem_seg type\n", 37120Sstevel@tonic-gate type); 37130Sstevel@tonic-gate return (DCMD_ERR); 37140Sstevel@tonic-gate } 37150Sstevel@tonic-gate 37160Sstevel@tonic-gate if (vs.vs_type != t) 37170Sstevel@tonic-gate return (DCMD_OK); 37180Sstevel@tonic-gate } 37190Sstevel@tonic-gate 37200Sstevel@tonic-gate sz = vs.vs_end - vs.vs_start; 37210Sstevel@tonic-gate 37220Sstevel@tonic-gate if (minsize != 0 && sz < minsize) 37230Sstevel@tonic-gate return (DCMD_OK); 37240Sstevel@tonic-gate 37250Sstevel@tonic-gate if (maxsize != 0 && sz > maxsize) 
37260Sstevel@tonic-gate return (DCMD_OK); 37270Sstevel@tonic-gate 37280Sstevel@tonic-gate t = vs.vs_type; 37290Sstevel@tonic-gate depth = vs.vs_depth; 37300Sstevel@tonic-gate 37310Sstevel@tonic-gate /* 37320Sstevel@tonic-gate * debug info, when present, is only accurate for VMEM_ALLOC segments 37330Sstevel@tonic-gate */ 37340Sstevel@tonic-gate no_debug = (t != VMEM_ALLOC) || 37350Sstevel@tonic-gate (depth == 0 || depth > VMEM_STACK_DEPTH); 37360Sstevel@tonic-gate 37370Sstevel@tonic-gate if (no_debug) { 37380Sstevel@tonic-gate if (caller != NULL || thread != NULL || earliest != 0 || 37390Sstevel@tonic-gate latest != 0) 37400Sstevel@tonic-gate return (DCMD_OK); /* not enough info */ 37410Sstevel@tonic-gate } else { 37420Sstevel@tonic-gate if (caller != NULL) { 37430Sstevel@tonic-gate laddr = caller; 37440Sstevel@tonic-gate haddr = caller + sizeof (caller); 37450Sstevel@tonic-gate 37460Sstevel@tonic-gate if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, 37470Sstevel@tonic-gate sizeof (c), &sym) != -1 && 37480Sstevel@tonic-gate caller == (uintptr_t)sym.st_value) { 37490Sstevel@tonic-gate /* 37500Sstevel@tonic-gate * We were provided an exact symbol value; any 37510Sstevel@tonic-gate * address in the function is valid. 
37520Sstevel@tonic-gate */ 37530Sstevel@tonic-gate laddr = (uintptr_t)sym.st_value; 37540Sstevel@tonic-gate haddr = (uintptr_t)sym.st_value + sym.st_size; 37550Sstevel@tonic-gate } 37560Sstevel@tonic-gate 37570Sstevel@tonic-gate for (i = 0; i < depth; i++) 37580Sstevel@tonic-gate if (vs.vs_stack[i] >= laddr && 37590Sstevel@tonic-gate vs.vs_stack[i] < haddr) 37600Sstevel@tonic-gate break; 37610Sstevel@tonic-gate 37620Sstevel@tonic-gate if (i == depth) 37630Sstevel@tonic-gate return (DCMD_OK); 37640Sstevel@tonic-gate } 37650Sstevel@tonic-gate 37660Sstevel@tonic-gate if (thread != NULL && (uintptr_t)vs.vs_thread != thread) 37670Sstevel@tonic-gate return (DCMD_OK); 37680Sstevel@tonic-gate 37690Sstevel@tonic-gate if (earliest != 0 && vs.vs_timestamp < earliest) 37700Sstevel@tonic-gate return (DCMD_OK); 37710Sstevel@tonic-gate 37720Sstevel@tonic-gate if (latest != 0 && vs.vs_timestamp > latest) 37730Sstevel@tonic-gate return (DCMD_OK); 37740Sstevel@tonic-gate } 37750Sstevel@tonic-gate 37760Sstevel@tonic-gate type = (t == VMEM_ALLOC ? "ALLC" : 37770Sstevel@tonic-gate t == VMEM_FREE ? "FREE" : 37780Sstevel@tonic-gate t == VMEM_SPAN ? "SPAN" : 37790Sstevel@tonic-gate t == VMEM_ROTOR ? "ROTR" : 37800Sstevel@tonic-gate t == VMEM_WALKER ? 
"WLKR" : 37810Sstevel@tonic-gate "????"); 37820Sstevel@tonic-gate 37830Sstevel@tonic-gate if (flags & DCMD_PIPE_OUT) { 37840Sstevel@tonic-gate mdb_printf("%#lr\n", addr); 37850Sstevel@tonic-gate return (DCMD_OK); 37860Sstevel@tonic-gate } 37870Sstevel@tonic-gate 37880Sstevel@tonic-gate if (verbose) { 37890Sstevel@tonic-gate mdb_printf("%<b>%16p%</b> %4s %16p %16p %16d\n", 37900Sstevel@tonic-gate addr, type, vs.vs_start, vs.vs_end, sz); 37910Sstevel@tonic-gate 37920Sstevel@tonic-gate if (no_debug) 37930Sstevel@tonic-gate return (DCMD_OK); 37940Sstevel@tonic-gate 37950Sstevel@tonic-gate mdb_printf("%16s %4s %16p %16llx\n", 37960Sstevel@tonic-gate "", "", vs.vs_thread, vs.vs_timestamp); 37970Sstevel@tonic-gate 37980Sstevel@tonic-gate mdb_inc_indent(17); 37990Sstevel@tonic-gate for (i = 0; i < depth; i++) { 38000Sstevel@tonic-gate mdb_printf("%a\n", stk[i]); 38010Sstevel@tonic-gate } 38020Sstevel@tonic-gate mdb_dec_indent(17); 38030Sstevel@tonic-gate mdb_printf("\n"); 38040Sstevel@tonic-gate } else { 38050Sstevel@tonic-gate mdb_printf("%0?p %4s %0?p %0?p", addr, type, 38060Sstevel@tonic-gate vs.vs_start, size? 
sz : vs.vs_end); 38070Sstevel@tonic-gate 38080Sstevel@tonic-gate if (no_debug) { 38090Sstevel@tonic-gate mdb_printf("\n"); 38100Sstevel@tonic-gate return (DCMD_OK); 38110Sstevel@tonic-gate } 38120Sstevel@tonic-gate 38130Sstevel@tonic-gate for (i = 0; i < depth; i++) { 38140Sstevel@tonic-gate if (mdb_lookup_by_addr(stk[i], MDB_SYM_FUZZY, 38150Sstevel@tonic-gate c, sizeof (c), &sym) == -1) 38160Sstevel@tonic-gate continue; 38170Sstevel@tonic-gate if (strncmp(c, "vmem_", 5) == 0) 38180Sstevel@tonic-gate continue; 38190Sstevel@tonic-gate break; 38200Sstevel@tonic-gate } 38210Sstevel@tonic-gate mdb_printf(" %a\n", stk[i]); 38220Sstevel@tonic-gate } 38230Sstevel@tonic-gate return (DCMD_OK); 38240Sstevel@tonic-gate } 38250Sstevel@tonic-gate 38260Sstevel@tonic-gate typedef struct kmalog_data { 38270Sstevel@tonic-gate uintptr_t kma_addr; 38280Sstevel@tonic-gate hrtime_t kma_newest; 38290Sstevel@tonic-gate } kmalog_data_t; 38300Sstevel@tonic-gate 38310Sstevel@tonic-gate /*ARGSUSED*/ 38320Sstevel@tonic-gate static int 38330Sstevel@tonic-gate showbc(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmalog_data_t *kma) 38340Sstevel@tonic-gate { 38350Sstevel@tonic-gate char name[KMEM_CACHE_NAMELEN + 1]; 38360Sstevel@tonic-gate hrtime_t delta; 38370Sstevel@tonic-gate int i, depth; 38380Sstevel@tonic-gate size_t bufsize; 38390Sstevel@tonic-gate 38400Sstevel@tonic-gate if (bcp->bc_timestamp == 0) 38410Sstevel@tonic-gate return (WALK_DONE); 38420Sstevel@tonic-gate 38430Sstevel@tonic-gate if (kma->kma_newest == 0) 38440Sstevel@tonic-gate kma->kma_newest = bcp->bc_timestamp; 38450Sstevel@tonic-gate 38460Sstevel@tonic-gate if (kma->kma_addr) { 38470Sstevel@tonic-gate if (mdb_vread(&bufsize, sizeof (bufsize), 38480Sstevel@tonic-gate (uintptr_t)&bcp->bc_cache->cache_bufsize) == -1) { 38490Sstevel@tonic-gate mdb_warn( 38500Sstevel@tonic-gate "failed to read cache_bufsize for cache at %p", 38510Sstevel@tonic-gate bcp->bc_cache); 38520Sstevel@tonic-gate return (WALK_ERR); 
38530Sstevel@tonic-gate } 38540Sstevel@tonic-gate 38550Sstevel@tonic-gate if (kma->kma_addr < (uintptr_t)bcp->bc_addr || 38560Sstevel@tonic-gate kma->kma_addr >= (uintptr_t)bcp->bc_addr + bufsize) 38570Sstevel@tonic-gate return (WALK_NEXT); 38580Sstevel@tonic-gate } 38590Sstevel@tonic-gate 38600Sstevel@tonic-gate delta = kma->kma_newest - bcp->bc_timestamp; 38610Sstevel@tonic-gate depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH); 38620Sstevel@tonic-gate 38630Sstevel@tonic-gate if (mdb_readstr(name, sizeof (name), (uintptr_t) 38640Sstevel@tonic-gate &bcp->bc_cache->cache_name) <= 0) 38650Sstevel@tonic-gate (void) mdb_snprintf(name, sizeof (name), "%a", bcp->bc_cache); 38660Sstevel@tonic-gate 38670Sstevel@tonic-gate mdb_printf("\nT-%lld.%09lld addr=%p %s\n", 38680Sstevel@tonic-gate delta / NANOSEC, delta % NANOSEC, bcp->bc_addr, name); 38690Sstevel@tonic-gate 38700Sstevel@tonic-gate for (i = 0; i < depth; i++) 38710Sstevel@tonic-gate mdb_printf("\t %a\n", bcp->bc_stack[i]); 38720Sstevel@tonic-gate 38730Sstevel@tonic-gate return (WALK_NEXT); 38740Sstevel@tonic-gate } 38750Sstevel@tonic-gate 38760Sstevel@tonic-gate int 38770Sstevel@tonic-gate kmalog(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 38780Sstevel@tonic-gate { 38790Sstevel@tonic-gate const char *logname = "kmem_transaction_log"; 38800Sstevel@tonic-gate kmalog_data_t kma; 38810Sstevel@tonic-gate 38820Sstevel@tonic-gate if (argc > 1) 38830Sstevel@tonic-gate return (DCMD_USAGE); 38840Sstevel@tonic-gate 38850Sstevel@tonic-gate kma.kma_newest = 0; 38860Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) 38870Sstevel@tonic-gate kma.kma_addr = addr; 38880Sstevel@tonic-gate else 38890Sstevel@tonic-gate kma.kma_addr = NULL; 38900Sstevel@tonic-gate 38910Sstevel@tonic-gate if (argc > 0) { 38920Sstevel@tonic-gate if (argv->a_type != MDB_TYPE_STRING) 38930Sstevel@tonic-gate return (DCMD_USAGE); 38940Sstevel@tonic-gate if (strcmp(argv->a_un.a_str, "fail") == 0) 38950Sstevel@tonic-gate logname = "kmem_failure_log"; 
38960Sstevel@tonic-gate else if (strcmp(argv->a_un.a_str, "slab") == 0) 38970Sstevel@tonic-gate logname = "kmem_slab_log"; 38980Sstevel@tonic-gate else 38990Sstevel@tonic-gate return (DCMD_USAGE); 39000Sstevel@tonic-gate } 39010Sstevel@tonic-gate 39020Sstevel@tonic-gate if (mdb_readvar(&addr, logname) == -1) { 39030Sstevel@tonic-gate mdb_warn("failed to read %s log header pointer"); 39040Sstevel@tonic-gate return (DCMD_ERR); 39050Sstevel@tonic-gate } 39060Sstevel@tonic-gate 39070Sstevel@tonic-gate if (mdb_pwalk("kmem_log", (mdb_walk_cb_t)showbc, &kma, addr) == -1) { 39080Sstevel@tonic-gate mdb_warn("failed to walk kmem log"); 39090Sstevel@tonic-gate return (DCMD_ERR); 39100Sstevel@tonic-gate } 39110Sstevel@tonic-gate 39120Sstevel@tonic-gate return (DCMD_OK); 39130Sstevel@tonic-gate } 39140Sstevel@tonic-gate 39150Sstevel@tonic-gate /* 39160Sstevel@tonic-gate * As the final lure for die-hard crash(1M) users, we provide ::kmausers here. 39170Sstevel@tonic-gate * The first piece is a structure which we use to accumulate kmem_cache_t 39180Sstevel@tonic-gate * addresses of interest. The kmc_add is used as a callback for the kmem_cache 39190Sstevel@tonic-gate * walker; we either add all caches, or ones named explicitly as arguments. 
39200Sstevel@tonic-gate */ 39210Sstevel@tonic-gate 39220Sstevel@tonic-gate typedef struct kmclist { 39230Sstevel@tonic-gate const char *kmc_name; /* Name to match (or NULL) */ 39240Sstevel@tonic-gate uintptr_t *kmc_caches; /* List of kmem_cache_t addrs */ 39250Sstevel@tonic-gate int kmc_nelems; /* Num entries in kmc_caches */ 39260Sstevel@tonic-gate int kmc_size; /* Size of kmc_caches array */ 39270Sstevel@tonic-gate } kmclist_t; 39280Sstevel@tonic-gate 39290Sstevel@tonic-gate static int 39300Sstevel@tonic-gate kmc_add(uintptr_t addr, const kmem_cache_t *cp, kmclist_t *kmc) 39310Sstevel@tonic-gate { 39320Sstevel@tonic-gate void *p; 39330Sstevel@tonic-gate int s; 39340Sstevel@tonic-gate 39350Sstevel@tonic-gate if (kmc->kmc_name == NULL || 39360Sstevel@tonic-gate strcmp(cp->cache_name, kmc->kmc_name) == 0) { 39370Sstevel@tonic-gate /* 39380Sstevel@tonic-gate * If we have a match, grow our array (if necessary), and then 39390Sstevel@tonic-gate * add the virtual address of the matching cache to our list. 39400Sstevel@tonic-gate */ 39410Sstevel@tonic-gate if (kmc->kmc_nelems >= kmc->kmc_size) { 39420Sstevel@tonic-gate s = kmc->kmc_size ? kmc->kmc_size * 2 : 256; 39430Sstevel@tonic-gate p = mdb_alloc(sizeof (uintptr_t) * s, UM_SLEEP | UM_GC); 39440Sstevel@tonic-gate 39450Sstevel@tonic-gate bcopy(kmc->kmc_caches, p, 39460Sstevel@tonic-gate sizeof (uintptr_t) * kmc->kmc_size); 39470Sstevel@tonic-gate 39480Sstevel@tonic-gate kmc->kmc_caches = p; 39490Sstevel@tonic-gate kmc->kmc_size = s; 39500Sstevel@tonic-gate } 39510Sstevel@tonic-gate 39520Sstevel@tonic-gate kmc->kmc_caches[kmc->kmc_nelems++] = addr; 39530Sstevel@tonic-gate return (kmc->kmc_name ? WALK_DONE : WALK_NEXT); 39540Sstevel@tonic-gate } 39550Sstevel@tonic-gate 39560Sstevel@tonic-gate return (WALK_NEXT); 39570Sstevel@tonic-gate } 39580Sstevel@tonic-gate 39590Sstevel@tonic-gate /* 39600Sstevel@tonic-gate * The second piece of ::kmausers is a hash table of allocations. 
typedef struct kmowner {
	struct kmowner *kmo_head;		/* First hash elt in bucket */
	struct kmowner *kmo_next;		/* Next hash elt in chain */
	size_t kmo_signature;			/* Hash table signature */
	uint_t kmo_num;				/* Number of allocations */
	size_t kmo_data_size;			/* Size of each allocation */
	size_t kmo_total_size;			/* Total bytes of allocation */
	int kmo_depth;				/* Depth of stack trace */
	uintptr_t kmo_stack[KMEM_STACK_DEPTH];	/* Stack trace */
} kmowner_t;

typedef struct kmusers {
	uintptr_t kmu_addr;			/* address of interest */
	const kmem_cache_t *kmu_cache;		/* Current kmem cache */
	kmowner_t *kmu_hash;			/* Hash table of owners */
	int kmu_nelems;				/* Number of entries in use */
	int kmu_size;				/* Total number of entries */
} kmusers_t;

/*
 * Record one allocation in the ::kmausers hash table.  The owner key is the
 * (data_size, stack trace) pair; matching owners accumulate 'size' into
 * kmo_total_size and bump kmo_num, otherwise a fresh entry is appended and
 * chained into its bucket.  Note the table doubles as both the entry array
 * (indexed by kmu_nelems) and the bucket-head array (indexed by signature),
 * which is why rehashing rebuilds every kmo_head/kmo_next below.
 */
static void
kmu_add(kmusers_t *kmu, const kmem_bufctl_audit_t *bcp,
    size_t size, size_t data_size)
{
	int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH);
	size_t bucket, signature = data_size;
	kmowner_t *kmo, *kmoend;

	/*
	 * If the hash table is full, double its size and rehash everything.
	 */
	if (kmu->kmu_nelems >= kmu->kmu_size) {
		int s = kmu->kmu_size ? kmu->kmu_size * 2 : 1024;

		kmo = mdb_alloc(sizeof (kmowner_t) * s, UM_SLEEP | UM_GC);
		bcopy(kmu->kmu_hash, kmo, sizeof (kmowner_t) * kmu->kmu_size);
		kmu->kmu_hash = kmo;
		kmu->kmu_size = s;

		/* Clear every bucket head, then re-chain the live entries. */
		kmoend = kmu->kmu_hash + kmu->kmu_size;
		for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++)
			kmo->kmo_head = NULL;

		kmoend = kmu->kmu_hash + kmu->kmu_nelems;
		for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++) {
			/* kmu_size is a power of two, so mask == modulo */
			bucket = kmo->kmo_signature & (kmu->kmu_size - 1);
			kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head;
			kmu->kmu_hash[bucket].kmo_head = kmo;
		}
	}

	/*
	 * Finish computing the hash signature from the stack trace, and then
	 * see if the owner is in the hash table.  If so, update our stats.
	 */
	for (i = 0; i < depth; i++)
		signature += bcp->bc_stack[i];

	bucket = signature & (kmu->kmu_size - 1);

	for (kmo = kmu->kmu_hash[bucket].kmo_head; kmo; kmo = kmo->kmo_next) {
		if (kmo->kmo_signature == signature) {
			/*
			 * OR together the field differences; the owner
			 * matches only if every one of them is zero.
			 */
			size_t difference = 0;

			difference |= kmo->kmo_data_size - data_size;
			difference |= kmo->kmo_depth - depth;

			for (i = 0; i < depth; i++) {
				difference |= kmo->kmo_stack[i] -
				    bcp->bc_stack[i];
			}

			if (difference == 0) {
				kmo->kmo_total_size += size;
				kmo->kmo_num++;
				return;
			}
		}
	}

	/*
	 * If the owner is not yet hashed, grab the next element and fill it
	 * in based on the allocation information.
	 */
	kmo = &kmu->kmu_hash[kmu->kmu_nelems++];
	kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head;
	kmu->kmu_hash[bucket].kmo_head = kmo;

	kmo->kmo_signature = signature;
	kmo->kmo_num = 1;
	kmo->kmo_data_size = data_size;
	kmo->kmo_total_size = size;
	kmo->kmo_depth = depth;

	for (i = 0; i < depth; i++)
		kmo->kmo_stack[i] = bcp->bc_stack[i];
}
40490Sstevel@tonic-gate */ 40500Sstevel@tonic-gate kmo = &kmu->kmu_hash[kmu->kmu_nelems++]; 40510Sstevel@tonic-gate kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head; 40520Sstevel@tonic-gate kmu->kmu_hash[bucket].kmo_head = kmo; 40530Sstevel@tonic-gate 40540Sstevel@tonic-gate kmo->kmo_signature = signature; 40550Sstevel@tonic-gate kmo->kmo_num = 1; 40560Sstevel@tonic-gate kmo->kmo_data_size = data_size; 40570Sstevel@tonic-gate kmo->kmo_total_size = size; 40580Sstevel@tonic-gate kmo->kmo_depth = depth; 40590Sstevel@tonic-gate 40600Sstevel@tonic-gate for (i = 0; i < depth; i++) 40610Sstevel@tonic-gate kmo->kmo_stack[i] = bcp->bc_stack[i]; 40620Sstevel@tonic-gate } 40630Sstevel@tonic-gate 40640Sstevel@tonic-gate /* 40650Sstevel@tonic-gate * When ::kmausers is invoked without the -f flag, we simply update our hash 40660Sstevel@tonic-gate * table with the information from each allocated bufctl. 40670Sstevel@tonic-gate */ 40680Sstevel@tonic-gate /*ARGSUSED*/ 40690Sstevel@tonic-gate static int 40700Sstevel@tonic-gate kmause1(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu) 40710Sstevel@tonic-gate { 40720Sstevel@tonic-gate const kmem_cache_t *cp = kmu->kmu_cache; 40730Sstevel@tonic-gate 40740Sstevel@tonic-gate kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize); 40750Sstevel@tonic-gate return (WALK_NEXT); 40760Sstevel@tonic-gate } 40770Sstevel@tonic-gate 40780Sstevel@tonic-gate /* 40790Sstevel@tonic-gate * When ::kmausers is invoked with the -f flag, we print out the information 40800Sstevel@tonic-gate * for each bufctl as well as updating the hash table. 
40810Sstevel@tonic-gate */ 40820Sstevel@tonic-gate static int 40830Sstevel@tonic-gate kmause2(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu) 40840Sstevel@tonic-gate { 40850Sstevel@tonic-gate int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH); 40860Sstevel@tonic-gate const kmem_cache_t *cp = kmu->kmu_cache; 40870Sstevel@tonic-gate kmem_bufctl_t bufctl; 40880Sstevel@tonic-gate 40890Sstevel@tonic-gate if (kmu->kmu_addr) { 40900Sstevel@tonic-gate if (mdb_vread(&bufctl, sizeof (bufctl), addr) == -1) 40910Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr); 40920Sstevel@tonic-gate else if (kmu->kmu_addr < (uintptr_t)bufctl.bc_addr || 40930Sstevel@tonic-gate kmu->kmu_addr >= (uintptr_t)bufctl.bc_addr + 40940Sstevel@tonic-gate cp->cache_bufsize) 40950Sstevel@tonic-gate return (WALK_NEXT); 40960Sstevel@tonic-gate } 40970Sstevel@tonic-gate 40980Sstevel@tonic-gate mdb_printf("size %d, addr %p, thread %p, cache %s\n", 40990Sstevel@tonic-gate cp->cache_bufsize, addr, bcp->bc_thread, cp->cache_name); 41000Sstevel@tonic-gate 41010Sstevel@tonic-gate for (i = 0; i < depth; i++) 41020Sstevel@tonic-gate mdb_printf("\t %a\n", bcp->bc_stack[i]); 41030Sstevel@tonic-gate 41040Sstevel@tonic-gate kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize); 41050Sstevel@tonic-gate return (WALK_NEXT); 41060Sstevel@tonic-gate } 41070Sstevel@tonic-gate 41080Sstevel@tonic-gate /* 41090Sstevel@tonic-gate * We sort our results by allocation size before printing them. 
41100Sstevel@tonic-gate */ 41110Sstevel@tonic-gate static int 41120Sstevel@tonic-gate kmownercmp(const void *lp, const void *rp) 41130Sstevel@tonic-gate { 41140Sstevel@tonic-gate const kmowner_t *lhs = lp; 41150Sstevel@tonic-gate const kmowner_t *rhs = rp; 41160Sstevel@tonic-gate 41170Sstevel@tonic-gate return (rhs->kmo_total_size - lhs->kmo_total_size); 41180Sstevel@tonic-gate } 41190Sstevel@tonic-gate 41200Sstevel@tonic-gate /* 41210Sstevel@tonic-gate * The main engine of ::kmausers is relatively straightforward: First we 41220Sstevel@tonic-gate * accumulate our list of kmem_cache_t addresses into the kmclist_t. Next we 41230Sstevel@tonic-gate * iterate over the allocated bufctls of each cache in the list. Finally, 41240Sstevel@tonic-gate * we sort and print our results. 41250Sstevel@tonic-gate */ 41260Sstevel@tonic-gate /*ARGSUSED*/ 41270Sstevel@tonic-gate int 41280Sstevel@tonic-gate kmausers(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 41290Sstevel@tonic-gate { 41300Sstevel@tonic-gate int mem_threshold = 8192; /* Minimum # bytes for printing */ 41310Sstevel@tonic-gate int cnt_threshold = 100; /* Minimum # blocks for printing */ 41320Sstevel@tonic-gate int audited_caches = 0; /* Number of KMF_AUDIT caches found */ 41330Sstevel@tonic-gate int do_all_caches = 1; /* Do all caches (no arguments) */ 41340Sstevel@tonic-gate int opt_e = FALSE; /* Include "small" users */ 41350Sstevel@tonic-gate int opt_f = FALSE; /* Print stack traces */ 41360Sstevel@tonic-gate 41370Sstevel@tonic-gate mdb_walk_cb_t callback = (mdb_walk_cb_t)kmause1; 41380Sstevel@tonic-gate kmowner_t *kmo, *kmoend; 41390Sstevel@tonic-gate int i, oelems; 41400Sstevel@tonic-gate 41410Sstevel@tonic-gate kmclist_t kmc; 41420Sstevel@tonic-gate kmusers_t kmu; 41430Sstevel@tonic-gate 41440Sstevel@tonic-gate bzero(&kmc, sizeof (kmc)); 41450Sstevel@tonic-gate bzero(&kmu, sizeof (kmu)); 41460Sstevel@tonic-gate 41470Sstevel@tonic-gate while ((i = mdb_getopts(argc, argv, 41480Sstevel@tonic-gate 
'e', MDB_OPT_SETBITS, TRUE, &opt_e, 41490Sstevel@tonic-gate 'f', MDB_OPT_SETBITS, TRUE, &opt_f, NULL)) != argc) { 41500Sstevel@tonic-gate 41510Sstevel@tonic-gate argv += i; /* skip past options we just processed */ 41520Sstevel@tonic-gate argc -= i; /* adjust argc */ 41530Sstevel@tonic-gate 41540Sstevel@tonic-gate if (argv->a_type != MDB_TYPE_STRING || *argv->a_un.a_str == '-') 41550Sstevel@tonic-gate return (DCMD_USAGE); 41560Sstevel@tonic-gate 41570Sstevel@tonic-gate oelems = kmc.kmc_nelems; 41580Sstevel@tonic-gate kmc.kmc_name = argv->a_un.a_str; 41590Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc); 41600Sstevel@tonic-gate 41610Sstevel@tonic-gate if (kmc.kmc_nelems == oelems) { 41620Sstevel@tonic-gate mdb_warn("unknown kmem cache: %s\n", kmc.kmc_name); 41630Sstevel@tonic-gate return (DCMD_ERR); 41640Sstevel@tonic-gate } 41650Sstevel@tonic-gate 41660Sstevel@tonic-gate do_all_caches = 0; 41670Sstevel@tonic-gate argv++; 41680Sstevel@tonic-gate argc--; 41690Sstevel@tonic-gate } 41700Sstevel@tonic-gate 41710Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 41720Sstevel@tonic-gate opt_f = TRUE; 41730Sstevel@tonic-gate kmu.kmu_addr = addr; 41740Sstevel@tonic-gate } else { 41750Sstevel@tonic-gate kmu.kmu_addr = NULL; 41760Sstevel@tonic-gate } 41770Sstevel@tonic-gate 41780Sstevel@tonic-gate if (opt_e) 41790Sstevel@tonic-gate mem_threshold = cnt_threshold = 0; 41800Sstevel@tonic-gate 41810Sstevel@tonic-gate if (opt_f) 41820Sstevel@tonic-gate callback = (mdb_walk_cb_t)kmause2; 41830Sstevel@tonic-gate 41840Sstevel@tonic-gate if (do_all_caches) { 41850Sstevel@tonic-gate kmc.kmc_name = NULL; /* match all cache names */ 41860Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc); 41870Sstevel@tonic-gate } 41880Sstevel@tonic-gate 41890Sstevel@tonic-gate for (i = 0; i < kmc.kmc_nelems; i++) { 41900Sstevel@tonic-gate uintptr_t cp = kmc.kmc_caches[i]; 41910Sstevel@tonic-gate kmem_cache_t c; 41920Sstevel@tonic-gate 
41930Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), cp) == -1) { 41940Sstevel@tonic-gate mdb_warn("failed to read cache at %p", cp); 41950Sstevel@tonic-gate continue; 41960Sstevel@tonic-gate } 41970Sstevel@tonic-gate 41980Sstevel@tonic-gate if (!(c.cache_flags & KMF_AUDIT)) { 41990Sstevel@tonic-gate if (!do_all_caches) { 42000Sstevel@tonic-gate mdb_warn("KMF_AUDIT is not enabled for %s\n", 42010Sstevel@tonic-gate c.cache_name); 42020Sstevel@tonic-gate } 42030Sstevel@tonic-gate continue; 42040Sstevel@tonic-gate } 42050Sstevel@tonic-gate 42060Sstevel@tonic-gate kmu.kmu_cache = &c; 42070Sstevel@tonic-gate (void) mdb_pwalk("bufctl", callback, &kmu, cp); 42080Sstevel@tonic-gate audited_caches++; 42090Sstevel@tonic-gate } 42100Sstevel@tonic-gate 42110Sstevel@tonic-gate if (audited_caches == 0 && do_all_caches) { 42120Sstevel@tonic-gate mdb_warn("KMF_AUDIT is not enabled for any caches\n"); 42130Sstevel@tonic-gate return (DCMD_ERR); 42140Sstevel@tonic-gate } 42150Sstevel@tonic-gate 42160Sstevel@tonic-gate qsort(kmu.kmu_hash, kmu.kmu_nelems, sizeof (kmowner_t), kmownercmp); 42170Sstevel@tonic-gate kmoend = kmu.kmu_hash + kmu.kmu_nelems; 42180Sstevel@tonic-gate 42190Sstevel@tonic-gate for (kmo = kmu.kmu_hash; kmo < kmoend; kmo++) { 42200Sstevel@tonic-gate if (kmo->kmo_total_size < mem_threshold && 42210Sstevel@tonic-gate kmo->kmo_num < cnt_threshold) 42220Sstevel@tonic-gate continue; 42230Sstevel@tonic-gate mdb_printf("%lu bytes for %u allocations with data size %lu:\n", 42240Sstevel@tonic-gate kmo->kmo_total_size, kmo->kmo_num, kmo->kmo_data_size); 42250Sstevel@tonic-gate for (i = 0; i < kmo->kmo_depth; i++) 42260Sstevel@tonic-gate mdb_printf("\t %a\n", kmo->kmo_stack[i]); 42270Sstevel@tonic-gate } 42280Sstevel@tonic-gate 42290Sstevel@tonic-gate return (DCMD_OK); 42300Sstevel@tonic-gate } 42310Sstevel@tonic-gate 42320Sstevel@tonic-gate void 42330Sstevel@tonic-gate kmausers_help(void) 42340Sstevel@tonic-gate { 42350Sstevel@tonic-gate mdb_printf( 42360Sstevel@tonic-gate 
/*
 * Return the kernel's kmem_ready flag (nonzero once kmem is initialized),
 * or -1 with errno set if it can't be read.
 */
static int
kmem_ready_check(void)
{
	int ready;

	if (mdb_readvar(&ready, "kmem_ready") < 0)
		return (-1); /* errno is set for us */

	return (ready);
}

/*
 * Target state-change callback: invalidate ::findleaks state on every
 * change, and (once only) register the per-cache walkers as soon as kmem
 * reports itself ready.
 */
/*ARGSUSED*/
static void
kmem_statechange_cb(void *arg)
{
	static int been_ready = 0;	/* one-shot latch for walker setup */

	leaky_cleanup(1);	/* state changes invalidate leaky state */

	if (been_ready)
		return;

	if (kmem_ready_check() <= 0)
		return;

	been_ready = 1;
	(void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_init_walkers, NULL);
}

/*
 * Module initialization: manually register the kmem_cache walker and hook
 * the state-change callback so cache walkers get set up once kmem is ready.
 */
void
kmem_init(void)
{
	mdb_walker_t w = {
		"kmem_cache", "walk list of kmem caches", kmem_cache_walk_init,
		list_walk_step, list_walk_fini
	};

	/*
	 * If kmem is ready, we'll need to invoke the kmem_cache walker
	 * immediately.  Walkers in the linkage structure won't be ready until
	 * _mdb_init returns, so we'll need to add this one manually.  If kmem
	 * is ready, we'll use the walker to initialize the caches.  If kmem
	 * isn't ready, we'll register a callback that will allow us to defer
	 * cache walking until it is.
	 */
	if (mdb_add_walker(&w) != 0) {
		mdb_warn("failed to add kmem_cache walker");
		return;
	}

	(void) mdb_callback_add(MDB_CALLBACK_STCHG, kmem_statechange_cb, NULL);
	kmem_statechange_cb(NULL);
}

typedef struct whatthread {
	uintptr_t wt_target;	/* pointer value to search stacks for */
	int wt_verbose;		/* print match location, not just thread */
} whatthread_t;

/*
 * Per-thread callback for ::whatthread: scan the thread's kernel stack,
 * word by word, for wt_target.  In verbose mode every match is printed
 * with its stack location; otherwise the thread address is printed once
 * for the first match.
 */
static int
whatthread_walk_thread(uintptr_t addr, const kthread_t *t, whatthread_t *w)
{
	uintptr_t current, data;

	if (t->t_stkbase == NULL)
		return (WALK_NEXT);

	/*
	 * Warn about swapped out threads, but drive on anyway
	 */
	if (!(t->t_schedflag & TS_LOAD)) {
		mdb_warn("thread %p's stack swapped out\n", addr);
		return (WALK_NEXT);
	}

	/*
	 * Search the thread's stack for the given pointer.  Note that it would
	 * be more efficient to follow ::kgrep's lead and read in page-sized
	 * chunks, but this routine is already fast and simple.
	 */
	for (current = (uintptr_t)t->t_stkbase; current < (uintptr_t)t->t_stk;
	    current += sizeof (uintptr_t)) {
		if (mdb_vread(&data, sizeof (data), current) == -1) {
			mdb_warn("couldn't read thread %p's stack at %p",
			    addr, current);
			return (WALK_ERR);
		}

		if (data == w->wt_target) {
			if (w->wt_verbose) {
				mdb_printf("%p in thread %p's stack%s\n",
				    current, addr, stack_active(t, current));
			} else {
				mdb_printf("%#lr\n", addr);
				return (WALK_NEXT);
			}
		}
	}

	return (WALK_NEXT);
}

/*
 * ::whatthread dcmd (continues beyond this point in the file).
 */
int
whatthread(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	whatthread_t w;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	w.wt_verbose = FALSE;
	w.wt_target = addr;

	if (mdb_getopts(argc, argv,
	    'v', MDB_OPT_SETBITS, TRUE, &w.wt_verbose, NULL) != argc)
		return (DCMD_USAGE);
43640Sstevel@tonic-gate 43650Sstevel@tonic-gate if (mdb_walk("thread", (mdb_walk_cb_t)whatthread_walk_thread, &w) 43660Sstevel@tonic-gate == -1) { 43670Sstevel@tonic-gate mdb_warn("couldn't walk threads"); 43680Sstevel@tonic-gate return (DCMD_ERR); 43690Sstevel@tonic-gate } 43700Sstevel@tonic-gate 43710Sstevel@tonic-gate return (DCMD_OK); 43720Sstevel@tonic-gate } 4373