/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
25*0Sstevel@tonic-gate */ 26*0Sstevel@tonic-gate 27*0Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 28*0Sstevel@tonic-gate 29*0Sstevel@tonic-gate #include <mdb/mdb_param.h> 30*0Sstevel@tonic-gate #include <mdb/mdb_modapi.h> 31*0Sstevel@tonic-gate #include <mdb/mdb_ctf.h> 32*0Sstevel@tonic-gate #include <sys/cpuvar.h> 33*0Sstevel@tonic-gate #include <sys/kmem_impl.h> 34*0Sstevel@tonic-gate #include <sys/vmem_impl.h> 35*0Sstevel@tonic-gate #include <sys/machelf.h> 36*0Sstevel@tonic-gate #include <sys/modctl.h> 37*0Sstevel@tonic-gate #include <sys/kobj.h> 38*0Sstevel@tonic-gate #include <sys/panic.h> 39*0Sstevel@tonic-gate #include <sys/stack.h> 40*0Sstevel@tonic-gate #include <sys/sysmacros.h> 41*0Sstevel@tonic-gate #include <vm/page.h> 42*0Sstevel@tonic-gate 43*0Sstevel@tonic-gate #include "kmem.h" 44*0Sstevel@tonic-gate 45*0Sstevel@tonic-gate #define dprintf(x) if (mdb_debug_level) { \ 46*0Sstevel@tonic-gate mdb_printf("kmem debug: "); \ 47*0Sstevel@tonic-gate /*CSTYLED*/\ 48*0Sstevel@tonic-gate mdb_printf x ;\ 49*0Sstevel@tonic-gate } 50*0Sstevel@tonic-gate 51*0Sstevel@tonic-gate #define KM_ALLOCATED 0x01 52*0Sstevel@tonic-gate #define KM_FREE 0x02 53*0Sstevel@tonic-gate #define KM_BUFCTL 0x04 54*0Sstevel@tonic-gate #define KM_CONSTRUCTED 0x08 /* only constructed free buffers */ 55*0Sstevel@tonic-gate #define KM_HASH 0x10 56*0Sstevel@tonic-gate 57*0Sstevel@tonic-gate static int mdb_debug_level = 0; 58*0Sstevel@tonic-gate 59*0Sstevel@tonic-gate static void *kmem_ready_cbhdl; 60*0Sstevel@tonic-gate 61*0Sstevel@tonic-gate /*ARGSUSED*/ 62*0Sstevel@tonic-gate static int 63*0Sstevel@tonic-gate kmem_init_walkers(uintptr_t addr, const kmem_cache_t *c, void *ignored) 64*0Sstevel@tonic-gate { 65*0Sstevel@tonic-gate mdb_walker_t w; 66*0Sstevel@tonic-gate char descr[64]; 67*0Sstevel@tonic-gate 68*0Sstevel@tonic-gate (void) mdb_snprintf(descr, sizeof (descr), 69*0Sstevel@tonic-gate "walk the %s cache", c->cache_name); 70*0Sstevel@tonic-gate 71*0Sstevel@tonic-gate 
w.walk_name = c->cache_name; 72*0Sstevel@tonic-gate w.walk_descr = descr; 73*0Sstevel@tonic-gate w.walk_init = kmem_walk_init; 74*0Sstevel@tonic-gate w.walk_step = kmem_walk_step; 75*0Sstevel@tonic-gate w.walk_fini = kmem_walk_fini; 76*0Sstevel@tonic-gate w.walk_init_arg = (void *)addr; 77*0Sstevel@tonic-gate 78*0Sstevel@tonic-gate if (mdb_add_walker(&w) == -1) 79*0Sstevel@tonic-gate mdb_warn("failed to add %s walker", c->cache_name); 80*0Sstevel@tonic-gate 81*0Sstevel@tonic-gate return (WALK_NEXT); 82*0Sstevel@tonic-gate } 83*0Sstevel@tonic-gate 84*0Sstevel@tonic-gate /*ARGSUSED*/ 85*0Sstevel@tonic-gate int 86*0Sstevel@tonic-gate kmem_debug(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 87*0Sstevel@tonic-gate { 88*0Sstevel@tonic-gate mdb_debug_level ^= 1; 89*0Sstevel@tonic-gate 90*0Sstevel@tonic-gate mdb_printf("kmem: debugging is now %s\n", 91*0Sstevel@tonic-gate mdb_debug_level ? "on" : "off"); 92*0Sstevel@tonic-gate 93*0Sstevel@tonic-gate return (DCMD_OK); 94*0Sstevel@tonic-gate } 95*0Sstevel@tonic-gate 96*0Sstevel@tonic-gate typedef struct { 97*0Sstevel@tonic-gate uintptr_t kcw_first; 98*0Sstevel@tonic-gate uintptr_t kcw_current; 99*0Sstevel@tonic-gate } kmem_cache_walk_t; 100*0Sstevel@tonic-gate 101*0Sstevel@tonic-gate int 102*0Sstevel@tonic-gate kmem_cache_walk_init(mdb_walk_state_t *wsp) 103*0Sstevel@tonic-gate { 104*0Sstevel@tonic-gate kmem_cache_walk_t *kcw; 105*0Sstevel@tonic-gate kmem_cache_t c; 106*0Sstevel@tonic-gate uintptr_t cp; 107*0Sstevel@tonic-gate GElf_Sym sym; 108*0Sstevel@tonic-gate 109*0Sstevel@tonic-gate if (mdb_lookup_by_name("kmem_null_cache", &sym) == -1) { 110*0Sstevel@tonic-gate mdb_warn("couldn't find kmem_null_cache"); 111*0Sstevel@tonic-gate return (WALK_ERR); 112*0Sstevel@tonic-gate } 113*0Sstevel@tonic-gate 114*0Sstevel@tonic-gate cp = (uintptr_t)sym.st_value; 115*0Sstevel@tonic-gate 116*0Sstevel@tonic-gate if (mdb_vread(&c, sizeof (kmem_cache_t), cp) == -1) { 117*0Sstevel@tonic-gate mdb_warn("couldn't read cache 
at %p", cp); 118*0Sstevel@tonic-gate return (WALK_ERR); 119*0Sstevel@tonic-gate } 120*0Sstevel@tonic-gate 121*0Sstevel@tonic-gate kcw = mdb_alloc(sizeof (kmem_cache_walk_t), UM_SLEEP); 122*0Sstevel@tonic-gate 123*0Sstevel@tonic-gate kcw->kcw_first = cp; 124*0Sstevel@tonic-gate kcw->kcw_current = (uintptr_t)c.cache_next; 125*0Sstevel@tonic-gate wsp->walk_data = kcw; 126*0Sstevel@tonic-gate 127*0Sstevel@tonic-gate return (WALK_NEXT); 128*0Sstevel@tonic-gate } 129*0Sstevel@tonic-gate 130*0Sstevel@tonic-gate int 131*0Sstevel@tonic-gate kmem_cache_walk_step(mdb_walk_state_t *wsp) 132*0Sstevel@tonic-gate { 133*0Sstevel@tonic-gate kmem_cache_walk_t *kcw = wsp->walk_data; 134*0Sstevel@tonic-gate kmem_cache_t c; 135*0Sstevel@tonic-gate int status; 136*0Sstevel@tonic-gate 137*0Sstevel@tonic-gate if (mdb_vread(&c, sizeof (kmem_cache_t), kcw->kcw_current) == -1) { 138*0Sstevel@tonic-gate mdb_warn("couldn't read cache at %p", kcw->kcw_current); 139*0Sstevel@tonic-gate return (WALK_DONE); 140*0Sstevel@tonic-gate } 141*0Sstevel@tonic-gate 142*0Sstevel@tonic-gate status = wsp->walk_callback(kcw->kcw_current, &c, wsp->walk_cbdata); 143*0Sstevel@tonic-gate 144*0Sstevel@tonic-gate if ((kcw->kcw_current = (uintptr_t)c.cache_next) == kcw->kcw_first) 145*0Sstevel@tonic-gate return (WALK_DONE); 146*0Sstevel@tonic-gate 147*0Sstevel@tonic-gate return (status); 148*0Sstevel@tonic-gate } 149*0Sstevel@tonic-gate 150*0Sstevel@tonic-gate void 151*0Sstevel@tonic-gate kmem_cache_walk_fini(mdb_walk_state_t *wsp) 152*0Sstevel@tonic-gate { 153*0Sstevel@tonic-gate kmem_cache_walk_t *kcw = wsp->walk_data; 154*0Sstevel@tonic-gate mdb_free(kcw, sizeof (kmem_cache_walk_t)); 155*0Sstevel@tonic-gate } 156*0Sstevel@tonic-gate 157*0Sstevel@tonic-gate int 158*0Sstevel@tonic-gate kmem_cpu_cache_walk_init(mdb_walk_state_t *wsp) 159*0Sstevel@tonic-gate { 160*0Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 161*0Sstevel@tonic-gate mdb_warn("kmem_cpu_cache doesn't support global walks"); 162*0Sstevel@tonic-gate 
return (WALK_ERR); 163*0Sstevel@tonic-gate } 164*0Sstevel@tonic-gate 165*0Sstevel@tonic-gate if (mdb_layered_walk("cpu", wsp) == -1) { 166*0Sstevel@tonic-gate mdb_warn("couldn't walk 'cpu'"); 167*0Sstevel@tonic-gate return (WALK_ERR); 168*0Sstevel@tonic-gate } 169*0Sstevel@tonic-gate 170*0Sstevel@tonic-gate wsp->walk_data = (void *)wsp->walk_addr; 171*0Sstevel@tonic-gate 172*0Sstevel@tonic-gate return (WALK_NEXT); 173*0Sstevel@tonic-gate } 174*0Sstevel@tonic-gate 175*0Sstevel@tonic-gate int 176*0Sstevel@tonic-gate kmem_cpu_cache_walk_step(mdb_walk_state_t *wsp) 177*0Sstevel@tonic-gate { 178*0Sstevel@tonic-gate uintptr_t caddr = (uintptr_t)wsp->walk_data; 179*0Sstevel@tonic-gate const cpu_t *cpu = wsp->walk_layer; 180*0Sstevel@tonic-gate kmem_cpu_cache_t cc; 181*0Sstevel@tonic-gate 182*0Sstevel@tonic-gate caddr += cpu->cpu_cache_offset; 183*0Sstevel@tonic-gate 184*0Sstevel@tonic-gate if (mdb_vread(&cc, sizeof (kmem_cpu_cache_t), caddr) == -1) { 185*0Sstevel@tonic-gate mdb_warn("couldn't read kmem_cpu_cache at %p", caddr); 186*0Sstevel@tonic-gate return (WALK_ERR); 187*0Sstevel@tonic-gate } 188*0Sstevel@tonic-gate 189*0Sstevel@tonic-gate return (wsp->walk_callback(caddr, &cc, wsp->walk_cbdata)); 190*0Sstevel@tonic-gate } 191*0Sstevel@tonic-gate 192*0Sstevel@tonic-gate int 193*0Sstevel@tonic-gate kmem_slab_walk_init(mdb_walk_state_t *wsp) 194*0Sstevel@tonic-gate { 195*0Sstevel@tonic-gate uintptr_t caddr = wsp->walk_addr; 196*0Sstevel@tonic-gate kmem_cache_t c; 197*0Sstevel@tonic-gate 198*0Sstevel@tonic-gate if (caddr == NULL) { 199*0Sstevel@tonic-gate mdb_warn("kmem_slab doesn't support global walks\n"); 200*0Sstevel@tonic-gate return (WALK_ERR); 201*0Sstevel@tonic-gate } 202*0Sstevel@tonic-gate 203*0Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), caddr) == -1) { 204*0Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache at %p", caddr); 205*0Sstevel@tonic-gate return (WALK_ERR); 206*0Sstevel@tonic-gate } 207*0Sstevel@tonic-gate 208*0Sstevel@tonic-gate wsp->walk_data 
= 209*0Sstevel@tonic-gate (void *)(caddr + offsetof(kmem_cache_t, cache_nullslab)); 210*0Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)c.cache_nullslab.slab_next; 211*0Sstevel@tonic-gate 212*0Sstevel@tonic-gate return (WALK_NEXT); 213*0Sstevel@tonic-gate } 214*0Sstevel@tonic-gate 215*0Sstevel@tonic-gate int 216*0Sstevel@tonic-gate kmem_slab_walk_partial_init(mdb_walk_state_t *wsp) 217*0Sstevel@tonic-gate { 218*0Sstevel@tonic-gate uintptr_t caddr = wsp->walk_addr; 219*0Sstevel@tonic-gate kmem_cache_t c; 220*0Sstevel@tonic-gate 221*0Sstevel@tonic-gate if (caddr == NULL) { 222*0Sstevel@tonic-gate mdb_warn("kmem_slab_partial doesn't support global walks\n"); 223*0Sstevel@tonic-gate return (WALK_ERR); 224*0Sstevel@tonic-gate } 225*0Sstevel@tonic-gate 226*0Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), caddr) == -1) { 227*0Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache at %p", caddr); 228*0Sstevel@tonic-gate return (WALK_ERR); 229*0Sstevel@tonic-gate } 230*0Sstevel@tonic-gate 231*0Sstevel@tonic-gate wsp->walk_data = 232*0Sstevel@tonic-gate (void *)(caddr + offsetof(kmem_cache_t, cache_nullslab)); 233*0Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)c.cache_freelist; 234*0Sstevel@tonic-gate 235*0Sstevel@tonic-gate /* 236*0Sstevel@tonic-gate * Some consumers (umem_walk_step(), in particular) require at 237*0Sstevel@tonic-gate * least one callback if there are any buffers in the cache. So 238*0Sstevel@tonic-gate * if there are *no* partial slabs, report the last full slab, if 239*0Sstevel@tonic-gate * any. 240*0Sstevel@tonic-gate * 241*0Sstevel@tonic-gate * Yes, this is ugly, but it's cleaner than the other possibilities. 
242*0Sstevel@tonic-gate */ 243*0Sstevel@tonic-gate if ((uintptr_t)wsp->walk_data == wsp->walk_addr) 244*0Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)c.cache_nullslab.slab_prev; 245*0Sstevel@tonic-gate 246*0Sstevel@tonic-gate return (WALK_NEXT); 247*0Sstevel@tonic-gate } 248*0Sstevel@tonic-gate 249*0Sstevel@tonic-gate int 250*0Sstevel@tonic-gate kmem_slab_walk_step(mdb_walk_state_t *wsp) 251*0Sstevel@tonic-gate { 252*0Sstevel@tonic-gate kmem_slab_t s; 253*0Sstevel@tonic-gate uintptr_t addr = wsp->walk_addr; 254*0Sstevel@tonic-gate uintptr_t saddr = (uintptr_t)wsp->walk_data; 255*0Sstevel@tonic-gate uintptr_t caddr = saddr - offsetof(kmem_cache_t, cache_nullslab); 256*0Sstevel@tonic-gate 257*0Sstevel@tonic-gate if (addr == saddr) 258*0Sstevel@tonic-gate return (WALK_DONE); 259*0Sstevel@tonic-gate 260*0Sstevel@tonic-gate if (mdb_vread(&s, sizeof (s), addr) == -1) { 261*0Sstevel@tonic-gate mdb_warn("failed to read slab at %p", wsp->walk_addr); 262*0Sstevel@tonic-gate return (WALK_ERR); 263*0Sstevel@tonic-gate } 264*0Sstevel@tonic-gate 265*0Sstevel@tonic-gate if ((uintptr_t)s.slab_cache != caddr) { 266*0Sstevel@tonic-gate mdb_warn("slab %p isn't in cache %p (in cache %p)\n", 267*0Sstevel@tonic-gate addr, caddr, s.slab_cache); 268*0Sstevel@tonic-gate return (WALK_ERR); 269*0Sstevel@tonic-gate } 270*0Sstevel@tonic-gate 271*0Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)s.slab_next; 272*0Sstevel@tonic-gate 273*0Sstevel@tonic-gate return (wsp->walk_callback(addr, &s, wsp->walk_cbdata)); 274*0Sstevel@tonic-gate } 275*0Sstevel@tonic-gate 276*0Sstevel@tonic-gate int 277*0Sstevel@tonic-gate kmem_cache(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv) 278*0Sstevel@tonic-gate { 279*0Sstevel@tonic-gate kmem_cache_t c; 280*0Sstevel@tonic-gate 281*0Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) { 282*0Sstevel@tonic-gate if (mdb_walk_dcmd("kmem_cache", "kmem_cache", ac, argv) == -1) { 283*0Sstevel@tonic-gate mdb_warn("can't walk kmem_cache"); 284*0Sstevel@tonic-gate 
return (DCMD_ERR); 285*0Sstevel@tonic-gate } 286*0Sstevel@tonic-gate return (DCMD_OK); 287*0Sstevel@tonic-gate } 288*0Sstevel@tonic-gate 289*0Sstevel@tonic-gate if (DCMD_HDRSPEC(flags)) 290*0Sstevel@tonic-gate mdb_printf("%-?s %-25s %4s %6s %8s %8s\n", "ADDR", "NAME", 291*0Sstevel@tonic-gate "FLAG", "CFLAG", "BUFSIZE", "BUFTOTL"); 292*0Sstevel@tonic-gate 293*0Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), addr) == -1) { 294*0Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache at %p", addr); 295*0Sstevel@tonic-gate return (DCMD_ERR); 296*0Sstevel@tonic-gate } 297*0Sstevel@tonic-gate 298*0Sstevel@tonic-gate mdb_printf("%0?p %-25s %04x %06x %8ld %8lld\n", addr, c.cache_name, 299*0Sstevel@tonic-gate c.cache_flags, c.cache_cflags, c.cache_bufsize, c.cache_buftotal); 300*0Sstevel@tonic-gate 301*0Sstevel@tonic-gate return (DCMD_OK); 302*0Sstevel@tonic-gate } 303*0Sstevel@tonic-gate 304*0Sstevel@tonic-gate static int 305*0Sstevel@tonic-gate addrcmp(const void *lhs, const void *rhs) 306*0Sstevel@tonic-gate { 307*0Sstevel@tonic-gate uintptr_t p1 = *((uintptr_t *)lhs); 308*0Sstevel@tonic-gate uintptr_t p2 = *((uintptr_t *)rhs); 309*0Sstevel@tonic-gate 310*0Sstevel@tonic-gate if (p1 < p2) 311*0Sstevel@tonic-gate return (-1); 312*0Sstevel@tonic-gate if (p1 > p2) 313*0Sstevel@tonic-gate return (1); 314*0Sstevel@tonic-gate return (0); 315*0Sstevel@tonic-gate } 316*0Sstevel@tonic-gate 317*0Sstevel@tonic-gate static int 318*0Sstevel@tonic-gate bufctlcmp(const kmem_bufctl_audit_t **lhs, const kmem_bufctl_audit_t **rhs) 319*0Sstevel@tonic-gate { 320*0Sstevel@tonic-gate const kmem_bufctl_audit_t *bcp1 = *lhs; 321*0Sstevel@tonic-gate const kmem_bufctl_audit_t *bcp2 = *rhs; 322*0Sstevel@tonic-gate 323*0Sstevel@tonic-gate if (bcp1->bc_timestamp > bcp2->bc_timestamp) 324*0Sstevel@tonic-gate return (-1); 325*0Sstevel@tonic-gate 326*0Sstevel@tonic-gate if (bcp1->bc_timestamp < bcp2->bc_timestamp) 327*0Sstevel@tonic-gate return (1); 328*0Sstevel@tonic-gate 329*0Sstevel@tonic-gate return 
(0); 330*0Sstevel@tonic-gate } 331*0Sstevel@tonic-gate 332*0Sstevel@tonic-gate typedef struct kmem_hash_walk { 333*0Sstevel@tonic-gate uintptr_t *kmhw_table; 334*0Sstevel@tonic-gate size_t kmhw_nelems; 335*0Sstevel@tonic-gate size_t kmhw_pos; 336*0Sstevel@tonic-gate kmem_bufctl_t kmhw_cur; 337*0Sstevel@tonic-gate } kmem_hash_walk_t; 338*0Sstevel@tonic-gate 339*0Sstevel@tonic-gate int 340*0Sstevel@tonic-gate kmem_hash_walk_init(mdb_walk_state_t *wsp) 341*0Sstevel@tonic-gate { 342*0Sstevel@tonic-gate kmem_hash_walk_t *kmhw; 343*0Sstevel@tonic-gate uintptr_t *hash; 344*0Sstevel@tonic-gate kmem_cache_t c; 345*0Sstevel@tonic-gate uintptr_t haddr, addr = wsp->walk_addr; 346*0Sstevel@tonic-gate size_t nelems; 347*0Sstevel@tonic-gate size_t hsize; 348*0Sstevel@tonic-gate 349*0Sstevel@tonic-gate if (addr == NULL) { 350*0Sstevel@tonic-gate mdb_warn("kmem_hash doesn't support global walks\n"); 351*0Sstevel@tonic-gate return (WALK_ERR); 352*0Sstevel@tonic-gate } 353*0Sstevel@tonic-gate 354*0Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), addr) == -1) { 355*0Sstevel@tonic-gate mdb_warn("couldn't read cache at addr %p", addr); 356*0Sstevel@tonic-gate return (WALK_ERR); 357*0Sstevel@tonic-gate } 358*0Sstevel@tonic-gate 359*0Sstevel@tonic-gate if (!(c.cache_flags & KMF_HASH)) { 360*0Sstevel@tonic-gate mdb_warn("cache %p doesn't have a hash table\n", addr); 361*0Sstevel@tonic-gate return (WALK_DONE); /* nothing to do */ 362*0Sstevel@tonic-gate } 363*0Sstevel@tonic-gate 364*0Sstevel@tonic-gate kmhw = mdb_zalloc(sizeof (kmem_hash_walk_t), UM_SLEEP); 365*0Sstevel@tonic-gate kmhw->kmhw_cur.bc_next = NULL; 366*0Sstevel@tonic-gate kmhw->kmhw_pos = 0; 367*0Sstevel@tonic-gate 368*0Sstevel@tonic-gate kmhw->kmhw_nelems = nelems = c.cache_hash_mask + 1; 369*0Sstevel@tonic-gate hsize = nelems * sizeof (uintptr_t); 370*0Sstevel@tonic-gate haddr = (uintptr_t)c.cache_hash_table; 371*0Sstevel@tonic-gate 372*0Sstevel@tonic-gate kmhw->kmhw_table = hash = mdb_alloc(hsize, UM_SLEEP); 
373*0Sstevel@tonic-gate if (mdb_vread(hash, hsize, haddr) == -1) { 374*0Sstevel@tonic-gate mdb_warn("failed to read hash table at %p", haddr); 375*0Sstevel@tonic-gate mdb_free(hash, hsize); 376*0Sstevel@tonic-gate mdb_free(kmhw, sizeof (kmem_hash_walk_t)); 377*0Sstevel@tonic-gate return (WALK_ERR); 378*0Sstevel@tonic-gate } 379*0Sstevel@tonic-gate 380*0Sstevel@tonic-gate wsp->walk_data = kmhw; 381*0Sstevel@tonic-gate 382*0Sstevel@tonic-gate return (WALK_NEXT); 383*0Sstevel@tonic-gate } 384*0Sstevel@tonic-gate 385*0Sstevel@tonic-gate int 386*0Sstevel@tonic-gate kmem_hash_walk_step(mdb_walk_state_t *wsp) 387*0Sstevel@tonic-gate { 388*0Sstevel@tonic-gate kmem_hash_walk_t *kmhw = wsp->walk_data; 389*0Sstevel@tonic-gate uintptr_t addr = NULL; 390*0Sstevel@tonic-gate 391*0Sstevel@tonic-gate if ((addr = (uintptr_t)kmhw->kmhw_cur.bc_next) == NULL) { 392*0Sstevel@tonic-gate while (kmhw->kmhw_pos < kmhw->kmhw_nelems) { 393*0Sstevel@tonic-gate if ((addr = kmhw->kmhw_table[kmhw->kmhw_pos++]) != NULL) 394*0Sstevel@tonic-gate break; 395*0Sstevel@tonic-gate } 396*0Sstevel@tonic-gate } 397*0Sstevel@tonic-gate if (addr == NULL) 398*0Sstevel@tonic-gate return (WALK_DONE); 399*0Sstevel@tonic-gate 400*0Sstevel@tonic-gate if (mdb_vread(&kmhw->kmhw_cur, sizeof (kmem_bufctl_t), addr) == -1) { 401*0Sstevel@tonic-gate mdb_warn("couldn't read kmem_bufctl_t at addr %p", addr); 402*0Sstevel@tonic-gate return (WALK_ERR); 403*0Sstevel@tonic-gate } 404*0Sstevel@tonic-gate 405*0Sstevel@tonic-gate return (wsp->walk_callback(addr, &kmhw->kmhw_cur, wsp->walk_cbdata)); 406*0Sstevel@tonic-gate } 407*0Sstevel@tonic-gate 408*0Sstevel@tonic-gate void 409*0Sstevel@tonic-gate kmem_hash_walk_fini(mdb_walk_state_t *wsp) 410*0Sstevel@tonic-gate { 411*0Sstevel@tonic-gate kmem_hash_walk_t *kmhw = wsp->walk_data; 412*0Sstevel@tonic-gate 413*0Sstevel@tonic-gate if (kmhw == NULL) 414*0Sstevel@tonic-gate return; 415*0Sstevel@tonic-gate 416*0Sstevel@tonic-gate mdb_free(kmhw->kmhw_table, kmhw->kmhw_nelems * sizeof 
(uintptr_t)); 417*0Sstevel@tonic-gate mdb_free(kmhw, sizeof (kmem_hash_walk_t)); 418*0Sstevel@tonic-gate } 419*0Sstevel@tonic-gate 420*0Sstevel@tonic-gate /* 421*0Sstevel@tonic-gate * Find the address of the bufctl structure for the address 'buf' in cache 422*0Sstevel@tonic-gate * 'cp', which is at address caddr, and place it in *out. 423*0Sstevel@tonic-gate */ 424*0Sstevel@tonic-gate static int 425*0Sstevel@tonic-gate kmem_hash_lookup(kmem_cache_t *cp, uintptr_t caddr, void *buf, uintptr_t *out) 426*0Sstevel@tonic-gate { 427*0Sstevel@tonic-gate uintptr_t bucket = (uintptr_t)KMEM_HASH(cp, buf); 428*0Sstevel@tonic-gate kmem_bufctl_t *bcp; 429*0Sstevel@tonic-gate kmem_bufctl_t bc; 430*0Sstevel@tonic-gate 431*0Sstevel@tonic-gate if (mdb_vread(&bcp, sizeof (kmem_bufctl_t *), bucket) == -1) { 432*0Sstevel@tonic-gate mdb_warn("unable to read hash bucket for %p in cache %p", 433*0Sstevel@tonic-gate buf, caddr); 434*0Sstevel@tonic-gate return (-1); 435*0Sstevel@tonic-gate } 436*0Sstevel@tonic-gate 437*0Sstevel@tonic-gate while (bcp != NULL) { 438*0Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (kmem_bufctl_t), 439*0Sstevel@tonic-gate (uintptr_t)bcp) == -1) { 440*0Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", bcp); 441*0Sstevel@tonic-gate return (-1); 442*0Sstevel@tonic-gate } 443*0Sstevel@tonic-gate if (bc.bc_addr == buf) { 444*0Sstevel@tonic-gate *out = (uintptr_t)bcp; 445*0Sstevel@tonic-gate return (0); 446*0Sstevel@tonic-gate } 447*0Sstevel@tonic-gate bcp = bc.bc_next; 448*0Sstevel@tonic-gate } 449*0Sstevel@tonic-gate 450*0Sstevel@tonic-gate mdb_warn("unable to find bufctl for %p in cache %p\n", buf, caddr); 451*0Sstevel@tonic-gate return (-1); 452*0Sstevel@tonic-gate } 453*0Sstevel@tonic-gate 454*0Sstevel@tonic-gate int 455*0Sstevel@tonic-gate kmem_get_magsize(const kmem_cache_t *cp) 456*0Sstevel@tonic-gate { 457*0Sstevel@tonic-gate uintptr_t addr = (uintptr_t)cp->cache_magtype; 458*0Sstevel@tonic-gate GElf_Sym mt_sym; 459*0Sstevel@tonic-gate kmem_magtype_t 
mt; 460*0Sstevel@tonic-gate int res; 461*0Sstevel@tonic-gate 462*0Sstevel@tonic-gate /* 463*0Sstevel@tonic-gate * if cpu 0 has a non-zero magsize, it must be correct. caches 464*0Sstevel@tonic-gate * with KMF_NOMAGAZINE have disabled their magazine layers, so 465*0Sstevel@tonic-gate * it is okay to return 0 for them. 466*0Sstevel@tonic-gate */ 467*0Sstevel@tonic-gate if ((res = cp->cache_cpu[0].cc_magsize) != 0 || 468*0Sstevel@tonic-gate (cp->cache_flags & KMF_NOMAGAZINE)) 469*0Sstevel@tonic-gate return (res); 470*0Sstevel@tonic-gate 471*0Sstevel@tonic-gate if (mdb_lookup_by_name("kmem_magtype", &mt_sym) == -1) { 472*0Sstevel@tonic-gate mdb_warn("unable to read 'kmem_magtype'"); 473*0Sstevel@tonic-gate } else if (addr < mt_sym.st_value || 474*0Sstevel@tonic-gate addr + sizeof (mt) - 1 > mt_sym.st_value + mt_sym.st_size - 1 || 475*0Sstevel@tonic-gate ((addr - mt_sym.st_value) % sizeof (mt)) != 0) { 476*0Sstevel@tonic-gate mdb_warn("cache '%s' has invalid magtype pointer (%p)\n", 477*0Sstevel@tonic-gate cp->cache_name, addr); 478*0Sstevel@tonic-gate return (0); 479*0Sstevel@tonic-gate } 480*0Sstevel@tonic-gate if (mdb_vread(&mt, sizeof (mt), addr) == -1) { 481*0Sstevel@tonic-gate mdb_warn("unable to read magtype at %a", addr); 482*0Sstevel@tonic-gate return (0); 483*0Sstevel@tonic-gate } 484*0Sstevel@tonic-gate return (mt.mt_magsize); 485*0Sstevel@tonic-gate } 486*0Sstevel@tonic-gate 487*0Sstevel@tonic-gate /*ARGSUSED*/ 488*0Sstevel@tonic-gate static int 489*0Sstevel@tonic-gate kmem_estimate_slab(uintptr_t addr, const kmem_slab_t *sp, size_t *est) 490*0Sstevel@tonic-gate { 491*0Sstevel@tonic-gate *est -= (sp->slab_chunks - sp->slab_refcnt); 492*0Sstevel@tonic-gate 493*0Sstevel@tonic-gate return (WALK_NEXT); 494*0Sstevel@tonic-gate } 495*0Sstevel@tonic-gate 496*0Sstevel@tonic-gate /* 497*0Sstevel@tonic-gate * Returns an upper bound on the number of allocated buffers in a given 498*0Sstevel@tonic-gate * cache. 
499*0Sstevel@tonic-gate */ 500*0Sstevel@tonic-gate size_t 501*0Sstevel@tonic-gate kmem_estimate_allocated(uintptr_t addr, const kmem_cache_t *cp) 502*0Sstevel@tonic-gate { 503*0Sstevel@tonic-gate int magsize; 504*0Sstevel@tonic-gate size_t cache_est; 505*0Sstevel@tonic-gate 506*0Sstevel@tonic-gate cache_est = cp->cache_buftotal; 507*0Sstevel@tonic-gate 508*0Sstevel@tonic-gate (void) mdb_pwalk("kmem_slab_partial", 509*0Sstevel@tonic-gate (mdb_walk_cb_t)kmem_estimate_slab, &cache_est, addr); 510*0Sstevel@tonic-gate 511*0Sstevel@tonic-gate if ((magsize = kmem_get_magsize(cp)) != 0) { 512*0Sstevel@tonic-gate size_t mag_est = cp->cache_full.ml_total * magsize; 513*0Sstevel@tonic-gate 514*0Sstevel@tonic-gate if (cache_est >= mag_est) { 515*0Sstevel@tonic-gate cache_est -= mag_est; 516*0Sstevel@tonic-gate } else { 517*0Sstevel@tonic-gate mdb_warn("cache %p's magazine layer holds more buffers " 518*0Sstevel@tonic-gate "than the slab layer.\n", addr); 519*0Sstevel@tonic-gate } 520*0Sstevel@tonic-gate } 521*0Sstevel@tonic-gate return (cache_est); 522*0Sstevel@tonic-gate } 523*0Sstevel@tonic-gate 524*0Sstevel@tonic-gate #define READMAG_ROUNDS(rounds) { \ 525*0Sstevel@tonic-gate if (mdb_vread(mp, magbsize, (uintptr_t)kmp) == -1) { \ 526*0Sstevel@tonic-gate mdb_warn("couldn't read magazine at %p", kmp); \ 527*0Sstevel@tonic-gate goto fail; \ 528*0Sstevel@tonic-gate } \ 529*0Sstevel@tonic-gate for (i = 0; i < rounds; i++) { \ 530*0Sstevel@tonic-gate maglist[magcnt++] = mp->mag_round[i]; \ 531*0Sstevel@tonic-gate if (magcnt == magmax) { \ 532*0Sstevel@tonic-gate mdb_warn("%d magazines exceeds fudge factor\n", \ 533*0Sstevel@tonic-gate magcnt); \ 534*0Sstevel@tonic-gate goto fail; \ 535*0Sstevel@tonic-gate } \ 536*0Sstevel@tonic-gate } \ 537*0Sstevel@tonic-gate } 538*0Sstevel@tonic-gate 539*0Sstevel@tonic-gate int 540*0Sstevel@tonic-gate kmem_read_magazines(kmem_cache_t *cp, uintptr_t addr, int ncpus, 541*0Sstevel@tonic-gate void ***maglistp, size_t *magcntp, size_t *magmaxp, int 
alloc_flags) 542*0Sstevel@tonic-gate { 543*0Sstevel@tonic-gate kmem_magazine_t *kmp, *mp; 544*0Sstevel@tonic-gate void **maglist = NULL; 545*0Sstevel@tonic-gate int i, cpu; 546*0Sstevel@tonic-gate size_t magsize, magmax, magbsize; 547*0Sstevel@tonic-gate size_t magcnt = 0; 548*0Sstevel@tonic-gate 549*0Sstevel@tonic-gate /* 550*0Sstevel@tonic-gate * Read the magtype out of the cache, after verifying the pointer's 551*0Sstevel@tonic-gate * correctness. 552*0Sstevel@tonic-gate */ 553*0Sstevel@tonic-gate magsize = kmem_get_magsize(cp); 554*0Sstevel@tonic-gate if (magsize == 0) 555*0Sstevel@tonic-gate magsize = 1; 556*0Sstevel@tonic-gate 557*0Sstevel@tonic-gate /* 558*0Sstevel@tonic-gate * There are several places where we need to go buffer hunting: 559*0Sstevel@tonic-gate * the per-CPU loaded magazine, the per-CPU spare full magazine, 560*0Sstevel@tonic-gate * and the full magazine list in the depot. 561*0Sstevel@tonic-gate * 562*0Sstevel@tonic-gate * For an upper bound on the number of buffers in the magazine 563*0Sstevel@tonic-gate * layer, we have the number of magazines on the cache_full 564*0Sstevel@tonic-gate * list plus at most two magazines per CPU (the loaded and the 565*0Sstevel@tonic-gate * spare). Toss in 100 magazines as a fudge factor in case this 566*0Sstevel@tonic-gate * is live (the number "100" comes from the same fudge factor in 567*0Sstevel@tonic-gate * crash(1M)). 
568*0Sstevel@tonic-gate */ 569*0Sstevel@tonic-gate magmax = (cp->cache_full.ml_total + 2 * ncpus + 100) * magsize; 570*0Sstevel@tonic-gate magbsize = offsetof(kmem_magazine_t, mag_round[magsize]); 571*0Sstevel@tonic-gate 572*0Sstevel@tonic-gate if (magbsize >= PAGESIZE / 2) { 573*0Sstevel@tonic-gate mdb_warn("magazine size for cache %p unreasonable (%x)\n", 574*0Sstevel@tonic-gate addr, magbsize); 575*0Sstevel@tonic-gate goto fail; 576*0Sstevel@tonic-gate } 577*0Sstevel@tonic-gate 578*0Sstevel@tonic-gate maglist = mdb_alloc(magmax * sizeof (void *), alloc_flags); 579*0Sstevel@tonic-gate mp = mdb_alloc(magbsize, alloc_flags); 580*0Sstevel@tonic-gate if (mp == NULL || maglist == NULL) 581*0Sstevel@tonic-gate goto fail; 582*0Sstevel@tonic-gate 583*0Sstevel@tonic-gate /* 584*0Sstevel@tonic-gate * First up: the magazines in the depot (i.e. on the cache_full list). 585*0Sstevel@tonic-gate */ 586*0Sstevel@tonic-gate for (kmp = cp->cache_full.ml_list; kmp != NULL; ) { 587*0Sstevel@tonic-gate READMAG_ROUNDS(magsize); 588*0Sstevel@tonic-gate kmp = mp->mag_next; 589*0Sstevel@tonic-gate 590*0Sstevel@tonic-gate if (kmp == cp->cache_full.ml_list) 591*0Sstevel@tonic-gate break; /* cache_full list loop detected */ 592*0Sstevel@tonic-gate } 593*0Sstevel@tonic-gate 594*0Sstevel@tonic-gate dprintf(("cache_full list done\n")); 595*0Sstevel@tonic-gate 596*0Sstevel@tonic-gate /* 597*0Sstevel@tonic-gate * Now whip through the CPUs, snagging the loaded magazines 598*0Sstevel@tonic-gate * and full spares. 
599*0Sstevel@tonic-gate */ 600*0Sstevel@tonic-gate for (cpu = 0; cpu < ncpus; cpu++) { 601*0Sstevel@tonic-gate kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu]; 602*0Sstevel@tonic-gate 603*0Sstevel@tonic-gate dprintf(("reading cpu cache %p\n", 604*0Sstevel@tonic-gate (uintptr_t)ccp - (uintptr_t)cp + addr)); 605*0Sstevel@tonic-gate 606*0Sstevel@tonic-gate if (ccp->cc_rounds > 0 && 607*0Sstevel@tonic-gate (kmp = ccp->cc_loaded) != NULL) { 608*0Sstevel@tonic-gate dprintf(("reading %d loaded rounds\n", ccp->cc_rounds)); 609*0Sstevel@tonic-gate READMAG_ROUNDS(ccp->cc_rounds); 610*0Sstevel@tonic-gate } 611*0Sstevel@tonic-gate 612*0Sstevel@tonic-gate if (ccp->cc_prounds > 0 && 613*0Sstevel@tonic-gate (kmp = ccp->cc_ploaded) != NULL) { 614*0Sstevel@tonic-gate dprintf(("reading %d previously loaded rounds\n", 615*0Sstevel@tonic-gate ccp->cc_prounds)); 616*0Sstevel@tonic-gate READMAG_ROUNDS(ccp->cc_prounds); 617*0Sstevel@tonic-gate } 618*0Sstevel@tonic-gate } 619*0Sstevel@tonic-gate 620*0Sstevel@tonic-gate dprintf(("magazine layer: %d buffers\n", magcnt)); 621*0Sstevel@tonic-gate 622*0Sstevel@tonic-gate if (!(alloc_flags & UM_GC)) 623*0Sstevel@tonic-gate mdb_free(mp, magbsize); 624*0Sstevel@tonic-gate 625*0Sstevel@tonic-gate *maglistp = maglist; 626*0Sstevel@tonic-gate *magcntp = magcnt; 627*0Sstevel@tonic-gate *magmaxp = magmax; 628*0Sstevel@tonic-gate 629*0Sstevel@tonic-gate return (WALK_NEXT); 630*0Sstevel@tonic-gate 631*0Sstevel@tonic-gate fail: 632*0Sstevel@tonic-gate if (!(alloc_flags & UM_GC)) { 633*0Sstevel@tonic-gate if (mp) 634*0Sstevel@tonic-gate mdb_free(mp, magbsize); 635*0Sstevel@tonic-gate if (maglist) 636*0Sstevel@tonic-gate mdb_free(maglist, magmax * sizeof (void *)); 637*0Sstevel@tonic-gate } 638*0Sstevel@tonic-gate return (WALK_ERR); 639*0Sstevel@tonic-gate } 640*0Sstevel@tonic-gate 641*0Sstevel@tonic-gate static int 642*0Sstevel@tonic-gate kmem_walk_callback(mdb_walk_state_t *wsp, uintptr_t buf) 643*0Sstevel@tonic-gate { 644*0Sstevel@tonic-gate return 
(wsp->walk_callback(buf, NULL, wsp->walk_cbdata)); 645*0Sstevel@tonic-gate } 646*0Sstevel@tonic-gate 647*0Sstevel@tonic-gate static int 648*0Sstevel@tonic-gate bufctl_walk_callback(kmem_cache_t *cp, mdb_walk_state_t *wsp, uintptr_t buf) 649*0Sstevel@tonic-gate { 650*0Sstevel@tonic-gate kmem_bufctl_audit_t b; 651*0Sstevel@tonic-gate 652*0Sstevel@tonic-gate /* 653*0Sstevel@tonic-gate * if KMF_AUDIT is not set, we know that we're looking at a 654*0Sstevel@tonic-gate * kmem_bufctl_t. 655*0Sstevel@tonic-gate */ 656*0Sstevel@tonic-gate if (!(cp->cache_flags & KMF_AUDIT) || 657*0Sstevel@tonic-gate mdb_vread(&b, sizeof (kmem_bufctl_audit_t), buf) == -1) { 658*0Sstevel@tonic-gate (void) memset(&b, 0, sizeof (b)); 659*0Sstevel@tonic-gate if (mdb_vread(&b, sizeof (kmem_bufctl_t), buf) == -1) { 660*0Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", buf); 661*0Sstevel@tonic-gate return (WALK_ERR); 662*0Sstevel@tonic-gate } 663*0Sstevel@tonic-gate } 664*0Sstevel@tonic-gate 665*0Sstevel@tonic-gate return (wsp->walk_callback(buf, &b, wsp->walk_cbdata)); 666*0Sstevel@tonic-gate } 667*0Sstevel@tonic-gate 668*0Sstevel@tonic-gate typedef struct kmem_walk { 669*0Sstevel@tonic-gate int kmw_type; 670*0Sstevel@tonic-gate 671*0Sstevel@tonic-gate int kmw_addr; /* cache address */ 672*0Sstevel@tonic-gate kmem_cache_t *kmw_cp; 673*0Sstevel@tonic-gate size_t kmw_csize; 674*0Sstevel@tonic-gate 675*0Sstevel@tonic-gate /* 676*0Sstevel@tonic-gate * magazine layer 677*0Sstevel@tonic-gate */ 678*0Sstevel@tonic-gate void **kmw_maglist; 679*0Sstevel@tonic-gate size_t kmw_max; 680*0Sstevel@tonic-gate size_t kmw_count; 681*0Sstevel@tonic-gate size_t kmw_pos; 682*0Sstevel@tonic-gate 683*0Sstevel@tonic-gate /* 684*0Sstevel@tonic-gate * slab layer 685*0Sstevel@tonic-gate */ 686*0Sstevel@tonic-gate char *kmw_valid; /* to keep track of freed buffers */ 687*0Sstevel@tonic-gate char *kmw_ubase; /* buffer for slab data */ 688*0Sstevel@tonic-gate } kmem_walk_t; 689*0Sstevel@tonic-gate 
690*0Sstevel@tonic-gate static int 691*0Sstevel@tonic-gate kmem_walk_init_common(mdb_walk_state_t *wsp, int type) 692*0Sstevel@tonic-gate { 693*0Sstevel@tonic-gate kmem_walk_t *kmw; 694*0Sstevel@tonic-gate int ncpus, csize; 695*0Sstevel@tonic-gate kmem_cache_t *cp; 696*0Sstevel@tonic-gate 697*0Sstevel@tonic-gate size_t magmax, magcnt; 698*0Sstevel@tonic-gate void **maglist = NULL; 699*0Sstevel@tonic-gate uint_t chunksize, slabsize; 700*0Sstevel@tonic-gate int status = WALK_ERR; 701*0Sstevel@tonic-gate uintptr_t addr = wsp->walk_addr; 702*0Sstevel@tonic-gate const char *layered; 703*0Sstevel@tonic-gate 704*0Sstevel@tonic-gate type &= ~KM_HASH; 705*0Sstevel@tonic-gate 706*0Sstevel@tonic-gate if (addr == NULL) { 707*0Sstevel@tonic-gate mdb_warn("kmem walk doesn't support global walks\n"); 708*0Sstevel@tonic-gate return (WALK_ERR); 709*0Sstevel@tonic-gate } 710*0Sstevel@tonic-gate 711*0Sstevel@tonic-gate dprintf(("walking %p\n", addr)); 712*0Sstevel@tonic-gate 713*0Sstevel@tonic-gate /* 714*0Sstevel@tonic-gate * First we need to figure out how many CPUs are configured in the 715*0Sstevel@tonic-gate * system to know how much to slurp out. 
716*0Sstevel@tonic-gate */ 717*0Sstevel@tonic-gate mdb_readvar(&ncpus, "max_ncpus"); 718*0Sstevel@tonic-gate 719*0Sstevel@tonic-gate csize = KMEM_CACHE_SIZE(ncpus); 720*0Sstevel@tonic-gate cp = mdb_alloc(csize, UM_SLEEP); 721*0Sstevel@tonic-gate 722*0Sstevel@tonic-gate if (mdb_vread(cp, csize, addr) == -1) { 723*0Sstevel@tonic-gate mdb_warn("couldn't read cache at addr %p", addr); 724*0Sstevel@tonic-gate goto out2; 725*0Sstevel@tonic-gate } 726*0Sstevel@tonic-gate 727*0Sstevel@tonic-gate dprintf(("buf total is %d\n", cp->cache_buftotal)); 728*0Sstevel@tonic-gate 729*0Sstevel@tonic-gate if (cp->cache_buftotal == 0) { 730*0Sstevel@tonic-gate mdb_free(cp, csize); 731*0Sstevel@tonic-gate return (WALK_DONE); 732*0Sstevel@tonic-gate } 733*0Sstevel@tonic-gate 734*0Sstevel@tonic-gate /* 735*0Sstevel@tonic-gate * If they ask for bufctls, but it's a small-slab cache, 736*0Sstevel@tonic-gate * there is nothing to report. 737*0Sstevel@tonic-gate */ 738*0Sstevel@tonic-gate if ((type & KM_BUFCTL) && !(cp->cache_flags & KMF_HASH)) { 739*0Sstevel@tonic-gate dprintf(("bufctl requested, not KMF_HASH (flags: %p)\n", 740*0Sstevel@tonic-gate cp->cache_flags)); 741*0Sstevel@tonic-gate mdb_free(cp, csize); 742*0Sstevel@tonic-gate return (WALK_DONE); 743*0Sstevel@tonic-gate } 744*0Sstevel@tonic-gate 745*0Sstevel@tonic-gate /* 746*0Sstevel@tonic-gate * If they want constructed buffers, but there's no constructor or 747*0Sstevel@tonic-gate * the cache has DEADBEEF checking enabled, there is nothing to report. 
748*0Sstevel@tonic-gate */ 749*0Sstevel@tonic-gate if ((type & KM_CONSTRUCTED) && (!(type & KM_FREE) || 750*0Sstevel@tonic-gate cp->cache_constructor == NULL || 751*0Sstevel@tonic-gate (cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) == KMF_DEADBEEF)) { 752*0Sstevel@tonic-gate mdb_free(cp, csize); 753*0Sstevel@tonic-gate return (WALK_DONE); 754*0Sstevel@tonic-gate } 755*0Sstevel@tonic-gate 756*0Sstevel@tonic-gate /* 757*0Sstevel@tonic-gate * Read in the contents of the magazine layer 758*0Sstevel@tonic-gate */ 759*0Sstevel@tonic-gate if (kmem_read_magazines(cp, addr, ncpus, &maglist, &magcnt, 760*0Sstevel@tonic-gate &magmax, UM_SLEEP) == WALK_ERR) 761*0Sstevel@tonic-gate goto out2; 762*0Sstevel@tonic-gate 763*0Sstevel@tonic-gate /* 764*0Sstevel@tonic-gate * We have all of the buffers from the magazines; if we are walking 765*0Sstevel@tonic-gate * allocated buffers, sort them so we can bsearch them later. 766*0Sstevel@tonic-gate */ 767*0Sstevel@tonic-gate if (type & KM_ALLOCATED) 768*0Sstevel@tonic-gate qsort(maglist, magcnt, sizeof (void *), addrcmp); 769*0Sstevel@tonic-gate 770*0Sstevel@tonic-gate wsp->walk_data = kmw = mdb_zalloc(sizeof (kmem_walk_t), UM_SLEEP); 771*0Sstevel@tonic-gate 772*0Sstevel@tonic-gate kmw->kmw_type = type; 773*0Sstevel@tonic-gate kmw->kmw_addr = addr; 774*0Sstevel@tonic-gate kmw->kmw_cp = cp; 775*0Sstevel@tonic-gate kmw->kmw_csize = csize; 776*0Sstevel@tonic-gate kmw->kmw_maglist = maglist; 777*0Sstevel@tonic-gate kmw->kmw_max = magmax; 778*0Sstevel@tonic-gate kmw->kmw_count = magcnt; 779*0Sstevel@tonic-gate kmw->kmw_pos = 0; 780*0Sstevel@tonic-gate 781*0Sstevel@tonic-gate /* 782*0Sstevel@tonic-gate * When walking allocated buffers in a KMF_HASH cache, we walk the 783*0Sstevel@tonic-gate * hash table instead of the slab layer. 
784*0Sstevel@tonic-gate */ 785*0Sstevel@tonic-gate if ((cp->cache_flags & KMF_HASH) && (type & KM_ALLOCATED)) { 786*0Sstevel@tonic-gate layered = "kmem_hash"; 787*0Sstevel@tonic-gate 788*0Sstevel@tonic-gate kmw->kmw_type |= KM_HASH; 789*0Sstevel@tonic-gate } else { 790*0Sstevel@tonic-gate /* 791*0Sstevel@tonic-gate * If we are walking freed buffers, we only need the 792*0Sstevel@tonic-gate * magazine layer plus the partially allocated slabs. 793*0Sstevel@tonic-gate * To walk allocated buffers, we need all of the slabs. 794*0Sstevel@tonic-gate */ 795*0Sstevel@tonic-gate if (type & KM_ALLOCATED) 796*0Sstevel@tonic-gate layered = "kmem_slab"; 797*0Sstevel@tonic-gate else 798*0Sstevel@tonic-gate layered = "kmem_slab_partial"; 799*0Sstevel@tonic-gate 800*0Sstevel@tonic-gate /* 801*0Sstevel@tonic-gate * for small-slab caches, we read in the entire slab. For 802*0Sstevel@tonic-gate * freed buffers, we can just walk the freelist. For 803*0Sstevel@tonic-gate * allocated buffers, we use a 'valid' array to track 804*0Sstevel@tonic-gate * the freed buffers. 
805*0Sstevel@tonic-gate */ 806*0Sstevel@tonic-gate if (!(cp->cache_flags & KMF_HASH)) { 807*0Sstevel@tonic-gate chunksize = cp->cache_chunksize; 808*0Sstevel@tonic-gate slabsize = cp->cache_slabsize; 809*0Sstevel@tonic-gate 810*0Sstevel@tonic-gate kmw->kmw_ubase = mdb_alloc(slabsize + 811*0Sstevel@tonic-gate sizeof (kmem_bufctl_t), UM_SLEEP); 812*0Sstevel@tonic-gate 813*0Sstevel@tonic-gate if (type & KM_ALLOCATED) 814*0Sstevel@tonic-gate kmw->kmw_valid = 815*0Sstevel@tonic-gate mdb_alloc(slabsize / chunksize, UM_SLEEP); 816*0Sstevel@tonic-gate } 817*0Sstevel@tonic-gate } 818*0Sstevel@tonic-gate 819*0Sstevel@tonic-gate status = WALK_NEXT; 820*0Sstevel@tonic-gate 821*0Sstevel@tonic-gate if (mdb_layered_walk(layered, wsp) == -1) { 822*0Sstevel@tonic-gate mdb_warn("unable to start layered '%s' walk", layered); 823*0Sstevel@tonic-gate status = WALK_ERR; 824*0Sstevel@tonic-gate } 825*0Sstevel@tonic-gate 826*0Sstevel@tonic-gate out1: 827*0Sstevel@tonic-gate if (status == WALK_ERR) { 828*0Sstevel@tonic-gate if (kmw->kmw_valid) 829*0Sstevel@tonic-gate mdb_free(kmw->kmw_valid, slabsize / chunksize); 830*0Sstevel@tonic-gate 831*0Sstevel@tonic-gate if (kmw->kmw_ubase) 832*0Sstevel@tonic-gate mdb_free(kmw->kmw_ubase, slabsize + 833*0Sstevel@tonic-gate sizeof (kmem_bufctl_t)); 834*0Sstevel@tonic-gate 835*0Sstevel@tonic-gate mdb_free(kmw->kmw_maglist, kmw->kmw_max * sizeof (uintptr_t)); 836*0Sstevel@tonic-gate mdb_free(kmw, sizeof (kmem_walk_t)); 837*0Sstevel@tonic-gate wsp->walk_data = NULL; 838*0Sstevel@tonic-gate } 839*0Sstevel@tonic-gate 840*0Sstevel@tonic-gate out2: 841*0Sstevel@tonic-gate if (status == WALK_ERR) 842*0Sstevel@tonic-gate mdb_free(cp, csize); 843*0Sstevel@tonic-gate 844*0Sstevel@tonic-gate return (status); 845*0Sstevel@tonic-gate } 846*0Sstevel@tonic-gate 847*0Sstevel@tonic-gate int 848*0Sstevel@tonic-gate kmem_walk_step(mdb_walk_state_t *wsp) 849*0Sstevel@tonic-gate { 850*0Sstevel@tonic-gate kmem_walk_t *kmw = wsp->walk_data; 851*0Sstevel@tonic-gate int type 
= kmw->kmw_type;
	kmem_cache_t *cp = kmw->kmw_cp;

	void **maglist = kmw->kmw_maglist;
	int magcnt = kmw->kmw_count;

	uintptr_t chunksize, slabsize;
	uintptr_t addr;
	const kmem_slab_t *sp;
	const kmem_bufctl_t *bcp;
	kmem_bufctl_t bc;

	int chunks;
	char *kbase;
	void *buf;
	int i, ret;

	char *valid, *ubase;

	/*
	 * first, handle the 'kmem_hash' layered walk case
	 */
	if (type & KM_HASH) {
		/*
		 * We have a buffer which has been allocated out of the
		 * global layer. We need to make sure that it's not
		 * actually sitting in a magazine before we report it as
		 * an allocated buffer.
		 */
		buf = ((const kmem_bufctl_t *)wsp->walk_layer)->bc_addr;

		if (magcnt > 0 &&
		    bsearch(&buf, maglist, magcnt, sizeof (void *),
		    addrcmp) != NULL)
			return (WALK_NEXT);

		if (type & KM_BUFCTL)
			return (bufctl_walk_callback(cp, wsp, wsp->walk_addr));

		return (kmem_walk_callback(wsp, (uintptr_t)buf));
	}

	ret = WALK_NEXT;

	addr = kmw->kmw_addr;

	/*
	 * If we're walking freed buffers, report everything in the
	 * magazine layer before processing the first slab.
	 */
	if ((type & KM_FREE) && magcnt != 0) {
		kmw->kmw_count = 0;		/* only do this once */
		for (i = 0; i < magcnt; i++) {
			buf = maglist[i];

			if (type & KM_BUFCTL) {
				uintptr_t out;

				/*
				 * Map the buffer back to its bufctl:  via
				 * the buftag when the cache carries one,
				 * otherwise via a hash lookup in the
				 * target's hash table.
				 */
				if (cp->cache_flags & KMF_BUFTAG) {
					kmem_buftag_t *btp;
					kmem_buftag_t tag;

					/* LINTED - alignment */
					btp = KMEM_BUFTAG(cp, buf);
					if (mdb_vread(&tag, sizeof (tag),
					    (uintptr_t)btp) == -1) {
						mdb_warn("reading buftag for "
						    "%p at %p", buf, btp);
						continue;
					}
					out = (uintptr_t)tag.bt_bufctl;
				} else {
					if (kmem_hash_lookup(cp, addr, buf,
					    &out) == -1)
						continue;
				}
				ret = bufctl_walk_callback(cp, wsp, out);
			} else {
				ret = kmem_walk_callback(wsp, (uintptr_t)buf);
			}

			if (ret != WALK_NEXT)
				return (ret);
		}
	}

	/*
	 * If they want constructed buffers, we're finished, since the
	 * magazine layer holds them all.
	 */
	if (type & KM_CONSTRUCTED)
		return (WALK_DONE);

	/*
	 * Handle the buffers in the current slab
	 */
	chunksize = cp->cache_chunksize;
	slabsize = cp->cache_slabsize;

	sp = wsp->walk_layer;
	chunks = sp->slab_chunks;
	kbase = sp->slab_base;

	dprintf(("kbase is %p\n", kbase));

	if (!(cp->cache_flags & KMF_HASH)) {
		valid = kmw->kmw_valid;
		ubase = kmw->kmw_ubase;

		if (mdb_vread(ubase, chunks * chunksize,
		    (uintptr_t)kbase) == -1) {
			mdb_warn("failed to read slab contents at %p", kbase);
			return (WALK_ERR);
		}

		/*
		 * Set up the valid map as fully allocated -- we'll punch
		 * out the freelist.
		 */
		if (type & KM_ALLOCATED)
			(void) memset(valid, 1, chunks);
	} else {
		valid = NULL;
		ubase = NULL;
	}

	/*
	 * walk the slab's freelist
	 */
	bcp = sp->slab_head;

	dprintf(("refcnt is %d; chunks is %d\n", sp->slab_refcnt, chunks));

	/*
	 * since we could be in the middle of allocating a buffer,
	 * our refcnt could be one higher than it aught.  So we
	 * check one further on the freelist than the count allows.
	 */
	for (i = sp->slab_refcnt; i <= chunks; i++) {
		uint_t ndx;

		dprintf(("bcp is %p\n", bcp));

		if (bcp == NULL) {
			if (i == chunks)
				break;
			mdb_warn(
			    "slab %p in cache %p freelist too short by %d\n",
			    sp, addr, chunks - i);
			break;
		}

		if (cp->cache_flags & KMF_HASH) {
			if (mdb_vread(&bc, sizeof (bc), (uintptr_t)bcp) == -1) {
				mdb_warn("failed to read bufctl ptr at %p",
				    bcp);
				break;
			}
			buf = bc.bc_addr;
		} else {
			/*
			 * Otherwise the buffer is in the slab which
			 * we've read in; we just need to determine
			 * its offset in the slab to find the
			 * kmem_bufctl_t.
			 */
			bc = *((kmem_bufctl_t *)
			    ((uintptr_t)bcp - (uintptr_t)kbase +
			    (uintptr_t)ubase));

			buf = KMEM_BUF(cp, bcp);
		}

		ndx = ((uintptr_t)buf - (uintptr_t)kbase) / chunksize;

		if (ndx > slabsize / cp->cache_bufsize) {
			/*
			 * This is very wrong; we have managed to find
			 * a buffer in the slab which shouldn't
			 * actually be here.  Emit a warning, and
			 * try to continue.
			 */
			mdb_warn("buf %p is out of range for "
			    "slab %p, cache %p\n", buf, sp, addr);
		} else if (type & KM_ALLOCATED) {
			/*
			 * we have found a buffer on the slab's freelist;
			 * clear its entry
			 */
			valid[ndx] = 0;
		} else {
			/*
			 * Report this freed buffer
			 */
			if (type & KM_BUFCTL) {
				ret = bufctl_walk_callback(cp, wsp,
				    (uintptr_t)bcp);
			} else {
				ret = kmem_walk_callback(wsp, (uintptr_t)buf);
			}
			if (ret != WALK_NEXT)
				return (ret);
		}

		bcp = bc.bc_next;
	}

	if (bcp != NULL) {
		dprintf(("slab %p in cache %p freelist too long (%p)\n",
		    sp, addr, bcp));
	}

	/*
	 * If we are walking freed buffers, the loop above handled reporting
	 * them.
	 */
	if (type & KM_FREE)
		return (WALK_NEXT);

	if (type & KM_BUFCTL) {
		mdb_warn("impossible situation: small-slab KM_BUFCTL walk for "
		    "cache %p\n", addr);
		return (WALK_ERR);
	}

	/*
	 * Report allocated buffers, skipping buffers in the magazine layer.
	 * We only get this far for small-slab caches.
	 */
	for (i = 0; ret == WALK_NEXT && i < chunks; i++) {
		buf = (char *)kbase + i * chunksize;

		if (!valid[i])
			continue;			/* on slab freelist */

		if (magcnt > 0 &&
		    bsearch(&buf, maglist, magcnt, sizeof (void *),
		    addrcmp) != NULL)
			continue;			/* in magazine layer */

		ret = kmem_walk_callback(wsp, (uintptr_t)buf);
	}
	return (ret);
}

/*
 * Tear down a kmem walk:  release the magazine list, the slab-layer
 * scratch buffers, and the local copy of the cache.
 */
void
kmem_walk_fini(mdb_walk_state_t *wsp)
{
	kmem_walk_t *kmw = wsp->walk_data;
	uintptr_t chunksize;
	uintptr_t slabsize;
	if (kmw == NULL)
		return;

	if (kmw->kmw_maglist != NULL)
		mdb_free(kmw->kmw_maglist, kmw->kmw_max * sizeof (void *));

	chunksize = kmw->kmw_cp->cache_chunksize;
	slabsize = kmw->kmw_cp->cache_slabsize;

	if (kmw->kmw_valid != NULL)
		mdb_free(kmw->kmw_valid, slabsize / chunksize);
	if (kmw->kmw_ubase != NULL)
		mdb_free(kmw->kmw_ubase, slabsize + sizeof (kmem_bufctl_t));

	mdb_free(kmw->kmw_cp, kmw->kmw_csize);
	mdb_free(kmw, sizeof (kmem_walk_t));
}

/*
 * Per-cache callback used for global walks:  recursively invokes the
 * per-cache walker (named by wsp->walk_data) on each cache.
 */
/*ARGSUSED*/
static int
kmem_walk_all(uintptr_t addr, const kmem_cache_t *c, mdb_walk_state_t *wsp)
{
	/*
	 * Buffers allocated from NOTOUCH caches can also show up as freed
	 * memory in other caches.  This can be a little confusing, so we
	 * don't walk NOTOUCH caches when walking all caches (thereby assuring
	 * that "::walk kmem" and "::walk freemem" yield disjoint output).
	 */
	if (c->cache_cflags & KMC_NOTOUCH)
		return (WALK_NEXT);

	if (mdb_pwalk(wsp->walk_data, wsp->walk_callback,
	    wsp->walk_cbdata, addr) == -1)
		return (WALK_DONE);

	return (WALK_NEXT);
}

/*
 * Dispatch a global walk of `name' over every kmem cache.  Note that this
 * macro deliberately embeds return statements, so it cannot be wrapped in
 * do { } while (0); it must only be used as a complete statement inside a
 * walker init function.
 */
#define	KMEM_WALK_ALL(name, wsp) { \
	wsp->walk_data = (name); \
	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_walk_all, wsp) == -1) \
		return (WALK_ERR); \
	return (WALK_DONE); \
}

int
kmem_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_arg != NULL)
		wsp->walk_addr = (uintptr_t)wsp->walk_arg;

	if (wsp->walk_addr == NULL)
		KMEM_WALK_ALL("kmem", wsp);
	return (kmem_walk_init_common(wsp, KM_ALLOCATED));
}

int
bufctl_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL)
		KMEM_WALK_ALL("bufctl", wsp);
	return (kmem_walk_init_common(wsp, KM_ALLOCATED | KM_BUFCTL));
}

int
freemem_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL)
		KMEM_WALK_ALL("freemem", wsp);
	return (kmem_walk_init_common(wsp, KM_FREE));
}

int
freemem_constructed_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL)
		KMEM_WALK_ALL("freemem_constructed", wsp);
	return (kmem_walk_init_common(wsp, KM_FREE | KM_CONSTRUCTED));
}

int
freectl_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL)
		KMEM_WALK_ALL("freectl", wsp);
	return (kmem_walk_init_common(wsp, KM_FREE | KM_BUFCTL));
}

int
freectl_constructed_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL)
		KMEM_WALK_ALL("freectl_constructed", wsp);
	return (kmem_walk_init_common(wsp,
	    KM_FREE | KM_BUFCTL | KM_CONSTRUCTED));
}

/*
 * State for walking the transaction history of a single bufctl:  we chase
 * bc_lastlog pointers backwards through the transaction log, validating
 * each candidate against the cache/slab/address captured here.
 */
typedef struct bufctl_history_walk {
	void		*bhw_next;	/* next log entry to visit */
	kmem_cache_t	*bhw_cache;	/* cache of the base bufctl */
	kmem_slab_t	*bhw_slab;	/* slab of the base bufctl */
	hrtime_t	bhw_timestamp;	/* timestamp of last reported entry */
} bufctl_history_walk_t;

int
bufctl_history_walk_init(mdb_walk_state_t *wsp)
{
	bufctl_history_walk_t *bhw;
	kmem_bufctl_audit_t bc;
	kmem_bufctl_audit_t bcn;

	if (wsp->walk_addr == NULL) {
		mdb_warn("bufctl_history walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	if (mdb_vread(&bc, sizeof (bc), wsp->walk_addr) == -1) {
		mdb_warn("unable to read bufctl at %p", wsp->walk_addr);
		return (WALK_ERR);
	}

	bhw = mdb_zalloc(sizeof (*bhw), UM_SLEEP);
	bhw->bhw_timestamp = 0;
	bhw->bhw_cache = bc.bc_cache;
	bhw->bhw_slab = bc.bc_slab;

	/*
	 * sometimes the first log entry matches the base bufctl;  in that
	 * case, skip the base bufctl.
	 */
	if (bc.bc_lastlog != NULL &&
	    mdb_vread(&bcn, sizeof (bcn), (uintptr_t)bc.bc_lastlog) != -1 &&
	    bc.bc_addr == bcn.bc_addr &&
	    bc.bc_cache == bcn.bc_cache &&
	    bc.bc_slab == bcn.bc_slab &&
	    bc.bc_timestamp == bcn.bc_timestamp &&
	    bc.bc_thread == bcn.bc_thread)
		bhw->bhw_next = bc.bc_lastlog;
	else
		bhw->bhw_next = (void *)wsp->walk_addr;

	/* walk_addr now carries the buffer address all entries must match */
	wsp->walk_addr = (uintptr_t)bc.bc_addr;
	wsp->walk_data = bhw;

	return (WALK_NEXT);
}

int
bufctl_history_walk_step(mdb_walk_state_t *wsp)
{
	bufctl_history_walk_t *bhw = wsp->walk_data;
	uintptr_t addr = (uintptr_t)bhw->bhw_next;
	uintptr_t baseaddr = wsp->walk_addr;
	kmem_bufctl_audit_t bc;

	if (addr == NULL)
		return (WALK_DONE);

	if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
		mdb_warn("unable to read bufctl at %p", bhw->bhw_next);
		return (WALK_ERR);
	}

	/*
	 * The bufctl is only valid if the address, cache, and slab are
	 * correct.  We also check that the timestamp is decreasing, to
	 * prevent infinite loops.
	 */
	if ((uintptr_t)bc.bc_addr != baseaddr ||
	    bc.bc_cache != bhw->bhw_cache ||
	    bc.bc_slab != bhw->bhw_slab ||
	    (bhw->bhw_timestamp != 0 && bc.bc_timestamp >= bhw->bhw_timestamp))
		return (WALK_DONE);

	bhw->bhw_next = bc.bc_lastlog;
	bhw->bhw_timestamp = bc.bc_timestamp;

	return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata));
}

void
bufctl_history_walk_fini(mdb_walk_state_t *wsp)
{
	bufctl_history_walk_t *bhw = wsp->walk_data;

	mdb_free(bhw, sizeof (*bhw));
}

/*
 * State for walking the kmem transaction log:  a local copy of the log
 * (klw_base) plus an array of pointers to its entries, sorted for
 * presentation (see bufctlcmp).
 */
typedef struct kmem_log_walk {
	kmem_bufctl_audit_t *klw_base;		/* local copy of the log */
	kmem_bufctl_audit_t **klw_sorted;	/* sorted entry pointers */
	kmem_log_header_t klw_lh;		/* copy of the log header */
	size_t klw_size;			/* bytes in klw_base */
	size_t klw_maxndx;			/* entries in klw_sorted */
	size_t klw_ndx;				/* current walk position */
} kmem_log_walk_t;

int
kmem_log_walk_init(mdb_walk_state_t *wsp)
{
	uintptr_t lp = wsp->walk_addr;
	kmem_log_walk_t *klw;
	kmem_log_header_t *lhp;
	int maxndx, i, j, k;

	/*
	 * By default (global walk), walk the kmem_transaction_log.  Otherwise
	 * read the log whose kmem_log_header_t is stored at walk_addr.
	 */
	if (lp == NULL && mdb_readvar(&lp, "kmem_transaction_log") == -1) {
		mdb_warn("failed to read 'kmem_transaction_log'");
		return (WALK_ERR);
	}

	if (lp == NULL) {
		mdb_warn("log is disabled\n");
		return (WALK_ERR);
	}

	klw = mdb_zalloc(sizeof (kmem_log_walk_t), UM_SLEEP);
	lhp = &klw->klw_lh;

	if (mdb_vread(lhp, sizeof (kmem_log_header_t), lp) == -1) {
		mdb_warn("failed to read log header at %p", lp);
		mdb_free(klw, sizeof (kmem_log_walk_t));
		return (WALK_ERR);
	}

	klw->klw_size = lhp->lh_chunksize * lhp->lh_nchunks;
	klw->klw_base = mdb_alloc(klw->klw_size, UM_SLEEP);
	/*
	 * One entry per chunk is reserved (hence the - 1); this mirrors
	 * how the in-kernel log carves its chunks -- TODO confirm against
	 * the kmem log implementation.
	 */
	maxndx = lhp->lh_chunksize / sizeof (kmem_bufctl_audit_t) - 1;

	if (mdb_vread(klw->klw_base, klw->klw_size,
	    (uintptr_t)lhp->lh_base) == -1) {
		mdb_warn("failed to read log at base %p", lhp->lh_base);
		mdb_free(klw->klw_base, klw->klw_size);
		mdb_free(klw, sizeof (kmem_log_walk_t));
		return (WALK_ERR);
	}

	klw->klw_sorted = mdb_alloc(maxndx * lhp->lh_nchunks *
	    sizeof (kmem_bufctl_audit_t *), UM_SLEEP);

	/*
	 * Collect a pointer to every entry in every chunk of our local
	 * copy, then sort them for the walk.
	 */
	for (i = 0, k = 0; i < lhp->lh_nchunks; i++) {
		kmem_bufctl_audit_t *chunk = (kmem_bufctl_audit_t *)
		    ((uintptr_t)klw->klw_base + i * lhp->lh_chunksize);

		for (j = 0; j < maxndx; j++)
			klw->klw_sorted[k++] = &chunk[j];
	}

	qsort(klw->klw_sorted, k, sizeof (kmem_bufctl_audit_t *),
	    (int(*)(const void *, const void *))bufctlcmp);

	klw->klw_maxndx = k;
	wsp->walk_data = klw;

	return (WALK_NEXT);
}

int
kmem_log_walk_step(mdb_walk_state_t *wsp)
{
	kmem_log_walk_t *klw = wsp->walk_data;
	kmem_bufctl_audit_t *bcp;

	if (klw->klw_ndx == klw->klw_maxndx)
		return (WALK_DONE);

	bcp = klw->klw_sorted[klw->klw_ndx++];

	/*
	 * Report the entry's address in the *target's* address space:
	 * translate our local copy's pointer back relative to lh_base.
	 */
	return (wsp->walk_callback((uintptr_t)bcp - (uintptr_t)klw->klw_base +
	    (uintptr_t)klw->klw_lh.lh_base, bcp, wsp->walk_cbdata));
}

void
kmem_log_walk_fini(mdb_walk_state_t *wsp)
{
	kmem_log_walk_t *klw = wsp->walk_data;

	mdb_free(klw->klw_base, klw->klw_size);
	mdb_free(klw->klw_sorted, klw->klw_maxndx *
	    sizeof (kmem_bufctl_audit_t *));
	mdb_free(klw, sizeof (kmem_log_walk_t));
}

/*
 * One record per bufctl whose transaction was performed by the thread of
 * interest (see allocdby_walk_bufctl()).
 */
typedef struct allocdby_bufctl {
	uintptr_t abb_addr;	/* bufctl address in the target */
	hrtime_t abb_ts;	/* transaction timestamp */
} allocdby_bufctl_t;

/*
 * State for the allocdby/freedby walkers:  a growable array of matching
 * bufctls, gathered across all caches and later sorted by timestamp.
 */
typedef struct allocdby_walk {
	const char *abw_walk;		/* "bufctl" or "freectl" */
	uintptr_t abw_thread;		/* thread we are matching on */
	size_t abw_nbufs;		/* records collected */
	size_t abw_size;		/* allocated capacity */
	allocdby_bufctl_t *abw_buf;	/* the record array */
	size_t abw_ndx;			/* current walk position */
} allocdby_walk_t;

/*
 * Per-bufctl callback:  record this bufctl if its transaction was made by
 * the thread being searched for, doubling the record array as needed.
 */
int
allocdby_walk_bufctl(uintptr_t addr, const kmem_bufctl_audit_t *bcp,
    allocdby_walk_t *abw)
{
	if ((uintptr_t)bcp->bc_thread != abw->abw_thread)
		return (WALK_NEXT);

	if (abw->abw_nbufs == abw->abw_size) {
		allocdby_bufctl_t *buf;
		size_t oldsize = sizeof (allocdby_bufctl_t) * abw->abw_size;

		buf = mdb_zalloc(oldsize << 1, UM_SLEEP);

		bcopy(abw->abw_buf, buf, oldsize);
		mdb_free(abw->abw_buf, oldsize);

		abw->abw_size <<= 1;
		abw->abw_buf = buf;
	}

	abw->abw_buf[abw->abw_nbufs].abb_addr = addr;
	abw->abw_buf[abw->abw_nbufs].abb_ts = bcp->bc_timestamp;
	abw->abw_nbufs++;

	return (WALK_NEXT);
}

/*
 * Per-cache callback:  walk this cache's bufctls (or freed bufctls),
 * accumulating matches into the allocdby_walk_t.
 */
/*ARGSUSED*/
int
allocdby_walk_cache(uintptr_t addr, const kmem_cache_t *c, allocdby_walk_t *abw)
{
	if (mdb_pwalk(abw->abw_walk, (mdb_walk_cb_t)allocdby_walk_bufctl,
	    abw, addr) == -1) {
		mdb_warn("couldn't walk bufctl for cache %p", addr);
		return (WALK_DONE);
	}

	return (WALK_NEXT);
}

/*
 * qsort comparator:  newest transactions first (descending timestamp).
 */
static int
allocdby_cmp(const allocdby_bufctl_t *lhs, const allocdby_bufctl_t *rhs)
{
	if (lhs->abb_ts < rhs->abb_ts)
		return (1);
	if (lhs->abb_ts > rhs->abb_ts)
		return (-1);
	return (0);
}

/*
 * Shared init for ::allocdby and ::freedby:  walk_addr names the thread;
 * gather every matching bufctl from every cache, then sort by timestamp.
 */
static int
allocdby_walk_init_common(mdb_walk_state_t *wsp, const char *walk)
{
	allocdby_walk_t *abw;

	if (wsp->walk_addr == NULL) {
		mdb_warn("allocdby walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	abw = mdb_zalloc(sizeof (allocdby_walk_t), UM_SLEEP);

	abw->abw_thread = wsp->walk_addr;
	abw->abw_walk = walk;
	abw->abw_size = 128;	/* something reasonable */
	abw->abw_buf =
	    mdb_zalloc(abw->abw_size * sizeof (allocdby_bufctl_t), UM_SLEEP);

	wsp->walk_data = abw;

	if (mdb_walk("kmem_cache",
	    (mdb_walk_cb_t)allocdby_walk_cache, abw) == -1) {
		mdb_warn("couldn't walk kmem_cache");
		allocdby_walk_fini(wsp);
		return (WALK_ERR);
	}

	qsort(abw->abw_buf, abw->abw_nbufs, sizeof (allocdby_bufctl_t),
	    (int(*)(const void *, const void *))allocdby_cmp);

	return (WALK_NEXT);
}

int
allocdby_walk_init(mdb_walk_state_t *wsp)
{
	return (allocdby_walk_init_common(wsp, "bufctl"));
}

int
freedby_walk_init(mdb_walk_state_t *wsp)
{
	return (allocdby_walk_init_common(wsp, "freectl"));
}

int
allocdby_walk_step(mdb_walk_state_t *wsp)
{
	allocdby_walk_t *abw = wsp->walk_data;
	kmem_bufctl_audit_t bc;
	uintptr_t addr;

	if (abw->abw_ndx == abw->abw_nbufs)
		return
(WALK_DONE); 1507*0Sstevel@tonic-gate 1508*0Sstevel@tonic-gate addr = abw->abw_buf[abw->abw_ndx++].abb_addr; 1509*0Sstevel@tonic-gate 1510*0Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), addr) == -1) { 1511*0Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr); 1512*0Sstevel@tonic-gate return (WALK_DONE); 1513*0Sstevel@tonic-gate } 1514*0Sstevel@tonic-gate 1515*0Sstevel@tonic-gate return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata)); 1516*0Sstevel@tonic-gate } 1517*0Sstevel@tonic-gate 1518*0Sstevel@tonic-gate void 1519*0Sstevel@tonic-gate allocdby_walk_fini(mdb_walk_state_t *wsp) 1520*0Sstevel@tonic-gate { 1521*0Sstevel@tonic-gate allocdby_walk_t *abw = wsp->walk_data; 1522*0Sstevel@tonic-gate 1523*0Sstevel@tonic-gate mdb_free(abw->abw_buf, sizeof (allocdby_bufctl_t) * abw->abw_size); 1524*0Sstevel@tonic-gate mdb_free(abw, sizeof (allocdby_walk_t)); 1525*0Sstevel@tonic-gate } 1526*0Sstevel@tonic-gate 1527*0Sstevel@tonic-gate /*ARGSUSED*/ 1528*0Sstevel@tonic-gate int 1529*0Sstevel@tonic-gate allocdby_walk(uintptr_t addr, const kmem_bufctl_audit_t *bcp, void *ignored) 1530*0Sstevel@tonic-gate { 1531*0Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 1532*0Sstevel@tonic-gate GElf_Sym sym; 1533*0Sstevel@tonic-gate int i; 1534*0Sstevel@tonic-gate 1535*0Sstevel@tonic-gate mdb_printf("%0?p %12llx ", addr, bcp->bc_timestamp); 1536*0Sstevel@tonic-gate for (i = 0; i < bcp->bc_depth; i++) { 1537*0Sstevel@tonic-gate if (mdb_lookup_by_addr(bcp->bc_stack[i], 1538*0Sstevel@tonic-gate MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1) 1539*0Sstevel@tonic-gate continue; 1540*0Sstevel@tonic-gate if (strncmp(c, "kmem_", 5) == 0) 1541*0Sstevel@tonic-gate continue; 1542*0Sstevel@tonic-gate mdb_printf("%s+0x%lx", 1543*0Sstevel@tonic-gate c, bcp->bc_stack[i] - (uintptr_t)sym.st_value); 1544*0Sstevel@tonic-gate break; 1545*0Sstevel@tonic-gate } 1546*0Sstevel@tonic-gate mdb_printf("\n"); 1547*0Sstevel@tonic-gate 1548*0Sstevel@tonic-gate return (WALK_NEXT); 1549*0Sstevel@tonic-gate } 
1550*0Sstevel@tonic-gate 1551*0Sstevel@tonic-gate static int 1552*0Sstevel@tonic-gate allocdby_common(uintptr_t addr, uint_t flags, const char *w) 1553*0Sstevel@tonic-gate { 1554*0Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 1555*0Sstevel@tonic-gate return (DCMD_USAGE); 1556*0Sstevel@tonic-gate 1557*0Sstevel@tonic-gate mdb_printf("%-?s %12s %s\n", "BUFCTL", "TIMESTAMP", "CALLER"); 1558*0Sstevel@tonic-gate 1559*0Sstevel@tonic-gate if (mdb_pwalk(w, (mdb_walk_cb_t)allocdby_walk, NULL, addr) == -1) { 1560*0Sstevel@tonic-gate mdb_warn("can't walk '%s' for %p", w, addr); 1561*0Sstevel@tonic-gate return (DCMD_ERR); 1562*0Sstevel@tonic-gate } 1563*0Sstevel@tonic-gate 1564*0Sstevel@tonic-gate return (DCMD_OK); 1565*0Sstevel@tonic-gate } 1566*0Sstevel@tonic-gate 1567*0Sstevel@tonic-gate /*ARGSUSED*/ 1568*0Sstevel@tonic-gate int 1569*0Sstevel@tonic-gate allocdby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 1570*0Sstevel@tonic-gate { 1571*0Sstevel@tonic-gate return (allocdby_common(addr, flags, "allocdby")); 1572*0Sstevel@tonic-gate } 1573*0Sstevel@tonic-gate 1574*0Sstevel@tonic-gate /*ARGSUSED*/ 1575*0Sstevel@tonic-gate int 1576*0Sstevel@tonic-gate freedby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 1577*0Sstevel@tonic-gate { 1578*0Sstevel@tonic-gate return (allocdby_common(addr, flags, "freedby")); 1579*0Sstevel@tonic-gate } 1580*0Sstevel@tonic-gate 1581*0Sstevel@tonic-gate /* 1582*0Sstevel@tonic-gate * Return a string describing the address in relation to the given thread's 1583*0Sstevel@tonic-gate * stack. 1584*0Sstevel@tonic-gate * 1585*0Sstevel@tonic-gate * - If the thread state is TS_FREE, return " (inactive interrupt thread)". 1586*0Sstevel@tonic-gate * 1587*0Sstevel@tonic-gate * - If the address is above the stack pointer, return an empty string 1588*0Sstevel@tonic-gate * signifying that the address is active. 
1589*0Sstevel@tonic-gate * 1590*0Sstevel@tonic-gate * - If the address is below the stack pointer, and the thread is not on proc, 1591*0Sstevel@tonic-gate * return " (below sp)". 1592*0Sstevel@tonic-gate * 1593*0Sstevel@tonic-gate * - If the address is below the stack pointer, and the thread is on proc, 1594*0Sstevel@tonic-gate * return " (possibly below sp)". Depending on context, we may or may not 1595*0Sstevel@tonic-gate * have an accurate t_sp. 1596*0Sstevel@tonic-gate */ 1597*0Sstevel@tonic-gate static const char * 1598*0Sstevel@tonic-gate stack_active(const kthread_t *t, uintptr_t addr) 1599*0Sstevel@tonic-gate { 1600*0Sstevel@tonic-gate uintptr_t panicstk; 1601*0Sstevel@tonic-gate GElf_Sym sym; 1602*0Sstevel@tonic-gate 1603*0Sstevel@tonic-gate if (t->t_state == TS_FREE) 1604*0Sstevel@tonic-gate return (" (inactive interrupt thread)"); 1605*0Sstevel@tonic-gate 1606*0Sstevel@tonic-gate /* 1607*0Sstevel@tonic-gate * Check to see if we're on the panic stack. If so, ignore t_sp, as it 1608*0Sstevel@tonic-gate * no longer relates to the thread's real stack. 
1609*0Sstevel@tonic-gate */ 1610*0Sstevel@tonic-gate if (mdb_lookup_by_name("panic_stack", &sym) == 0) { 1611*0Sstevel@tonic-gate panicstk = (uintptr_t)sym.st_value; 1612*0Sstevel@tonic-gate 1613*0Sstevel@tonic-gate if (t->t_sp >= panicstk && t->t_sp < panicstk + PANICSTKSIZE) 1614*0Sstevel@tonic-gate return (""); 1615*0Sstevel@tonic-gate } 1616*0Sstevel@tonic-gate 1617*0Sstevel@tonic-gate if (addr >= t->t_sp + STACK_BIAS) 1618*0Sstevel@tonic-gate return (""); 1619*0Sstevel@tonic-gate 1620*0Sstevel@tonic-gate if (t->t_state == TS_ONPROC) 1621*0Sstevel@tonic-gate return (" (possibly below sp)"); 1622*0Sstevel@tonic-gate 1623*0Sstevel@tonic-gate return (" (below sp)"); 1624*0Sstevel@tonic-gate } 1625*0Sstevel@tonic-gate 1626*0Sstevel@tonic-gate typedef struct whatis { 1627*0Sstevel@tonic-gate uintptr_t w_addr; 1628*0Sstevel@tonic-gate const kmem_cache_t *w_cache; 1629*0Sstevel@tonic-gate const vmem_t *w_vmem; 1630*0Sstevel@tonic-gate size_t w_slab_align; 1631*0Sstevel@tonic-gate int w_slab_found; 1632*0Sstevel@tonic-gate int w_found; 1633*0Sstevel@tonic-gate int w_kmem_lite_count; 1634*0Sstevel@tonic-gate uint_t w_verbose; 1635*0Sstevel@tonic-gate uint_t w_freemem; 1636*0Sstevel@tonic-gate uint_t w_all; 1637*0Sstevel@tonic-gate uint_t w_bufctl; 1638*0Sstevel@tonic-gate uint_t w_idspace; 1639*0Sstevel@tonic-gate } whatis_t; 1640*0Sstevel@tonic-gate 1641*0Sstevel@tonic-gate static void 1642*0Sstevel@tonic-gate whatis_print_kmem(uintptr_t addr, uintptr_t baddr, whatis_t *w) 1643*0Sstevel@tonic-gate { 1644*0Sstevel@tonic-gate /* LINTED pointer cast may result in improper alignment */ 1645*0Sstevel@tonic-gate uintptr_t btaddr = (uintptr_t)KMEM_BUFTAG(w->w_cache, addr); 1646*0Sstevel@tonic-gate intptr_t stat; 1647*0Sstevel@tonic-gate int count = 0; 1648*0Sstevel@tonic-gate int i; 1649*0Sstevel@tonic-gate pc_t callers[16]; 1650*0Sstevel@tonic-gate 1651*0Sstevel@tonic-gate if (w->w_cache->cache_flags & KMF_REDZONE) { 1652*0Sstevel@tonic-gate kmem_buftag_t bt; 
1653*0Sstevel@tonic-gate 1654*0Sstevel@tonic-gate if (mdb_vread(&bt, sizeof (bt), btaddr) == -1) 1655*0Sstevel@tonic-gate goto done; 1656*0Sstevel@tonic-gate 1657*0Sstevel@tonic-gate stat = (intptr_t)bt.bt_bufctl ^ bt.bt_bxstat; 1658*0Sstevel@tonic-gate 1659*0Sstevel@tonic-gate if (stat != KMEM_BUFTAG_ALLOC && stat != KMEM_BUFTAG_FREE) 1660*0Sstevel@tonic-gate goto done; 1661*0Sstevel@tonic-gate 1662*0Sstevel@tonic-gate /* 1663*0Sstevel@tonic-gate * provide the bufctl ptr if it has useful information 1664*0Sstevel@tonic-gate */ 1665*0Sstevel@tonic-gate if (baddr == 0 && (w->w_cache->cache_flags & KMF_AUDIT)) 1666*0Sstevel@tonic-gate baddr = (uintptr_t)bt.bt_bufctl; 1667*0Sstevel@tonic-gate 1668*0Sstevel@tonic-gate if (w->w_cache->cache_flags & KMF_LITE) { 1669*0Sstevel@tonic-gate count = w->w_kmem_lite_count; 1670*0Sstevel@tonic-gate 1671*0Sstevel@tonic-gate if (count * sizeof (pc_t) > sizeof (callers)) 1672*0Sstevel@tonic-gate count = 0; 1673*0Sstevel@tonic-gate 1674*0Sstevel@tonic-gate if (count > 0 && 1675*0Sstevel@tonic-gate mdb_vread(callers, count * sizeof (pc_t), 1676*0Sstevel@tonic-gate btaddr + 1677*0Sstevel@tonic-gate offsetof(kmem_buftag_lite_t, bt_history)) == -1) 1678*0Sstevel@tonic-gate count = 0; 1679*0Sstevel@tonic-gate 1680*0Sstevel@tonic-gate /* 1681*0Sstevel@tonic-gate * skip unused callers 1682*0Sstevel@tonic-gate */ 1683*0Sstevel@tonic-gate while (count > 0 && callers[count - 1] == 1684*0Sstevel@tonic-gate (pc_t)KMEM_UNINITIALIZED_PATTERN) 1685*0Sstevel@tonic-gate count--; 1686*0Sstevel@tonic-gate } 1687*0Sstevel@tonic-gate } 1688*0Sstevel@tonic-gate 1689*0Sstevel@tonic-gate done: 1690*0Sstevel@tonic-gate if (baddr == 0) 1691*0Sstevel@tonic-gate mdb_printf("%p is %p+%p, %s from %s\n", 1692*0Sstevel@tonic-gate w->w_addr, addr, w->w_addr - addr, 1693*0Sstevel@tonic-gate w->w_freemem == FALSE ? 
"allocated" : "freed", 1694*0Sstevel@tonic-gate w->w_cache->cache_name); 1695*0Sstevel@tonic-gate else 1696*0Sstevel@tonic-gate mdb_printf("%p is %p+%p, bufctl %p %s from %s\n", 1697*0Sstevel@tonic-gate w->w_addr, addr, w->w_addr - addr, baddr, 1698*0Sstevel@tonic-gate w->w_freemem == FALSE ? "allocated" : "freed", 1699*0Sstevel@tonic-gate w->w_cache->cache_name); 1700*0Sstevel@tonic-gate 1701*0Sstevel@tonic-gate if (count > 0) { 1702*0Sstevel@tonic-gate mdb_inc_indent(8); 1703*0Sstevel@tonic-gate mdb_printf("recent caller%s: %a%s", (count != 1)? "s":"", 1704*0Sstevel@tonic-gate callers[0], (count != 1)? ", ":"\n"); 1705*0Sstevel@tonic-gate for (i = 1; i < count; i++) 1706*0Sstevel@tonic-gate mdb_printf("%a%s", callers[i], 1707*0Sstevel@tonic-gate (i + 1 < count)? ", ":"\n"); 1708*0Sstevel@tonic-gate mdb_dec_indent(8); 1709*0Sstevel@tonic-gate } 1710*0Sstevel@tonic-gate } 1711*0Sstevel@tonic-gate 1712*0Sstevel@tonic-gate /*ARGSUSED*/ 1713*0Sstevel@tonic-gate static int 1714*0Sstevel@tonic-gate whatis_walk_kmem(uintptr_t addr, void *ignored, whatis_t *w) 1715*0Sstevel@tonic-gate { 1716*0Sstevel@tonic-gate if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize) 1717*0Sstevel@tonic-gate return (WALK_NEXT); 1718*0Sstevel@tonic-gate 1719*0Sstevel@tonic-gate whatis_print_kmem(addr, 0, w); 1720*0Sstevel@tonic-gate w->w_found++; 1721*0Sstevel@tonic-gate return (w->w_all == TRUE ? 
WALK_NEXT : WALK_DONE); 1722*0Sstevel@tonic-gate } 1723*0Sstevel@tonic-gate 1724*0Sstevel@tonic-gate static int 1725*0Sstevel@tonic-gate whatis_walk_seg(uintptr_t addr, const vmem_seg_t *vs, whatis_t *w) 1726*0Sstevel@tonic-gate { 1727*0Sstevel@tonic-gate if (w->w_addr < vs->vs_start || w->w_addr >= vs->vs_end) 1728*0Sstevel@tonic-gate return (WALK_NEXT); 1729*0Sstevel@tonic-gate 1730*0Sstevel@tonic-gate mdb_printf("%p is %p+%p ", w->w_addr, 1731*0Sstevel@tonic-gate vs->vs_start, w->w_addr - vs->vs_start); 1732*0Sstevel@tonic-gate 1733*0Sstevel@tonic-gate /* 1734*0Sstevel@tonic-gate * Always provide the vmem_seg pointer if it has a stack trace. 1735*0Sstevel@tonic-gate */ 1736*0Sstevel@tonic-gate if (w->w_bufctl == TRUE || 1737*0Sstevel@tonic-gate (vs->vs_type == VMEM_ALLOC && vs->vs_depth != 0)) { 1738*0Sstevel@tonic-gate mdb_printf("(vmem_seg %p) ", addr); 1739*0Sstevel@tonic-gate } 1740*0Sstevel@tonic-gate 1741*0Sstevel@tonic-gate mdb_printf("%sfrom %s vmem arena\n", w->w_freemem == TRUE ? 1742*0Sstevel@tonic-gate "freed " : "", w->w_vmem->vm_name); 1743*0Sstevel@tonic-gate 1744*0Sstevel@tonic-gate w->w_found++; 1745*0Sstevel@tonic-gate return (w->w_all == TRUE ? 
WALK_NEXT : WALK_DONE); 1746*0Sstevel@tonic-gate } 1747*0Sstevel@tonic-gate 1748*0Sstevel@tonic-gate static int 1749*0Sstevel@tonic-gate whatis_walk_vmem(uintptr_t addr, const vmem_t *vmem, whatis_t *w) 1750*0Sstevel@tonic-gate { 1751*0Sstevel@tonic-gate const char *nm = vmem->vm_name; 1752*0Sstevel@tonic-gate w->w_vmem = vmem; 1753*0Sstevel@tonic-gate w->w_freemem = FALSE; 1754*0Sstevel@tonic-gate 1755*0Sstevel@tonic-gate if (((vmem->vm_cflags & VMC_IDENTIFIER) != 0) ^ w->w_idspace) 1756*0Sstevel@tonic-gate return (WALK_NEXT); 1757*0Sstevel@tonic-gate 1758*0Sstevel@tonic-gate if (w->w_verbose) 1759*0Sstevel@tonic-gate mdb_printf("Searching vmem arena %s...\n", nm); 1760*0Sstevel@tonic-gate 1761*0Sstevel@tonic-gate if (mdb_pwalk("vmem_alloc", 1762*0Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) { 1763*0Sstevel@tonic-gate mdb_warn("can't walk vmem seg for %p", addr); 1764*0Sstevel@tonic-gate return (WALK_NEXT); 1765*0Sstevel@tonic-gate } 1766*0Sstevel@tonic-gate 1767*0Sstevel@tonic-gate if (w->w_found && w->w_all == FALSE) 1768*0Sstevel@tonic-gate return (WALK_DONE); 1769*0Sstevel@tonic-gate 1770*0Sstevel@tonic-gate if (w->w_verbose) 1771*0Sstevel@tonic-gate mdb_printf("Searching vmem arena %s for free virtual...\n", nm); 1772*0Sstevel@tonic-gate 1773*0Sstevel@tonic-gate w->w_freemem = TRUE; 1774*0Sstevel@tonic-gate 1775*0Sstevel@tonic-gate if (mdb_pwalk("vmem_free", 1776*0Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) { 1777*0Sstevel@tonic-gate mdb_warn("can't walk vmem seg for %p", addr); 1778*0Sstevel@tonic-gate return (WALK_NEXT); 1779*0Sstevel@tonic-gate } 1780*0Sstevel@tonic-gate 1781*0Sstevel@tonic-gate return (w->w_found && w->w_all == FALSE ? 
WALK_DONE : WALK_NEXT); 1782*0Sstevel@tonic-gate } 1783*0Sstevel@tonic-gate 1784*0Sstevel@tonic-gate /*ARGSUSED*/ 1785*0Sstevel@tonic-gate static int 1786*0Sstevel@tonic-gate whatis_walk_bufctl(uintptr_t baddr, const kmem_bufctl_t *bcp, whatis_t *w) 1787*0Sstevel@tonic-gate { 1788*0Sstevel@tonic-gate uintptr_t addr; 1789*0Sstevel@tonic-gate 1790*0Sstevel@tonic-gate if (bcp == NULL) 1791*0Sstevel@tonic-gate return (WALK_NEXT); 1792*0Sstevel@tonic-gate 1793*0Sstevel@tonic-gate addr = (uintptr_t)bcp->bc_addr; 1794*0Sstevel@tonic-gate 1795*0Sstevel@tonic-gate if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize) 1796*0Sstevel@tonic-gate return (WALK_NEXT); 1797*0Sstevel@tonic-gate 1798*0Sstevel@tonic-gate whatis_print_kmem(addr, baddr, w); 1799*0Sstevel@tonic-gate w->w_found++; 1800*0Sstevel@tonic-gate return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE); 1801*0Sstevel@tonic-gate } 1802*0Sstevel@tonic-gate 1803*0Sstevel@tonic-gate /*ARGSUSED*/ 1804*0Sstevel@tonic-gate static int 1805*0Sstevel@tonic-gate whatis_walk_slab(uintptr_t saddr, const kmem_slab_t *sp, whatis_t *w) 1806*0Sstevel@tonic-gate { 1807*0Sstevel@tonic-gate uintptr_t base = P2ALIGN((uintptr_t)sp->slab_base, w->w_slab_align); 1808*0Sstevel@tonic-gate 1809*0Sstevel@tonic-gate if ((w->w_addr - base) >= w->w_cache->cache_slabsize) 1810*0Sstevel@tonic-gate return (WALK_NEXT); 1811*0Sstevel@tonic-gate 1812*0Sstevel@tonic-gate w->w_slab_found++; 1813*0Sstevel@tonic-gate return (WALK_DONE); 1814*0Sstevel@tonic-gate } 1815*0Sstevel@tonic-gate 1816*0Sstevel@tonic-gate static int 1817*0Sstevel@tonic-gate whatis_walk_cache(uintptr_t addr, const kmem_cache_t *c, whatis_t *w) 1818*0Sstevel@tonic-gate { 1819*0Sstevel@tonic-gate char *walk, *freewalk; 1820*0Sstevel@tonic-gate mdb_walk_cb_t func; 1821*0Sstevel@tonic-gate vmem_t *vmp = c->cache_arena; 1822*0Sstevel@tonic-gate 1823*0Sstevel@tonic-gate if (((c->cache_flags & VMC_IDENTIFIER) != 0) ^ w->w_idspace) 1824*0Sstevel@tonic-gate return (WALK_NEXT); 
1825*0Sstevel@tonic-gate 1826*0Sstevel@tonic-gate if (w->w_bufctl == FALSE) { 1827*0Sstevel@tonic-gate walk = "kmem"; 1828*0Sstevel@tonic-gate freewalk = "freemem"; 1829*0Sstevel@tonic-gate func = (mdb_walk_cb_t)whatis_walk_kmem; 1830*0Sstevel@tonic-gate } else { 1831*0Sstevel@tonic-gate walk = "bufctl"; 1832*0Sstevel@tonic-gate freewalk = "freectl"; 1833*0Sstevel@tonic-gate func = (mdb_walk_cb_t)whatis_walk_bufctl; 1834*0Sstevel@tonic-gate } 1835*0Sstevel@tonic-gate 1836*0Sstevel@tonic-gate w->w_cache = c; 1837*0Sstevel@tonic-gate 1838*0Sstevel@tonic-gate if (w->w_verbose) 1839*0Sstevel@tonic-gate mdb_printf("Searching %s's slabs...\n", c->cache_name); 1840*0Sstevel@tonic-gate 1841*0Sstevel@tonic-gate /* 1842*0Sstevel@tonic-gate * Verify that the address is in one of the cache's slabs. If not, 1843*0Sstevel@tonic-gate * we can skip the more expensive walkers. (this is purely a 1844*0Sstevel@tonic-gate * heuristic -- as long as there are no false-negatives, we'll be fine) 1845*0Sstevel@tonic-gate * 1846*0Sstevel@tonic-gate * We try to get the cache's arena's quantum, since to accurately 1847*0Sstevel@tonic-gate * get the base of a slab, you have to align it to the quantum. If 1848*0Sstevel@tonic-gate * it doesn't look sensible, we fall back to not aligning. 
1849*0Sstevel@tonic-gate */ 1850*0Sstevel@tonic-gate if (mdb_vread(&w->w_slab_align, sizeof (w->w_slab_align), 1851*0Sstevel@tonic-gate (uintptr_t)&vmp->vm_quantum) == -1) { 1852*0Sstevel@tonic-gate mdb_warn("unable to read %p->cache_arena->vm_quantum", c); 1853*0Sstevel@tonic-gate w->w_slab_align = 1; 1854*0Sstevel@tonic-gate } 1855*0Sstevel@tonic-gate 1856*0Sstevel@tonic-gate if ((c->cache_slabsize < w->w_slab_align) || w->w_slab_align == 0 || 1857*0Sstevel@tonic-gate (w->w_slab_align & (w->w_slab_align - 1))) { 1858*0Sstevel@tonic-gate mdb_warn("%p's arena has invalid quantum (0x%p)\n", c, 1859*0Sstevel@tonic-gate w->w_slab_align); 1860*0Sstevel@tonic-gate w->w_slab_align = 1; 1861*0Sstevel@tonic-gate } 1862*0Sstevel@tonic-gate 1863*0Sstevel@tonic-gate w->w_slab_found = 0; 1864*0Sstevel@tonic-gate if (mdb_pwalk("kmem_slab", (mdb_walk_cb_t)whatis_walk_slab, w, 1865*0Sstevel@tonic-gate addr) == -1) { 1866*0Sstevel@tonic-gate mdb_warn("can't find kmem_slab walker"); 1867*0Sstevel@tonic-gate return (WALK_DONE); 1868*0Sstevel@tonic-gate } 1869*0Sstevel@tonic-gate if (w->w_slab_found == 0) 1870*0Sstevel@tonic-gate return (WALK_NEXT); 1871*0Sstevel@tonic-gate 1872*0Sstevel@tonic-gate if (c->cache_flags & KMF_LITE) { 1873*0Sstevel@tonic-gate if (mdb_readvar(&w->w_kmem_lite_count, 1874*0Sstevel@tonic-gate "kmem_lite_count") == -1 || w->w_kmem_lite_count > 16) 1875*0Sstevel@tonic-gate w->w_kmem_lite_count = 0; 1876*0Sstevel@tonic-gate } 1877*0Sstevel@tonic-gate 1878*0Sstevel@tonic-gate if (w->w_verbose) 1879*0Sstevel@tonic-gate mdb_printf("Searching %s...\n", c->cache_name); 1880*0Sstevel@tonic-gate 1881*0Sstevel@tonic-gate w->w_freemem = FALSE; 1882*0Sstevel@tonic-gate 1883*0Sstevel@tonic-gate if (mdb_pwalk(walk, func, w, addr) == -1) { 1884*0Sstevel@tonic-gate mdb_warn("can't find %s walker", walk); 1885*0Sstevel@tonic-gate return (WALK_DONE); 1886*0Sstevel@tonic-gate } 1887*0Sstevel@tonic-gate 1888*0Sstevel@tonic-gate if (w->w_found && w->w_all == FALSE) 
1889*0Sstevel@tonic-gate return (WALK_DONE); 1890*0Sstevel@tonic-gate 1891*0Sstevel@tonic-gate /* 1892*0Sstevel@tonic-gate * We have searched for allocated memory; now search for freed memory. 1893*0Sstevel@tonic-gate */ 1894*0Sstevel@tonic-gate if (w->w_verbose) 1895*0Sstevel@tonic-gate mdb_printf("Searching %s for free memory...\n", c->cache_name); 1896*0Sstevel@tonic-gate 1897*0Sstevel@tonic-gate w->w_freemem = TRUE; 1898*0Sstevel@tonic-gate 1899*0Sstevel@tonic-gate if (mdb_pwalk(freewalk, func, w, addr) == -1) { 1900*0Sstevel@tonic-gate mdb_warn("can't find %s walker", freewalk); 1901*0Sstevel@tonic-gate return (WALK_DONE); 1902*0Sstevel@tonic-gate } 1903*0Sstevel@tonic-gate 1904*0Sstevel@tonic-gate return (w->w_found && w->w_all == FALSE ? WALK_DONE : WALK_NEXT); 1905*0Sstevel@tonic-gate } 1906*0Sstevel@tonic-gate 1907*0Sstevel@tonic-gate static int 1908*0Sstevel@tonic-gate whatis_walk_touch(uintptr_t addr, const kmem_cache_t *c, whatis_t *w) 1909*0Sstevel@tonic-gate { 1910*0Sstevel@tonic-gate if (c->cache_cflags & KMC_NOTOUCH) 1911*0Sstevel@tonic-gate return (WALK_NEXT); 1912*0Sstevel@tonic-gate 1913*0Sstevel@tonic-gate return (whatis_walk_cache(addr, c, w)); 1914*0Sstevel@tonic-gate } 1915*0Sstevel@tonic-gate 1916*0Sstevel@tonic-gate static int 1917*0Sstevel@tonic-gate whatis_walk_notouch(uintptr_t addr, const kmem_cache_t *c, whatis_t *w) 1918*0Sstevel@tonic-gate { 1919*0Sstevel@tonic-gate if (!(c->cache_cflags & KMC_NOTOUCH)) 1920*0Sstevel@tonic-gate return (WALK_NEXT); 1921*0Sstevel@tonic-gate 1922*0Sstevel@tonic-gate return (whatis_walk_cache(addr, c, w)); 1923*0Sstevel@tonic-gate } 1924*0Sstevel@tonic-gate 1925*0Sstevel@tonic-gate static int 1926*0Sstevel@tonic-gate whatis_walk_thread(uintptr_t addr, const kthread_t *t, whatis_t *w) 1927*0Sstevel@tonic-gate { 1928*0Sstevel@tonic-gate /* 1929*0Sstevel@tonic-gate * Often, one calls ::whatis on an address from a thread structure. 
1930*0Sstevel@tonic-gate * We use this opportunity to short circuit this case... 1931*0Sstevel@tonic-gate */ 1932*0Sstevel@tonic-gate if (w->w_addr >= addr && w->w_addr < addr + sizeof (kthread_t)) { 1933*0Sstevel@tonic-gate mdb_printf("%p is %p+%p, allocated as a thread structure\n", 1934*0Sstevel@tonic-gate w->w_addr, addr, w->w_addr - addr); 1935*0Sstevel@tonic-gate w->w_found++; 1936*0Sstevel@tonic-gate return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE); 1937*0Sstevel@tonic-gate } 1938*0Sstevel@tonic-gate 1939*0Sstevel@tonic-gate if (w->w_addr < (uintptr_t)t->t_stkbase || 1940*0Sstevel@tonic-gate w->w_addr > (uintptr_t)t->t_stk) 1941*0Sstevel@tonic-gate return (WALK_NEXT); 1942*0Sstevel@tonic-gate 1943*0Sstevel@tonic-gate if (t->t_stkbase == NULL) 1944*0Sstevel@tonic-gate return (WALK_NEXT); 1945*0Sstevel@tonic-gate 1946*0Sstevel@tonic-gate mdb_printf("%p is in thread %p's stack%s\n", w->w_addr, addr, 1947*0Sstevel@tonic-gate stack_active(t, w->w_addr)); 1948*0Sstevel@tonic-gate 1949*0Sstevel@tonic-gate w->w_found++; 1950*0Sstevel@tonic-gate return (w->w_all == TRUE ? 
WALK_NEXT : WALK_DONE); 1951*0Sstevel@tonic-gate } 1952*0Sstevel@tonic-gate 1953*0Sstevel@tonic-gate static int 1954*0Sstevel@tonic-gate whatis_walk_modctl(uintptr_t addr, const struct modctl *m, whatis_t *w) 1955*0Sstevel@tonic-gate { 1956*0Sstevel@tonic-gate struct module mod; 1957*0Sstevel@tonic-gate char name[MODMAXNAMELEN], *where; 1958*0Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 1959*0Sstevel@tonic-gate Shdr shdr; 1960*0Sstevel@tonic-gate GElf_Sym sym; 1961*0Sstevel@tonic-gate 1962*0Sstevel@tonic-gate if (m->mod_mp == NULL) 1963*0Sstevel@tonic-gate return (WALK_NEXT); 1964*0Sstevel@tonic-gate 1965*0Sstevel@tonic-gate if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) { 1966*0Sstevel@tonic-gate mdb_warn("couldn't read modctl %p's module", addr); 1967*0Sstevel@tonic-gate return (WALK_NEXT); 1968*0Sstevel@tonic-gate } 1969*0Sstevel@tonic-gate 1970*0Sstevel@tonic-gate if (w->w_addr >= (uintptr_t)mod.text && 1971*0Sstevel@tonic-gate w->w_addr < (uintptr_t)mod.text + mod.text_size) { 1972*0Sstevel@tonic-gate where = "text segment"; 1973*0Sstevel@tonic-gate goto found; 1974*0Sstevel@tonic-gate } 1975*0Sstevel@tonic-gate 1976*0Sstevel@tonic-gate if (w->w_addr >= (uintptr_t)mod.data && 1977*0Sstevel@tonic-gate w->w_addr < (uintptr_t)mod.data + mod.data_size) { 1978*0Sstevel@tonic-gate where = "data segment"; 1979*0Sstevel@tonic-gate goto found; 1980*0Sstevel@tonic-gate } 1981*0Sstevel@tonic-gate 1982*0Sstevel@tonic-gate if (w->w_addr >= (uintptr_t)mod.bss && 1983*0Sstevel@tonic-gate w->w_addr < (uintptr_t)mod.bss + mod.bss_size) { 1984*0Sstevel@tonic-gate where = "bss"; 1985*0Sstevel@tonic-gate goto found; 1986*0Sstevel@tonic-gate } 1987*0Sstevel@tonic-gate 1988*0Sstevel@tonic-gate if (mdb_vread(&shdr, sizeof (shdr), (uintptr_t)mod.symhdr) == -1) { 1989*0Sstevel@tonic-gate mdb_warn("couldn't read symbol header for %p's module", addr); 1990*0Sstevel@tonic-gate return (WALK_NEXT); 1991*0Sstevel@tonic-gate } 1992*0Sstevel@tonic-gate 1993*0Sstevel@tonic-gate 
if (w->w_addr >= (uintptr_t)mod.symtbl && w->w_addr < 1994*0Sstevel@tonic-gate (uintptr_t)mod.symtbl + (uintptr_t)mod.nsyms * shdr.sh_entsize) { 1995*0Sstevel@tonic-gate where = "symtab"; 1996*0Sstevel@tonic-gate goto found; 1997*0Sstevel@tonic-gate } 1998*0Sstevel@tonic-gate 1999*0Sstevel@tonic-gate if (w->w_addr >= (uintptr_t)mod.symspace && 2000*0Sstevel@tonic-gate w->w_addr < (uintptr_t)mod.symspace + (uintptr_t)mod.symsize) { 2001*0Sstevel@tonic-gate where = "symspace"; 2002*0Sstevel@tonic-gate goto found; 2003*0Sstevel@tonic-gate } 2004*0Sstevel@tonic-gate 2005*0Sstevel@tonic-gate return (WALK_NEXT); 2006*0Sstevel@tonic-gate 2007*0Sstevel@tonic-gate found: 2008*0Sstevel@tonic-gate if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1) 2009*0Sstevel@tonic-gate (void) mdb_snprintf(name, sizeof (name), "0x%p", addr); 2010*0Sstevel@tonic-gate 2011*0Sstevel@tonic-gate mdb_printf("%p is ", w->w_addr); 2012*0Sstevel@tonic-gate 2013*0Sstevel@tonic-gate /* 2014*0Sstevel@tonic-gate * If we found this address in a module, then there's a chance that 2015*0Sstevel@tonic-gate * it's actually a named symbol. Try the symbol lookup. 2016*0Sstevel@tonic-gate */ 2017*0Sstevel@tonic-gate if (mdb_lookup_by_addr(w->w_addr, MDB_SYM_FUZZY, c, sizeof (c), 2018*0Sstevel@tonic-gate &sym) != -1 && w->w_addr >= (uintptr_t)sym.st_value && 2019*0Sstevel@tonic-gate w->w_addr < (uintptr_t)sym.st_value + sym.st_size) { 2020*0Sstevel@tonic-gate mdb_printf("%s+%lx ", c, w->w_addr - (uintptr_t)sym.st_value); 2021*0Sstevel@tonic-gate } 2022*0Sstevel@tonic-gate 2023*0Sstevel@tonic-gate mdb_printf("in %s's %s\n", name, where); 2024*0Sstevel@tonic-gate 2025*0Sstevel@tonic-gate w->w_found++; 2026*0Sstevel@tonic-gate return (w->w_all == TRUE ? 
WALK_NEXT : WALK_DONE); 2027*0Sstevel@tonic-gate } 2028*0Sstevel@tonic-gate 2029*0Sstevel@tonic-gate /*ARGSUSED*/ 2030*0Sstevel@tonic-gate static int 2031*0Sstevel@tonic-gate whatis_walk_page(uintptr_t addr, const void *ignored, whatis_t *w) 2032*0Sstevel@tonic-gate { 2033*0Sstevel@tonic-gate static int machsize = 0; 2034*0Sstevel@tonic-gate mdb_ctf_id_t id; 2035*0Sstevel@tonic-gate 2036*0Sstevel@tonic-gate if (machsize == 0) { 2037*0Sstevel@tonic-gate if (mdb_ctf_lookup_by_name("unix`page_t", &id) == 0) 2038*0Sstevel@tonic-gate machsize = mdb_ctf_type_size(id); 2039*0Sstevel@tonic-gate else { 2040*0Sstevel@tonic-gate mdb_warn("could not get size of page_t"); 2041*0Sstevel@tonic-gate machsize = sizeof (page_t); 2042*0Sstevel@tonic-gate } 2043*0Sstevel@tonic-gate } 2044*0Sstevel@tonic-gate 2045*0Sstevel@tonic-gate if (w->w_addr < addr || w->w_addr >= addr + machsize) 2046*0Sstevel@tonic-gate return (WALK_NEXT); 2047*0Sstevel@tonic-gate 2048*0Sstevel@tonic-gate mdb_printf("%p is %p+%p, allocated as a page structure\n", 2049*0Sstevel@tonic-gate w->w_addr, addr, w->w_addr - addr); 2050*0Sstevel@tonic-gate 2051*0Sstevel@tonic-gate w->w_found++; 2052*0Sstevel@tonic-gate return (w->w_all == TRUE ? 
WALK_NEXT : WALK_DONE); 2053*0Sstevel@tonic-gate } 2054*0Sstevel@tonic-gate 2055*0Sstevel@tonic-gate int 2056*0Sstevel@tonic-gate whatis(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 2057*0Sstevel@tonic-gate { 2058*0Sstevel@tonic-gate whatis_t w; 2059*0Sstevel@tonic-gate 2060*0Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 2061*0Sstevel@tonic-gate return (DCMD_USAGE); 2062*0Sstevel@tonic-gate 2063*0Sstevel@tonic-gate w.w_verbose = FALSE; 2064*0Sstevel@tonic-gate w.w_bufctl = FALSE; 2065*0Sstevel@tonic-gate w.w_all = FALSE; 2066*0Sstevel@tonic-gate w.w_idspace = FALSE; 2067*0Sstevel@tonic-gate 2068*0Sstevel@tonic-gate if (mdb_getopts(argc, argv, 2069*0Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &w.w_verbose, 2070*0Sstevel@tonic-gate 'a', MDB_OPT_SETBITS, TRUE, &w.w_all, 2071*0Sstevel@tonic-gate 'i', MDB_OPT_SETBITS, TRUE, &w.w_idspace, 2072*0Sstevel@tonic-gate 'b', MDB_OPT_SETBITS, TRUE, &w.w_bufctl, NULL) != argc) 2073*0Sstevel@tonic-gate return (DCMD_USAGE); 2074*0Sstevel@tonic-gate 2075*0Sstevel@tonic-gate w.w_addr = addr; 2076*0Sstevel@tonic-gate w.w_found = 0; 2077*0Sstevel@tonic-gate 2078*0Sstevel@tonic-gate if (w.w_verbose) 2079*0Sstevel@tonic-gate mdb_printf("Searching modules...\n"); 2080*0Sstevel@tonic-gate 2081*0Sstevel@tonic-gate if (!w.w_idspace) { 2082*0Sstevel@tonic-gate if (mdb_walk("modctl", (mdb_walk_cb_t)whatis_walk_modctl, &w) 2083*0Sstevel@tonic-gate == -1) { 2084*0Sstevel@tonic-gate mdb_warn("couldn't find modctl walker"); 2085*0Sstevel@tonic-gate return (DCMD_ERR); 2086*0Sstevel@tonic-gate } 2087*0Sstevel@tonic-gate 2088*0Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 2089*0Sstevel@tonic-gate return (DCMD_OK); 2090*0Sstevel@tonic-gate 2091*0Sstevel@tonic-gate /* 2092*0Sstevel@tonic-gate * Now search all thread stacks. Yes, this is a little weak; we 2093*0Sstevel@tonic-gate * can save a lot of work by first checking to see if the 2094*0Sstevel@tonic-gate * address is in segkp vs. segkmem. 
But hey, computers are 2095*0Sstevel@tonic-gate * fast. 2096*0Sstevel@tonic-gate */ 2097*0Sstevel@tonic-gate if (w.w_verbose) 2098*0Sstevel@tonic-gate mdb_printf("Searching threads...\n"); 2099*0Sstevel@tonic-gate 2100*0Sstevel@tonic-gate if (mdb_walk("thread", (mdb_walk_cb_t)whatis_walk_thread, &w) 2101*0Sstevel@tonic-gate == -1) { 2102*0Sstevel@tonic-gate mdb_warn("couldn't find thread walker"); 2103*0Sstevel@tonic-gate return (DCMD_ERR); 2104*0Sstevel@tonic-gate } 2105*0Sstevel@tonic-gate 2106*0Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 2107*0Sstevel@tonic-gate return (DCMD_OK); 2108*0Sstevel@tonic-gate 2109*0Sstevel@tonic-gate if (w.w_verbose) 2110*0Sstevel@tonic-gate mdb_printf("Searching page structures...\n"); 2111*0Sstevel@tonic-gate 2112*0Sstevel@tonic-gate if (mdb_walk("page", (mdb_walk_cb_t)whatis_walk_page, &w) 2113*0Sstevel@tonic-gate == -1) { 2114*0Sstevel@tonic-gate mdb_warn("couldn't find page walker"); 2115*0Sstevel@tonic-gate return (DCMD_ERR); 2116*0Sstevel@tonic-gate } 2117*0Sstevel@tonic-gate 2118*0Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 2119*0Sstevel@tonic-gate return (DCMD_OK); 2120*0Sstevel@tonic-gate } 2121*0Sstevel@tonic-gate 2122*0Sstevel@tonic-gate if (mdb_walk("kmem_cache", 2123*0Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_touch, &w) == -1) { 2124*0Sstevel@tonic-gate mdb_warn("couldn't find kmem_cache walker"); 2125*0Sstevel@tonic-gate return (DCMD_ERR); 2126*0Sstevel@tonic-gate } 2127*0Sstevel@tonic-gate 2128*0Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 2129*0Sstevel@tonic-gate return (DCMD_OK); 2130*0Sstevel@tonic-gate 2131*0Sstevel@tonic-gate if (mdb_walk("kmem_cache", 2132*0Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_notouch, &w) == -1) { 2133*0Sstevel@tonic-gate mdb_warn("couldn't find kmem_cache walker"); 2134*0Sstevel@tonic-gate return (DCMD_ERR); 2135*0Sstevel@tonic-gate } 2136*0Sstevel@tonic-gate 2137*0Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 2138*0Sstevel@tonic-gate return 
(DCMD_OK); 2139*0Sstevel@tonic-gate 2140*0Sstevel@tonic-gate if (mdb_walk("vmem_postfix", 2141*0Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_vmem, &w) == -1) { 2142*0Sstevel@tonic-gate mdb_warn("couldn't find vmem_postfix walker"); 2143*0Sstevel@tonic-gate return (DCMD_ERR); 2144*0Sstevel@tonic-gate } 2145*0Sstevel@tonic-gate 2146*0Sstevel@tonic-gate if (w.w_found == 0) 2147*0Sstevel@tonic-gate mdb_printf("%p is unknown\n", addr); 2148*0Sstevel@tonic-gate 2149*0Sstevel@tonic-gate return (DCMD_OK); 2150*0Sstevel@tonic-gate } 2151*0Sstevel@tonic-gate 2152*0Sstevel@tonic-gate void 2153*0Sstevel@tonic-gate whatis_help(void) 2154*0Sstevel@tonic-gate { 2155*0Sstevel@tonic-gate mdb_printf( 2156*0Sstevel@tonic-gate "Given a virtual address, attempt to determine where it came\n" 2157*0Sstevel@tonic-gate "from.\n" 2158*0Sstevel@tonic-gate "\n" 2159*0Sstevel@tonic-gate "\t-v\tVerbose output; display caches/arenas/etc as they are\n" 2160*0Sstevel@tonic-gate "\t\tsearched\n" 2161*0Sstevel@tonic-gate "\t-a\tFind all possible sources. Default behavior is to stop at\n" 2162*0Sstevel@tonic-gate "\t\tthe first (most specific) source.\n" 2163*0Sstevel@tonic-gate "\t-i\tSearch only identifier arenas and caches. By default\n" 2164*0Sstevel@tonic-gate "\t\tthese are ignored.\n" 2165*0Sstevel@tonic-gate "\t-b\tReport bufctls and vmem_segs for matches in kmem and vmem,\n" 2166*0Sstevel@tonic-gate "\t\trespectively. 
Warning: if the buffer exists, but does not\n" 2167*0Sstevel@tonic-gate "\t\thave a bufctl, it will not be reported.\n"); 2168*0Sstevel@tonic-gate } 2169*0Sstevel@tonic-gate 2170*0Sstevel@tonic-gate typedef struct kmem_log_cpu { 2171*0Sstevel@tonic-gate uintptr_t kmc_low; 2172*0Sstevel@tonic-gate uintptr_t kmc_high; 2173*0Sstevel@tonic-gate } kmem_log_cpu_t; 2174*0Sstevel@tonic-gate 2175*0Sstevel@tonic-gate typedef struct kmem_log_data { 2176*0Sstevel@tonic-gate uintptr_t kmd_addr; 2177*0Sstevel@tonic-gate kmem_log_cpu_t *kmd_cpu; 2178*0Sstevel@tonic-gate } kmem_log_data_t; 2179*0Sstevel@tonic-gate 2180*0Sstevel@tonic-gate int 2181*0Sstevel@tonic-gate kmem_log_walk(uintptr_t addr, const kmem_bufctl_audit_t *b, 2182*0Sstevel@tonic-gate kmem_log_data_t *kmd) 2183*0Sstevel@tonic-gate { 2184*0Sstevel@tonic-gate int i; 2185*0Sstevel@tonic-gate kmem_log_cpu_t *kmc = kmd->kmd_cpu; 2186*0Sstevel@tonic-gate size_t bufsize; 2187*0Sstevel@tonic-gate 2188*0Sstevel@tonic-gate for (i = 0; i < NCPU; i++) { 2189*0Sstevel@tonic-gate if (addr >= kmc[i].kmc_low && addr < kmc[i].kmc_high) 2190*0Sstevel@tonic-gate break; 2191*0Sstevel@tonic-gate } 2192*0Sstevel@tonic-gate 2193*0Sstevel@tonic-gate if (kmd->kmd_addr) { 2194*0Sstevel@tonic-gate if (b->bc_cache == NULL) 2195*0Sstevel@tonic-gate return (WALK_NEXT); 2196*0Sstevel@tonic-gate 2197*0Sstevel@tonic-gate if (mdb_vread(&bufsize, sizeof (bufsize), 2198*0Sstevel@tonic-gate (uintptr_t)&b->bc_cache->cache_bufsize) == -1) { 2199*0Sstevel@tonic-gate mdb_warn( 2200*0Sstevel@tonic-gate "failed to read cache_bufsize for cache at %p", 2201*0Sstevel@tonic-gate b->bc_cache); 2202*0Sstevel@tonic-gate return (WALK_ERR); 2203*0Sstevel@tonic-gate } 2204*0Sstevel@tonic-gate 2205*0Sstevel@tonic-gate if (kmd->kmd_addr < (uintptr_t)b->bc_addr || 2206*0Sstevel@tonic-gate kmd->kmd_addr >= (uintptr_t)b->bc_addr + bufsize) 2207*0Sstevel@tonic-gate return (WALK_NEXT); 2208*0Sstevel@tonic-gate } 2209*0Sstevel@tonic-gate 2210*0Sstevel@tonic-gate if (i == 
NCPU) 2211*0Sstevel@tonic-gate mdb_printf(" "); 2212*0Sstevel@tonic-gate else 2213*0Sstevel@tonic-gate mdb_printf("%3d", i); 2214*0Sstevel@tonic-gate 2215*0Sstevel@tonic-gate mdb_printf(" %0?p %0?p %16llx %0?p\n", addr, b->bc_addr, 2216*0Sstevel@tonic-gate b->bc_timestamp, b->bc_thread); 2217*0Sstevel@tonic-gate 2218*0Sstevel@tonic-gate return (WALK_NEXT); 2219*0Sstevel@tonic-gate } 2220*0Sstevel@tonic-gate 2221*0Sstevel@tonic-gate /*ARGSUSED*/ 2222*0Sstevel@tonic-gate int 2223*0Sstevel@tonic-gate kmem_log(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 2224*0Sstevel@tonic-gate { 2225*0Sstevel@tonic-gate kmem_log_header_t lh; 2226*0Sstevel@tonic-gate kmem_cpu_log_header_t clh; 2227*0Sstevel@tonic-gate uintptr_t lhp, clhp; 2228*0Sstevel@tonic-gate int ncpus; 2229*0Sstevel@tonic-gate uintptr_t *cpu; 2230*0Sstevel@tonic-gate GElf_Sym sym; 2231*0Sstevel@tonic-gate kmem_log_cpu_t *kmc; 2232*0Sstevel@tonic-gate int i; 2233*0Sstevel@tonic-gate kmem_log_data_t kmd; 2234*0Sstevel@tonic-gate uint_t opt_b = FALSE; 2235*0Sstevel@tonic-gate 2236*0Sstevel@tonic-gate if (mdb_getopts(argc, argv, 2237*0Sstevel@tonic-gate 'b', MDB_OPT_SETBITS, TRUE, &opt_b, NULL) != argc) 2238*0Sstevel@tonic-gate return (DCMD_USAGE); 2239*0Sstevel@tonic-gate 2240*0Sstevel@tonic-gate if (mdb_readvar(&lhp, "kmem_transaction_log") == -1) { 2241*0Sstevel@tonic-gate mdb_warn("failed to read 'kmem_transaction_log'"); 2242*0Sstevel@tonic-gate return (DCMD_ERR); 2243*0Sstevel@tonic-gate } 2244*0Sstevel@tonic-gate 2245*0Sstevel@tonic-gate if (lhp == NULL) { 2246*0Sstevel@tonic-gate mdb_warn("no kmem transaction log\n"); 2247*0Sstevel@tonic-gate return (DCMD_ERR); 2248*0Sstevel@tonic-gate } 2249*0Sstevel@tonic-gate 2250*0Sstevel@tonic-gate mdb_readvar(&ncpus, "ncpus"); 2251*0Sstevel@tonic-gate 2252*0Sstevel@tonic-gate if (mdb_vread(&lh, sizeof (kmem_log_header_t), lhp) == -1) { 2253*0Sstevel@tonic-gate mdb_warn("failed to read log header at %p", lhp); 2254*0Sstevel@tonic-gate return (DCMD_ERR); 
2255*0Sstevel@tonic-gate } 2256*0Sstevel@tonic-gate 2257*0Sstevel@tonic-gate clhp = lhp + ((uintptr_t)&lh.lh_cpu[0] - (uintptr_t)&lh); 2258*0Sstevel@tonic-gate 2259*0Sstevel@tonic-gate cpu = mdb_alloc(sizeof (uintptr_t) * NCPU, UM_SLEEP | UM_GC); 2260*0Sstevel@tonic-gate 2261*0Sstevel@tonic-gate if (mdb_lookup_by_name("cpu", &sym) == -1) { 2262*0Sstevel@tonic-gate mdb_warn("couldn't find 'cpu' array"); 2263*0Sstevel@tonic-gate return (DCMD_ERR); 2264*0Sstevel@tonic-gate } 2265*0Sstevel@tonic-gate 2266*0Sstevel@tonic-gate if (sym.st_size != NCPU * sizeof (uintptr_t)) { 2267*0Sstevel@tonic-gate mdb_warn("expected 'cpu' to be of size %d; found %d\n", 2268*0Sstevel@tonic-gate NCPU * sizeof (uintptr_t), sym.st_size); 2269*0Sstevel@tonic-gate return (DCMD_ERR); 2270*0Sstevel@tonic-gate } 2271*0Sstevel@tonic-gate 2272*0Sstevel@tonic-gate if (mdb_vread(cpu, sym.st_size, (uintptr_t)sym.st_value) == -1) { 2273*0Sstevel@tonic-gate mdb_warn("failed to read cpu array at %p", sym.st_value); 2274*0Sstevel@tonic-gate return (DCMD_ERR); 2275*0Sstevel@tonic-gate } 2276*0Sstevel@tonic-gate 2277*0Sstevel@tonic-gate kmc = mdb_zalloc(sizeof (kmem_log_cpu_t) * NCPU, UM_SLEEP | UM_GC); 2278*0Sstevel@tonic-gate kmd.kmd_addr = NULL; 2279*0Sstevel@tonic-gate kmd.kmd_cpu = kmc; 2280*0Sstevel@tonic-gate 2281*0Sstevel@tonic-gate for (i = 0; i < NCPU; i++) { 2282*0Sstevel@tonic-gate 2283*0Sstevel@tonic-gate if (cpu[i] == NULL) 2284*0Sstevel@tonic-gate continue; 2285*0Sstevel@tonic-gate 2286*0Sstevel@tonic-gate if (mdb_vread(&clh, sizeof (clh), clhp) == -1) { 2287*0Sstevel@tonic-gate mdb_warn("cannot read cpu %d's log header at %p", 2288*0Sstevel@tonic-gate i, clhp); 2289*0Sstevel@tonic-gate return (DCMD_ERR); 2290*0Sstevel@tonic-gate } 2291*0Sstevel@tonic-gate 2292*0Sstevel@tonic-gate kmc[i].kmc_low = clh.clh_chunk * lh.lh_chunksize + 2293*0Sstevel@tonic-gate (uintptr_t)lh.lh_base; 2294*0Sstevel@tonic-gate kmc[i].kmc_high = (uintptr_t)clh.clh_current; 2295*0Sstevel@tonic-gate 
2296*0Sstevel@tonic-gate clhp += sizeof (kmem_cpu_log_header_t); 2297*0Sstevel@tonic-gate } 2298*0Sstevel@tonic-gate 2299*0Sstevel@tonic-gate mdb_printf("%3s %-?s %-?s %16s %-?s\n", "CPU", "ADDR", "BUFADDR", 2300*0Sstevel@tonic-gate "TIMESTAMP", "THREAD"); 2301*0Sstevel@tonic-gate 2302*0Sstevel@tonic-gate /* 2303*0Sstevel@tonic-gate * If we have been passed an address, print out only log entries 2304*0Sstevel@tonic-gate * corresponding to that address. If opt_b is specified, then interpret 2305*0Sstevel@tonic-gate * the address as a bufctl. 2306*0Sstevel@tonic-gate */ 2307*0Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 2308*0Sstevel@tonic-gate kmem_bufctl_audit_t b; 2309*0Sstevel@tonic-gate 2310*0Sstevel@tonic-gate if (opt_b) { 2311*0Sstevel@tonic-gate kmd.kmd_addr = addr; 2312*0Sstevel@tonic-gate } else { 2313*0Sstevel@tonic-gate if (mdb_vread(&b, 2314*0Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t), addr) == -1) { 2315*0Sstevel@tonic-gate mdb_warn("failed to read bufctl at %p", addr); 2316*0Sstevel@tonic-gate return (DCMD_ERR); 2317*0Sstevel@tonic-gate } 2318*0Sstevel@tonic-gate 2319*0Sstevel@tonic-gate (void) kmem_log_walk(addr, &b, &kmd); 2320*0Sstevel@tonic-gate 2321*0Sstevel@tonic-gate return (DCMD_OK); 2322*0Sstevel@tonic-gate } 2323*0Sstevel@tonic-gate } 2324*0Sstevel@tonic-gate 2325*0Sstevel@tonic-gate if (mdb_walk("kmem_log", (mdb_walk_cb_t)kmem_log_walk, &kmd) == -1) { 2326*0Sstevel@tonic-gate mdb_warn("can't find kmem log walker"); 2327*0Sstevel@tonic-gate return (DCMD_ERR); 2328*0Sstevel@tonic-gate } 2329*0Sstevel@tonic-gate 2330*0Sstevel@tonic-gate return (DCMD_OK); 2331*0Sstevel@tonic-gate } 2332*0Sstevel@tonic-gate 2333*0Sstevel@tonic-gate typedef struct bufctl_history_cb { 2334*0Sstevel@tonic-gate int bhc_flags; 2335*0Sstevel@tonic-gate int bhc_argc; 2336*0Sstevel@tonic-gate const mdb_arg_t *bhc_argv; 2337*0Sstevel@tonic-gate int bhc_ret; 2338*0Sstevel@tonic-gate } bufctl_history_cb_t; 2339*0Sstevel@tonic-gate 2340*0Sstevel@tonic-gate 
/*ARGSUSED*/ 2341*0Sstevel@tonic-gate static int 2342*0Sstevel@tonic-gate bufctl_history_callback(uintptr_t addr, const void *ign, void *arg) 2343*0Sstevel@tonic-gate { 2344*0Sstevel@tonic-gate bufctl_history_cb_t *bhc = arg; 2345*0Sstevel@tonic-gate 2346*0Sstevel@tonic-gate bhc->bhc_ret = 2347*0Sstevel@tonic-gate bufctl(addr, bhc->bhc_flags, bhc->bhc_argc, bhc->bhc_argv); 2348*0Sstevel@tonic-gate 2349*0Sstevel@tonic-gate bhc->bhc_flags &= ~DCMD_LOOPFIRST; 2350*0Sstevel@tonic-gate 2351*0Sstevel@tonic-gate return ((bhc->bhc_ret == DCMD_OK)? WALK_NEXT : WALK_DONE); 2352*0Sstevel@tonic-gate } 2353*0Sstevel@tonic-gate 2354*0Sstevel@tonic-gate void 2355*0Sstevel@tonic-gate bufctl_help(void) 2356*0Sstevel@tonic-gate { 2357*0Sstevel@tonic-gate mdb_printf("%s\n", 2358*0Sstevel@tonic-gate "Display the contents of kmem_bufctl_audit_ts, with optional filtering.\n"); 2359*0Sstevel@tonic-gate mdb_dec_indent(2); 2360*0Sstevel@tonic-gate mdb_printf("%<b>OPTIONS%</b>\n"); 2361*0Sstevel@tonic-gate mdb_inc_indent(2); 2362*0Sstevel@tonic-gate mdb_printf("%s", 2363*0Sstevel@tonic-gate " -v Display the full content of the bufctl, including its stack trace\n" 2364*0Sstevel@tonic-gate " -h retrieve the bufctl's transaction history, if available\n" 2365*0Sstevel@tonic-gate " -a addr\n" 2366*0Sstevel@tonic-gate " filter out bufctls not involving the buffer at addr\n" 2367*0Sstevel@tonic-gate " -c caller\n" 2368*0Sstevel@tonic-gate " filter out bufctls without the function/PC in their stack trace\n" 2369*0Sstevel@tonic-gate " -e earliest\n" 2370*0Sstevel@tonic-gate " filter out bufctls timestamped before earliest\n" 2371*0Sstevel@tonic-gate " -l latest\n" 2372*0Sstevel@tonic-gate " filter out bufctls timestamped after latest\n" 2373*0Sstevel@tonic-gate " -t thread\n" 2374*0Sstevel@tonic-gate " filter out bufctls not involving thread\n"); 2375*0Sstevel@tonic-gate } 2376*0Sstevel@tonic-gate 2377*0Sstevel@tonic-gate int 2378*0Sstevel@tonic-gate bufctl(uintptr_t addr, uint_t flags, int argc, 
const mdb_arg_t *argv) 2379*0Sstevel@tonic-gate { 2380*0Sstevel@tonic-gate kmem_bufctl_audit_t bc; 2381*0Sstevel@tonic-gate uint_t verbose = FALSE; 2382*0Sstevel@tonic-gate uint_t history = FALSE; 2383*0Sstevel@tonic-gate uint_t in_history = FALSE; 2384*0Sstevel@tonic-gate uintptr_t caller = NULL, thread = NULL; 2385*0Sstevel@tonic-gate uintptr_t laddr, haddr, baddr = NULL; 2386*0Sstevel@tonic-gate hrtime_t earliest = 0, latest = 0; 2387*0Sstevel@tonic-gate int i, depth; 2388*0Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 2389*0Sstevel@tonic-gate GElf_Sym sym; 2390*0Sstevel@tonic-gate 2391*0Sstevel@tonic-gate if (mdb_getopts(argc, argv, 2392*0Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &verbose, 2393*0Sstevel@tonic-gate 'h', MDB_OPT_SETBITS, TRUE, &history, 2394*0Sstevel@tonic-gate 'H', MDB_OPT_SETBITS, TRUE, &in_history, /* internal */ 2395*0Sstevel@tonic-gate 'c', MDB_OPT_UINTPTR, &caller, 2396*0Sstevel@tonic-gate 't', MDB_OPT_UINTPTR, &thread, 2397*0Sstevel@tonic-gate 'e', MDB_OPT_UINT64, &earliest, 2398*0Sstevel@tonic-gate 'l', MDB_OPT_UINT64, &latest, 2399*0Sstevel@tonic-gate 'a', MDB_OPT_UINTPTR, &baddr, NULL) != argc) 2400*0Sstevel@tonic-gate return (DCMD_USAGE); 2401*0Sstevel@tonic-gate 2402*0Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 2403*0Sstevel@tonic-gate return (DCMD_USAGE); 2404*0Sstevel@tonic-gate 2405*0Sstevel@tonic-gate if (in_history && !history) 2406*0Sstevel@tonic-gate return (DCMD_USAGE); 2407*0Sstevel@tonic-gate 2408*0Sstevel@tonic-gate if (history && !in_history) { 2409*0Sstevel@tonic-gate mdb_arg_t *nargv = mdb_zalloc(sizeof (*nargv) * (argc + 1), 2410*0Sstevel@tonic-gate UM_SLEEP | UM_GC); 2411*0Sstevel@tonic-gate bufctl_history_cb_t bhc; 2412*0Sstevel@tonic-gate 2413*0Sstevel@tonic-gate nargv[0].a_type = MDB_TYPE_STRING; 2414*0Sstevel@tonic-gate nargv[0].a_un.a_str = "-H"; /* prevent recursion */ 2415*0Sstevel@tonic-gate 2416*0Sstevel@tonic-gate for (i = 0; i < argc; i++) 2417*0Sstevel@tonic-gate nargv[i + 1] = argv[i]; 
2418*0Sstevel@tonic-gate 2419*0Sstevel@tonic-gate /* 2420*0Sstevel@tonic-gate * When in history mode, we treat each element as if it 2421*0Sstevel@tonic-gate * were in a seperate loop, so that the headers group 2422*0Sstevel@tonic-gate * bufctls with similar histories. 2423*0Sstevel@tonic-gate */ 2424*0Sstevel@tonic-gate bhc.bhc_flags = flags | DCMD_LOOP | DCMD_LOOPFIRST; 2425*0Sstevel@tonic-gate bhc.bhc_argc = argc + 1; 2426*0Sstevel@tonic-gate bhc.bhc_argv = nargv; 2427*0Sstevel@tonic-gate bhc.bhc_ret = DCMD_OK; 2428*0Sstevel@tonic-gate 2429*0Sstevel@tonic-gate if (mdb_pwalk("bufctl_history", bufctl_history_callback, &bhc, 2430*0Sstevel@tonic-gate addr) == -1) { 2431*0Sstevel@tonic-gate mdb_warn("unable to walk bufctl_history"); 2432*0Sstevel@tonic-gate return (DCMD_ERR); 2433*0Sstevel@tonic-gate } 2434*0Sstevel@tonic-gate 2435*0Sstevel@tonic-gate if (bhc.bhc_ret == DCMD_OK && !(flags & DCMD_PIPE_OUT)) 2436*0Sstevel@tonic-gate mdb_printf("\n"); 2437*0Sstevel@tonic-gate 2438*0Sstevel@tonic-gate return (bhc.bhc_ret); 2439*0Sstevel@tonic-gate } 2440*0Sstevel@tonic-gate 2441*0Sstevel@tonic-gate if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) { 2442*0Sstevel@tonic-gate if (verbose) { 2443*0Sstevel@tonic-gate mdb_printf("%16s %16s %16s %16s\n" 2444*0Sstevel@tonic-gate "%<u>%16s %16s %16s %16s%</u>\n", 2445*0Sstevel@tonic-gate "ADDR", "BUFADDR", "TIMESTAMP", "THREAD", 2446*0Sstevel@tonic-gate "", "CACHE", "LASTLOG", "CONTENTS"); 2447*0Sstevel@tonic-gate } else { 2448*0Sstevel@tonic-gate mdb_printf("%<u>%-?s %-?s %-12s %-?s %s%</u>\n", 2449*0Sstevel@tonic-gate "ADDR", "BUFADDR", "TIMESTAMP", "THREAD", "CALLER"); 2450*0Sstevel@tonic-gate } 2451*0Sstevel@tonic-gate } 2452*0Sstevel@tonic-gate 2453*0Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), addr) == -1) { 2454*0Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr); 2455*0Sstevel@tonic-gate return (DCMD_ERR); 2456*0Sstevel@tonic-gate } 2457*0Sstevel@tonic-gate 2458*0Sstevel@tonic-gate /* 
2459*0Sstevel@tonic-gate * Guard against bogus bc_depth in case the bufctl is corrupt or 2460*0Sstevel@tonic-gate * the address does not really refer to a bufctl. 2461*0Sstevel@tonic-gate */ 2462*0Sstevel@tonic-gate depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH); 2463*0Sstevel@tonic-gate 2464*0Sstevel@tonic-gate if (caller != NULL) { 2465*0Sstevel@tonic-gate laddr = caller; 2466*0Sstevel@tonic-gate haddr = caller + sizeof (caller); 2467*0Sstevel@tonic-gate 2468*0Sstevel@tonic-gate if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, sizeof (c), 2469*0Sstevel@tonic-gate &sym) != -1 && caller == (uintptr_t)sym.st_value) { 2470*0Sstevel@tonic-gate /* 2471*0Sstevel@tonic-gate * We were provided an exact symbol value; any 2472*0Sstevel@tonic-gate * address in the function is valid. 2473*0Sstevel@tonic-gate */ 2474*0Sstevel@tonic-gate laddr = (uintptr_t)sym.st_value; 2475*0Sstevel@tonic-gate haddr = (uintptr_t)sym.st_value + sym.st_size; 2476*0Sstevel@tonic-gate } 2477*0Sstevel@tonic-gate 2478*0Sstevel@tonic-gate for (i = 0; i < depth; i++) 2479*0Sstevel@tonic-gate if (bc.bc_stack[i] >= laddr && bc.bc_stack[i] < haddr) 2480*0Sstevel@tonic-gate break; 2481*0Sstevel@tonic-gate 2482*0Sstevel@tonic-gate if (i == depth) 2483*0Sstevel@tonic-gate return (DCMD_OK); 2484*0Sstevel@tonic-gate } 2485*0Sstevel@tonic-gate 2486*0Sstevel@tonic-gate if (thread != NULL && (uintptr_t)bc.bc_thread != thread) 2487*0Sstevel@tonic-gate return (DCMD_OK); 2488*0Sstevel@tonic-gate 2489*0Sstevel@tonic-gate if (earliest != 0 && bc.bc_timestamp < earliest) 2490*0Sstevel@tonic-gate return (DCMD_OK); 2491*0Sstevel@tonic-gate 2492*0Sstevel@tonic-gate if (latest != 0 && bc.bc_timestamp > latest) 2493*0Sstevel@tonic-gate return (DCMD_OK); 2494*0Sstevel@tonic-gate 2495*0Sstevel@tonic-gate if (baddr != 0 && (uintptr_t)bc.bc_addr != baddr) 2496*0Sstevel@tonic-gate return (DCMD_OK); 2497*0Sstevel@tonic-gate 2498*0Sstevel@tonic-gate if (flags & DCMD_PIPE_OUT) { 2499*0Sstevel@tonic-gate mdb_printf("%#lr\n", addr); 
2500*0Sstevel@tonic-gate return (DCMD_OK); 2501*0Sstevel@tonic-gate } 2502*0Sstevel@tonic-gate 2503*0Sstevel@tonic-gate if (verbose) { 2504*0Sstevel@tonic-gate mdb_printf( 2505*0Sstevel@tonic-gate "%<b>%16p%</b> %16p %16llx %16p\n" 2506*0Sstevel@tonic-gate "%16s %16p %16p %16p\n", 2507*0Sstevel@tonic-gate addr, bc.bc_addr, bc.bc_timestamp, bc.bc_thread, 2508*0Sstevel@tonic-gate "", bc.bc_cache, bc.bc_lastlog, bc.bc_contents); 2509*0Sstevel@tonic-gate 2510*0Sstevel@tonic-gate mdb_inc_indent(17); 2511*0Sstevel@tonic-gate for (i = 0; i < depth; i++) 2512*0Sstevel@tonic-gate mdb_printf("%a\n", bc.bc_stack[i]); 2513*0Sstevel@tonic-gate mdb_dec_indent(17); 2514*0Sstevel@tonic-gate mdb_printf("\n"); 2515*0Sstevel@tonic-gate } else { 2516*0Sstevel@tonic-gate mdb_printf("%0?p %0?p %12llx %0?p", addr, bc.bc_addr, 2517*0Sstevel@tonic-gate bc.bc_timestamp, bc.bc_thread); 2518*0Sstevel@tonic-gate 2519*0Sstevel@tonic-gate for (i = 0; i < depth; i++) { 2520*0Sstevel@tonic-gate if (mdb_lookup_by_addr(bc.bc_stack[i], 2521*0Sstevel@tonic-gate MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1) 2522*0Sstevel@tonic-gate continue; 2523*0Sstevel@tonic-gate if (strncmp(c, "kmem_", 5) == 0) 2524*0Sstevel@tonic-gate continue; 2525*0Sstevel@tonic-gate mdb_printf(" %a\n", bc.bc_stack[i]); 2526*0Sstevel@tonic-gate break; 2527*0Sstevel@tonic-gate } 2528*0Sstevel@tonic-gate 2529*0Sstevel@tonic-gate if (i >= depth) 2530*0Sstevel@tonic-gate mdb_printf("\n"); 2531*0Sstevel@tonic-gate } 2532*0Sstevel@tonic-gate 2533*0Sstevel@tonic-gate return (DCMD_OK); 2534*0Sstevel@tonic-gate } 2535*0Sstevel@tonic-gate 2536*0Sstevel@tonic-gate typedef struct kmem_verify { 2537*0Sstevel@tonic-gate uint64_t *kmv_buf; /* buffer to read cache contents into */ 2538*0Sstevel@tonic-gate size_t kmv_size; /* number of bytes in kmv_buf */ 2539*0Sstevel@tonic-gate int kmv_corruption; /* > 0 if corruption found. 
*/ 2540*0Sstevel@tonic-gate int kmv_besilent; /* report actual corruption sites */ 2541*0Sstevel@tonic-gate struct kmem_cache kmv_cache; /* the cache we're operating on */ 2542*0Sstevel@tonic-gate } kmem_verify_t; 2543*0Sstevel@tonic-gate 2544*0Sstevel@tonic-gate /* 2545*0Sstevel@tonic-gate * verify_pattern() 2546*0Sstevel@tonic-gate * verify that buf is filled with the pattern pat. 2547*0Sstevel@tonic-gate */ 2548*0Sstevel@tonic-gate static int64_t 2549*0Sstevel@tonic-gate verify_pattern(uint64_t *buf_arg, size_t size, uint64_t pat) 2550*0Sstevel@tonic-gate { 2551*0Sstevel@tonic-gate /*LINTED*/ 2552*0Sstevel@tonic-gate uint64_t *bufend = (uint64_t *)((char *)buf_arg + size); 2553*0Sstevel@tonic-gate uint64_t *buf; 2554*0Sstevel@tonic-gate 2555*0Sstevel@tonic-gate for (buf = buf_arg; buf < bufend; buf++) 2556*0Sstevel@tonic-gate if (*buf != pat) 2557*0Sstevel@tonic-gate return ((uintptr_t)buf - (uintptr_t)buf_arg); 2558*0Sstevel@tonic-gate return (-1); 2559*0Sstevel@tonic-gate } 2560*0Sstevel@tonic-gate 2561*0Sstevel@tonic-gate /* 2562*0Sstevel@tonic-gate * verify_buftag() 2563*0Sstevel@tonic-gate * verify that btp->bt_bxstat == (bcp ^ pat) 2564*0Sstevel@tonic-gate */ 2565*0Sstevel@tonic-gate static int 2566*0Sstevel@tonic-gate verify_buftag(kmem_buftag_t *btp, uintptr_t pat) 2567*0Sstevel@tonic-gate { 2568*0Sstevel@tonic-gate return (btp->bt_bxstat == ((intptr_t)btp->bt_bufctl ^ pat) ? 0 : -1); 2569*0Sstevel@tonic-gate } 2570*0Sstevel@tonic-gate 2571*0Sstevel@tonic-gate /* 2572*0Sstevel@tonic-gate * verify_free() 2573*0Sstevel@tonic-gate * verify the integrity of a free block of memory by checking 2574*0Sstevel@tonic-gate * that it is filled with 0xdeadbeef and that its buftag is sane. 
2575*0Sstevel@tonic-gate */ 2576*0Sstevel@tonic-gate /*ARGSUSED1*/ 2577*0Sstevel@tonic-gate static int 2578*0Sstevel@tonic-gate verify_free(uintptr_t addr, const void *data, void *private) 2579*0Sstevel@tonic-gate { 2580*0Sstevel@tonic-gate kmem_verify_t *kmv = (kmem_verify_t *)private; 2581*0Sstevel@tonic-gate uint64_t *buf = kmv->kmv_buf; /* buf to validate */ 2582*0Sstevel@tonic-gate int64_t corrupt; /* corruption offset */ 2583*0Sstevel@tonic-gate kmem_buftag_t *buftagp; /* ptr to buftag */ 2584*0Sstevel@tonic-gate kmem_cache_t *cp = &kmv->kmv_cache; 2585*0Sstevel@tonic-gate int besilent = kmv->kmv_besilent; 2586*0Sstevel@tonic-gate 2587*0Sstevel@tonic-gate /*LINTED*/ 2588*0Sstevel@tonic-gate buftagp = KMEM_BUFTAG(cp, buf); 2589*0Sstevel@tonic-gate 2590*0Sstevel@tonic-gate /* 2591*0Sstevel@tonic-gate * Read the buffer to check. 2592*0Sstevel@tonic-gate */ 2593*0Sstevel@tonic-gate if (mdb_vread(buf, kmv->kmv_size, addr) == -1) { 2594*0Sstevel@tonic-gate if (!besilent) 2595*0Sstevel@tonic-gate mdb_warn("couldn't read %p", addr); 2596*0Sstevel@tonic-gate return (WALK_NEXT); 2597*0Sstevel@tonic-gate } 2598*0Sstevel@tonic-gate 2599*0Sstevel@tonic-gate if ((corrupt = verify_pattern(buf, cp->cache_verify, 2600*0Sstevel@tonic-gate KMEM_FREE_PATTERN)) >= 0) { 2601*0Sstevel@tonic-gate if (!besilent) 2602*0Sstevel@tonic-gate mdb_printf("buffer %p (free) seems corrupted, at %p\n", 2603*0Sstevel@tonic-gate addr, (uintptr_t)addr + corrupt); 2604*0Sstevel@tonic-gate goto corrupt; 2605*0Sstevel@tonic-gate } 2606*0Sstevel@tonic-gate /* 2607*0Sstevel@tonic-gate * When KMF_LITE is set, buftagp->bt_redzone is used to hold 2608*0Sstevel@tonic-gate * the first bytes of the buffer, hence we cannot check for red 2609*0Sstevel@tonic-gate * zone corruption. 
2610*0Sstevel@tonic-gate */ 2611*0Sstevel@tonic-gate if ((cp->cache_flags & (KMF_HASH | KMF_LITE)) == KMF_HASH && 2612*0Sstevel@tonic-gate buftagp->bt_redzone != KMEM_REDZONE_PATTERN) { 2613*0Sstevel@tonic-gate if (!besilent) 2614*0Sstevel@tonic-gate mdb_printf("buffer %p (free) seems to " 2615*0Sstevel@tonic-gate "have a corrupt redzone pattern\n", addr); 2616*0Sstevel@tonic-gate goto corrupt; 2617*0Sstevel@tonic-gate } 2618*0Sstevel@tonic-gate 2619*0Sstevel@tonic-gate /* 2620*0Sstevel@tonic-gate * confirm bufctl pointer integrity. 2621*0Sstevel@tonic-gate */ 2622*0Sstevel@tonic-gate if (verify_buftag(buftagp, KMEM_BUFTAG_FREE) == -1) { 2623*0Sstevel@tonic-gate if (!besilent) 2624*0Sstevel@tonic-gate mdb_printf("buffer %p (free) has a corrupt " 2625*0Sstevel@tonic-gate "buftag\n", addr); 2626*0Sstevel@tonic-gate goto corrupt; 2627*0Sstevel@tonic-gate } 2628*0Sstevel@tonic-gate 2629*0Sstevel@tonic-gate return (WALK_NEXT); 2630*0Sstevel@tonic-gate corrupt: 2631*0Sstevel@tonic-gate kmv->kmv_corruption++; 2632*0Sstevel@tonic-gate return (WALK_NEXT); 2633*0Sstevel@tonic-gate } 2634*0Sstevel@tonic-gate 2635*0Sstevel@tonic-gate /* 2636*0Sstevel@tonic-gate * verify_alloc() 2637*0Sstevel@tonic-gate * Verify that the buftag of an allocated buffer makes sense with respect 2638*0Sstevel@tonic-gate * to the buffer. 
2639*0Sstevel@tonic-gate */ 2640*0Sstevel@tonic-gate /*ARGSUSED1*/ 2641*0Sstevel@tonic-gate static int 2642*0Sstevel@tonic-gate verify_alloc(uintptr_t addr, const void *data, void *private) 2643*0Sstevel@tonic-gate { 2644*0Sstevel@tonic-gate kmem_verify_t *kmv = (kmem_verify_t *)private; 2645*0Sstevel@tonic-gate kmem_cache_t *cp = &kmv->kmv_cache; 2646*0Sstevel@tonic-gate uint64_t *buf = kmv->kmv_buf; /* buf to validate */ 2647*0Sstevel@tonic-gate /*LINTED*/ 2648*0Sstevel@tonic-gate kmem_buftag_t *buftagp = KMEM_BUFTAG(cp, buf); 2649*0Sstevel@tonic-gate uint32_t *ip = (uint32_t *)buftagp; 2650*0Sstevel@tonic-gate uint8_t *bp = (uint8_t *)buf; 2651*0Sstevel@tonic-gate int looks_ok = 0, size_ok = 1; /* flags for finding corruption */ 2652*0Sstevel@tonic-gate int besilent = kmv->kmv_besilent; 2653*0Sstevel@tonic-gate 2654*0Sstevel@tonic-gate /* 2655*0Sstevel@tonic-gate * Read the buffer to check. 2656*0Sstevel@tonic-gate */ 2657*0Sstevel@tonic-gate if (mdb_vread(buf, kmv->kmv_size, addr) == -1) { 2658*0Sstevel@tonic-gate if (!besilent) 2659*0Sstevel@tonic-gate mdb_warn("couldn't read %p", addr); 2660*0Sstevel@tonic-gate return (WALK_NEXT); 2661*0Sstevel@tonic-gate } 2662*0Sstevel@tonic-gate 2663*0Sstevel@tonic-gate /* 2664*0Sstevel@tonic-gate * There are two cases to handle: 2665*0Sstevel@tonic-gate * 1. If the buf was alloc'd using kmem_cache_alloc, it will have 2666*0Sstevel@tonic-gate * 0xfeedfacefeedface at the end of it 2667*0Sstevel@tonic-gate * 2. If the buf was alloc'd using kmem_alloc, it will have 2668*0Sstevel@tonic-gate * 0xbb just past the end of the region in use. At the buftag, 2669*0Sstevel@tonic-gate * it will have 0xfeedface (or, if the whole buffer is in use, 2670*0Sstevel@tonic-gate * 0xfeedface & bb000000 or 0xfeedfacf & 000000bb depending on 2671*0Sstevel@tonic-gate * endianness), followed by 32 bits containing the offset of the 2672*0Sstevel@tonic-gate * 0xbb byte in the buffer. 
2673*0Sstevel@tonic-gate * 2674*0Sstevel@tonic-gate * Finally, the two 32-bit words that comprise the second half of the 2675*0Sstevel@tonic-gate * buftag should xor to KMEM_BUFTAG_ALLOC 2676*0Sstevel@tonic-gate */ 2677*0Sstevel@tonic-gate 2678*0Sstevel@tonic-gate if (buftagp->bt_redzone == KMEM_REDZONE_PATTERN) 2679*0Sstevel@tonic-gate looks_ok = 1; 2680*0Sstevel@tonic-gate else if (!KMEM_SIZE_VALID(ip[1])) 2681*0Sstevel@tonic-gate size_ok = 0; 2682*0Sstevel@tonic-gate else if (bp[KMEM_SIZE_DECODE(ip[1])] == KMEM_REDZONE_BYTE) 2683*0Sstevel@tonic-gate looks_ok = 1; 2684*0Sstevel@tonic-gate else 2685*0Sstevel@tonic-gate size_ok = 0; 2686*0Sstevel@tonic-gate 2687*0Sstevel@tonic-gate if (!size_ok) { 2688*0Sstevel@tonic-gate if (!besilent) 2689*0Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a corrupt " 2690*0Sstevel@tonic-gate "redzone size encoding\n", addr); 2691*0Sstevel@tonic-gate goto corrupt; 2692*0Sstevel@tonic-gate } 2693*0Sstevel@tonic-gate 2694*0Sstevel@tonic-gate if (!looks_ok) { 2695*0Sstevel@tonic-gate if (!besilent) 2696*0Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a corrupt " 2697*0Sstevel@tonic-gate "redzone signature\n", addr); 2698*0Sstevel@tonic-gate goto corrupt; 2699*0Sstevel@tonic-gate } 2700*0Sstevel@tonic-gate 2701*0Sstevel@tonic-gate if (verify_buftag(buftagp, KMEM_BUFTAG_ALLOC) == -1) { 2702*0Sstevel@tonic-gate if (!besilent) 2703*0Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a " 2704*0Sstevel@tonic-gate "corrupt buftag\n", addr); 2705*0Sstevel@tonic-gate goto corrupt; 2706*0Sstevel@tonic-gate } 2707*0Sstevel@tonic-gate 2708*0Sstevel@tonic-gate return (WALK_NEXT); 2709*0Sstevel@tonic-gate corrupt: 2710*0Sstevel@tonic-gate kmv->kmv_corruption++; 2711*0Sstevel@tonic-gate return (WALK_NEXT); 2712*0Sstevel@tonic-gate } 2713*0Sstevel@tonic-gate 2714*0Sstevel@tonic-gate /*ARGSUSED2*/ 2715*0Sstevel@tonic-gate int 2716*0Sstevel@tonic-gate kmem_verify(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 
2717*0Sstevel@tonic-gate { 2718*0Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 2719*0Sstevel@tonic-gate int check_alloc = 0, check_free = 0; 2720*0Sstevel@tonic-gate kmem_verify_t kmv; 2721*0Sstevel@tonic-gate 2722*0Sstevel@tonic-gate if (mdb_vread(&kmv.kmv_cache, sizeof (kmv.kmv_cache), 2723*0Sstevel@tonic-gate addr) == -1) { 2724*0Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache %p", addr); 2725*0Sstevel@tonic-gate return (DCMD_ERR); 2726*0Sstevel@tonic-gate } 2727*0Sstevel@tonic-gate 2728*0Sstevel@tonic-gate kmv.kmv_size = kmv.kmv_cache.cache_buftag + 2729*0Sstevel@tonic-gate sizeof (kmem_buftag_t); 2730*0Sstevel@tonic-gate kmv.kmv_buf = mdb_alloc(kmv.kmv_size, UM_SLEEP | UM_GC); 2731*0Sstevel@tonic-gate kmv.kmv_corruption = 0; 2732*0Sstevel@tonic-gate 2733*0Sstevel@tonic-gate if ((kmv.kmv_cache.cache_flags & KMF_REDZONE)) { 2734*0Sstevel@tonic-gate check_alloc = 1; 2735*0Sstevel@tonic-gate if (kmv.kmv_cache.cache_flags & KMF_DEADBEEF) 2736*0Sstevel@tonic-gate check_free = 1; 2737*0Sstevel@tonic-gate } else { 2738*0Sstevel@tonic-gate if (!(flags & DCMD_LOOP)) { 2739*0Sstevel@tonic-gate mdb_warn("cache %p (%s) does not have " 2740*0Sstevel@tonic-gate "redzone checking enabled\n", addr, 2741*0Sstevel@tonic-gate kmv.kmv_cache.cache_name); 2742*0Sstevel@tonic-gate } 2743*0Sstevel@tonic-gate return (DCMD_ERR); 2744*0Sstevel@tonic-gate } 2745*0Sstevel@tonic-gate 2746*0Sstevel@tonic-gate if (flags & DCMD_LOOP) { 2747*0Sstevel@tonic-gate /* 2748*0Sstevel@tonic-gate * table mode, don't print out every corrupt buffer 2749*0Sstevel@tonic-gate */ 2750*0Sstevel@tonic-gate kmv.kmv_besilent = 1; 2751*0Sstevel@tonic-gate } else { 2752*0Sstevel@tonic-gate mdb_printf("Summary for cache '%s'\n", 2753*0Sstevel@tonic-gate kmv.kmv_cache.cache_name); 2754*0Sstevel@tonic-gate mdb_inc_indent(2); 2755*0Sstevel@tonic-gate kmv.kmv_besilent = 0; 2756*0Sstevel@tonic-gate } 2757*0Sstevel@tonic-gate 2758*0Sstevel@tonic-gate if (check_alloc) 2759*0Sstevel@tonic-gate (void) 
mdb_pwalk("kmem", verify_alloc, &kmv, addr); 2760*0Sstevel@tonic-gate if (check_free) 2761*0Sstevel@tonic-gate (void) mdb_pwalk("freemem", verify_free, &kmv, addr); 2762*0Sstevel@tonic-gate 2763*0Sstevel@tonic-gate if (flags & DCMD_LOOP) { 2764*0Sstevel@tonic-gate if (kmv.kmv_corruption == 0) { 2765*0Sstevel@tonic-gate mdb_printf("%-*s %?p clean\n", 2766*0Sstevel@tonic-gate KMEM_CACHE_NAMELEN, 2767*0Sstevel@tonic-gate kmv.kmv_cache.cache_name, addr); 2768*0Sstevel@tonic-gate } else { 2769*0Sstevel@tonic-gate char *s = ""; /* optional s in "buffer[s]" */ 2770*0Sstevel@tonic-gate if (kmv.kmv_corruption > 1) 2771*0Sstevel@tonic-gate s = "s"; 2772*0Sstevel@tonic-gate 2773*0Sstevel@tonic-gate mdb_printf("%-*s %?p %d corrupt buffer%s\n", 2774*0Sstevel@tonic-gate KMEM_CACHE_NAMELEN, 2775*0Sstevel@tonic-gate kmv.kmv_cache.cache_name, addr, 2776*0Sstevel@tonic-gate kmv.kmv_corruption, s); 2777*0Sstevel@tonic-gate } 2778*0Sstevel@tonic-gate } else { 2779*0Sstevel@tonic-gate /* 2780*0Sstevel@tonic-gate * This is the more verbose mode, when the user has 2781*0Sstevel@tonic-gate * type addr::kmem_verify. If the cache was clean, 2782*0Sstevel@tonic-gate * nothing will have yet been printed. So say something. 2783*0Sstevel@tonic-gate */ 2784*0Sstevel@tonic-gate if (kmv.kmv_corruption == 0) 2785*0Sstevel@tonic-gate mdb_printf("clean\n"); 2786*0Sstevel@tonic-gate 2787*0Sstevel@tonic-gate mdb_dec_indent(2); 2788*0Sstevel@tonic-gate } 2789*0Sstevel@tonic-gate } else { 2790*0Sstevel@tonic-gate /* 2791*0Sstevel@tonic-gate * If the user didn't specify a cache to verify, we'll walk all 2792*0Sstevel@tonic-gate * kmem_cache's, specifying ourself as a callback for each... 
2793*0Sstevel@tonic-gate * this is the equivalent of '::walk kmem_cache .::kmem_verify' 2794*0Sstevel@tonic-gate */ 2795*0Sstevel@tonic-gate mdb_printf("%<u>%-*s %-?s %-20s%</b>\n", KMEM_CACHE_NAMELEN, 2796*0Sstevel@tonic-gate "Cache Name", "Addr", "Cache Integrity"); 2797*0Sstevel@tonic-gate (void) (mdb_walk_dcmd("kmem_cache", "kmem_verify", 0, NULL)); 2798*0Sstevel@tonic-gate } 2799*0Sstevel@tonic-gate 2800*0Sstevel@tonic-gate return (DCMD_OK); 2801*0Sstevel@tonic-gate } 2802*0Sstevel@tonic-gate 2803*0Sstevel@tonic-gate typedef struct vmem_node { 2804*0Sstevel@tonic-gate struct vmem_node *vn_next; 2805*0Sstevel@tonic-gate struct vmem_node *vn_parent; 2806*0Sstevel@tonic-gate struct vmem_node *vn_sibling; 2807*0Sstevel@tonic-gate struct vmem_node *vn_children; 2808*0Sstevel@tonic-gate uintptr_t vn_addr; 2809*0Sstevel@tonic-gate int vn_marked; 2810*0Sstevel@tonic-gate vmem_t vn_vmem; 2811*0Sstevel@tonic-gate } vmem_node_t; 2812*0Sstevel@tonic-gate 2813*0Sstevel@tonic-gate typedef struct vmem_walk { 2814*0Sstevel@tonic-gate vmem_node_t *vw_root; 2815*0Sstevel@tonic-gate vmem_node_t *vw_current; 2816*0Sstevel@tonic-gate } vmem_walk_t; 2817*0Sstevel@tonic-gate 2818*0Sstevel@tonic-gate int 2819*0Sstevel@tonic-gate vmem_walk_init(mdb_walk_state_t *wsp) 2820*0Sstevel@tonic-gate { 2821*0Sstevel@tonic-gate uintptr_t vaddr, paddr; 2822*0Sstevel@tonic-gate vmem_node_t *head = NULL, *root = NULL, *current = NULL, *parent, *vp; 2823*0Sstevel@tonic-gate vmem_walk_t *vw; 2824*0Sstevel@tonic-gate 2825*0Sstevel@tonic-gate if (mdb_readvar(&vaddr, "vmem_list") == -1) { 2826*0Sstevel@tonic-gate mdb_warn("couldn't read 'vmem_list'"); 2827*0Sstevel@tonic-gate return (WALK_ERR); 2828*0Sstevel@tonic-gate } 2829*0Sstevel@tonic-gate 2830*0Sstevel@tonic-gate while (vaddr != NULL) { 2831*0Sstevel@tonic-gate vp = mdb_zalloc(sizeof (vmem_node_t), UM_SLEEP); 2832*0Sstevel@tonic-gate vp->vn_addr = vaddr; 2833*0Sstevel@tonic-gate vp->vn_next = head; 2834*0Sstevel@tonic-gate head = vp; 
2835*0Sstevel@tonic-gate 2836*0Sstevel@tonic-gate if (vaddr == wsp->walk_addr) 2837*0Sstevel@tonic-gate current = vp; 2838*0Sstevel@tonic-gate 2839*0Sstevel@tonic-gate if (mdb_vread(&vp->vn_vmem, sizeof (vmem_t), vaddr) == -1) { 2840*0Sstevel@tonic-gate mdb_warn("couldn't read vmem_t at %p", vaddr); 2841*0Sstevel@tonic-gate goto err; 2842*0Sstevel@tonic-gate } 2843*0Sstevel@tonic-gate 2844*0Sstevel@tonic-gate vaddr = (uintptr_t)vp->vn_vmem.vm_next; 2845*0Sstevel@tonic-gate } 2846*0Sstevel@tonic-gate 2847*0Sstevel@tonic-gate for (vp = head; vp != NULL; vp = vp->vn_next) { 2848*0Sstevel@tonic-gate 2849*0Sstevel@tonic-gate if ((paddr = (uintptr_t)vp->vn_vmem.vm_source) == NULL) { 2850*0Sstevel@tonic-gate vp->vn_sibling = root; 2851*0Sstevel@tonic-gate root = vp; 2852*0Sstevel@tonic-gate continue; 2853*0Sstevel@tonic-gate } 2854*0Sstevel@tonic-gate 2855*0Sstevel@tonic-gate for (parent = head; parent != NULL; parent = parent->vn_next) { 2856*0Sstevel@tonic-gate if (parent->vn_addr != paddr) 2857*0Sstevel@tonic-gate continue; 2858*0Sstevel@tonic-gate vp->vn_sibling = parent->vn_children; 2859*0Sstevel@tonic-gate parent->vn_children = vp; 2860*0Sstevel@tonic-gate vp->vn_parent = parent; 2861*0Sstevel@tonic-gate break; 2862*0Sstevel@tonic-gate } 2863*0Sstevel@tonic-gate 2864*0Sstevel@tonic-gate if (parent == NULL) { 2865*0Sstevel@tonic-gate mdb_warn("couldn't find %p's parent (%p)\n", 2866*0Sstevel@tonic-gate vp->vn_addr, paddr); 2867*0Sstevel@tonic-gate goto err; 2868*0Sstevel@tonic-gate } 2869*0Sstevel@tonic-gate } 2870*0Sstevel@tonic-gate 2871*0Sstevel@tonic-gate vw = mdb_zalloc(sizeof (vmem_walk_t), UM_SLEEP); 2872*0Sstevel@tonic-gate vw->vw_root = root; 2873*0Sstevel@tonic-gate 2874*0Sstevel@tonic-gate if (current != NULL) 2875*0Sstevel@tonic-gate vw->vw_current = current; 2876*0Sstevel@tonic-gate else 2877*0Sstevel@tonic-gate vw->vw_current = root; 2878*0Sstevel@tonic-gate 2879*0Sstevel@tonic-gate wsp->walk_data = vw; 2880*0Sstevel@tonic-gate return (WALK_NEXT); 
2881*0Sstevel@tonic-gate err: 2882*0Sstevel@tonic-gate for (vp = head; head != NULL; vp = head) { 2883*0Sstevel@tonic-gate head = vp->vn_next; 2884*0Sstevel@tonic-gate mdb_free(vp, sizeof (vmem_node_t)); 2885*0Sstevel@tonic-gate } 2886*0Sstevel@tonic-gate 2887*0Sstevel@tonic-gate return (WALK_ERR); 2888*0Sstevel@tonic-gate } 2889*0Sstevel@tonic-gate 2890*0Sstevel@tonic-gate int 2891*0Sstevel@tonic-gate vmem_walk_step(mdb_walk_state_t *wsp) 2892*0Sstevel@tonic-gate { 2893*0Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data; 2894*0Sstevel@tonic-gate vmem_node_t *vp; 2895*0Sstevel@tonic-gate int rval; 2896*0Sstevel@tonic-gate 2897*0Sstevel@tonic-gate if ((vp = vw->vw_current) == NULL) 2898*0Sstevel@tonic-gate return (WALK_DONE); 2899*0Sstevel@tonic-gate 2900*0Sstevel@tonic-gate rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata); 2901*0Sstevel@tonic-gate 2902*0Sstevel@tonic-gate if (vp->vn_children != NULL) { 2903*0Sstevel@tonic-gate vw->vw_current = vp->vn_children; 2904*0Sstevel@tonic-gate return (rval); 2905*0Sstevel@tonic-gate } 2906*0Sstevel@tonic-gate 2907*0Sstevel@tonic-gate do { 2908*0Sstevel@tonic-gate vw->vw_current = vp->vn_sibling; 2909*0Sstevel@tonic-gate vp = vp->vn_parent; 2910*0Sstevel@tonic-gate } while (vw->vw_current == NULL && vp != NULL); 2911*0Sstevel@tonic-gate 2912*0Sstevel@tonic-gate return (rval); 2913*0Sstevel@tonic-gate } 2914*0Sstevel@tonic-gate 2915*0Sstevel@tonic-gate /* 2916*0Sstevel@tonic-gate * The "vmem_postfix" walk walks the vmem arenas in post-fix order; all 2917*0Sstevel@tonic-gate * children are visited before their parent. We perform the postfix walk 2918*0Sstevel@tonic-gate * iteratively (rather than recursively) to allow mdb to regain control 2919*0Sstevel@tonic-gate * after each callback. 
2920*0Sstevel@tonic-gate */ 2921*0Sstevel@tonic-gate int 2922*0Sstevel@tonic-gate vmem_postfix_walk_step(mdb_walk_state_t *wsp) 2923*0Sstevel@tonic-gate { 2924*0Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data; 2925*0Sstevel@tonic-gate vmem_node_t *vp = vw->vw_current; 2926*0Sstevel@tonic-gate int rval; 2927*0Sstevel@tonic-gate 2928*0Sstevel@tonic-gate /* 2929*0Sstevel@tonic-gate * If this node is marked, then we know that we have already visited 2930*0Sstevel@tonic-gate * all of its children. If the node has any siblings, they need to 2931*0Sstevel@tonic-gate * be visited next; otherwise, we need to visit the parent. Note 2932*0Sstevel@tonic-gate * that vp->vn_marked will only be zero on the first invocation of 2933*0Sstevel@tonic-gate * the step function. 2934*0Sstevel@tonic-gate */ 2935*0Sstevel@tonic-gate if (vp->vn_marked) { 2936*0Sstevel@tonic-gate if (vp->vn_sibling != NULL) 2937*0Sstevel@tonic-gate vp = vp->vn_sibling; 2938*0Sstevel@tonic-gate else if (vp->vn_parent != NULL) 2939*0Sstevel@tonic-gate vp = vp->vn_parent; 2940*0Sstevel@tonic-gate else { 2941*0Sstevel@tonic-gate /* 2942*0Sstevel@tonic-gate * We have neither a parent, nor a sibling, and we 2943*0Sstevel@tonic-gate * have already been visited; we're done. 2944*0Sstevel@tonic-gate */ 2945*0Sstevel@tonic-gate return (WALK_DONE); 2946*0Sstevel@tonic-gate } 2947*0Sstevel@tonic-gate } 2948*0Sstevel@tonic-gate 2949*0Sstevel@tonic-gate /* 2950*0Sstevel@tonic-gate * Before we visit this node, visit its children. 
2951*0Sstevel@tonic-gate */ 2952*0Sstevel@tonic-gate while (vp->vn_children != NULL && !vp->vn_children->vn_marked) 2953*0Sstevel@tonic-gate vp = vp->vn_children; 2954*0Sstevel@tonic-gate 2955*0Sstevel@tonic-gate vp->vn_marked = 1; 2956*0Sstevel@tonic-gate vw->vw_current = vp; 2957*0Sstevel@tonic-gate rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata); 2958*0Sstevel@tonic-gate 2959*0Sstevel@tonic-gate return (rval); 2960*0Sstevel@tonic-gate } 2961*0Sstevel@tonic-gate 2962*0Sstevel@tonic-gate void 2963*0Sstevel@tonic-gate vmem_walk_fini(mdb_walk_state_t *wsp) 2964*0Sstevel@tonic-gate { 2965*0Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data; 2966*0Sstevel@tonic-gate vmem_node_t *root = vw->vw_root; 2967*0Sstevel@tonic-gate int done; 2968*0Sstevel@tonic-gate 2969*0Sstevel@tonic-gate if (root == NULL) 2970*0Sstevel@tonic-gate return; 2971*0Sstevel@tonic-gate 2972*0Sstevel@tonic-gate if ((vw->vw_root = root->vn_children) != NULL) 2973*0Sstevel@tonic-gate vmem_walk_fini(wsp); 2974*0Sstevel@tonic-gate 2975*0Sstevel@tonic-gate vw->vw_root = root->vn_sibling; 2976*0Sstevel@tonic-gate done = (root->vn_sibling == NULL && root->vn_parent == NULL); 2977*0Sstevel@tonic-gate mdb_free(root, sizeof (vmem_node_t)); 2978*0Sstevel@tonic-gate 2979*0Sstevel@tonic-gate if (done) { 2980*0Sstevel@tonic-gate mdb_free(vw, sizeof (vmem_walk_t)); 2981*0Sstevel@tonic-gate } else { 2982*0Sstevel@tonic-gate vmem_walk_fini(wsp); 2983*0Sstevel@tonic-gate } 2984*0Sstevel@tonic-gate } 2985*0Sstevel@tonic-gate 2986*0Sstevel@tonic-gate typedef struct vmem_seg_walk { 2987*0Sstevel@tonic-gate uint8_t vsw_type; 2988*0Sstevel@tonic-gate uintptr_t vsw_start; 2989*0Sstevel@tonic-gate uintptr_t vsw_current; 2990*0Sstevel@tonic-gate } vmem_seg_walk_t; 2991*0Sstevel@tonic-gate 2992*0Sstevel@tonic-gate /*ARGSUSED*/ 2993*0Sstevel@tonic-gate int 2994*0Sstevel@tonic-gate vmem_seg_walk_common_init(mdb_walk_state_t *wsp, uint8_t type, char *name) 2995*0Sstevel@tonic-gate { 
2996*0Sstevel@tonic-gate vmem_seg_walk_t *vsw; 2997*0Sstevel@tonic-gate 2998*0Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 2999*0Sstevel@tonic-gate mdb_warn("vmem_%s does not support global walks\n", name); 3000*0Sstevel@tonic-gate return (WALK_ERR); 3001*0Sstevel@tonic-gate } 3002*0Sstevel@tonic-gate 3003*0Sstevel@tonic-gate wsp->walk_data = vsw = mdb_alloc(sizeof (vmem_seg_walk_t), UM_SLEEP); 3004*0Sstevel@tonic-gate 3005*0Sstevel@tonic-gate vsw->vsw_type = type; 3006*0Sstevel@tonic-gate vsw->vsw_start = wsp->walk_addr + offsetof(vmem_t, vm_seg0); 3007*0Sstevel@tonic-gate vsw->vsw_current = vsw->vsw_start; 3008*0Sstevel@tonic-gate 3009*0Sstevel@tonic-gate return (WALK_NEXT); 3010*0Sstevel@tonic-gate } 3011*0Sstevel@tonic-gate 3012*0Sstevel@tonic-gate /* 3013*0Sstevel@tonic-gate * vmem segments can't have type 0 (this should be added to vmem_impl.h). 3014*0Sstevel@tonic-gate */ 3015*0Sstevel@tonic-gate #define VMEM_NONE 0 3016*0Sstevel@tonic-gate 3017*0Sstevel@tonic-gate int 3018*0Sstevel@tonic-gate vmem_alloc_walk_init(mdb_walk_state_t *wsp) 3019*0Sstevel@tonic-gate { 3020*0Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_ALLOC, "alloc")); 3021*0Sstevel@tonic-gate } 3022*0Sstevel@tonic-gate 3023*0Sstevel@tonic-gate int 3024*0Sstevel@tonic-gate vmem_free_walk_init(mdb_walk_state_t *wsp) 3025*0Sstevel@tonic-gate { 3026*0Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_FREE, "free")); 3027*0Sstevel@tonic-gate } 3028*0Sstevel@tonic-gate 3029*0Sstevel@tonic-gate int 3030*0Sstevel@tonic-gate vmem_span_walk_init(mdb_walk_state_t *wsp) 3031*0Sstevel@tonic-gate { 3032*0Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_SPAN, "span")); 3033*0Sstevel@tonic-gate } 3034*0Sstevel@tonic-gate 3035*0Sstevel@tonic-gate int 3036*0Sstevel@tonic-gate vmem_seg_walk_init(mdb_walk_state_t *wsp) 3037*0Sstevel@tonic-gate { 3038*0Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_NONE, "seg")); 3039*0Sstevel@tonic-gate } 
3040*0Sstevel@tonic-gate 3041*0Sstevel@tonic-gate int 3042*0Sstevel@tonic-gate vmem_seg_walk_step(mdb_walk_state_t *wsp) 3043*0Sstevel@tonic-gate { 3044*0Sstevel@tonic-gate vmem_seg_t seg; 3045*0Sstevel@tonic-gate vmem_seg_walk_t *vsw = wsp->walk_data; 3046*0Sstevel@tonic-gate uintptr_t addr = vsw->vsw_current; 3047*0Sstevel@tonic-gate static size_t seg_size = 0; 3048*0Sstevel@tonic-gate int rval; 3049*0Sstevel@tonic-gate 3050*0Sstevel@tonic-gate if (!seg_size) { 3051*0Sstevel@tonic-gate if (mdb_readvar(&seg_size, "vmem_seg_size") == -1) { 3052*0Sstevel@tonic-gate mdb_warn("failed to read 'vmem_seg_size'"); 3053*0Sstevel@tonic-gate seg_size = sizeof (vmem_seg_t); 3054*0Sstevel@tonic-gate } 3055*0Sstevel@tonic-gate } 3056*0Sstevel@tonic-gate 3057*0Sstevel@tonic-gate if (seg_size < sizeof (seg)) 3058*0Sstevel@tonic-gate bzero((caddr_t)&seg + seg_size, sizeof (seg) - seg_size); 3059*0Sstevel@tonic-gate 3060*0Sstevel@tonic-gate if (mdb_vread(&seg, seg_size, addr) == -1) { 3061*0Sstevel@tonic-gate mdb_warn("couldn't read vmem_seg at %p", addr); 3062*0Sstevel@tonic-gate return (WALK_ERR); 3063*0Sstevel@tonic-gate } 3064*0Sstevel@tonic-gate 3065*0Sstevel@tonic-gate vsw->vsw_current = (uintptr_t)seg.vs_anext; 3066*0Sstevel@tonic-gate if (vsw->vsw_type != VMEM_NONE && seg.vs_type != vsw->vsw_type) { 3067*0Sstevel@tonic-gate rval = WALK_NEXT; 3068*0Sstevel@tonic-gate } else { 3069*0Sstevel@tonic-gate rval = wsp->walk_callback(addr, &seg, wsp->walk_cbdata); 3070*0Sstevel@tonic-gate } 3071*0Sstevel@tonic-gate 3072*0Sstevel@tonic-gate if (vsw->vsw_current == vsw->vsw_start) 3073*0Sstevel@tonic-gate return (WALK_DONE); 3074*0Sstevel@tonic-gate 3075*0Sstevel@tonic-gate return (rval); 3076*0Sstevel@tonic-gate } 3077*0Sstevel@tonic-gate 3078*0Sstevel@tonic-gate void 3079*0Sstevel@tonic-gate vmem_seg_walk_fini(mdb_walk_state_t *wsp) 3080*0Sstevel@tonic-gate { 3081*0Sstevel@tonic-gate vmem_seg_walk_t *vsw = wsp->walk_data; 3082*0Sstevel@tonic-gate 3083*0Sstevel@tonic-gate 
mdb_free(vsw, sizeof (vmem_seg_walk_t)); 3084*0Sstevel@tonic-gate } 3085*0Sstevel@tonic-gate 3086*0Sstevel@tonic-gate #define VMEM_NAMEWIDTH 22 3087*0Sstevel@tonic-gate 3088*0Sstevel@tonic-gate int 3089*0Sstevel@tonic-gate vmem(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 3090*0Sstevel@tonic-gate { 3091*0Sstevel@tonic-gate vmem_t v, parent; 3092*0Sstevel@tonic-gate vmem_kstat_t *vkp = &v.vm_kstat; 3093*0Sstevel@tonic-gate uintptr_t paddr; 3094*0Sstevel@tonic-gate int ident = 0; 3095*0Sstevel@tonic-gate char c[VMEM_NAMEWIDTH]; 3096*0Sstevel@tonic-gate 3097*0Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) { 3098*0Sstevel@tonic-gate if (mdb_walk_dcmd("vmem", "vmem", argc, argv) == -1) { 3099*0Sstevel@tonic-gate mdb_warn("can't walk vmem"); 3100*0Sstevel@tonic-gate return (DCMD_ERR); 3101*0Sstevel@tonic-gate } 3102*0Sstevel@tonic-gate return (DCMD_OK); 3103*0Sstevel@tonic-gate } 3104*0Sstevel@tonic-gate 3105*0Sstevel@tonic-gate if (DCMD_HDRSPEC(flags)) 3106*0Sstevel@tonic-gate mdb_printf("%-?s %-*s %10s %12s %9s %5s\n", 3107*0Sstevel@tonic-gate "ADDR", VMEM_NAMEWIDTH, "NAME", "INUSE", 3108*0Sstevel@tonic-gate "TOTAL", "SUCCEED", "FAIL"); 3109*0Sstevel@tonic-gate 3110*0Sstevel@tonic-gate if (mdb_vread(&v, sizeof (v), addr) == -1) { 3111*0Sstevel@tonic-gate mdb_warn("couldn't read vmem at %p", addr); 3112*0Sstevel@tonic-gate return (DCMD_ERR); 3113*0Sstevel@tonic-gate } 3114*0Sstevel@tonic-gate 3115*0Sstevel@tonic-gate for (paddr = (uintptr_t)v.vm_source; paddr != NULL; ident += 2) { 3116*0Sstevel@tonic-gate if (mdb_vread(&parent, sizeof (parent), paddr) == -1) { 3117*0Sstevel@tonic-gate mdb_warn("couldn't trace %p's ancestry", addr); 3118*0Sstevel@tonic-gate ident = 0; 3119*0Sstevel@tonic-gate break; 3120*0Sstevel@tonic-gate } 3121*0Sstevel@tonic-gate paddr = (uintptr_t)parent.vm_source; 3122*0Sstevel@tonic-gate } 3123*0Sstevel@tonic-gate 3124*0Sstevel@tonic-gate (void) mdb_snprintf(c, VMEM_NAMEWIDTH, "%*s%s", ident, "", v.vm_name); 
3125*0Sstevel@tonic-gate 3126*0Sstevel@tonic-gate mdb_printf("%0?p %-*s %10llu %12llu %9llu %5llu\n", 3127*0Sstevel@tonic-gate addr, VMEM_NAMEWIDTH, c, 3128*0Sstevel@tonic-gate vkp->vk_mem_inuse.value.ui64, vkp->vk_mem_total.value.ui64, 3129*0Sstevel@tonic-gate vkp->vk_alloc.value.ui64, vkp->vk_fail.value.ui64); 3130*0Sstevel@tonic-gate 3131*0Sstevel@tonic-gate return (DCMD_OK); 3132*0Sstevel@tonic-gate } 3133*0Sstevel@tonic-gate 3134*0Sstevel@tonic-gate void 3135*0Sstevel@tonic-gate vmem_seg_help(void) 3136*0Sstevel@tonic-gate { 3137*0Sstevel@tonic-gate mdb_printf("%s\n", 3138*0Sstevel@tonic-gate "Display the contents of vmem_seg_ts, with optional filtering.\n" 3139*0Sstevel@tonic-gate "\n" 3140*0Sstevel@tonic-gate "A vmem_seg_t represents a range of addresses (or arbitrary numbers),\n" 3141*0Sstevel@tonic-gate "representing a single chunk of data. Only ALLOC segments have debugging\n" 3142*0Sstevel@tonic-gate "information.\n"); 3143*0Sstevel@tonic-gate mdb_dec_indent(2); 3144*0Sstevel@tonic-gate mdb_printf("%<b>OPTIONS%</b>\n"); 3145*0Sstevel@tonic-gate mdb_inc_indent(2); 3146*0Sstevel@tonic-gate mdb_printf("%s", 3147*0Sstevel@tonic-gate " -v Display the full content of the vmem_seg, including its stack trace\n" 3148*0Sstevel@tonic-gate " -s report the size of the segment, instead of the end address\n" 3149*0Sstevel@tonic-gate " -c caller\n" 3150*0Sstevel@tonic-gate " filter out segments without the function/PC in their stack trace\n" 3151*0Sstevel@tonic-gate " -e earliest\n" 3152*0Sstevel@tonic-gate " filter out segments timestamped before earliest\n" 3153*0Sstevel@tonic-gate " -l latest\n" 3154*0Sstevel@tonic-gate " filter out segments timestamped after latest\n" 3155*0Sstevel@tonic-gate " -m minsize\n" 3156*0Sstevel@tonic-gate " filer out segments smaller than minsize\n" 3157*0Sstevel@tonic-gate " -M maxsize\n" 3158*0Sstevel@tonic-gate " filer out segments larger than maxsize\n" 3159*0Sstevel@tonic-gate " -t thread\n" 3160*0Sstevel@tonic-gate " filter out 
segments not involving thread\n" 3161*0Sstevel@tonic-gate " -T type\n" 3162*0Sstevel@tonic-gate " filter out segments not of type 'type'\n" 3163*0Sstevel@tonic-gate " type is one of: ALLOC/FREE/SPAN/ROTOR/WALKER\n"); 3164*0Sstevel@tonic-gate } 3165*0Sstevel@tonic-gate 3166*0Sstevel@tonic-gate /*ARGSUSED*/ 3167*0Sstevel@tonic-gate int 3168*0Sstevel@tonic-gate vmem_seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 3169*0Sstevel@tonic-gate { 3170*0Sstevel@tonic-gate vmem_seg_t vs; 3171*0Sstevel@tonic-gate pc_t *stk = vs.vs_stack; 3172*0Sstevel@tonic-gate uintptr_t sz; 3173*0Sstevel@tonic-gate uint8_t t; 3174*0Sstevel@tonic-gate const char *type = NULL; 3175*0Sstevel@tonic-gate GElf_Sym sym; 3176*0Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 3177*0Sstevel@tonic-gate int no_debug; 3178*0Sstevel@tonic-gate int i; 3179*0Sstevel@tonic-gate int depth; 3180*0Sstevel@tonic-gate uintptr_t laddr, haddr; 3181*0Sstevel@tonic-gate 3182*0Sstevel@tonic-gate uintptr_t caller = NULL, thread = NULL; 3183*0Sstevel@tonic-gate uintptr_t minsize = 0, maxsize = 0; 3184*0Sstevel@tonic-gate 3185*0Sstevel@tonic-gate hrtime_t earliest = 0, latest = 0; 3186*0Sstevel@tonic-gate 3187*0Sstevel@tonic-gate uint_t size = 0; 3188*0Sstevel@tonic-gate uint_t verbose = 0; 3189*0Sstevel@tonic-gate 3190*0Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 3191*0Sstevel@tonic-gate return (DCMD_USAGE); 3192*0Sstevel@tonic-gate 3193*0Sstevel@tonic-gate if (mdb_getopts(argc, argv, 3194*0Sstevel@tonic-gate 'c', MDB_OPT_UINTPTR, &caller, 3195*0Sstevel@tonic-gate 'e', MDB_OPT_UINT64, &earliest, 3196*0Sstevel@tonic-gate 'l', MDB_OPT_UINT64, &latest, 3197*0Sstevel@tonic-gate 's', MDB_OPT_SETBITS, TRUE, &size, 3198*0Sstevel@tonic-gate 'm', MDB_OPT_UINTPTR, &minsize, 3199*0Sstevel@tonic-gate 'M', MDB_OPT_UINTPTR, &maxsize, 3200*0Sstevel@tonic-gate 't', MDB_OPT_UINTPTR, &thread, 3201*0Sstevel@tonic-gate 'T', MDB_OPT_STR, &type, 3202*0Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &verbose, 
3203*0Sstevel@tonic-gate NULL) != argc) 3204*0Sstevel@tonic-gate return (DCMD_USAGE); 3205*0Sstevel@tonic-gate 3206*0Sstevel@tonic-gate if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) { 3207*0Sstevel@tonic-gate if (verbose) { 3208*0Sstevel@tonic-gate mdb_printf("%16s %4s %16s %16s %16s\n" 3209*0Sstevel@tonic-gate "%<u>%16s %4s %16s %16s %16s%</u>\n", 3210*0Sstevel@tonic-gate "ADDR", "TYPE", "START", "END", "SIZE", 3211*0Sstevel@tonic-gate "", "", "THREAD", "TIMESTAMP", ""); 3212*0Sstevel@tonic-gate } else { 3213*0Sstevel@tonic-gate mdb_printf("%?s %4s %?s %?s %s\n", "ADDR", "TYPE", 3214*0Sstevel@tonic-gate "START", size? "SIZE" : "END", "WHO"); 3215*0Sstevel@tonic-gate } 3216*0Sstevel@tonic-gate } 3217*0Sstevel@tonic-gate 3218*0Sstevel@tonic-gate if (mdb_vread(&vs, sizeof (vs), addr) == -1) { 3219*0Sstevel@tonic-gate mdb_warn("couldn't read vmem_seg at %p", addr); 3220*0Sstevel@tonic-gate return (DCMD_ERR); 3221*0Sstevel@tonic-gate } 3222*0Sstevel@tonic-gate 3223*0Sstevel@tonic-gate if (type != NULL) { 3224*0Sstevel@tonic-gate if (strcmp(type, "ALLC") == 0 || strcmp(type, "ALLOC") == 0) 3225*0Sstevel@tonic-gate t = VMEM_ALLOC; 3226*0Sstevel@tonic-gate else if (strcmp(type, "FREE") == 0) 3227*0Sstevel@tonic-gate t = VMEM_FREE; 3228*0Sstevel@tonic-gate else if (strcmp(type, "SPAN") == 0) 3229*0Sstevel@tonic-gate t = VMEM_SPAN; 3230*0Sstevel@tonic-gate else if (strcmp(type, "ROTR") == 0 || 3231*0Sstevel@tonic-gate strcmp(type, "ROTOR") == 0) 3232*0Sstevel@tonic-gate t = VMEM_ROTOR; 3233*0Sstevel@tonic-gate else if (strcmp(type, "WLKR") == 0 || 3234*0Sstevel@tonic-gate strcmp(type, "WALKER") == 0) 3235*0Sstevel@tonic-gate t = VMEM_WALKER; 3236*0Sstevel@tonic-gate else { 3237*0Sstevel@tonic-gate mdb_warn("\"%s\" is not a recognized vmem_seg type\n", 3238*0Sstevel@tonic-gate type); 3239*0Sstevel@tonic-gate return (DCMD_ERR); 3240*0Sstevel@tonic-gate } 3241*0Sstevel@tonic-gate 3242*0Sstevel@tonic-gate if (vs.vs_type != t) 3243*0Sstevel@tonic-gate return (DCMD_OK); 
3244*0Sstevel@tonic-gate } 3245*0Sstevel@tonic-gate 3246*0Sstevel@tonic-gate sz = vs.vs_end - vs.vs_start; 3247*0Sstevel@tonic-gate 3248*0Sstevel@tonic-gate if (minsize != 0 && sz < minsize) 3249*0Sstevel@tonic-gate return (DCMD_OK); 3250*0Sstevel@tonic-gate 3251*0Sstevel@tonic-gate if (maxsize != 0 && sz > maxsize) 3252*0Sstevel@tonic-gate return (DCMD_OK); 3253*0Sstevel@tonic-gate 3254*0Sstevel@tonic-gate t = vs.vs_type; 3255*0Sstevel@tonic-gate depth = vs.vs_depth; 3256*0Sstevel@tonic-gate 3257*0Sstevel@tonic-gate /* 3258*0Sstevel@tonic-gate * debug info, when present, is only accurate for VMEM_ALLOC segments 3259*0Sstevel@tonic-gate */ 3260*0Sstevel@tonic-gate no_debug = (t != VMEM_ALLOC) || 3261*0Sstevel@tonic-gate (depth == 0 || depth > VMEM_STACK_DEPTH); 3262*0Sstevel@tonic-gate 3263*0Sstevel@tonic-gate if (no_debug) { 3264*0Sstevel@tonic-gate if (caller != NULL || thread != NULL || earliest != 0 || 3265*0Sstevel@tonic-gate latest != 0) 3266*0Sstevel@tonic-gate return (DCMD_OK); /* not enough info */ 3267*0Sstevel@tonic-gate } else { 3268*0Sstevel@tonic-gate if (caller != NULL) { 3269*0Sstevel@tonic-gate laddr = caller; 3270*0Sstevel@tonic-gate haddr = caller + sizeof (caller); 3271*0Sstevel@tonic-gate 3272*0Sstevel@tonic-gate if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, 3273*0Sstevel@tonic-gate sizeof (c), &sym) != -1 && 3274*0Sstevel@tonic-gate caller == (uintptr_t)sym.st_value) { 3275*0Sstevel@tonic-gate /* 3276*0Sstevel@tonic-gate * We were provided an exact symbol value; any 3277*0Sstevel@tonic-gate * address in the function is valid. 
3278*0Sstevel@tonic-gate */ 3279*0Sstevel@tonic-gate laddr = (uintptr_t)sym.st_value; 3280*0Sstevel@tonic-gate haddr = (uintptr_t)sym.st_value + sym.st_size; 3281*0Sstevel@tonic-gate } 3282*0Sstevel@tonic-gate 3283*0Sstevel@tonic-gate for (i = 0; i < depth; i++) 3284*0Sstevel@tonic-gate if (vs.vs_stack[i] >= laddr && 3285*0Sstevel@tonic-gate vs.vs_stack[i] < haddr) 3286*0Sstevel@tonic-gate break; 3287*0Sstevel@tonic-gate 3288*0Sstevel@tonic-gate if (i == depth) 3289*0Sstevel@tonic-gate return (DCMD_OK); 3290*0Sstevel@tonic-gate } 3291*0Sstevel@tonic-gate 3292*0Sstevel@tonic-gate if (thread != NULL && (uintptr_t)vs.vs_thread != thread) 3293*0Sstevel@tonic-gate return (DCMD_OK); 3294*0Sstevel@tonic-gate 3295*0Sstevel@tonic-gate if (earliest != 0 && vs.vs_timestamp < earliest) 3296*0Sstevel@tonic-gate return (DCMD_OK); 3297*0Sstevel@tonic-gate 3298*0Sstevel@tonic-gate if (latest != 0 && vs.vs_timestamp > latest) 3299*0Sstevel@tonic-gate return (DCMD_OK); 3300*0Sstevel@tonic-gate } 3301*0Sstevel@tonic-gate 3302*0Sstevel@tonic-gate type = (t == VMEM_ALLOC ? "ALLC" : 3303*0Sstevel@tonic-gate t == VMEM_FREE ? "FREE" : 3304*0Sstevel@tonic-gate t == VMEM_SPAN ? "SPAN" : 3305*0Sstevel@tonic-gate t == VMEM_ROTOR ? "ROTR" : 3306*0Sstevel@tonic-gate t == VMEM_WALKER ? 
"WLKR" : 3307*0Sstevel@tonic-gate "????"); 3308*0Sstevel@tonic-gate 3309*0Sstevel@tonic-gate if (flags & DCMD_PIPE_OUT) { 3310*0Sstevel@tonic-gate mdb_printf("%#lr\n", addr); 3311*0Sstevel@tonic-gate return (DCMD_OK); 3312*0Sstevel@tonic-gate } 3313*0Sstevel@tonic-gate 3314*0Sstevel@tonic-gate if (verbose) { 3315*0Sstevel@tonic-gate mdb_printf("%<b>%16p%</b> %4s %16p %16p %16d\n", 3316*0Sstevel@tonic-gate addr, type, vs.vs_start, vs.vs_end, sz); 3317*0Sstevel@tonic-gate 3318*0Sstevel@tonic-gate if (no_debug) 3319*0Sstevel@tonic-gate return (DCMD_OK); 3320*0Sstevel@tonic-gate 3321*0Sstevel@tonic-gate mdb_printf("%16s %4s %16p %16llx\n", 3322*0Sstevel@tonic-gate "", "", vs.vs_thread, vs.vs_timestamp); 3323*0Sstevel@tonic-gate 3324*0Sstevel@tonic-gate mdb_inc_indent(17); 3325*0Sstevel@tonic-gate for (i = 0; i < depth; i++) { 3326*0Sstevel@tonic-gate mdb_printf("%a\n", stk[i]); 3327*0Sstevel@tonic-gate } 3328*0Sstevel@tonic-gate mdb_dec_indent(17); 3329*0Sstevel@tonic-gate mdb_printf("\n"); 3330*0Sstevel@tonic-gate } else { 3331*0Sstevel@tonic-gate mdb_printf("%0?p %4s %0?p %0?p", addr, type, 3332*0Sstevel@tonic-gate vs.vs_start, size? 
sz : vs.vs_end); 3333*0Sstevel@tonic-gate 3334*0Sstevel@tonic-gate if (no_debug) { 3335*0Sstevel@tonic-gate mdb_printf("\n"); 3336*0Sstevel@tonic-gate return (DCMD_OK); 3337*0Sstevel@tonic-gate } 3338*0Sstevel@tonic-gate 3339*0Sstevel@tonic-gate for (i = 0; i < depth; i++) { 3340*0Sstevel@tonic-gate if (mdb_lookup_by_addr(stk[i], MDB_SYM_FUZZY, 3341*0Sstevel@tonic-gate c, sizeof (c), &sym) == -1) 3342*0Sstevel@tonic-gate continue; 3343*0Sstevel@tonic-gate if (strncmp(c, "vmem_", 5) == 0) 3344*0Sstevel@tonic-gate continue; 3345*0Sstevel@tonic-gate break; 3346*0Sstevel@tonic-gate } 3347*0Sstevel@tonic-gate mdb_printf(" %a\n", stk[i]); 3348*0Sstevel@tonic-gate } 3349*0Sstevel@tonic-gate return (DCMD_OK); 3350*0Sstevel@tonic-gate } 3351*0Sstevel@tonic-gate 3352*0Sstevel@tonic-gate typedef struct kmalog_data { 3353*0Sstevel@tonic-gate uintptr_t kma_addr; 3354*0Sstevel@tonic-gate hrtime_t kma_newest; 3355*0Sstevel@tonic-gate } kmalog_data_t; 3356*0Sstevel@tonic-gate 3357*0Sstevel@tonic-gate /*ARGSUSED*/ 3358*0Sstevel@tonic-gate static int 3359*0Sstevel@tonic-gate showbc(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmalog_data_t *kma) 3360*0Sstevel@tonic-gate { 3361*0Sstevel@tonic-gate char name[KMEM_CACHE_NAMELEN + 1]; 3362*0Sstevel@tonic-gate hrtime_t delta; 3363*0Sstevel@tonic-gate int i, depth; 3364*0Sstevel@tonic-gate size_t bufsize; 3365*0Sstevel@tonic-gate 3366*0Sstevel@tonic-gate if (bcp->bc_timestamp == 0) 3367*0Sstevel@tonic-gate return (WALK_DONE); 3368*0Sstevel@tonic-gate 3369*0Sstevel@tonic-gate if (kma->kma_newest == 0) 3370*0Sstevel@tonic-gate kma->kma_newest = bcp->bc_timestamp; 3371*0Sstevel@tonic-gate 3372*0Sstevel@tonic-gate if (kma->kma_addr) { 3373*0Sstevel@tonic-gate if (mdb_vread(&bufsize, sizeof (bufsize), 3374*0Sstevel@tonic-gate (uintptr_t)&bcp->bc_cache->cache_bufsize) == -1) { 3375*0Sstevel@tonic-gate mdb_warn( 3376*0Sstevel@tonic-gate "failed to read cache_bufsize for cache at %p", 3377*0Sstevel@tonic-gate bcp->bc_cache); 
3378*0Sstevel@tonic-gate return (WALK_ERR); 3379*0Sstevel@tonic-gate } 3380*0Sstevel@tonic-gate 3381*0Sstevel@tonic-gate if (kma->kma_addr < (uintptr_t)bcp->bc_addr || 3382*0Sstevel@tonic-gate kma->kma_addr >= (uintptr_t)bcp->bc_addr + bufsize) 3383*0Sstevel@tonic-gate return (WALK_NEXT); 3384*0Sstevel@tonic-gate } 3385*0Sstevel@tonic-gate 3386*0Sstevel@tonic-gate delta = kma->kma_newest - bcp->bc_timestamp; 3387*0Sstevel@tonic-gate depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH); 3388*0Sstevel@tonic-gate 3389*0Sstevel@tonic-gate if (mdb_readstr(name, sizeof (name), (uintptr_t) 3390*0Sstevel@tonic-gate &bcp->bc_cache->cache_name) <= 0) 3391*0Sstevel@tonic-gate (void) mdb_snprintf(name, sizeof (name), "%a", bcp->bc_cache); 3392*0Sstevel@tonic-gate 3393*0Sstevel@tonic-gate mdb_printf("\nT-%lld.%09lld addr=%p %s\n", 3394*0Sstevel@tonic-gate delta / NANOSEC, delta % NANOSEC, bcp->bc_addr, name); 3395*0Sstevel@tonic-gate 3396*0Sstevel@tonic-gate for (i = 0; i < depth; i++) 3397*0Sstevel@tonic-gate mdb_printf("\t %a\n", bcp->bc_stack[i]); 3398*0Sstevel@tonic-gate 3399*0Sstevel@tonic-gate return (WALK_NEXT); 3400*0Sstevel@tonic-gate } 3401*0Sstevel@tonic-gate 3402*0Sstevel@tonic-gate int 3403*0Sstevel@tonic-gate kmalog(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 3404*0Sstevel@tonic-gate { 3405*0Sstevel@tonic-gate const char *logname = "kmem_transaction_log"; 3406*0Sstevel@tonic-gate kmalog_data_t kma; 3407*0Sstevel@tonic-gate 3408*0Sstevel@tonic-gate if (argc > 1) 3409*0Sstevel@tonic-gate return (DCMD_USAGE); 3410*0Sstevel@tonic-gate 3411*0Sstevel@tonic-gate kma.kma_newest = 0; 3412*0Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) 3413*0Sstevel@tonic-gate kma.kma_addr = addr; 3414*0Sstevel@tonic-gate else 3415*0Sstevel@tonic-gate kma.kma_addr = NULL; 3416*0Sstevel@tonic-gate 3417*0Sstevel@tonic-gate if (argc > 0) { 3418*0Sstevel@tonic-gate if (argv->a_type != MDB_TYPE_STRING) 3419*0Sstevel@tonic-gate return (DCMD_USAGE); 3420*0Sstevel@tonic-gate if 
(strcmp(argv->a_un.a_str, "fail") == 0) 3421*0Sstevel@tonic-gate logname = "kmem_failure_log"; 3422*0Sstevel@tonic-gate else if (strcmp(argv->a_un.a_str, "slab") == 0) 3423*0Sstevel@tonic-gate logname = "kmem_slab_log"; 3424*0Sstevel@tonic-gate else 3425*0Sstevel@tonic-gate return (DCMD_USAGE); 3426*0Sstevel@tonic-gate } 3427*0Sstevel@tonic-gate 3428*0Sstevel@tonic-gate if (mdb_readvar(&addr, logname) == -1) { 3429*0Sstevel@tonic-gate mdb_warn("failed to read %s log header pointer"); 3430*0Sstevel@tonic-gate return (DCMD_ERR); 3431*0Sstevel@tonic-gate } 3432*0Sstevel@tonic-gate 3433*0Sstevel@tonic-gate if (mdb_pwalk("kmem_log", (mdb_walk_cb_t)showbc, &kma, addr) == -1) { 3434*0Sstevel@tonic-gate mdb_warn("failed to walk kmem log"); 3435*0Sstevel@tonic-gate return (DCMD_ERR); 3436*0Sstevel@tonic-gate } 3437*0Sstevel@tonic-gate 3438*0Sstevel@tonic-gate return (DCMD_OK); 3439*0Sstevel@tonic-gate } 3440*0Sstevel@tonic-gate 3441*0Sstevel@tonic-gate /* 3442*0Sstevel@tonic-gate * As the final lure for die-hard crash(1M) users, we provide ::kmausers here. 3443*0Sstevel@tonic-gate * The first piece is a structure which we use to accumulate kmem_cache_t 3444*0Sstevel@tonic-gate * addresses of interest. The kmc_add is used as a callback for the kmem_cache 3445*0Sstevel@tonic-gate * walker; we either add all caches, or ones named explicitly as arguments. 
3446*0Sstevel@tonic-gate */ 3447*0Sstevel@tonic-gate 3448*0Sstevel@tonic-gate typedef struct kmclist { 3449*0Sstevel@tonic-gate const char *kmc_name; /* Name to match (or NULL) */ 3450*0Sstevel@tonic-gate uintptr_t *kmc_caches; /* List of kmem_cache_t addrs */ 3451*0Sstevel@tonic-gate int kmc_nelems; /* Num entries in kmc_caches */ 3452*0Sstevel@tonic-gate int kmc_size; /* Size of kmc_caches array */ 3453*0Sstevel@tonic-gate } kmclist_t; 3454*0Sstevel@tonic-gate 3455*0Sstevel@tonic-gate static int 3456*0Sstevel@tonic-gate kmc_add(uintptr_t addr, const kmem_cache_t *cp, kmclist_t *kmc) 3457*0Sstevel@tonic-gate { 3458*0Sstevel@tonic-gate void *p; 3459*0Sstevel@tonic-gate int s; 3460*0Sstevel@tonic-gate 3461*0Sstevel@tonic-gate if (kmc->kmc_name == NULL || 3462*0Sstevel@tonic-gate strcmp(cp->cache_name, kmc->kmc_name) == 0) { 3463*0Sstevel@tonic-gate /* 3464*0Sstevel@tonic-gate * If we have a match, grow our array (if necessary), and then 3465*0Sstevel@tonic-gate * add the virtual address of the matching cache to our list. 3466*0Sstevel@tonic-gate */ 3467*0Sstevel@tonic-gate if (kmc->kmc_nelems >= kmc->kmc_size) { 3468*0Sstevel@tonic-gate s = kmc->kmc_size ? kmc->kmc_size * 2 : 256; 3469*0Sstevel@tonic-gate p = mdb_alloc(sizeof (uintptr_t) * s, UM_SLEEP | UM_GC); 3470*0Sstevel@tonic-gate 3471*0Sstevel@tonic-gate bcopy(kmc->kmc_caches, p, 3472*0Sstevel@tonic-gate sizeof (uintptr_t) * kmc->kmc_size); 3473*0Sstevel@tonic-gate 3474*0Sstevel@tonic-gate kmc->kmc_caches = p; 3475*0Sstevel@tonic-gate kmc->kmc_size = s; 3476*0Sstevel@tonic-gate } 3477*0Sstevel@tonic-gate 3478*0Sstevel@tonic-gate kmc->kmc_caches[kmc->kmc_nelems++] = addr; 3479*0Sstevel@tonic-gate return (kmc->kmc_name ? WALK_DONE : WALK_NEXT); 3480*0Sstevel@tonic-gate } 3481*0Sstevel@tonic-gate 3482*0Sstevel@tonic-gate return (WALK_NEXT); 3483*0Sstevel@tonic-gate } 3484*0Sstevel@tonic-gate 3485*0Sstevel@tonic-gate /* 3486*0Sstevel@tonic-gate * The second piece of ::kmausers is a hash table of allocations. 
 * Each allocation owner is identified by its stack trace and data_size.
 * We then track the total bytes of all such allocations, and the number of
 * allocations to report at the end.  Once we have a list of caches, we walk
 * through the allocated bufctls of each, and update our hash table
 * accordingly.
 */

typedef struct kmowner {
	struct kmowner *kmo_head;		/* First hash elt in bucket */
	struct kmowner *kmo_next;		/* Next hash elt in chain */
	size_t kmo_signature;			/* Hash table signature */
	uint_t kmo_num;				/* Number of allocations */
	size_t kmo_data_size;			/* Size of each allocation */
	size_t kmo_total_size;			/* Total bytes of allocation */
	int kmo_depth;				/* Depth of stack trace */
	uintptr_t kmo_stack[KMEM_STACK_DEPTH];	/* Stack trace */
} kmowner_t;

typedef struct kmusers {
	uintptr_t kmu_addr;			/* address of interest */
	const kmem_cache_t *kmu_cache;		/* Current kmem cache */
	kmowner_t *kmu_hash;			/* Hash table of owners */
	int kmu_nelems;				/* Number of entries in use */
	int kmu_size;				/* Total number of entries */
} kmusers_t;

/*
 * Record one allocation in the ::kmausers hash table.
 *
 *   kmu       - accumulation state (hash table of kmowner_t entries)
 *   bcp       - audit bufctl providing the allocation's stack trace
 *   size      - bytes to charge against this owner's kmo_total_size
 *   data_size - per-allocation size; seeds the hash signature and is part
 *               of the owner's identity
 *
 * An "owner" is the pair (data_size, stack trace).  If a matching owner is
 * already hashed, its totals are updated; otherwise a new entry is taken
 * from the array and linked into its bucket chain.
 */
static void
kmu_add(kmusers_t *kmu, const kmem_bufctl_audit_t *bcp,
    size_t size, size_t data_size)
{
	int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH);
	size_t bucket, signature = data_size;
	kmowner_t *kmo, *kmoend;

	/*
	 * If the hash table is full, double its size and rehash everything.
	 */
	if (kmu->kmu_nelems >= kmu->kmu_size) {
		/* Table size stays a power of two (1024, 2048, ...), so */
		/* "signature & (kmu_size - 1)" below is a valid bucket mask. */
		int s = kmu->kmu_size ? kmu->kmu_size * 2 : 1024;

		kmo = mdb_alloc(sizeof (kmowner_t) * s, UM_SLEEP | UM_GC);
		bcopy(kmu->kmu_hash, kmo, sizeof (kmowner_t) * kmu->kmu_size);
		kmu->kmu_hash = kmo;
		kmu->kmu_size = s;

		/*
		 * The bucket heads and chain links are pointers into the
		 * kmu_hash array itself, so the copy above invalidated all
		 * of them: clear every bucket head, then rebuild the chains
		 * for the kmu_nelems entries currently in use.
		 */
		kmoend = kmu->kmu_hash + kmu->kmu_size;
		for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++)
			kmo->kmo_head = NULL;

		kmoend = kmu->kmu_hash + kmu->kmu_nelems;
		for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++) {
			bucket = kmo->kmo_signature & (kmu->kmu_size - 1);
			kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head;
			kmu->kmu_hash[bucket].kmo_head = kmo;
		}
	}

	/*
	 * Finish computing the hash signature from the stack trace, and then
	 * see if the owner is in the hash table.  If so, update our stats.
	 */
	for (i = 0; i < depth; i++)
		signature += bcp->bc_stack[i];

	bucket = signature & (kmu->kmu_size - 1);

	for (kmo = kmu->kmu_hash[bucket].kmo_head; kmo; kmo = kmo->kmo_next) {
		if (kmo->kmo_signature == signature) {
			size_t difference = 0;

			/*
			 * The signature can collide, so verify the full key:
			 * OR all field differences together and test once.
			 */
			difference |= kmo->kmo_data_size - data_size;
			difference |= kmo->kmo_depth - depth;

			for (i = 0; i < depth; i++) {
				difference |= kmo->kmo_stack[i] -
				    bcp->bc_stack[i];
			}

			if (difference == 0) {
				kmo->kmo_total_size += size;
				kmo->kmo_num++;
				return;
			}
		}
	}

	/*
	 * If the owner is not yet hashed, grab the next element and fill it
	 * in based on the allocation information.
	 */
	kmo = &kmu->kmu_hash[kmu->kmu_nelems++];
	kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head;
	kmu->kmu_hash[bucket].kmo_head = kmo;

	kmo->kmo_signature = signature;
	kmo->kmo_num = 1;
	kmo->kmo_data_size = data_size;
	kmo->kmo_total_size = size;
	kmo->kmo_depth = depth;

	for (i = 0; i < depth; i++)
		kmo->kmo_stack[i] = bcp->bc_stack[i];
}

/*
 * When ::kmausers is invoked without the -f flag, we simply update our hash
 * table with the information from each allocated bufctl.
 */
/*ARGSUSED*/
static int
kmause1(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu)
{
	const kmem_cache_t *cp = kmu->kmu_cache;

	/* Charge the full buffer size to this bufctl's owner. */
	kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize);
	return (WALK_NEXT);
}

/*
 * When ::kmausers is invoked with the -f flag, we print out the information
 * for each bufctl as well as updating the hash table.
3607*0Sstevel@tonic-gate */ 3608*0Sstevel@tonic-gate static int 3609*0Sstevel@tonic-gate kmause2(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu) 3610*0Sstevel@tonic-gate { 3611*0Sstevel@tonic-gate int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH); 3612*0Sstevel@tonic-gate const kmem_cache_t *cp = kmu->kmu_cache; 3613*0Sstevel@tonic-gate kmem_bufctl_t bufctl; 3614*0Sstevel@tonic-gate 3615*0Sstevel@tonic-gate if (kmu->kmu_addr) { 3616*0Sstevel@tonic-gate if (mdb_vread(&bufctl, sizeof (bufctl), addr) == -1) 3617*0Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr); 3618*0Sstevel@tonic-gate else if (kmu->kmu_addr < (uintptr_t)bufctl.bc_addr || 3619*0Sstevel@tonic-gate kmu->kmu_addr >= (uintptr_t)bufctl.bc_addr + 3620*0Sstevel@tonic-gate cp->cache_bufsize) 3621*0Sstevel@tonic-gate return (WALK_NEXT); 3622*0Sstevel@tonic-gate } 3623*0Sstevel@tonic-gate 3624*0Sstevel@tonic-gate mdb_printf("size %d, addr %p, thread %p, cache %s\n", 3625*0Sstevel@tonic-gate cp->cache_bufsize, addr, bcp->bc_thread, cp->cache_name); 3626*0Sstevel@tonic-gate 3627*0Sstevel@tonic-gate for (i = 0; i < depth; i++) 3628*0Sstevel@tonic-gate mdb_printf("\t %a\n", bcp->bc_stack[i]); 3629*0Sstevel@tonic-gate 3630*0Sstevel@tonic-gate kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize); 3631*0Sstevel@tonic-gate return (WALK_NEXT); 3632*0Sstevel@tonic-gate } 3633*0Sstevel@tonic-gate 3634*0Sstevel@tonic-gate /* 3635*0Sstevel@tonic-gate * We sort our results by allocation size before printing them. 
3636*0Sstevel@tonic-gate */ 3637*0Sstevel@tonic-gate static int 3638*0Sstevel@tonic-gate kmownercmp(const void *lp, const void *rp) 3639*0Sstevel@tonic-gate { 3640*0Sstevel@tonic-gate const kmowner_t *lhs = lp; 3641*0Sstevel@tonic-gate const kmowner_t *rhs = rp; 3642*0Sstevel@tonic-gate 3643*0Sstevel@tonic-gate return (rhs->kmo_total_size - lhs->kmo_total_size); 3644*0Sstevel@tonic-gate } 3645*0Sstevel@tonic-gate 3646*0Sstevel@tonic-gate /* 3647*0Sstevel@tonic-gate * The main engine of ::kmausers is relatively straightforward: First we 3648*0Sstevel@tonic-gate * accumulate our list of kmem_cache_t addresses into the kmclist_t. Next we 3649*0Sstevel@tonic-gate * iterate over the allocated bufctls of each cache in the list. Finally, 3650*0Sstevel@tonic-gate * we sort and print our results. 3651*0Sstevel@tonic-gate */ 3652*0Sstevel@tonic-gate /*ARGSUSED*/ 3653*0Sstevel@tonic-gate int 3654*0Sstevel@tonic-gate kmausers(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 3655*0Sstevel@tonic-gate { 3656*0Sstevel@tonic-gate int mem_threshold = 8192; /* Minimum # bytes for printing */ 3657*0Sstevel@tonic-gate int cnt_threshold = 100; /* Minimum # blocks for printing */ 3658*0Sstevel@tonic-gate int audited_caches = 0; /* Number of KMF_AUDIT caches found */ 3659*0Sstevel@tonic-gate int do_all_caches = 1; /* Do all caches (no arguments) */ 3660*0Sstevel@tonic-gate int opt_e = FALSE; /* Include "small" users */ 3661*0Sstevel@tonic-gate int opt_f = FALSE; /* Print stack traces */ 3662*0Sstevel@tonic-gate 3663*0Sstevel@tonic-gate mdb_walk_cb_t callback = (mdb_walk_cb_t)kmause1; 3664*0Sstevel@tonic-gate kmowner_t *kmo, *kmoend; 3665*0Sstevel@tonic-gate int i, oelems; 3666*0Sstevel@tonic-gate 3667*0Sstevel@tonic-gate kmclist_t kmc; 3668*0Sstevel@tonic-gate kmusers_t kmu; 3669*0Sstevel@tonic-gate 3670*0Sstevel@tonic-gate bzero(&kmc, sizeof (kmc)); 3671*0Sstevel@tonic-gate bzero(&kmu, sizeof (kmu)); 3672*0Sstevel@tonic-gate 3673*0Sstevel@tonic-gate while ((i = 
mdb_getopts(argc, argv, 3674*0Sstevel@tonic-gate 'e', MDB_OPT_SETBITS, TRUE, &opt_e, 3675*0Sstevel@tonic-gate 'f', MDB_OPT_SETBITS, TRUE, &opt_f, NULL)) != argc) { 3676*0Sstevel@tonic-gate 3677*0Sstevel@tonic-gate argv += i; /* skip past options we just processed */ 3678*0Sstevel@tonic-gate argc -= i; /* adjust argc */ 3679*0Sstevel@tonic-gate 3680*0Sstevel@tonic-gate if (argv->a_type != MDB_TYPE_STRING || *argv->a_un.a_str == '-') 3681*0Sstevel@tonic-gate return (DCMD_USAGE); 3682*0Sstevel@tonic-gate 3683*0Sstevel@tonic-gate oelems = kmc.kmc_nelems; 3684*0Sstevel@tonic-gate kmc.kmc_name = argv->a_un.a_str; 3685*0Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc); 3686*0Sstevel@tonic-gate 3687*0Sstevel@tonic-gate if (kmc.kmc_nelems == oelems) { 3688*0Sstevel@tonic-gate mdb_warn("unknown kmem cache: %s\n", kmc.kmc_name); 3689*0Sstevel@tonic-gate return (DCMD_ERR); 3690*0Sstevel@tonic-gate } 3691*0Sstevel@tonic-gate 3692*0Sstevel@tonic-gate do_all_caches = 0; 3693*0Sstevel@tonic-gate argv++; 3694*0Sstevel@tonic-gate argc--; 3695*0Sstevel@tonic-gate } 3696*0Sstevel@tonic-gate 3697*0Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 3698*0Sstevel@tonic-gate opt_f = TRUE; 3699*0Sstevel@tonic-gate kmu.kmu_addr = addr; 3700*0Sstevel@tonic-gate } else { 3701*0Sstevel@tonic-gate kmu.kmu_addr = NULL; 3702*0Sstevel@tonic-gate } 3703*0Sstevel@tonic-gate 3704*0Sstevel@tonic-gate if (opt_e) 3705*0Sstevel@tonic-gate mem_threshold = cnt_threshold = 0; 3706*0Sstevel@tonic-gate 3707*0Sstevel@tonic-gate if (opt_f) 3708*0Sstevel@tonic-gate callback = (mdb_walk_cb_t)kmause2; 3709*0Sstevel@tonic-gate 3710*0Sstevel@tonic-gate if (do_all_caches) { 3711*0Sstevel@tonic-gate kmc.kmc_name = NULL; /* match all cache names */ 3712*0Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc); 3713*0Sstevel@tonic-gate } 3714*0Sstevel@tonic-gate 3715*0Sstevel@tonic-gate for (i = 0; i < kmc.kmc_nelems; i++) { 3716*0Sstevel@tonic-gate uintptr_t cp = 
kmc.kmc_caches[i]; 3717*0Sstevel@tonic-gate kmem_cache_t c; 3718*0Sstevel@tonic-gate 3719*0Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), cp) == -1) { 3720*0Sstevel@tonic-gate mdb_warn("failed to read cache at %p", cp); 3721*0Sstevel@tonic-gate continue; 3722*0Sstevel@tonic-gate } 3723*0Sstevel@tonic-gate 3724*0Sstevel@tonic-gate if (!(c.cache_flags & KMF_AUDIT)) { 3725*0Sstevel@tonic-gate if (!do_all_caches) { 3726*0Sstevel@tonic-gate mdb_warn("KMF_AUDIT is not enabled for %s\n", 3727*0Sstevel@tonic-gate c.cache_name); 3728*0Sstevel@tonic-gate } 3729*0Sstevel@tonic-gate continue; 3730*0Sstevel@tonic-gate } 3731*0Sstevel@tonic-gate 3732*0Sstevel@tonic-gate kmu.kmu_cache = &c; 3733*0Sstevel@tonic-gate (void) mdb_pwalk("bufctl", callback, &kmu, cp); 3734*0Sstevel@tonic-gate audited_caches++; 3735*0Sstevel@tonic-gate } 3736*0Sstevel@tonic-gate 3737*0Sstevel@tonic-gate if (audited_caches == 0 && do_all_caches) { 3738*0Sstevel@tonic-gate mdb_warn("KMF_AUDIT is not enabled for any caches\n"); 3739*0Sstevel@tonic-gate return (DCMD_ERR); 3740*0Sstevel@tonic-gate } 3741*0Sstevel@tonic-gate 3742*0Sstevel@tonic-gate qsort(kmu.kmu_hash, kmu.kmu_nelems, sizeof (kmowner_t), kmownercmp); 3743*0Sstevel@tonic-gate kmoend = kmu.kmu_hash + kmu.kmu_nelems; 3744*0Sstevel@tonic-gate 3745*0Sstevel@tonic-gate for (kmo = kmu.kmu_hash; kmo < kmoend; kmo++) { 3746*0Sstevel@tonic-gate if (kmo->kmo_total_size < mem_threshold && 3747*0Sstevel@tonic-gate kmo->kmo_num < cnt_threshold) 3748*0Sstevel@tonic-gate continue; 3749*0Sstevel@tonic-gate mdb_printf("%lu bytes for %u allocations with data size %lu:\n", 3750*0Sstevel@tonic-gate kmo->kmo_total_size, kmo->kmo_num, kmo->kmo_data_size); 3751*0Sstevel@tonic-gate for (i = 0; i < kmo->kmo_depth; i++) 3752*0Sstevel@tonic-gate mdb_printf("\t %a\n", kmo->kmo_stack[i]); 3753*0Sstevel@tonic-gate } 3754*0Sstevel@tonic-gate 3755*0Sstevel@tonic-gate return (DCMD_OK); 3756*0Sstevel@tonic-gate } 3757*0Sstevel@tonic-gate 3758*0Sstevel@tonic-gate void 
3759*0Sstevel@tonic-gate kmausers_help(void) 3760*0Sstevel@tonic-gate { 3761*0Sstevel@tonic-gate mdb_printf( 3762*0Sstevel@tonic-gate "Displays the largest users of the kmem allocator, sorted by \n" 3763*0Sstevel@tonic-gate "trace. If one or more caches is specified, only those caches\n" 3764*0Sstevel@tonic-gate "will be searched. By default, all caches are searched. If an\n" 3765*0Sstevel@tonic-gate "address is specified, then only those allocations which include\n" 3766*0Sstevel@tonic-gate "the given address are displayed. Specifying an address implies\n" 3767*0Sstevel@tonic-gate "-f.\n" 3768*0Sstevel@tonic-gate "\n" 3769*0Sstevel@tonic-gate "\t-e\tInclude all users, not just the largest\n" 3770*0Sstevel@tonic-gate "\t-f\tDisplay individual allocations. By default, users are\n" 3771*0Sstevel@tonic-gate "\t\tgrouped by stack\n"); 3772*0Sstevel@tonic-gate } 3773*0Sstevel@tonic-gate 3774*0Sstevel@tonic-gate static int 3775*0Sstevel@tonic-gate kmem_ready_check(void) 3776*0Sstevel@tonic-gate { 3777*0Sstevel@tonic-gate int ready; 3778*0Sstevel@tonic-gate 3779*0Sstevel@tonic-gate if (mdb_readvar(&ready, "kmem_ready") < 0) 3780*0Sstevel@tonic-gate return (-1); /* errno is set for us */ 3781*0Sstevel@tonic-gate 3782*0Sstevel@tonic-gate return (ready); 3783*0Sstevel@tonic-gate } 3784*0Sstevel@tonic-gate 3785*0Sstevel@tonic-gate /*ARGSUSED*/ 3786*0Sstevel@tonic-gate static void 3787*0Sstevel@tonic-gate kmem_ready_cb(void *arg) 3788*0Sstevel@tonic-gate { 3789*0Sstevel@tonic-gate if (kmem_ready_check() <= 0) 3790*0Sstevel@tonic-gate return; 3791*0Sstevel@tonic-gate 3792*0Sstevel@tonic-gate if (kmem_ready_cbhdl != NULL) { 3793*0Sstevel@tonic-gate mdb_callback_remove(kmem_ready_cbhdl); 3794*0Sstevel@tonic-gate kmem_ready_cbhdl = NULL; 3795*0Sstevel@tonic-gate } 3796*0Sstevel@tonic-gate 3797*0Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_init_walkers, NULL); 3798*0Sstevel@tonic-gate } 3799*0Sstevel@tonic-gate 3800*0Sstevel@tonic-gate void 
3801*0Sstevel@tonic-gate kmem_init(void) 3802*0Sstevel@tonic-gate { 3803*0Sstevel@tonic-gate mdb_walker_t w = { 3804*0Sstevel@tonic-gate "kmem_cache", "walk list of kmem caches", kmem_cache_walk_init, 3805*0Sstevel@tonic-gate kmem_cache_walk_step, kmem_cache_walk_fini 3806*0Sstevel@tonic-gate }; 3807*0Sstevel@tonic-gate 3808*0Sstevel@tonic-gate /* 3809*0Sstevel@tonic-gate * If kmem is ready, we'll need to invoke the kmem_cache walker 3810*0Sstevel@tonic-gate * immediately. Walkers in the linkage structure won't be ready until 3811*0Sstevel@tonic-gate * _mdb_init returns, so we'll need to add this one manually. If kmem 3812*0Sstevel@tonic-gate * is ready, we'll use the walker to initialize the caches. If kmem 3813*0Sstevel@tonic-gate * isn't ready, we'll register a callback that will allow us to defer 3814*0Sstevel@tonic-gate * cache walking until it is. 3815*0Sstevel@tonic-gate */ 3816*0Sstevel@tonic-gate if (mdb_add_walker(&w) != 0) { 3817*0Sstevel@tonic-gate mdb_warn("failed to add kmem_cache walker"); 3818*0Sstevel@tonic-gate return; 3819*0Sstevel@tonic-gate } 3820*0Sstevel@tonic-gate 3821*0Sstevel@tonic-gate if (kmem_ready_check() > 0) { 3822*0Sstevel@tonic-gate kmem_ready_cb(NULL); 3823*0Sstevel@tonic-gate } else { 3824*0Sstevel@tonic-gate kmem_ready_cbhdl = mdb_callback_add(MDB_CALLBACK_STCHG, 3825*0Sstevel@tonic-gate kmem_ready_cb, NULL); 3826*0Sstevel@tonic-gate } 3827*0Sstevel@tonic-gate } 3828*0Sstevel@tonic-gate 3829*0Sstevel@tonic-gate typedef struct whatthread { 3830*0Sstevel@tonic-gate uintptr_t wt_target; 3831*0Sstevel@tonic-gate int wt_verbose; 3832*0Sstevel@tonic-gate } whatthread_t; 3833*0Sstevel@tonic-gate 3834*0Sstevel@tonic-gate static int 3835*0Sstevel@tonic-gate whatthread_walk_thread(uintptr_t addr, const kthread_t *t, whatthread_t *w) 3836*0Sstevel@tonic-gate { 3837*0Sstevel@tonic-gate uintptr_t current, data; 3838*0Sstevel@tonic-gate 3839*0Sstevel@tonic-gate if (t->t_stkbase == NULL) 3840*0Sstevel@tonic-gate return (WALK_NEXT); 
3841*0Sstevel@tonic-gate 3842*0Sstevel@tonic-gate /* 3843*0Sstevel@tonic-gate * Warn about swapped out threads, but drive on anyway 3844*0Sstevel@tonic-gate */ 3845*0Sstevel@tonic-gate if (!(t->t_schedflag & TS_LOAD)) { 3846*0Sstevel@tonic-gate mdb_warn("thread %p's stack swapped out\n", addr); 3847*0Sstevel@tonic-gate return (WALK_NEXT); 3848*0Sstevel@tonic-gate } 3849*0Sstevel@tonic-gate 3850*0Sstevel@tonic-gate /* 3851*0Sstevel@tonic-gate * Search the thread's stack for the given pointer. Note that it would 3852*0Sstevel@tonic-gate * be more efficient to follow ::kgrep's lead and read in page-sized 3853*0Sstevel@tonic-gate * chunks, but this routine is already fast and simple. 3854*0Sstevel@tonic-gate */ 3855*0Sstevel@tonic-gate for (current = (uintptr_t)t->t_stkbase; current < (uintptr_t)t->t_stk; 3856*0Sstevel@tonic-gate current += sizeof (uintptr_t)) { 3857*0Sstevel@tonic-gate if (mdb_vread(&data, sizeof (data), current) == -1) { 3858*0Sstevel@tonic-gate mdb_warn("couldn't read thread %p's stack at %p", 3859*0Sstevel@tonic-gate addr, current); 3860*0Sstevel@tonic-gate return (WALK_ERR); 3861*0Sstevel@tonic-gate } 3862*0Sstevel@tonic-gate 3863*0Sstevel@tonic-gate if (data == w->wt_target) { 3864*0Sstevel@tonic-gate if (w->wt_verbose) { 3865*0Sstevel@tonic-gate mdb_printf("%p in thread %p's stack%s\n", 3866*0Sstevel@tonic-gate current, addr, stack_active(t, current)); 3867*0Sstevel@tonic-gate } else { 3868*0Sstevel@tonic-gate mdb_printf("%#lr\n", addr); 3869*0Sstevel@tonic-gate return (WALK_NEXT); 3870*0Sstevel@tonic-gate } 3871*0Sstevel@tonic-gate } 3872*0Sstevel@tonic-gate } 3873*0Sstevel@tonic-gate 3874*0Sstevel@tonic-gate return (WALK_NEXT); 3875*0Sstevel@tonic-gate } 3876*0Sstevel@tonic-gate 3877*0Sstevel@tonic-gate int 3878*0Sstevel@tonic-gate whatthread(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 3879*0Sstevel@tonic-gate { 3880*0Sstevel@tonic-gate whatthread_t w; 3881*0Sstevel@tonic-gate 3882*0Sstevel@tonic-gate if (!(flags & 
DCMD_ADDRSPEC)) 3883*0Sstevel@tonic-gate return (DCMD_USAGE); 3884*0Sstevel@tonic-gate 3885*0Sstevel@tonic-gate w.wt_verbose = FALSE; 3886*0Sstevel@tonic-gate w.wt_target = addr; 3887*0Sstevel@tonic-gate 3888*0Sstevel@tonic-gate if (mdb_getopts(argc, argv, 3889*0Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &w.wt_verbose, NULL) != argc) 3890*0Sstevel@tonic-gate return (DCMD_USAGE); 3891*0Sstevel@tonic-gate 3892*0Sstevel@tonic-gate if (mdb_walk("thread", (mdb_walk_cb_t)whatthread_walk_thread, &w) 3893*0Sstevel@tonic-gate == -1) { 3894*0Sstevel@tonic-gate mdb_warn("couldn't walk threads"); 3895*0Sstevel@tonic-gate return (DCMD_ERR); 3896*0Sstevel@tonic-gate } 3897*0Sstevel@tonic-gate 3898*0Sstevel@tonic-gate return (DCMD_OK); 3899*0Sstevel@tonic-gate } 3900