/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
240Sstevel@tonic-gate */ 250Sstevel@tonic-gate 260Sstevel@tonic-gate #include <mdb/mdb_param.h> 270Sstevel@tonic-gate #include <mdb/mdb_modapi.h> 280Sstevel@tonic-gate #include <mdb/mdb_ctf.h> 290Sstevel@tonic-gate #include <sys/cpuvar.h> 300Sstevel@tonic-gate #include <sys/kmem_impl.h> 310Sstevel@tonic-gate #include <sys/vmem_impl.h> 320Sstevel@tonic-gate #include <sys/machelf.h> 330Sstevel@tonic-gate #include <sys/modctl.h> 340Sstevel@tonic-gate #include <sys/kobj.h> 350Sstevel@tonic-gate #include <sys/panic.h> 360Sstevel@tonic-gate #include <sys/stack.h> 370Sstevel@tonic-gate #include <sys/sysmacros.h> 380Sstevel@tonic-gate #include <vm/page.h> 390Sstevel@tonic-gate 406712Stomee #include "avl.h" 416712Stomee #include "combined.h" 424798Stomee #include "dist.h" 430Sstevel@tonic-gate #include "kmem.h" 446712Stomee #include "list.h" 450Sstevel@tonic-gate 460Sstevel@tonic-gate #define dprintf(x) if (mdb_debug_level) { \ 470Sstevel@tonic-gate mdb_printf("kmem debug: "); \ 480Sstevel@tonic-gate /*CSTYLED*/\ 490Sstevel@tonic-gate mdb_printf x ;\ 500Sstevel@tonic-gate } 510Sstevel@tonic-gate 520Sstevel@tonic-gate #define KM_ALLOCATED 0x01 530Sstevel@tonic-gate #define KM_FREE 0x02 540Sstevel@tonic-gate #define KM_BUFCTL 0x04 550Sstevel@tonic-gate #define KM_CONSTRUCTED 0x08 /* only constructed free buffers */ 560Sstevel@tonic-gate #define KM_HASH 0x10 570Sstevel@tonic-gate 580Sstevel@tonic-gate static int mdb_debug_level = 0; 590Sstevel@tonic-gate 600Sstevel@tonic-gate /*ARGSUSED*/ 610Sstevel@tonic-gate static int 620Sstevel@tonic-gate kmem_init_walkers(uintptr_t addr, const kmem_cache_t *c, void *ignored) 630Sstevel@tonic-gate { 640Sstevel@tonic-gate mdb_walker_t w; 650Sstevel@tonic-gate char descr[64]; 660Sstevel@tonic-gate 670Sstevel@tonic-gate (void) mdb_snprintf(descr, sizeof (descr), 680Sstevel@tonic-gate "walk the %s cache", c->cache_name); 690Sstevel@tonic-gate 700Sstevel@tonic-gate w.walk_name = c->cache_name; 710Sstevel@tonic-gate w.walk_descr = descr; 
720Sstevel@tonic-gate w.walk_init = kmem_walk_init; 730Sstevel@tonic-gate w.walk_step = kmem_walk_step; 740Sstevel@tonic-gate w.walk_fini = kmem_walk_fini; 750Sstevel@tonic-gate w.walk_init_arg = (void *)addr; 760Sstevel@tonic-gate 770Sstevel@tonic-gate if (mdb_add_walker(&w) == -1) 780Sstevel@tonic-gate mdb_warn("failed to add %s walker", c->cache_name); 790Sstevel@tonic-gate 800Sstevel@tonic-gate return (WALK_NEXT); 810Sstevel@tonic-gate } 820Sstevel@tonic-gate 830Sstevel@tonic-gate /*ARGSUSED*/ 840Sstevel@tonic-gate int 850Sstevel@tonic-gate kmem_debug(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 860Sstevel@tonic-gate { 870Sstevel@tonic-gate mdb_debug_level ^= 1; 880Sstevel@tonic-gate 890Sstevel@tonic-gate mdb_printf("kmem: debugging is now %s\n", 900Sstevel@tonic-gate mdb_debug_level ? "on" : "off"); 910Sstevel@tonic-gate 920Sstevel@tonic-gate return (DCMD_OK); 930Sstevel@tonic-gate } 940Sstevel@tonic-gate 950Sstevel@tonic-gate int 960Sstevel@tonic-gate kmem_cache_walk_init(mdb_walk_state_t *wsp) 970Sstevel@tonic-gate { 980Sstevel@tonic-gate GElf_Sym sym; 990Sstevel@tonic-gate 1006712Stomee if (mdb_lookup_by_name("kmem_caches", &sym) == -1) { 1016712Stomee mdb_warn("couldn't find kmem_caches"); 1020Sstevel@tonic-gate return (WALK_ERR); 1030Sstevel@tonic-gate } 1040Sstevel@tonic-gate 1056712Stomee wsp->walk_addr = (uintptr_t)sym.st_value; 1066712Stomee 1076712Stomee return (list_walk_init_named(wsp, "cache list", "cache")); 1080Sstevel@tonic-gate } 1090Sstevel@tonic-gate 1100Sstevel@tonic-gate int 1110Sstevel@tonic-gate kmem_cpu_cache_walk_init(mdb_walk_state_t *wsp) 1120Sstevel@tonic-gate { 1130Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 1140Sstevel@tonic-gate mdb_warn("kmem_cpu_cache doesn't support global walks"); 1150Sstevel@tonic-gate return (WALK_ERR); 1160Sstevel@tonic-gate } 1170Sstevel@tonic-gate 1180Sstevel@tonic-gate if (mdb_layered_walk("cpu", wsp) == -1) { 1190Sstevel@tonic-gate mdb_warn("couldn't walk 'cpu'"); 
1200Sstevel@tonic-gate return (WALK_ERR); 1210Sstevel@tonic-gate } 1220Sstevel@tonic-gate 1230Sstevel@tonic-gate wsp->walk_data = (void *)wsp->walk_addr; 1240Sstevel@tonic-gate 1250Sstevel@tonic-gate return (WALK_NEXT); 1260Sstevel@tonic-gate } 1270Sstevel@tonic-gate 1280Sstevel@tonic-gate int 1290Sstevel@tonic-gate kmem_cpu_cache_walk_step(mdb_walk_state_t *wsp) 1300Sstevel@tonic-gate { 1310Sstevel@tonic-gate uintptr_t caddr = (uintptr_t)wsp->walk_data; 1320Sstevel@tonic-gate const cpu_t *cpu = wsp->walk_layer; 1330Sstevel@tonic-gate kmem_cpu_cache_t cc; 1340Sstevel@tonic-gate 135*9019SMichael.Corcoran@Sun.COM caddr += OFFSETOF(kmem_cache_t, cache_cpu[cpu->cpu_seqid]); 1360Sstevel@tonic-gate 1370Sstevel@tonic-gate if (mdb_vread(&cc, sizeof (kmem_cpu_cache_t), caddr) == -1) { 1380Sstevel@tonic-gate mdb_warn("couldn't read kmem_cpu_cache at %p", caddr); 1390Sstevel@tonic-gate return (WALK_ERR); 1400Sstevel@tonic-gate } 1410Sstevel@tonic-gate 1420Sstevel@tonic-gate return (wsp->walk_callback(caddr, &cc, wsp->walk_cbdata)); 1430Sstevel@tonic-gate } 1440Sstevel@tonic-gate 1456712Stomee static int 1466712Stomee kmem_slab_check(void *p, uintptr_t saddr, void *arg) 1476712Stomee { 1486712Stomee kmem_slab_t *sp = p; 1496712Stomee uintptr_t caddr = (uintptr_t)arg; 1506712Stomee if ((uintptr_t)sp->slab_cache != caddr) { 1516712Stomee mdb_warn("slab %p isn't in cache %p (in cache %p)\n", 1526712Stomee saddr, caddr, sp->slab_cache); 1536712Stomee return (-1); 1546712Stomee } 1556712Stomee 1566712Stomee return (0); 1576712Stomee } 1586712Stomee 1596712Stomee static int 1606712Stomee kmem_partial_slab_check(void *p, uintptr_t saddr, void *arg) 1616712Stomee { 1626712Stomee kmem_slab_t *sp = p; 1636712Stomee 1646712Stomee int rc = kmem_slab_check(p, saddr, arg); 1656712Stomee if (rc != 0) { 1666712Stomee return (rc); 1676712Stomee } 1686712Stomee 1696712Stomee if (!KMEM_SLAB_IS_PARTIAL(sp)) { 1706712Stomee mdb_warn("slab %p is not a partial slab\n", saddr); 1716712Stomee return 
(-1); 1726712Stomee } 1736712Stomee 1746712Stomee return (0); 1756712Stomee } 1766712Stomee 1776712Stomee static int 1786712Stomee kmem_complete_slab_check(void *p, uintptr_t saddr, void *arg) 1796712Stomee { 1806712Stomee kmem_slab_t *sp = p; 1816712Stomee 1826712Stomee int rc = kmem_slab_check(p, saddr, arg); 1836712Stomee if (rc != 0) { 1846712Stomee return (rc); 1856712Stomee } 1866712Stomee 1876712Stomee if (!KMEM_SLAB_IS_ALL_USED(sp)) { 1886712Stomee mdb_warn("slab %p is not completely allocated\n", saddr); 1896712Stomee return (-1); 1906712Stomee } 1916712Stomee 1926712Stomee return (0); 1936712Stomee } 1946712Stomee 1956712Stomee typedef struct { 1966712Stomee uintptr_t kns_cache_addr; 1976712Stomee int kns_nslabs; 1986712Stomee } kmem_nth_slab_t; 1996712Stomee 2006712Stomee static int 2016712Stomee kmem_nth_slab_check(void *p, uintptr_t saddr, void *arg) 2026712Stomee { 2036712Stomee kmem_nth_slab_t *chkp = arg; 2046712Stomee 2056712Stomee int rc = kmem_slab_check(p, saddr, (void *)chkp->kns_cache_addr); 2066712Stomee if (rc != 0) { 2076712Stomee return (rc); 2086712Stomee } 2096712Stomee 2106712Stomee return (chkp->kns_nslabs-- == 0 ? 
1 : 0); 2116712Stomee } 2126712Stomee 2136712Stomee static int 2146712Stomee kmem_complete_slab_walk_init(mdb_walk_state_t *wsp) 2156712Stomee { 2166712Stomee uintptr_t caddr = wsp->walk_addr; 2176712Stomee 2186712Stomee wsp->walk_addr = (uintptr_t)(caddr + 2196712Stomee offsetof(kmem_cache_t, cache_complete_slabs)); 2206712Stomee 2216712Stomee return (list_walk_init_checked(wsp, "slab list", "slab", 2226712Stomee kmem_complete_slab_check, (void *)caddr)); 2236712Stomee } 2246712Stomee 2256712Stomee static int 2266712Stomee kmem_partial_slab_walk_init(mdb_walk_state_t *wsp) 2276712Stomee { 2286712Stomee uintptr_t caddr = wsp->walk_addr; 2296712Stomee 2306712Stomee wsp->walk_addr = (uintptr_t)(caddr + 2316712Stomee offsetof(kmem_cache_t, cache_partial_slabs)); 2326712Stomee 2336712Stomee return (avl_walk_init_checked(wsp, "slab list", "slab", 2346712Stomee kmem_partial_slab_check, (void *)caddr)); 2356712Stomee } 2366712Stomee 2370Sstevel@tonic-gate int 2380Sstevel@tonic-gate kmem_slab_walk_init(mdb_walk_state_t *wsp) 2390Sstevel@tonic-gate { 2400Sstevel@tonic-gate uintptr_t caddr = wsp->walk_addr; 2410Sstevel@tonic-gate 2420Sstevel@tonic-gate if (caddr == NULL) { 2430Sstevel@tonic-gate mdb_warn("kmem_slab doesn't support global walks\n"); 2440Sstevel@tonic-gate return (WALK_ERR); 2450Sstevel@tonic-gate } 2460Sstevel@tonic-gate 2476712Stomee combined_walk_init(wsp); 2486712Stomee combined_walk_add(wsp, 2496712Stomee kmem_complete_slab_walk_init, list_walk_step, list_walk_fini); 2506712Stomee combined_walk_add(wsp, 2516712Stomee kmem_partial_slab_walk_init, avl_walk_step, avl_walk_fini); 2520Sstevel@tonic-gate 2530Sstevel@tonic-gate return (WALK_NEXT); 2540Sstevel@tonic-gate } 2550Sstevel@tonic-gate 2566712Stomee static int 2576712Stomee kmem_first_complete_slab_walk_init(mdb_walk_state_t *wsp) 2586712Stomee { 2596712Stomee uintptr_t caddr = wsp->walk_addr; 2606712Stomee kmem_nth_slab_t *chk; 2616712Stomee 2626712Stomee chk = mdb_alloc(sizeof (kmem_nth_slab_t), 
2636712Stomee UM_SLEEP | UM_GC); 2646712Stomee chk->kns_cache_addr = caddr; 2656712Stomee chk->kns_nslabs = 1; 2666712Stomee wsp->walk_addr = (uintptr_t)(caddr + 2676712Stomee offsetof(kmem_cache_t, cache_complete_slabs)); 2686712Stomee 2696712Stomee return (list_walk_init_checked(wsp, "slab list", "slab", 2706712Stomee kmem_nth_slab_check, chk)); 2716712Stomee } 2726712Stomee 2730Sstevel@tonic-gate int 2740Sstevel@tonic-gate kmem_slab_walk_partial_init(mdb_walk_state_t *wsp) 2750Sstevel@tonic-gate { 2760Sstevel@tonic-gate uintptr_t caddr = wsp->walk_addr; 2770Sstevel@tonic-gate kmem_cache_t c; 2780Sstevel@tonic-gate 2790Sstevel@tonic-gate if (caddr == NULL) { 2800Sstevel@tonic-gate mdb_warn("kmem_slab_partial doesn't support global walks\n"); 2810Sstevel@tonic-gate return (WALK_ERR); 2820Sstevel@tonic-gate } 2830Sstevel@tonic-gate 2840Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), caddr) == -1) { 2850Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache at %p", caddr); 2860Sstevel@tonic-gate return (WALK_ERR); 2870Sstevel@tonic-gate } 2880Sstevel@tonic-gate 2896712Stomee combined_walk_init(wsp); 2900Sstevel@tonic-gate 2910Sstevel@tonic-gate /* 2920Sstevel@tonic-gate * Some consumers (umem_walk_step(), in particular) require at 2930Sstevel@tonic-gate * least one callback if there are any buffers in the cache. So 2946712Stomee * if there are *no* partial slabs, report the first full slab, if 2950Sstevel@tonic-gate * any. 2960Sstevel@tonic-gate * 2970Sstevel@tonic-gate * Yes, this is ugly, but it's cleaner than the other possibilities. 
2980Sstevel@tonic-gate */ 2996712Stomee if (c.cache_partial_slabs.avl_numnodes == 0) { 3006712Stomee combined_walk_add(wsp, kmem_first_complete_slab_walk_init, 3016712Stomee list_walk_step, list_walk_fini); 3026712Stomee } else { 3036712Stomee combined_walk_add(wsp, kmem_partial_slab_walk_init, 3046712Stomee avl_walk_step, avl_walk_fini); 3056712Stomee } 3060Sstevel@tonic-gate 3070Sstevel@tonic-gate return (WALK_NEXT); 3080Sstevel@tonic-gate } 3090Sstevel@tonic-gate 3100Sstevel@tonic-gate int 3110Sstevel@tonic-gate kmem_cache(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv) 3120Sstevel@tonic-gate { 3130Sstevel@tonic-gate kmem_cache_t c; 3146712Stomee const char *filter = NULL; 3156712Stomee 3166712Stomee if (mdb_getopts(ac, argv, 3176712Stomee 'n', MDB_OPT_STR, &filter, 3186712Stomee NULL) != ac) { 3196712Stomee return (DCMD_USAGE); 3206712Stomee } 3210Sstevel@tonic-gate 3220Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) { 3230Sstevel@tonic-gate if (mdb_walk_dcmd("kmem_cache", "kmem_cache", ac, argv) == -1) { 3240Sstevel@tonic-gate mdb_warn("can't walk kmem_cache"); 3250Sstevel@tonic-gate return (DCMD_ERR); 3260Sstevel@tonic-gate } 3270Sstevel@tonic-gate return (DCMD_OK); 3280Sstevel@tonic-gate } 3290Sstevel@tonic-gate 3300Sstevel@tonic-gate if (DCMD_HDRSPEC(flags)) 3310Sstevel@tonic-gate mdb_printf("%-?s %-25s %4s %6s %8s %8s\n", "ADDR", "NAME", 3320Sstevel@tonic-gate "FLAG", "CFLAG", "BUFSIZE", "BUFTOTL"); 3330Sstevel@tonic-gate 3340Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), addr) == -1) { 3350Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache at %p", addr); 3360Sstevel@tonic-gate return (DCMD_ERR); 3370Sstevel@tonic-gate } 3380Sstevel@tonic-gate 3396712Stomee if ((filter != NULL) && (strstr(c.cache_name, filter) == NULL)) 3406712Stomee return (DCMD_OK); 3416712Stomee 3420Sstevel@tonic-gate mdb_printf("%0?p %-25s %04x %06x %8ld %8lld\n", addr, c.cache_name, 3430Sstevel@tonic-gate c.cache_flags, c.cache_cflags, c.cache_bufsize, 
c.cache_buftotal); 3440Sstevel@tonic-gate 3450Sstevel@tonic-gate return (DCMD_OK); 3460Sstevel@tonic-gate } 3470Sstevel@tonic-gate 3486712Stomee void 3496712Stomee kmem_cache_help(void) 3506712Stomee { 3516712Stomee mdb_printf("%s", "Print kernel memory caches.\n\n"); 3526712Stomee mdb_dec_indent(2); 3536712Stomee mdb_printf("%<b>OPTIONS%</b>\n"); 3546712Stomee mdb_inc_indent(2); 3556712Stomee mdb_printf("%s", 3566712Stomee " -n name\n" 3576712Stomee " name of kmem cache (or matching partial name)\n" 3586712Stomee "\n" 3596712Stomee "Column\tDescription\n" 3606712Stomee "\n" 3616712Stomee "ADDR\t\taddress of kmem cache\n" 3626712Stomee "NAME\t\tname of kmem cache\n" 3636712Stomee "FLAG\t\tvarious cache state flags\n" 3646712Stomee "CFLAG\t\tcache creation flags\n" 3656712Stomee "BUFSIZE\tobject size in bytes\n" 3666712Stomee "BUFTOTL\tcurrent total buffers in cache (allocated and free)\n"); 3676712Stomee } 3684688Stomee 3694688Stomee #define LABEL_WIDTH 11 3704688Stomee static void 3714688Stomee kmem_slabs_print_dist(uint_t *ks_bucket, size_t buffers_per_slab, 3724688Stomee size_t maxbuckets, size_t minbucketsize) 3734688Stomee { 3744688Stomee uint64_t total; 3754688Stomee int buckets; 3764688Stomee int i; 3774688Stomee const int *distarray; 3784688Stomee int complete[2]; 3794688Stomee 3804688Stomee buckets = buffers_per_slab; 3814688Stomee 3824688Stomee total = 0; 3834688Stomee for (i = 0; i <= buffers_per_slab; i++) 3844688Stomee total += ks_bucket[i]; 3854688Stomee 3864688Stomee if (maxbuckets > 1) 3874688Stomee buckets = MIN(buckets, maxbuckets); 3884688Stomee 3894688Stomee if (minbucketsize > 1) { 3904688Stomee /* 3914688Stomee * minbucketsize does not apply to the first bucket reserved 3924688Stomee * for completely allocated slabs 3934688Stomee */ 3944688Stomee buckets = MIN(buckets, 1 + ((buffers_per_slab - 1) / 3954688Stomee minbucketsize)); 3964688Stomee if ((buckets < 2) && (buffers_per_slab > 1)) { 3974688Stomee buckets = 2; 3984688Stomee minbucketsize 
= (buffers_per_slab - 1); 3994688Stomee } 4004688Stomee } 4014688Stomee 4024688Stomee /* 4034688Stomee * The first printed bucket is reserved for completely allocated slabs. 4044688Stomee * Passing (buckets - 1) excludes that bucket from the generated 4054688Stomee * distribution, since we're handling it as a special case. 4064688Stomee */ 4074688Stomee complete[0] = buffers_per_slab; 4084688Stomee complete[1] = buffers_per_slab + 1; 4094798Stomee distarray = dist_linear(buckets - 1, 1, buffers_per_slab - 1); 4104688Stomee 4114688Stomee mdb_printf("%*s\n", LABEL_WIDTH, "Allocated"); 4124798Stomee dist_print_header("Buffers", LABEL_WIDTH, "Slabs"); 4134798Stomee 4144798Stomee dist_print_bucket(complete, 0, ks_bucket, total, LABEL_WIDTH); 4154688Stomee /* 4164688Stomee * Print bucket ranges in descending order after the first bucket for 4174688Stomee * completely allocated slabs, so a person can see immediately whether 4184688Stomee * or not there is fragmentation without having to scan possibly 4194688Stomee * multiple screens of output. Starting at (buckets - 2) excludes the 4204688Stomee * extra terminating bucket. 
4214688Stomee */ 4224688Stomee for (i = buckets - 2; i >= 0; i--) { 4234798Stomee dist_print_bucket(distarray, i, ks_bucket, total, LABEL_WIDTH); 4244688Stomee } 4254688Stomee mdb_printf("\n"); 4264688Stomee } 4274688Stomee #undef LABEL_WIDTH 4284688Stomee 4294688Stomee /*ARGSUSED*/ 4304688Stomee static int 4314688Stomee kmem_first_slab(uintptr_t addr, const kmem_slab_t *sp, boolean_t *is_slab) 4324688Stomee { 4334688Stomee *is_slab = B_TRUE; 4344688Stomee return (WALK_DONE); 4354688Stomee } 4364688Stomee 4374688Stomee /*ARGSUSED*/ 4384688Stomee static int 4394688Stomee kmem_first_partial_slab(uintptr_t addr, const kmem_slab_t *sp, 4404688Stomee boolean_t *is_slab) 4414688Stomee { 4424688Stomee /* 4436712Stomee * The "kmem_partial_slab" walker reports the first full slab if there 4444688Stomee * are no partial slabs (for the sake of consumers that require at least 4454688Stomee * one callback if there are any buffers in the cache). 4464688Stomee */ 4476712Stomee *is_slab = KMEM_SLAB_IS_PARTIAL(sp); 4484688Stomee return (WALK_DONE); 4494688Stomee } 4504688Stomee 4516712Stomee typedef struct kmem_slab_usage { 4526712Stomee int ksu_refcnt; /* count of allocated buffers on slab */ 4536712Stomee boolean_t ksu_nomove; /* slab marked non-reclaimable */ 4546712Stomee } kmem_slab_usage_t; 4556712Stomee 4566712Stomee typedef struct kmem_slab_stats { 4576712Stomee const kmem_cache_t *ks_cp; 4586712Stomee int ks_slabs; /* slabs in cache */ 4596712Stomee int ks_partial_slabs; /* partially allocated slabs in cache */ 4606712Stomee uint64_t ks_unused_buffers; /* total unused buffers in cache */ 4616712Stomee int ks_max_buffers_per_slab; /* max buffers per slab */ 4626712Stomee int ks_usage_len; /* ks_usage array length */ 4636712Stomee kmem_slab_usage_t *ks_usage; /* partial slab usage */ 4646712Stomee uint_t *ks_bucket; /* slab usage distribution */ 4656712Stomee } kmem_slab_stats_t; 4666712Stomee 4674688Stomee /*ARGSUSED*/ 4684688Stomee static int 4694688Stomee 
kmem_slablist_stat(uintptr_t addr, const kmem_slab_t *sp, 4704688Stomee kmem_slab_stats_t *ks) 4714688Stomee { 4724688Stomee kmem_slab_usage_t *ksu; 4734688Stomee long unused; 4744688Stomee 4754688Stomee ks->ks_slabs++; 4764688Stomee ks->ks_bucket[sp->slab_refcnt]++; 4774688Stomee 4784688Stomee unused = (sp->slab_chunks - sp->slab_refcnt); 4794688Stomee if (unused == 0) { 4804688Stomee return (WALK_NEXT); 4814688Stomee } 4824688Stomee 4834688Stomee ks->ks_partial_slabs++; 4844688Stomee ks->ks_unused_buffers += unused; 4854688Stomee 4864688Stomee if (ks->ks_partial_slabs > ks->ks_usage_len) { 4874688Stomee kmem_slab_usage_t *usage; 4884688Stomee int len = ks->ks_usage_len; 4894688Stomee 4904688Stomee len = (len == 0 ? 16 : len * 2); 4914688Stomee usage = mdb_zalloc(len * sizeof (kmem_slab_usage_t), UM_SLEEP); 4924688Stomee if (ks->ks_usage != NULL) { 4934688Stomee bcopy(ks->ks_usage, usage, 4944688Stomee ks->ks_usage_len * sizeof (kmem_slab_usage_t)); 4954688Stomee mdb_free(ks->ks_usage, 4964688Stomee ks->ks_usage_len * sizeof (kmem_slab_usage_t)); 4974688Stomee } 4984688Stomee ks->ks_usage = usage; 4994688Stomee ks->ks_usage_len = len; 5004688Stomee } 5014688Stomee 5024688Stomee ksu = &ks->ks_usage[ks->ks_partial_slabs - 1]; 5034688Stomee ksu->ksu_refcnt = sp->slab_refcnt; 5046712Stomee ksu->ksu_nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE); 5054688Stomee return (WALK_NEXT); 5064688Stomee } 5074688Stomee 5084688Stomee static void 5094688Stomee kmem_slabs_header() 5104688Stomee { 5114688Stomee mdb_printf("%-25s %8s %8s %9s %9s %6s\n", 5124688Stomee "", "", "Partial", "", "Unused", ""); 5134688Stomee mdb_printf("%-25s %8s %8s %9s %9s %6s\n", 5144688Stomee "Cache Name", "Slabs", "Slabs", "Buffers", "Buffers", "Waste"); 5154688Stomee mdb_printf("%-25s %8s %8s %9s %9s %6s\n", 5164688Stomee "-------------------------", "--------", "--------", "---------", 5174688Stomee "---------", "------"); 5184688Stomee } 5194688Stomee 5204688Stomee int 5214688Stomee 
kmem_slabs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 5224688Stomee { 5234688Stomee kmem_cache_t c; 5244688Stomee kmem_slab_stats_t stats; 5254688Stomee mdb_walk_cb_t cb; 5264688Stomee int pct; 5274688Stomee int tenths_pct; 5284688Stomee size_t maxbuckets = 1; 5294688Stomee size_t minbucketsize = 0; 5304688Stomee const char *filter = NULL; 5316712Stomee const char *name = NULL; 5324688Stomee uint_t opt_v = FALSE; 5336712Stomee boolean_t buckets = B_FALSE; 5344688Stomee boolean_t skip = B_FALSE; 5354688Stomee 5364688Stomee if (mdb_getopts(argc, argv, 5374688Stomee 'B', MDB_OPT_UINTPTR, &minbucketsize, 5384688Stomee 'b', MDB_OPT_UINTPTR, &maxbuckets, 5394688Stomee 'n', MDB_OPT_STR, &filter, 5406712Stomee 'N', MDB_OPT_STR, &name, 5414688Stomee 'v', MDB_OPT_SETBITS, TRUE, &opt_v, 5424688Stomee NULL) != argc) { 5434688Stomee return (DCMD_USAGE); 5444688Stomee } 5454688Stomee 5466712Stomee if ((maxbuckets != 1) || (minbucketsize != 0)) { 5476712Stomee buckets = B_TRUE; 5484688Stomee } 5494688Stomee 5504688Stomee if (!(flags & DCMD_ADDRSPEC)) { 5514688Stomee if (mdb_walk_dcmd("kmem_cache", "kmem_slabs", argc, 5524688Stomee argv) == -1) { 5534688Stomee mdb_warn("can't walk kmem_cache"); 5544688Stomee return (DCMD_ERR); 5554688Stomee } 5564688Stomee return (DCMD_OK); 5574688Stomee } 5584688Stomee 5594688Stomee if (mdb_vread(&c, sizeof (c), addr) == -1) { 5604688Stomee mdb_warn("couldn't read kmem_cache at %p", addr); 5614688Stomee return (DCMD_ERR); 5624688Stomee } 5634688Stomee 5646712Stomee if (name == NULL) { 5656712Stomee skip = ((filter != NULL) && 5666712Stomee (strstr(c.cache_name, filter) == NULL)); 5676712Stomee } else if (filter == NULL) { 5686712Stomee skip = (strcmp(c.cache_name, name) != 0); 5696712Stomee } else { 5706712Stomee /* match either -n or -N */ 5716712Stomee skip = ((strcmp(c.cache_name, name) != 0) && 5726712Stomee (strstr(c.cache_name, filter) == NULL)); 5734688Stomee } 5744688Stomee 5756712Stomee if (!(opt_v || buckets) && 
DCMD_HDRSPEC(flags)) { 5764688Stomee kmem_slabs_header(); 5776712Stomee } else if ((opt_v || buckets) && !skip) { 5784688Stomee if (DCMD_HDRSPEC(flags)) { 5794688Stomee kmem_slabs_header(); 5804688Stomee } else { 5814688Stomee boolean_t is_slab = B_FALSE; 5824688Stomee const char *walker_name; 5834688Stomee if (opt_v) { 5844688Stomee cb = (mdb_walk_cb_t)kmem_first_partial_slab; 5854688Stomee walker_name = "kmem_slab_partial"; 5864688Stomee } else { 5874688Stomee cb = (mdb_walk_cb_t)kmem_first_slab; 5884688Stomee walker_name = "kmem_slab"; 5894688Stomee } 5904688Stomee (void) mdb_pwalk(walker_name, cb, &is_slab, addr); 5914688Stomee if (is_slab) { 5924688Stomee kmem_slabs_header(); 5934688Stomee } 5944688Stomee } 5954688Stomee } 5964688Stomee 5974688Stomee if (skip) { 5984688Stomee return (DCMD_OK); 5994688Stomee } 6004688Stomee 6014688Stomee bzero(&stats, sizeof (kmem_slab_stats_t)); 6026712Stomee stats.ks_cp = &c; 6036712Stomee stats.ks_max_buffers_per_slab = c.cache_maxchunks; 6046712Stomee /* +1 to include a zero bucket */ 6056712Stomee stats.ks_bucket = mdb_zalloc((stats.ks_max_buffers_per_slab + 1) * 6066712Stomee sizeof (*stats.ks_bucket), UM_SLEEP); 6074688Stomee cb = (mdb_walk_cb_t)kmem_slablist_stat; 6084688Stomee (void) mdb_pwalk("kmem_slab", cb, &stats, addr); 6094688Stomee 6104688Stomee if (c.cache_buftotal == 0) { 6114688Stomee pct = 0; 6124688Stomee tenths_pct = 0; 6134688Stomee } else { 6144688Stomee uint64_t n = stats.ks_unused_buffers * 10000; 6154688Stomee pct = (int)(n / c.cache_buftotal); 6164688Stomee tenths_pct = pct - ((pct / 100) * 100); 6174688Stomee tenths_pct = (tenths_pct + 5) / 10; /* round nearest tenth */ 6184688Stomee if (tenths_pct == 10) { 6194688Stomee pct += 100; 6204688Stomee tenths_pct = 0; 6214688Stomee } 6224688Stomee } 6234688Stomee 6244688Stomee pct /= 100; 6254688Stomee mdb_printf("%-25s %8d %8d %9lld %9lld %3d.%1d%%\n", c.cache_name, 6264688Stomee stats.ks_slabs, stats.ks_partial_slabs, c.cache_buftotal, 6274688Stomee 
stats.ks_unused_buffers, pct, tenths_pct); 6284688Stomee 6294688Stomee if (maxbuckets == 0) { 6306712Stomee maxbuckets = stats.ks_max_buffers_per_slab; 6314688Stomee } 6324688Stomee 6334688Stomee if (((maxbuckets > 1) || (minbucketsize > 0)) && 6344688Stomee (stats.ks_slabs > 0)) { 6354688Stomee mdb_printf("\n"); 6364688Stomee kmem_slabs_print_dist(stats.ks_bucket, 6376712Stomee stats.ks_max_buffers_per_slab, maxbuckets, minbucketsize); 6386712Stomee } 6396712Stomee 6406712Stomee mdb_free(stats.ks_bucket, (stats.ks_max_buffers_per_slab + 1) * 6416712Stomee sizeof (*stats.ks_bucket)); 6426712Stomee 6436712Stomee if (!opt_v) { 6446712Stomee return (DCMD_OK); 6454688Stomee } 6464688Stomee 6474688Stomee if (opt_v && (stats.ks_partial_slabs > 0)) { 6484688Stomee int i; 6494688Stomee kmem_slab_usage_t *ksu; 6504688Stomee 6514688Stomee mdb_printf(" %d complete, %d partial", 6524688Stomee (stats.ks_slabs - stats.ks_partial_slabs), 6534688Stomee stats.ks_partial_slabs); 6544688Stomee if (stats.ks_partial_slabs > 0) { 6556712Stomee mdb_printf(" (%d):", stats.ks_max_buffers_per_slab); 6564688Stomee } 6574688Stomee for (i = 0; i < stats.ks_partial_slabs; i++) { 6584688Stomee ksu = &stats.ks_usage[i]; 6596712Stomee if (ksu->ksu_nomove) { 6606712Stomee const char *symbol = "*"; 6616712Stomee mdb_printf(" %d%s", ksu->ksu_refcnt, symbol); 6626712Stomee } else { 6636712Stomee mdb_printf(" %d", ksu->ksu_refcnt); 6646712Stomee } 6654688Stomee } 6664688Stomee mdb_printf("\n\n"); 6674688Stomee } 6684688Stomee 6694688Stomee if (stats.ks_usage_len > 0) { 6704688Stomee mdb_free(stats.ks_usage, 6714688Stomee stats.ks_usage_len * sizeof (kmem_slab_usage_t)); 6724688Stomee } 6734688Stomee 6744688Stomee return (DCMD_OK); 6754688Stomee } 6764688Stomee 6774688Stomee void 6784688Stomee kmem_slabs_help(void) 6794688Stomee { 6806712Stomee mdb_printf("%s", 6816712Stomee "Display slab usage per kmem cache.\n\n"); 6824688Stomee mdb_dec_indent(2); 6834688Stomee mdb_printf("%<b>OPTIONS%</b>\n"); 
6844688Stomee mdb_inc_indent(2); 6854688Stomee mdb_printf("%s", 6864688Stomee " -n name\n" 6874688Stomee " name of kmem cache (or matching partial name)\n" 6886712Stomee " -N name\n" 6896712Stomee " exact name of kmem cache\n" 6904688Stomee " -b maxbins\n" 6914688Stomee " Print a distribution of allocated buffers per slab using at\n" 6924688Stomee " most maxbins bins. The first bin is reserved for completely\n" 6934688Stomee " allocated slabs. Setting maxbins to zero (-b 0) has the same\n" 6944688Stomee " effect as specifying the maximum allocated buffers per slab\n" 6954688Stomee " or setting minbinsize to 1 (-B 1).\n" 6964688Stomee " -B minbinsize\n" 6974688Stomee " Print a distribution of allocated buffers per slab, making\n" 6984688Stomee " all bins (except the first, reserved for completely allocated\n" 6994688Stomee " slabs) at least minbinsize buffers apart.\n" 7004688Stomee " -v verbose output: List the allocated buffer count of each partial\n" 7014688Stomee " slab on the free list in order from front to back to show how\n" 7024688Stomee " closely the slabs are ordered by usage. For example\n" 7034688Stomee "\n" 7044688Stomee " 10 complete, 3 partial (8): 7 3 1\n" 7054688Stomee "\n" 7064688Stomee " means there are thirteen slabs with eight buffers each, including\n" 7074688Stomee " three partially allocated slabs with less than all eight buffers\n" 7084688Stomee " allocated.\n" 7094688Stomee "\n" 7104688Stomee " Buffer allocations are always from the front of the partial slab\n" 7114688Stomee " list. When a buffer is freed from a completely used slab, that\n" 7124688Stomee " slab is added to the front of the partial slab list. 
Assuming\n" 7134688Stomee " that all buffers are equally likely to be freed soon, the\n" 7144688Stomee " desired order of partial slabs is most-used at the front of the\n" 7154688Stomee " list and least-used at the back (as in the example above).\n" 7164688Stomee " However, if a slab contains an allocated buffer that will not\n" 7174688Stomee " soon be freed, it would be better for that slab to be at the\n" 7186712Stomee " front where all of its buffers can be allocated. Taking a slab\n" 7196712Stomee " off the partial slab list (either with all buffers freed or all\n" 7206712Stomee " buffers allocated) reduces cache fragmentation.\n" 7216712Stomee "\n" 7226712Stomee " A slab's allocated buffer count representing a partial slab (9 in\n" 7236712Stomee " the example below) may be marked as follows:\n" 7246712Stomee "\n" 7256712Stomee " 9* An asterisk indicates that kmem has marked the slab non-\n" 7266712Stomee " reclaimable because the kmem client refused to move one of the\n" 7276712Stomee " slab's buffers. Since kmem does not expect to completely free the\n" 7286712Stomee " slab, it moves it to the front of the list in the hope of\n" 7296712Stomee " completely allocating it instead. 
A slab marked with an asterisk\n" 7306712Stomee " stays marked for as long as it remains on the partial slab list.\n" 7314688Stomee "\n" 7324688Stomee "Column\t\tDescription\n" 7334688Stomee "\n" 7344688Stomee "Cache Name\t\tname of kmem cache\n" 7354688Stomee "Slabs\t\t\ttotal slab count\n" 7364688Stomee "Partial Slabs\t\tcount of partially allocated slabs on the free list\n" 7374688Stomee "Buffers\t\ttotal buffer count (Slabs * (buffers per slab))\n" 7384688Stomee "Unused Buffers\tcount of unallocated buffers across all partial slabs\n" 7394688Stomee "Waste\t\t\t(Unused Buffers / Buffers) does not include space\n" 7404688Stomee "\t\t\t for accounting structures (debug mode), slab\n" 7414688Stomee "\t\t\t coloring (incremental small offsets to stagger\n" 7424688Stomee "\t\t\t buffer alignment), or the per-CPU magazine layer\n"); 7434688Stomee } 7444688Stomee 7450Sstevel@tonic-gate static int 7460Sstevel@tonic-gate addrcmp(const void *lhs, const void *rhs) 7470Sstevel@tonic-gate { 7480Sstevel@tonic-gate uintptr_t p1 = *((uintptr_t *)lhs); 7490Sstevel@tonic-gate uintptr_t p2 = *((uintptr_t *)rhs); 7500Sstevel@tonic-gate 7510Sstevel@tonic-gate if (p1 < p2) 7520Sstevel@tonic-gate return (-1); 7530Sstevel@tonic-gate if (p1 > p2) 7540Sstevel@tonic-gate return (1); 7550Sstevel@tonic-gate return (0); 7560Sstevel@tonic-gate } 7570Sstevel@tonic-gate 7580Sstevel@tonic-gate static int 7590Sstevel@tonic-gate bufctlcmp(const kmem_bufctl_audit_t **lhs, const kmem_bufctl_audit_t **rhs) 7600Sstevel@tonic-gate { 7610Sstevel@tonic-gate const kmem_bufctl_audit_t *bcp1 = *lhs; 7620Sstevel@tonic-gate const kmem_bufctl_audit_t *bcp2 = *rhs; 7630Sstevel@tonic-gate 7640Sstevel@tonic-gate if (bcp1->bc_timestamp > bcp2->bc_timestamp) 7650Sstevel@tonic-gate return (-1); 7660Sstevel@tonic-gate 7670Sstevel@tonic-gate if (bcp1->bc_timestamp < bcp2->bc_timestamp) 7680Sstevel@tonic-gate return (1); 7690Sstevel@tonic-gate 7700Sstevel@tonic-gate return (0); 7710Sstevel@tonic-gate } 
7720Sstevel@tonic-gate 7730Sstevel@tonic-gate typedef struct kmem_hash_walk { 7740Sstevel@tonic-gate uintptr_t *kmhw_table; 7750Sstevel@tonic-gate size_t kmhw_nelems; 7760Sstevel@tonic-gate size_t kmhw_pos; 7770Sstevel@tonic-gate kmem_bufctl_t kmhw_cur; 7780Sstevel@tonic-gate } kmem_hash_walk_t; 7790Sstevel@tonic-gate 7800Sstevel@tonic-gate int 7810Sstevel@tonic-gate kmem_hash_walk_init(mdb_walk_state_t *wsp) 7820Sstevel@tonic-gate { 7830Sstevel@tonic-gate kmem_hash_walk_t *kmhw; 7840Sstevel@tonic-gate uintptr_t *hash; 7850Sstevel@tonic-gate kmem_cache_t c; 7860Sstevel@tonic-gate uintptr_t haddr, addr = wsp->walk_addr; 7870Sstevel@tonic-gate size_t nelems; 7880Sstevel@tonic-gate size_t hsize; 7890Sstevel@tonic-gate 7900Sstevel@tonic-gate if (addr == NULL) { 7910Sstevel@tonic-gate mdb_warn("kmem_hash doesn't support global walks\n"); 7920Sstevel@tonic-gate return (WALK_ERR); 7930Sstevel@tonic-gate } 7940Sstevel@tonic-gate 7950Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), addr) == -1) { 7960Sstevel@tonic-gate mdb_warn("couldn't read cache at addr %p", addr); 7970Sstevel@tonic-gate return (WALK_ERR); 7980Sstevel@tonic-gate } 7990Sstevel@tonic-gate 8000Sstevel@tonic-gate if (!(c.cache_flags & KMF_HASH)) { 8010Sstevel@tonic-gate mdb_warn("cache %p doesn't have a hash table\n", addr); 8020Sstevel@tonic-gate return (WALK_DONE); /* nothing to do */ 8030Sstevel@tonic-gate } 8040Sstevel@tonic-gate 8050Sstevel@tonic-gate kmhw = mdb_zalloc(sizeof (kmem_hash_walk_t), UM_SLEEP); 8060Sstevel@tonic-gate kmhw->kmhw_cur.bc_next = NULL; 8070Sstevel@tonic-gate kmhw->kmhw_pos = 0; 8080Sstevel@tonic-gate 8090Sstevel@tonic-gate kmhw->kmhw_nelems = nelems = c.cache_hash_mask + 1; 8100Sstevel@tonic-gate hsize = nelems * sizeof (uintptr_t); 8110Sstevel@tonic-gate haddr = (uintptr_t)c.cache_hash_table; 8120Sstevel@tonic-gate 8130Sstevel@tonic-gate kmhw->kmhw_table = hash = mdb_alloc(hsize, UM_SLEEP); 8140Sstevel@tonic-gate if (mdb_vread(hash, hsize, haddr) == -1) { 8150Sstevel@tonic-gate 
mdb_warn("failed to read hash table at %p", haddr); 8160Sstevel@tonic-gate mdb_free(hash, hsize); 8170Sstevel@tonic-gate mdb_free(kmhw, sizeof (kmem_hash_walk_t)); 8180Sstevel@tonic-gate return (WALK_ERR); 8190Sstevel@tonic-gate } 8200Sstevel@tonic-gate 8210Sstevel@tonic-gate wsp->walk_data = kmhw; 8220Sstevel@tonic-gate 8230Sstevel@tonic-gate return (WALK_NEXT); 8240Sstevel@tonic-gate } 8250Sstevel@tonic-gate 8260Sstevel@tonic-gate int 8270Sstevel@tonic-gate kmem_hash_walk_step(mdb_walk_state_t *wsp) 8280Sstevel@tonic-gate { 8290Sstevel@tonic-gate kmem_hash_walk_t *kmhw = wsp->walk_data; 8300Sstevel@tonic-gate uintptr_t addr = NULL; 8310Sstevel@tonic-gate 8320Sstevel@tonic-gate if ((addr = (uintptr_t)kmhw->kmhw_cur.bc_next) == NULL) { 8330Sstevel@tonic-gate while (kmhw->kmhw_pos < kmhw->kmhw_nelems) { 8340Sstevel@tonic-gate if ((addr = kmhw->kmhw_table[kmhw->kmhw_pos++]) != NULL) 8350Sstevel@tonic-gate break; 8360Sstevel@tonic-gate } 8370Sstevel@tonic-gate } 8380Sstevel@tonic-gate if (addr == NULL) 8390Sstevel@tonic-gate return (WALK_DONE); 8400Sstevel@tonic-gate 8410Sstevel@tonic-gate if (mdb_vread(&kmhw->kmhw_cur, sizeof (kmem_bufctl_t), addr) == -1) { 8420Sstevel@tonic-gate mdb_warn("couldn't read kmem_bufctl_t at addr %p", addr); 8430Sstevel@tonic-gate return (WALK_ERR); 8440Sstevel@tonic-gate } 8450Sstevel@tonic-gate 8460Sstevel@tonic-gate return (wsp->walk_callback(addr, &kmhw->kmhw_cur, wsp->walk_cbdata)); 8470Sstevel@tonic-gate } 8480Sstevel@tonic-gate 8490Sstevel@tonic-gate void 8500Sstevel@tonic-gate kmem_hash_walk_fini(mdb_walk_state_t *wsp) 8510Sstevel@tonic-gate { 8520Sstevel@tonic-gate kmem_hash_walk_t *kmhw = wsp->walk_data; 8530Sstevel@tonic-gate 8540Sstevel@tonic-gate if (kmhw == NULL) 8550Sstevel@tonic-gate return; 8560Sstevel@tonic-gate 8570Sstevel@tonic-gate mdb_free(kmhw->kmhw_table, kmhw->kmhw_nelems * sizeof (uintptr_t)); 8580Sstevel@tonic-gate mdb_free(kmhw, sizeof (kmem_hash_walk_t)); 8590Sstevel@tonic-gate } 8600Sstevel@tonic-gate 
8610Sstevel@tonic-gate /* 8620Sstevel@tonic-gate * Find the address of the bufctl structure for the address 'buf' in cache 8630Sstevel@tonic-gate * 'cp', which is at address caddr, and place it in *out. 8640Sstevel@tonic-gate */ 8650Sstevel@tonic-gate static int 8660Sstevel@tonic-gate kmem_hash_lookup(kmem_cache_t *cp, uintptr_t caddr, void *buf, uintptr_t *out) 8670Sstevel@tonic-gate { 8680Sstevel@tonic-gate uintptr_t bucket = (uintptr_t)KMEM_HASH(cp, buf); 8690Sstevel@tonic-gate kmem_bufctl_t *bcp; 8700Sstevel@tonic-gate kmem_bufctl_t bc; 8710Sstevel@tonic-gate 8720Sstevel@tonic-gate if (mdb_vread(&bcp, sizeof (kmem_bufctl_t *), bucket) == -1) { 8730Sstevel@tonic-gate mdb_warn("unable to read hash bucket for %p in cache %p", 8740Sstevel@tonic-gate buf, caddr); 8750Sstevel@tonic-gate return (-1); 8760Sstevel@tonic-gate } 8770Sstevel@tonic-gate 8780Sstevel@tonic-gate while (bcp != NULL) { 8790Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (kmem_bufctl_t), 8800Sstevel@tonic-gate (uintptr_t)bcp) == -1) { 8810Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", bcp); 8820Sstevel@tonic-gate return (-1); 8830Sstevel@tonic-gate } 8840Sstevel@tonic-gate if (bc.bc_addr == buf) { 8850Sstevel@tonic-gate *out = (uintptr_t)bcp; 8860Sstevel@tonic-gate return (0); 8870Sstevel@tonic-gate } 8880Sstevel@tonic-gate bcp = bc.bc_next; 8890Sstevel@tonic-gate } 8900Sstevel@tonic-gate 8910Sstevel@tonic-gate mdb_warn("unable to find bufctl for %p in cache %p\n", buf, caddr); 8920Sstevel@tonic-gate return (-1); 8930Sstevel@tonic-gate } 8940Sstevel@tonic-gate 8950Sstevel@tonic-gate int 8960Sstevel@tonic-gate kmem_get_magsize(const kmem_cache_t *cp) 8970Sstevel@tonic-gate { 8980Sstevel@tonic-gate uintptr_t addr = (uintptr_t)cp->cache_magtype; 8990Sstevel@tonic-gate GElf_Sym mt_sym; 9000Sstevel@tonic-gate kmem_magtype_t mt; 9010Sstevel@tonic-gate int res; 9020Sstevel@tonic-gate 9030Sstevel@tonic-gate /* 9040Sstevel@tonic-gate * if cpu 0 has a non-zero magsize, it must be correct. 
caches 9050Sstevel@tonic-gate * with KMF_NOMAGAZINE have disabled their magazine layers, so 9060Sstevel@tonic-gate * it is okay to return 0 for them. 9070Sstevel@tonic-gate */ 9080Sstevel@tonic-gate if ((res = cp->cache_cpu[0].cc_magsize) != 0 || 9090Sstevel@tonic-gate (cp->cache_flags & KMF_NOMAGAZINE)) 9100Sstevel@tonic-gate return (res); 9110Sstevel@tonic-gate 9120Sstevel@tonic-gate if (mdb_lookup_by_name("kmem_magtype", &mt_sym) == -1) { 9130Sstevel@tonic-gate mdb_warn("unable to read 'kmem_magtype'"); 9140Sstevel@tonic-gate } else if (addr < mt_sym.st_value || 9150Sstevel@tonic-gate addr + sizeof (mt) - 1 > mt_sym.st_value + mt_sym.st_size - 1 || 9160Sstevel@tonic-gate ((addr - mt_sym.st_value) % sizeof (mt)) != 0) { 9170Sstevel@tonic-gate mdb_warn("cache '%s' has invalid magtype pointer (%p)\n", 9180Sstevel@tonic-gate cp->cache_name, addr); 9190Sstevel@tonic-gate return (0); 9200Sstevel@tonic-gate } 9210Sstevel@tonic-gate if (mdb_vread(&mt, sizeof (mt), addr) == -1) { 9220Sstevel@tonic-gate mdb_warn("unable to read magtype at %a", addr); 9230Sstevel@tonic-gate return (0); 9240Sstevel@tonic-gate } 9250Sstevel@tonic-gate return (mt.mt_magsize); 9260Sstevel@tonic-gate } 9270Sstevel@tonic-gate 9280Sstevel@tonic-gate /*ARGSUSED*/ 9290Sstevel@tonic-gate static int 9300Sstevel@tonic-gate kmem_estimate_slab(uintptr_t addr, const kmem_slab_t *sp, size_t *est) 9310Sstevel@tonic-gate { 9320Sstevel@tonic-gate *est -= (sp->slab_chunks - sp->slab_refcnt); 9330Sstevel@tonic-gate 9340Sstevel@tonic-gate return (WALK_NEXT); 9350Sstevel@tonic-gate } 9360Sstevel@tonic-gate 9370Sstevel@tonic-gate /* 9380Sstevel@tonic-gate * Returns an upper bound on the number of allocated buffers in a given 9390Sstevel@tonic-gate * cache. 
9400Sstevel@tonic-gate */ 9410Sstevel@tonic-gate size_t 9420Sstevel@tonic-gate kmem_estimate_allocated(uintptr_t addr, const kmem_cache_t *cp) 9430Sstevel@tonic-gate { 9440Sstevel@tonic-gate int magsize; 9450Sstevel@tonic-gate size_t cache_est; 9460Sstevel@tonic-gate 9470Sstevel@tonic-gate cache_est = cp->cache_buftotal; 9480Sstevel@tonic-gate 9490Sstevel@tonic-gate (void) mdb_pwalk("kmem_slab_partial", 9500Sstevel@tonic-gate (mdb_walk_cb_t)kmem_estimate_slab, &cache_est, addr); 9510Sstevel@tonic-gate 9520Sstevel@tonic-gate if ((magsize = kmem_get_magsize(cp)) != 0) { 9530Sstevel@tonic-gate size_t mag_est = cp->cache_full.ml_total * magsize; 9540Sstevel@tonic-gate 9550Sstevel@tonic-gate if (cache_est >= mag_est) { 9560Sstevel@tonic-gate cache_est -= mag_est; 9570Sstevel@tonic-gate } else { 9580Sstevel@tonic-gate mdb_warn("cache %p's magazine layer holds more buffers " 9590Sstevel@tonic-gate "than the slab layer.\n", addr); 9600Sstevel@tonic-gate } 9610Sstevel@tonic-gate } 9620Sstevel@tonic-gate return (cache_est); 9630Sstevel@tonic-gate } 9640Sstevel@tonic-gate 9650Sstevel@tonic-gate #define READMAG_ROUNDS(rounds) { \ 9660Sstevel@tonic-gate if (mdb_vread(mp, magbsize, (uintptr_t)kmp) == -1) { \ 9670Sstevel@tonic-gate mdb_warn("couldn't read magazine at %p", kmp); \ 9680Sstevel@tonic-gate goto fail; \ 9690Sstevel@tonic-gate } \ 9700Sstevel@tonic-gate for (i = 0; i < rounds; i++) { \ 9710Sstevel@tonic-gate maglist[magcnt++] = mp->mag_round[i]; \ 9720Sstevel@tonic-gate if (magcnt == magmax) { \ 9730Sstevel@tonic-gate mdb_warn("%d magazines exceeds fudge factor\n", \ 9740Sstevel@tonic-gate magcnt); \ 9750Sstevel@tonic-gate goto fail; \ 9760Sstevel@tonic-gate } \ 9770Sstevel@tonic-gate } \ 9780Sstevel@tonic-gate } 9790Sstevel@tonic-gate 9800Sstevel@tonic-gate int 9810Sstevel@tonic-gate kmem_read_magazines(kmem_cache_t *cp, uintptr_t addr, int ncpus, 9820Sstevel@tonic-gate void ***maglistp, size_t *magcntp, size_t *magmaxp, int alloc_flags) 9830Sstevel@tonic-gate { 
9840Sstevel@tonic-gate kmem_magazine_t *kmp, *mp; 9850Sstevel@tonic-gate void **maglist = NULL; 9860Sstevel@tonic-gate int i, cpu; 9870Sstevel@tonic-gate size_t magsize, magmax, magbsize; 9880Sstevel@tonic-gate size_t magcnt = 0; 9890Sstevel@tonic-gate 9900Sstevel@tonic-gate /* 9910Sstevel@tonic-gate * Read the magtype out of the cache, after verifying the pointer's 9920Sstevel@tonic-gate * correctness. 9930Sstevel@tonic-gate */ 9940Sstevel@tonic-gate magsize = kmem_get_magsize(cp); 9951528Sjwadams if (magsize == 0) { 9961528Sjwadams *maglistp = NULL; 9971528Sjwadams *magcntp = 0; 9981528Sjwadams *magmaxp = 0; 9991528Sjwadams return (WALK_NEXT); 10001528Sjwadams } 10010Sstevel@tonic-gate 10020Sstevel@tonic-gate /* 10030Sstevel@tonic-gate * There are several places where we need to go buffer hunting: 10040Sstevel@tonic-gate * the per-CPU loaded magazine, the per-CPU spare full magazine, 10050Sstevel@tonic-gate * and the full magazine list in the depot. 10060Sstevel@tonic-gate * 10070Sstevel@tonic-gate * For an upper bound on the number of buffers in the magazine 10080Sstevel@tonic-gate * layer, we have the number of magazines on the cache_full 10090Sstevel@tonic-gate * list plus at most two magazines per CPU (the loaded and the 10100Sstevel@tonic-gate * spare). Toss in 100 magazines as a fudge factor in case this 10110Sstevel@tonic-gate * is live (the number "100" comes from the same fudge factor in 10120Sstevel@tonic-gate * crash(1M)). 
10130Sstevel@tonic-gate */ 10140Sstevel@tonic-gate magmax = (cp->cache_full.ml_total + 2 * ncpus + 100) * magsize; 10150Sstevel@tonic-gate magbsize = offsetof(kmem_magazine_t, mag_round[magsize]); 10160Sstevel@tonic-gate 10170Sstevel@tonic-gate if (magbsize >= PAGESIZE / 2) { 10180Sstevel@tonic-gate mdb_warn("magazine size for cache %p unreasonable (%x)\n", 10190Sstevel@tonic-gate addr, magbsize); 10201528Sjwadams return (WALK_ERR); 10210Sstevel@tonic-gate } 10220Sstevel@tonic-gate 10230Sstevel@tonic-gate maglist = mdb_alloc(magmax * sizeof (void *), alloc_flags); 10240Sstevel@tonic-gate mp = mdb_alloc(magbsize, alloc_flags); 10250Sstevel@tonic-gate if (mp == NULL || maglist == NULL) 10260Sstevel@tonic-gate goto fail; 10270Sstevel@tonic-gate 10280Sstevel@tonic-gate /* 10290Sstevel@tonic-gate * First up: the magazines in the depot (i.e. on the cache_full list). 10300Sstevel@tonic-gate */ 10310Sstevel@tonic-gate for (kmp = cp->cache_full.ml_list; kmp != NULL; ) { 10320Sstevel@tonic-gate READMAG_ROUNDS(magsize); 10330Sstevel@tonic-gate kmp = mp->mag_next; 10340Sstevel@tonic-gate 10350Sstevel@tonic-gate if (kmp == cp->cache_full.ml_list) 10360Sstevel@tonic-gate break; /* cache_full list loop detected */ 10370Sstevel@tonic-gate } 10380Sstevel@tonic-gate 10390Sstevel@tonic-gate dprintf(("cache_full list done\n")); 10400Sstevel@tonic-gate 10410Sstevel@tonic-gate /* 10420Sstevel@tonic-gate * Now whip through the CPUs, snagging the loaded magazines 10430Sstevel@tonic-gate * and full spares. 
10440Sstevel@tonic-gate */ 10450Sstevel@tonic-gate for (cpu = 0; cpu < ncpus; cpu++) { 10460Sstevel@tonic-gate kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu]; 10470Sstevel@tonic-gate 10480Sstevel@tonic-gate dprintf(("reading cpu cache %p\n", 10490Sstevel@tonic-gate (uintptr_t)ccp - (uintptr_t)cp + addr)); 10500Sstevel@tonic-gate 10510Sstevel@tonic-gate if (ccp->cc_rounds > 0 && 10520Sstevel@tonic-gate (kmp = ccp->cc_loaded) != NULL) { 10530Sstevel@tonic-gate dprintf(("reading %d loaded rounds\n", ccp->cc_rounds)); 10540Sstevel@tonic-gate READMAG_ROUNDS(ccp->cc_rounds); 10550Sstevel@tonic-gate } 10560Sstevel@tonic-gate 10570Sstevel@tonic-gate if (ccp->cc_prounds > 0 && 10580Sstevel@tonic-gate (kmp = ccp->cc_ploaded) != NULL) { 10590Sstevel@tonic-gate dprintf(("reading %d previously loaded rounds\n", 10600Sstevel@tonic-gate ccp->cc_prounds)); 10610Sstevel@tonic-gate READMAG_ROUNDS(ccp->cc_prounds); 10620Sstevel@tonic-gate } 10630Sstevel@tonic-gate } 10640Sstevel@tonic-gate 10650Sstevel@tonic-gate dprintf(("magazine layer: %d buffers\n", magcnt)); 10660Sstevel@tonic-gate 10670Sstevel@tonic-gate if (!(alloc_flags & UM_GC)) 10680Sstevel@tonic-gate mdb_free(mp, magbsize); 10690Sstevel@tonic-gate 10700Sstevel@tonic-gate *maglistp = maglist; 10710Sstevel@tonic-gate *magcntp = magcnt; 10720Sstevel@tonic-gate *magmaxp = magmax; 10730Sstevel@tonic-gate 10740Sstevel@tonic-gate return (WALK_NEXT); 10750Sstevel@tonic-gate 10760Sstevel@tonic-gate fail: 10770Sstevel@tonic-gate if (!(alloc_flags & UM_GC)) { 10780Sstevel@tonic-gate if (mp) 10790Sstevel@tonic-gate mdb_free(mp, magbsize); 10800Sstevel@tonic-gate if (maglist) 10810Sstevel@tonic-gate mdb_free(maglist, magmax * sizeof (void *)); 10820Sstevel@tonic-gate } 10830Sstevel@tonic-gate return (WALK_ERR); 10840Sstevel@tonic-gate } 10850Sstevel@tonic-gate 10860Sstevel@tonic-gate static int 10870Sstevel@tonic-gate kmem_walk_callback(mdb_walk_state_t *wsp, uintptr_t buf) 10880Sstevel@tonic-gate { 10890Sstevel@tonic-gate return 
(wsp->walk_callback(buf, NULL, wsp->walk_cbdata)); 10900Sstevel@tonic-gate } 10910Sstevel@tonic-gate 10920Sstevel@tonic-gate static int 10930Sstevel@tonic-gate bufctl_walk_callback(kmem_cache_t *cp, mdb_walk_state_t *wsp, uintptr_t buf) 10940Sstevel@tonic-gate { 10950Sstevel@tonic-gate kmem_bufctl_audit_t b; 10960Sstevel@tonic-gate 10970Sstevel@tonic-gate /* 10980Sstevel@tonic-gate * if KMF_AUDIT is not set, we know that we're looking at a 10990Sstevel@tonic-gate * kmem_bufctl_t. 11000Sstevel@tonic-gate */ 11010Sstevel@tonic-gate if (!(cp->cache_flags & KMF_AUDIT) || 11020Sstevel@tonic-gate mdb_vread(&b, sizeof (kmem_bufctl_audit_t), buf) == -1) { 11030Sstevel@tonic-gate (void) memset(&b, 0, sizeof (b)); 11040Sstevel@tonic-gate if (mdb_vread(&b, sizeof (kmem_bufctl_t), buf) == -1) { 11050Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", buf); 11060Sstevel@tonic-gate return (WALK_ERR); 11070Sstevel@tonic-gate } 11080Sstevel@tonic-gate } 11090Sstevel@tonic-gate 11100Sstevel@tonic-gate return (wsp->walk_callback(buf, &b, wsp->walk_cbdata)); 11110Sstevel@tonic-gate } 11120Sstevel@tonic-gate 11130Sstevel@tonic-gate typedef struct kmem_walk { 11140Sstevel@tonic-gate int kmw_type; 11150Sstevel@tonic-gate 11160Sstevel@tonic-gate int kmw_addr; /* cache address */ 11170Sstevel@tonic-gate kmem_cache_t *kmw_cp; 11180Sstevel@tonic-gate size_t kmw_csize; 11190Sstevel@tonic-gate 11200Sstevel@tonic-gate /* 11210Sstevel@tonic-gate * magazine layer 11220Sstevel@tonic-gate */ 11230Sstevel@tonic-gate void **kmw_maglist; 11240Sstevel@tonic-gate size_t kmw_max; 11250Sstevel@tonic-gate size_t kmw_count; 11260Sstevel@tonic-gate size_t kmw_pos; 11270Sstevel@tonic-gate 11280Sstevel@tonic-gate /* 11290Sstevel@tonic-gate * slab layer 11300Sstevel@tonic-gate */ 11310Sstevel@tonic-gate char *kmw_valid; /* to keep track of freed buffers */ 11320Sstevel@tonic-gate char *kmw_ubase; /* buffer for slab data */ 11330Sstevel@tonic-gate } kmem_walk_t; 11340Sstevel@tonic-gate 
/*
 * Common initialization for the kmem walkers.  'type' is a bitmask of
 * KM_ALLOCATED / KM_FREE / KM_BUFCTL / KM_CONSTRUCTED selecting which
 * buffers to visit and how to report them; wsp->walk_addr must be the
 * address of a kmem_cache_t.  Sets up a kmem_walk_t in wsp->walk_data
 * and starts the appropriate layered walk (hash table or slab list).
 */
static int
kmem_walk_init_common(mdb_walk_state_t *wsp, int type)
{
	kmem_walk_t *kmw;
	int ncpus, csize;
	kmem_cache_t *cp;
	size_t vm_quantum;

	size_t magmax, magcnt;
	void **maglist = NULL;
	uint_t chunksize, slabsize;
	int status = WALK_ERR;
	uintptr_t addr = wsp->walk_addr;
	const char *layered;

	/* KM_HASH is an internal flag; it is decided below, not by callers */
	type &= ~KM_HASH;

	if (addr == NULL) {
		mdb_warn("kmem walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	dprintf(("walking %p\n", addr));

	/*
	 * First we need to figure out how many CPUs are configured in the
	 * system to know how much to slurp out.
	 */
	mdb_readvar(&ncpus, "max_ncpus");

	csize = KMEM_CACHE_SIZE(ncpus);
	cp = mdb_alloc(csize, UM_SLEEP);

	if (mdb_vread(cp, csize, addr) == -1) {
		mdb_warn("couldn't read cache at addr %p", addr);
		goto out2;
	}

	/*
	 * It's easy for someone to hand us an invalid cache address.
	 * Unfortunately, it is hard for this walker to survive an
	 * invalid cache cleanly.  So we make sure that:
	 *
	 *	1. the vmem arena for the cache is readable,
	 *	2. the vmem arena's quantum is a power of 2,
	 *	3. our slabsize is a multiple of the quantum, and
	 *	4. our chunksize is >0 and less than our slabsize.
	 */
	if (mdb_vread(&vm_quantum, sizeof (vm_quantum),
	    (uintptr_t)&cp->cache_arena->vm_quantum) == -1 ||
	    vm_quantum == 0 ||
	    (vm_quantum & (vm_quantum - 1)) != 0 ||
	    cp->cache_slabsize < vm_quantum ||
	    P2PHASE(cp->cache_slabsize, vm_quantum) != 0 ||
	    cp->cache_chunksize == 0 ||
	    cp->cache_chunksize > cp->cache_slabsize) {
		mdb_warn("%p is not a valid kmem_cache_t\n", addr);
		goto out2;
	}

	dprintf(("buf total is %d\n", cp->cache_buftotal));

	/* an empty cache has nothing to walk */
	if (cp->cache_buftotal == 0) {
		mdb_free(cp, csize);
		return (WALK_DONE);
	}

	/*
	 * If they ask for bufctls, but it's a small-slab cache,
	 * there is nothing to report.
	 */
	if ((type & KM_BUFCTL) && !(cp->cache_flags & KMF_HASH)) {
		dprintf(("bufctl requested, not KMF_HASH (flags: %p)\n",
		    cp->cache_flags));
		mdb_free(cp, csize);
		return (WALK_DONE);
	}

	/*
	 * If they want constructed buffers, but there's no constructor or
	 * the cache has DEADBEEF checking enabled, there is nothing to report.
	 */
	if ((type & KM_CONSTRUCTED) && (!(type & KM_FREE) ||
	    cp->cache_constructor == NULL ||
	    (cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) == KMF_DEADBEEF)) {
		mdb_free(cp, csize);
		return (WALK_DONE);
	}

	/*
	 * Read in the contents of the magazine layer
	 */
	if (kmem_read_magazines(cp, addr, ncpus, &maglist, &magcnt,
	    &magmax, UM_SLEEP) == WALK_ERR)
		goto out2;

	/*
	 * We have all of the buffers from the magazines; if we are walking
	 * allocated buffers, sort them so we can bsearch them later.
	 */
	if (type & KM_ALLOCATED)
		qsort(maglist, magcnt, sizeof (void *), addrcmp);

	wsp->walk_data = kmw = mdb_zalloc(sizeof (kmem_walk_t), UM_SLEEP);

	kmw->kmw_type = type;
	kmw->kmw_addr = addr;
	kmw->kmw_cp = cp;
	kmw->kmw_csize = csize;
	kmw->kmw_maglist = maglist;
	kmw->kmw_max = magmax;
	kmw->kmw_count = magcnt;
	kmw->kmw_pos = 0;

	/*
	 * When walking allocated buffers in a KMF_HASH cache, we walk the
	 * hash table instead of the slab layer.
	 */
	if ((cp->cache_flags & KMF_HASH) && (type & KM_ALLOCATED)) {
		layered = "kmem_hash";

		kmw->kmw_type |= KM_HASH;
	} else {
		/*
		 * If we are walking freed buffers, we only need the
		 * magazine layer plus the partially allocated slabs.
		 * To walk allocated buffers, we need all of the slabs.
		 */
		if (type & KM_ALLOCATED)
			layered = "kmem_slab";
		else
			layered = "kmem_slab_partial";

		/*
		 * for small-slab caches, we read in the entire slab.  For
		 * freed buffers, we can just walk the freelist.  For
		 * allocated buffers, we use a 'valid' array to track
		 * the freed buffers.
		 */
		if (!(cp->cache_flags & KMF_HASH)) {
			chunksize = cp->cache_chunksize;
			slabsize = cp->cache_slabsize;

			kmw->kmw_ubase = mdb_alloc(slabsize +
			    sizeof (kmem_bufctl_t), UM_SLEEP);

			if (type & KM_ALLOCATED)
				kmw->kmw_valid =
				    mdb_alloc(slabsize / chunksize, UM_SLEEP);
		}
	}

	status = WALK_NEXT;

	if (mdb_layered_walk(layered, wsp) == -1) {
		mdb_warn("unable to start layered '%s' walk", layered);
		status = WALK_ERR;
	}

	/*
	 * out1: undoes the kmem_walk_t setup; out2: frees the cache copy.
	 * Both are fall-through labels guarded by the status check.
	 */
out1:
	if (status == WALK_ERR) {
		if (kmw->kmw_valid)
			mdb_free(kmw->kmw_valid, slabsize / chunksize);

		if (kmw->kmw_ubase)
			mdb_free(kmw->kmw_ubase, slabsize +
			    sizeof (kmem_bufctl_t));

		if (kmw->kmw_maglist)
			mdb_free(kmw->kmw_maglist,
			    kmw->kmw_max * sizeof (uintptr_t));

		mdb_free(kmw, sizeof (kmem_walk_t));
		wsp->walk_data = NULL;
	}

out2:
	if (status == WALK_ERR)
		mdb_free(cp, csize);

	return (status);
}

/*
 * Step function for the kmem walkers: reports the next buffer (or bufctl)
 * from the magazine, hash, or slab layer, as selected at init time.
 */
int
kmem_walk_step(mdb_walk_state_t *wsp)
{
kmem_walk_t *kmw = wsp->walk_data; 13220Sstevel@tonic-gate int type = kmw->kmw_type; 13230Sstevel@tonic-gate kmem_cache_t *cp = kmw->kmw_cp; 13240Sstevel@tonic-gate 13250Sstevel@tonic-gate void **maglist = kmw->kmw_maglist; 13260Sstevel@tonic-gate int magcnt = kmw->kmw_count; 13270Sstevel@tonic-gate 13280Sstevel@tonic-gate uintptr_t chunksize, slabsize; 13290Sstevel@tonic-gate uintptr_t addr; 13300Sstevel@tonic-gate const kmem_slab_t *sp; 13310Sstevel@tonic-gate const kmem_bufctl_t *bcp; 13320Sstevel@tonic-gate kmem_bufctl_t bc; 13330Sstevel@tonic-gate 13340Sstevel@tonic-gate int chunks; 13350Sstevel@tonic-gate char *kbase; 13360Sstevel@tonic-gate void *buf; 13370Sstevel@tonic-gate int i, ret; 13380Sstevel@tonic-gate 13390Sstevel@tonic-gate char *valid, *ubase; 13400Sstevel@tonic-gate 13410Sstevel@tonic-gate /* 13420Sstevel@tonic-gate * first, handle the 'kmem_hash' layered walk case 13430Sstevel@tonic-gate */ 13440Sstevel@tonic-gate if (type & KM_HASH) { 13450Sstevel@tonic-gate /* 13460Sstevel@tonic-gate * We have a buffer which has been allocated out of the 13470Sstevel@tonic-gate * global layer. We need to make sure that it's not 13480Sstevel@tonic-gate * actually sitting in a magazine before we report it as 13490Sstevel@tonic-gate * an allocated buffer. 
13500Sstevel@tonic-gate */ 13510Sstevel@tonic-gate buf = ((const kmem_bufctl_t *)wsp->walk_layer)->bc_addr; 13520Sstevel@tonic-gate 13530Sstevel@tonic-gate if (magcnt > 0 && 13540Sstevel@tonic-gate bsearch(&buf, maglist, magcnt, sizeof (void *), 13550Sstevel@tonic-gate addrcmp) != NULL) 13560Sstevel@tonic-gate return (WALK_NEXT); 13570Sstevel@tonic-gate 13580Sstevel@tonic-gate if (type & KM_BUFCTL) 13590Sstevel@tonic-gate return (bufctl_walk_callback(cp, wsp, wsp->walk_addr)); 13600Sstevel@tonic-gate 13610Sstevel@tonic-gate return (kmem_walk_callback(wsp, (uintptr_t)buf)); 13620Sstevel@tonic-gate } 13630Sstevel@tonic-gate 13640Sstevel@tonic-gate ret = WALK_NEXT; 13650Sstevel@tonic-gate 13660Sstevel@tonic-gate addr = kmw->kmw_addr; 13670Sstevel@tonic-gate 13680Sstevel@tonic-gate /* 13690Sstevel@tonic-gate * If we're walking freed buffers, report everything in the 13700Sstevel@tonic-gate * magazine layer before processing the first slab. 13710Sstevel@tonic-gate */ 13720Sstevel@tonic-gate if ((type & KM_FREE) && magcnt != 0) { 13730Sstevel@tonic-gate kmw->kmw_count = 0; /* only do this once */ 13740Sstevel@tonic-gate for (i = 0; i < magcnt; i++) { 13750Sstevel@tonic-gate buf = maglist[i]; 13760Sstevel@tonic-gate 13770Sstevel@tonic-gate if (type & KM_BUFCTL) { 13780Sstevel@tonic-gate uintptr_t out; 13790Sstevel@tonic-gate 13800Sstevel@tonic-gate if (cp->cache_flags & KMF_BUFTAG) { 13810Sstevel@tonic-gate kmem_buftag_t *btp; 13820Sstevel@tonic-gate kmem_buftag_t tag; 13830Sstevel@tonic-gate 13840Sstevel@tonic-gate /* LINTED - alignment */ 13850Sstevel@tonic-gate btp = KMEM_BUFTAG(cp, buf); 13860Sstevel@tonic-gate if (mdb_vread(&tag, sizeof (tag), 13870Sstevel@tonic-gate (uintptr_t)btp) == -1) { 13880Sstevel@tonic-gate mdb_warn("reading buftag for " 13890Sstevel@tonic-gate "%p at %p", buf, btp); 13900Sstevel@tonic-gate continue; 13910Sstevel@tonic-gate } 13920Sstevel@tonic-gate out = (uintptr_t)tag.bt_bufctl; 13930Sstevel@tonic-gate } else { 13940Sstevel@tonic-gate if 
(kmem_hash_lookup(cp, addr, buf, 13950Sstevel@tonic-gate &out) == -1) 13960Sstevel@tonic-gate continue; 13970Sstevel@tonic-gate } 13980Sstevel@tonic-gate ret = bufctl_walk_callback(cp, wsp, out); 13990Sstevel@tonic-gate } else { 14000Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf); 14010Sstevel@tonic-gate } 14020Sstevel@tonic-gate 14030Sstevel@tonic-gate if (ret != WALK_NEXT) 14040Sstevel@tonic-gate return (ret); 14050Sstevel@tonic-gate } 14060Sstevel@tonic-gate } 14070Sstevel@tonic-gate 14080Sstevel@tonic-gate /* 14090Sstevel@tonic-gate * If they want constructed buffers, we're finished, since the 14100Sstevel@tonic-gate * magazine layer holds them all. 14110Sstevel@tonic-gate */ 14120Sstevel@tonic-gate if (type & KM_CONSTRUCTED) 14130Sstevel@tonic-gate return (WALK_DONE); 14140Sstevel@tonic-gate 14150Sstevel@tonic-gate /* 14160Sstevel@tonic-gate * Handle the buffers in the current slab 14170Sstevel@tonic-gate */ 14180Sstevel@tonic-gate chunksize = cp->cache_chunksize; 14190Sstevel@tonic-gate slabsize = cp->cache_slabsize; 14200Sstevel@tonic-gate 14210Sstevel@tonic-gate sp = wsp->walk_layer; 14220Sstevel@tonic-gate chunks = sp->slab_chunks; 14230Sstevel@tonic-gate kbase = sp->slab_base; 14240Sstevel@tonic-gate 14250Sstevel@tonic-gate dprintf(("kbase is %p\n", kbase)); 14260Sstevel@tonic-gate 14270Sstevel@tonic-gate if (!(cp->cache_flags & KMF_HASH)) { 14280Sstevel@tonic-gate valid = kmw->kmw_valid; 14290Sstevel@tonic-gate ubase = kmw->kmw_ubase; 14300Sstevel@tonic-gate 14310Sstevel@tonic-gate if (mdb_vread(ubase, chunks * chunksize, 14320Sstevel@tonic-gate (uintptr_t)kbase) == -1) { 14330Sstevel@tonic-gate mdb_warn("failed to read slab contents at %p", kbase); 14340Sstevel@tonic-gate return (WALK_ERR); 14350Sstevel@tonic-gate } 14360Sstevel@tonic-gate 14370Sstevel@tonic-gate /* 14380Sstevel@tonic-gate * Set up the valid map as fully allocated -- we'll punch 14390Sstevel@tonic-gate * out the freelist. 
14400Sstevel@tonic-gate */ 14410Sstevel@tonic-gate if (type & KM_ALLOCATED) 14420Sstevel@tonic-gate (void) memset(valid, 1, chunks); 14430Sstevel@tonic-gate } else { 14440Sstevel@tonic-gate valid = NULL; 14450Sstevel@tonic-gate ubase = NULL; 14460Sstevel@tonic-gate } 14470Sstevel@tonic-gate 14480Sstevel@tonic-gate /* 14490Sstevel@tonic-gate * walk the slab's freelist 14500Sstevel@tonic-gate */ 14510Sstevel@tonic-gate bcp = sp->slab_head; 14520Sstevel@tonic-gate 14530Sstevel@tonic-gate dprintf(("refcnt is %d; chunks is %d\n", sp->slab_refcnt, chunks)); 14540Sstevel@tonic-gate 14550Sstevel@tonic-gate /* 14560Sstevel@tonic-gate * since we could be in the middle of allocating a buffer, 14570Sstevel@tonic-gate * our refcnt could be one higher than it aught. So we 14580Sstevel@tonic-gate * check one further on the freelist than the count allows. 14590Sstevel@tonic-gate */ 14600Sstevel@tonic-gate for (i = sp->slab_refcnt; i <= chunks; i++) { 14610Sstevel@tonic-gate uint_t ndx; 14620Sstevel@tonic-gate 14630Sstevel@tonic-gate dprintf(("bcp is %p\n", bcp)); 14640Sstevel@tonic-gate 14650Sstevel@tonic-gate if (bcp == NULL) { 14660Sstevel@tonic-gate if (i == chunks) 14670Sstevel@tonic-gate break; 14680Sstevel@tonic-gate mdb_warn( 14690Sstevel@tonic-gate "slab %p in cache %p freelist too short by %d\n", 14700Sstevel@tonic-gate sp, addr, chunks - i); 14710Sstevel@tonic-gate break; 14720Sstevel@tonic-gate } 14730Sstevel@tonic-gate 14740Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) { 14750Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), (uintptr_t)bcp) == -1) { 14760Sstevel@tonic-gate mdb_warn("failed to read bufctl ptr at %p", 14770Sstevel@tonic-gate bcp); 14780Sstevel@tonic-gate break; 14790Sstevel@tonic-gate } 14800Sstevel@tonic-gate buf = bc.bc_addr; 14810Sstevel@tonic-gate } else { 14820Sstevel@tonic-gate /* 14830Sstevel@tonic-gate * Otherwise the buffer is in the slab which 14840Sstevel@tonic-gate * we've read in; we just need to determine 14850Sstevel@tonic-gate * its 
offset in the slab to find the 14860Sstevel@tonic-gate * kmem_bufctl_t. 14870Sstevel@tonic-gate */ 14880Sstevel@tonic-gate bc = *((kmem_bufctl_t *) 14890Sstevel@tonic-gate ((uintptr_t)bcp - (uintptr_t)kbase + 14900Sstevel@tonic-gate (uintptr_t)ubase)); 14910Sstevel@tonic-gate 14920Sstevel@tonic-gate buf = KMEM_BUF(cp, bcp); 14930Sstevel@tonic-gate } 14940Sstevel@tonic-gate 14950Sstevel@tonic-gate ndx = ((uintptr_t)buf - (uintptr_t)kbase) / chunksize; 14960Sstevel@tonic-gate 14970Sstevel@tonic-gate if (ndx > slabsize / cp->cache_bufsize) { 14980Sstevel@tonic-gate /* 14990Sstevel@tonic-gate * This is very wrong; we have managed to find 15000Sstevel@tonic-gate * a buffer in the slab which shouldn't 15010Sstevel@tonic-gate * actually be here. Emit a warning, and 15020Sstevel@tonic-gate * try to continue. 15030Sstevel@tonic-gate */ 15040Sstevel@tonic-gate mdb_warn("buf %p is out of range for " 15050Sstevel@tonic-gate "slab %p, cache %p\n", buf, sp, addr); 15060Sstevel@tonic-gate } else if (type & KM_ALLOCATED) { 15070Sstevel@tonic-gate /* 15080Sstevel@tonic-gate * we have found a buffer on the slab's freelist; 15090Sstevel@tonic-gate * clear its entry 15100Sstevel@tonic-gate */ 15110Sstevel@tonic-gate valid[ndx] = 0; 15120Sstevel@tonic-gate } else { 15130Sstevel@tonic-gate /* 15140Sstevel@tonic-gate * Report this freed buffer 15150Sstevel@tonic-gate */ 15160Sstevel@tonic-gate if (type & KM_BUFCTL) { 15170Sstevel@tonic-gate ret = bufctl_walk_callback(cp, wsp, 15180Sstevel@tonic-gate (uintptr_t)bcp); 15190Sstevel@tonic-gate } else { 15200Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf); 15210Sstevel@tonic-gate } 15220Sstevel@tonic-gate if (ret != WALK_NEXT) 15230Sstevel@tonic-gate return (ret); 15240Sstevel@tonic-gate } 15250Sstevel@tonic-gate 15260Sstevel@tonic-gate bcp = bc.bc_next; 15270Sstevel@tonic-gate } 15280Sstevel@tonic-gate 15290Sstevel@tonic-gate if (bcp != NULL) { 15300Sstevel@tonic-gate dprintf(("slab %p in cache %p freelist too long (%p)\n", 
15310Sstevel@tonic-gate sp, addr, bcp)); 15320Sstevel@tonic-gate } 15330Sstevel@tonic-gate 15340Sstevel@tonic-gate /* 15350Sstevel@tonic-gate * If we are walking freed buffers, the loop above handled reporting 15360Sstevel@tonic-gate * them. 15370Sstevel@tonic-gate */ 15380Sstevel@tonic-gate if (type & KM_FREE) 15390Sstevel@tonic-gate return (WALK_NEXT); 15400Sstevel@tonic-gate 15410Sstevel@tonic-gate if (type & KM_BUFCTL) { 15420Sstevel@tonic-gate mdb_warn("impossible situation: small-slab KM_BUFCTL walk for " 15430Sstevel@tonic-gate "cache %p\n", addr); 15440Sstevel@tonic-gate return (WALK_ERR); 15450Sstevel@tonic-gate } 15460Sstevel@tonic-gate 15470Sstevel@tonic-gate /* 15480Sstevel@tonic-gate * Report allocated buffers, skipping buffers in the magazine layer. 15490Sstevel@tonic-gate * We only get this far for small-slab caches. 15500Sstevel@tonic-gate */ 15510Sstevel@tonic-gate for (i = 0; ret == WALK_NEXT && i < chunks; i++) { 15520Sstevel@tonic-gate buf = (char *)kbase + i * chunksize; 15530Sstevel@tonic-gate 15540Sstevel@tonic-gate if (!valid[i]) 15550Sstevel@tonic-gate continue; /* on slab freelist */ 15560Sstevel@tonic-gate 15570Sstevel@tonic-gate if (magcnt > 0 && 15580Sstevel@tonic-gate bsearch(&buf, maglist, magcnt, sizeof (void *), 15590Sstevel@tonic-gate addrcmp) != NULL) 15600Sstevel@tonic-gate continue; /* in magazine layer */ 15610Sstevel@tonic-gate 15620Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf); 15630Sstevel@tonic-gate } 15640Sstevel@tonic-gate return (ret); 15650Sstevel@tonic-gate } 15660Sstevel@tonic-gate 15670Sstevel@tonic-gate void 15680Sstevel@tonic-gate kmem_walk_fini(mdb_walk_state_t *wsp) 15690Sstevel@tonic-gate { 15700Sstevel@tonic-gate kmem_walk_t *kmw = wsp->walk_data; 15710Sstevel@tonic-gate uintptr_t chunksize; 15720Sstevel@tonic-gate uintptr_t slabsize; 15730Sstevel@tonic-gate 15740Sstevel@tonic-gate if (kmw == NULL) 15750Sstevel@tonic-gate return; 15760Sstevel@tonic-gate 15770Sstevel@tonic-gate if 
(kmw->kmw_maglist != NULL) 15780Sstevel@tonic-gate mdb_free(kmw->kmw_maglist, kmw->kmw_max * sizeof (void *)); 15790Sstevel@tonic-gate 15800Sstevel@tonic-gate chunksize = kmw->kmw_cp->cache_chunksize; 15810Sstevel@tonic-gate slabsize = kmw->kmw_cp->cache_slabsize; 15820Sstevel@tonic-gate 15830Sstevel@tonic-gate if (kmw->kmw_valid != NULL) 15840Sstevel@tonic-gate mdb_free(kmw->kmw_valid, slabsize / chunksize); 15850Sstevel@tonic-gate if (kmw->kmw_ubase != NULL) 15860Sstevel@tonic-gate mdb_free(kmw->kmw_ubase, slabsize + sizeof (kmem_bufctl_t)); 15870Sstevel@tonic-gate 15880Sstevel@tonic-gate mdb_free(kmw->kmw_cp, kmw->kmw_csize); 15890Sstevel@tonic-gate mdb_free(kmw, sizeof (kmem_walk_t)); 15900Sstevel@tonic-gate } 15910Sstevel@tonic-gate 15920Sstevel@tonic-gate /*ARGSUSED*/ 15930Sstevel@tonic-gate static int 15940Sstevel@tonic-gate kmem_walk_all(uintptr_t addr, const kmem_cache_t *c, mdb_walk_state_t *wsp) 15950Sstevel@tonic-gate { 15960Sstevel@tonic-gate /* 15970Sstevel@tonic-gate * Buffers allocated from NOTOUCH caches can also show up as freed 15980Sstevel@tonic-gate * memory in other caches. This can be a little confusing, so we 15990Sstevel@tonic-gate * don't walk NOTOUCH caches when walking all caches (thereby assuring 16000Sstevel@tonic-gate * that "::walk kmem" and "::walk freemem" yield disjoint output). 
16010Sstevel@tonic-gate */ 16020Sstevel@tonic-gate if (c->cache_cflags & KMC_NOTOUCH) 16030Sstevel@tonic-gate return (WALK_NEXT); 16040Sstevel@tonic-gate 16050Sstevel@tonic-gate if (mdb_pwalk(wsp->walk_data, wsp->walk_callback, 16060Sstevel@tonic-gate wsp->walk_cbdata, addr) == -1) 16070Sstevel@tonic-gate return (WALK_DONE); 16080Sstevel@tonic-gate 16090Sstevel@tonic-gate return (WALK_NEXT); 16100Sstevel@tonic-gate } 16110Sstevel@tonic-gate 16120Sstevel@tonic-gate #define KMEM_WALK_ALL(name, wsp) { \ 16130Sstevel@tonic-gate wsp->walk_data = (name); \ 16140Sstevel@tonic-gate if (mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_walk_all, wsp) == -1) \ 16150Sstevel@tonic-gate return (WALK_ERR); \ 16160Sstevel@tonic-gate return (WALK_DONE); \ 16170Sstevel@tonic-gate } 16180Sstevel@tonic-gate 16190Sstevel@tonic-gate int 16200Sstevel@tonic-gate kmem_walk_init(mdb_walk_state_t *wsp) 16210Sstevel@tonic-gate { 16220Sstevel@tonic-gate if (wsp->walk_arg != NULL) 16230Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)wsp->walk_arg; 16240Sstevel@tonic-gate 16250Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16260Sstevel@tonic-gate KMEM_WALK_ALL("kmem", wsp); 16270Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_ALLOCATED)); 16280Sstevel@tonic-gate } 16290Sstevel@tonic-gate 16300Sstevel@tonic-gate int 16310Sstevel@tonic-gate bufctl_walk_init(mdb_walk_state_t *wsp) 16320Sstevel@tonic-gate { 16330Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16340Sstevel@tonic-gate KMEM_WALK_ALL("bufctl", wsp); 16350Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_ALLOCATED | KM_BUFCTL)); 16360Sstevel@tonic-gate } 16370Sstevel@tonic-gate 16380Sstevel@tonic-gate int 16390Sstevel@tonic-gate freemem_walk_init(mdb_walk_state_t *wsp) 16400Sstevel@tonic-gate { 16410Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16420Sstevel@tonic-gate KMEM_WALK_ALL("freemem", wsp); 16430Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_FREE)); 16440Sstevel@tonic-gate } 16450Sstevel@tonic-gate 
16460Sstevel@tonic-gate int 16470Sstevel@tonic-gate freemem_constructed_walk_init(mdb_walk_state_t *wsp) 16480Sstevel@tonic-gate { 16490Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16500Sstevel@tonic-gate KMEM_WALK_ALL("freemem_constructed", wsp); 16510Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_FREE | KM_CONSTRUCTED)); 16520Sstevel@tonic-gate } 16530Sstevel@tonic-gate 16540Sstevel@tonic-gate int 16550Sstevel@tonic-gate freectl_walk_init(mdb_walk_state_t *wsp) 16560Sstevel@tonic-gate { 16570Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16580Sstevel@tonic-gate KMEM_WALK_ALL("freectl", wsp); 16590Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_FREE | KM_BUFCTL)); 16600Sstevel@tonic-gate } 16610Sstevel@tonic-gate 16620Sstevel@tonic-gate int 16630Sstevel@tonic-gate freectl_constructed_walk_init(mdb_walk_state_t *wsp) 16640Sstevel@tonic-gate { 16650Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16660Sstevel@tonic-gate KMEM_WALK_ALL("freectl_constructed", wsp); 16670Sstevel@tonic-gate return (kmem_walk_init_common(wsp, 16680Sstevel@tonic-gate KM_FREE | KM_BUFCTL | KM_CONSTRUCTED)); 16690Sstevel@tonic-gate } 16700Sstevel@tonic-gate 16710Sstevel@tonic-gate typedef struct bufctl_history_walk { 16720Sstevel@tonic-gate void *bhw_next; 16730Sstevel@tonic-gate kmem_cache_t *bhw_cache; 16740Sstevel@tonic-gate kmem_slab_t *bhw_slab; 16750Sstevel@tonic-gate hrtime_t bhw_timestamp; 16760Sstevel@tonic-gate } bufctl_history_walk_t; 16770Sstevel@tonic-gate 16780Sstevel@tonic-gate int 16790Sstevel@tonic-gate bufctl_history_walk_init(mdb_walk_state_t *wsp) 16800Sstevel@tonic-gate { 16810Sstevel@tonic-gate bufctl_history_walk_t *bhw; 16820Sstevel@tonic-gate kmem_bufctl_audit_t bc; 16830Sstevel@tonic-gate kmem_bufctl_audit_t bcn; 16840Sstevel@tonic-gate 16850Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 16860Sstevel@tonic-gate mdb_warn("bufctl_history walk doesn't support global walks\n"); 16870Sstevel@tonic-gate return (WALK_ERR); 16880Sstevel@tonic-gate } 
16890Sstevel@tonic-gate 16900Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), wsp->walk_addr) == -1) { 16910Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", wsp->walk_addr); 16920Sstevel@tonic-gate return (WALK_ERR); 16930Sstevel@tonic-gate } 16940Sstevel@tonic-gate 16950Sstevel@tonic-gate bhw = mdb_zalloc(sizeof (*bhw), UM_SLEEP); 16960Sstevel@tonic-gate bhw->bhw_timestamp = 0; 16970Sstevel@tonic-gate bhw->bhw_cache = bc.bc_cache; 16980Sstevel@tonic-gate bhw->bhw_slab = bc.bc_slab; 16990Sstevel@tonic-gate 17000Sstevel@tonic-gate /* 17010Sstevel@tonic-gate * sometimes the first log entry matches the base bufctl; in that 17020Sstevel@tonic-gate * case, skip the base bufctl. 17030Sstevel@tonic-gate */ 17040Sstevel@tonic-gate if (bc.bc_lastlog != NULL && 17050Sstevel@tonic-gate mdb_vread(&bcn, sizeof (bcn), (uintptr_t)bc.bc_lastlog) != -1 && 17060Sstevel@tonic-gate bc.bc_addr == bcn.bc_addr && 17070Sstevel@tonic-gate bc.bc_cache == bcn.bc_cache && 17080Sstevel@tonic-gate bc.bc_slab == bcn.bc_slab && 17090Sstevel@tonic-gate bc.bc_timestamp == bcn.bc_timestamp && 17100Sstevel@tonic-gate bc.bc_thread == bcn.bc_thread) 17110Sstevel@tonic-gate bhw->bhw_next = bc.bc_lastlog; 17120Sstevel@tonic-gate else 17130Sstevel@tonic-gate bhw->bhw_next = (void *)wsp->walk_addr; 17140Sstevel@tonic-gate 17150Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)bc.bc_addr; 17160Sstevel@tonic-gate wsp->walk_data = bhw; 17170Sstevel@tonic-gate 17180Sstevel@tonic-gate return (WALK_NEXT); 17190Sstevel@tonic-gate } 17200Sstevel@tonic-gate 17210Sstevel@tonic-gate int 17220Sstevel@tonic-gate bufctl_history_walk_step(mdb_walk_state_t *wsp) 17230Sstevel@tonic-gate { 17240Sstevel@tonic-gate bufctl_history_walk_t *bhw = wsp->walk_data; 17250Sstevel@tonic-gate uintptr_t addr = (uintptr_t)bhw->bhw_next; 17260Sstevel@tonic-gate uintptr_t baseaddr = wsp->walk_addr; 17270Sstevel@tonic-gate kmem_bufctl_audit_t bc; 17280Sstevel@tonic-gate 17290Sstevel@tonic-gate if (addr == NULL) 
17300Sstevel@tonic-gate return (WALK_DONE); 17310Sstevel@tonic-gate 17320Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), addr) == -1) { 17330Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", bhw->bhw_next); 17340Sstevel@tonic-gate return (WALK_ERR); 17350Sstevel@tonic-gate } 17360Sstevel@tonic-gate 17370Sstevel@tonic-gate /* 17380Sstevel@tonic-gate * The bufctl is only valid if the address, cache, and slab are 17390Sstevel@tonic-gate * correct. We also check that the timestamp is decreasing, to 17400Sstevel@tonic-gate * prevent infinite loops. 17410Sstevel@tonic-gate */ 17420Sstevel@tonic-gate if ((uintptr_t)bc.bc_addr != baseaddr || 17430Sstevel@tonic-gate bc.bc_cache != bhw->bhw_cache || 17440Sstevel@tonic-gate bc.bc_slab != bhw->bhw_slab || 17450Sstevel@tonic-gate (bhw->bhw_timestamp != 0 && bc.bc_timestamp >= bhw->bhw_timestamp)) 17460Sstevel@tonic-gate return (WALK_DONE); 17470Sstevel@tonic-gate 17480Sstevel@tonic-gate bhw->bhw_next = bc.bc_lastlog; 17490Sstevel@tonic-gate bhw->bhw_timestamp = bc.bc_timestamp; 17500Sstevel@tonic-gate 17510Sstevel@tonic-gate return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata)); 17520Sstevel@tonic-gate } 17530Sstevel@tonic-gate 17540Sstevel@tonic-gate void 17550Sstevel@tonic-gate bufctl_history_walk_fini(mdb_walk_state_t *wsp) 17560Sstevel@tonic-gate { 17570Sstevel@tonic-gate bufctl_history_walk_t *bhw = wsp->walk_data; 17580Sstevel@tonic-gate 17590Sstevel@tonic-gate mdb_free(bhw, sizeof (*bhw)); 17600Sstevel@tonic-gate } 17610Sstevel@tonic-gate 17620Sstevel@tonic-gate typedef struct kmem_log_walk { 17630Sstevel@tonic-gate kmem_bufctl_audit_t *klw_base; 17640Sstevel@tonic-gate kmem_bufctl_audit_t **klw_sorted; 17650Sstevel@tonic-gate kmem_log_header_t klw_lh; 17660Sstevel@tonic-gate size_t klw_size; 17670Sstevel@tonic-gate size_t klw_maxndx; 17680Sstevel@tonic-gate size_t klw_ndx; 17690Sstevel@tonic-gate } kmem_log_walk_t; 17700Sstevel@tonic-gate 17710Sstevel@tonic-gate int 17720Sstevel@tonic-gate 
kmem_log_walk_init(mdb_walk_state_t *wsp) 17730Sstevel@tonic-gate { 17740Sstevel@tonic-gate uintptr_t lp = wsp->walk_addr; 17750Sstevel@tonic-gate kmem_log_walk_t *klw; 17760Sstevel@tonic-gate kmem_log_header_t *lhp; 17770Sstevel@tonic-gate int maxndx, i, j, k; 17780Sstevel@tonic-gate 17790Sstevel@tonic-gate /* 17800Sstevel@tonic-gate * By default (global walk), walk the kmem_transaction_log. Otherwise 17810Sstevel@tonic-gate * read the log whose kmem_log_header_t is stored at walk_addr. 17820Sstevel@tonic-gate */ 17830Sstevel@tonic-gate if (lp == NULL && mdb_readvar(&lp, "kmem_transaction_log") == -1) { 17840Sstevel@tonic-gate mdb_warn("failed to read 'kmem_transaction_log'"); 17850Sstevel@tonic-gate return (WALK_ERR); 17860Sstevel@tonic-gate } 17870Sstevel@tonic-gate 17880Sstevel@tonic-gate if (lp == NULL) { 17890Sstevel@tonic-gate mdb_warn("log is disabled\n"); 17900Sstevel@tonic-gate return (WALK_ERR); 17910Sstevel@tonic-gate } 17920Sstevel@tonic-gate 17930Sstevel@tonic-gate klw = mdb_zalloc(sizeof (kmem_log_walk_t), UM_SLEEP); 17940Sstevel@tonic-gate lhp = &klw->klw_lh; 17950Sstevel@tonic-gate 17960Sstevel@tonic-gate if (mdb_vread(lhp, sizeof (kmem_log_header_t), lp) == -1) { 17970Sstevel@tonic-gate mdb_warn("failed to read log header at %p", lp); 17980Sstevel@tonic-gate mdb_free(klw, sizeof (kmem_log_walk_t)); 17990Sstevel@tonic-gate return (WALK_ERR); 18000Sstevel@tonic-gate } 18010Sstevel@tonic-gate 18020Sstevel@tonic-gate klw->klw_size = lhp->lh_chunksize * lhp->lh_nchunks; 18030Sstevel@tonic-gate klw->klw_base = mdb_alloc(klw->klw_size, UM_SLEEP); 18040Sstevel@tonic-gate maxndx = lhp->lh_chunksize / sizeof (kmem_bufctl_audit_t) - 1; 18050Sstevel@tonic-gate 18060Sstevel@tonic-gate if (mdb_vread(klw->klw_base, klw->klw_size, 18070Sstevel@tonic-gate (uintptr_t)lhp->lh_base) == -1) { 18080Sstevel@tonic-gate mdb_warn("failed to read log at base %p", lhp->lh_base); 18090Sstevel@tonic-gate mdb_free(klw->klw_base, klw->klw_size); 18100Sstevel@tonic-gate 
mdb_free(klw, sizeof (kmem_log_walk_t)); 18110Sstevel@tonic-gate return (WALK_ERR); 18120Sstevel@tonic-gate } 18130Sstevel@tonic-gate 18140Sstevel@tonic-gate klw->klw_sorted = mdb_alloc(maxndx * lhp->lh_nchunks * 18150Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t *), UM_SLEEP); 18160Sstevel@tonic-gate 18170Sstevel@tonic-gate for (i = 0, k = 0; i < lhp->lh_nchunks; i++) { 18180Sstevel@tonic-gate kmem_bufctl_audit_t *chunk = (kmem_bufctl_audit_t *) 18190Sstevel@tonic-gate ((uintptr_t)klw->klw_base + i * lhp->lh_chunksize); 18200Sstevel@tonic-gate 18210Sstevel@tonic-gate for (j = 0; j < maxndx; j++) 18220Sstevel@tonic-gate klw->klw_sorted[k++] = &chunk[j]; 18230Sstevel@tonic-gate } 18240Sstevel@tonic-gate 18250Sstevel@tonic-gate qsort(klw->klw_sorted, k, sizeof (kmem_bufctl_audit_t *), 18260Sstevel@tonic-gate (int(*)(const void *, const void *))bufctlcmp); 18270Sstevel@tonic-gate 18280Sstevel@tonic-gate klw->klw_maxndx = k; 18290Sstevel@tonic-gate wsp->walk_data = klw; 18300Sstevel@tonic-gate 18310Sstevel@tonic-gate return (WALK_NEXT); 18320Sstevel@tonic-gate } 18330Sstevel@tonic-gate 18340Sstevel@tonic-gate int 18350Sstevel@tonic-gate kmem_log_walk_step(mdb_walk_state_t *wsp) 18360Sstevel@tonic-gate { 18370Sstevel@tonic-gate kmem_log_walk_t *klw = wsp->walk_data; 18380Sstevel@tonic-gate kmem_bufctl_audit_t *bcp; 18390Sstevel@tonic-gate 18400Sstevel@tonic-gate if (klw->klw_ndx == klw->klw_maxndx) 18410Sstevel@tonic-gate return (WALK_DONE); 18420Sstevel@tonic-gate 18430Sstevel@tonic-gate bcp = klw->klw_sorted[klw->klw_ndx++]; 18440Sstevel@tonic-gate 18450Sstevel@tonic-gate return (wsp->walk_callback((uintptr_t)bcp - (uintptr_t)klw->klw_base + 18460Sstevel@tonic-gate (uintptr_t)klw->klw_lh.lh_base, bcp, wsp->walk_cbdata)); 18470Sstevel@tonic-gate } 18480Sstevel@tonic-gate 18490Sstevel@tonic-gate void 18500Sstevel@tonic-gate kmem_log_walk_fini(mdb_walk_state_t *wsp) 18510Sstevel@tonic-gate { 18520Sstevel@tonic-gate kmem_log_walk_t *klw = wsp->walk_data; 
18530Sstevel@tonic-gate 18540Sstevel@tonic-gate mdb_free(klw->klw_base, klw->klw_size); 18550Sstevel@tonic-gate mdb_free(klw->klw_sorted, klw->klw_maxndx * 18560Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t *)); 18570Sstevel@tonic-gate mdb_free(klw, sizeof (kmem_log_walk_t)); 18580Sstevel@tonic-gate } 18590Sstevel@tonic-gate 18600Sstevel@tonic-gate typedef struct allocdby_bufctl { 18610Sstevel@tonic-gate uintptr_t abb_addr; 18620Sstevel@tonic-gate hrtime_t abb_ts; 18630Sstevel@tonic-gate } allocdby_bufctl_t; 18640Sstevel@tonic-gate 18650Sstevel@tonic-gate typedef struct allocdby_walk { 18660Sstevel@tonic-gate const char *abw_walk; 18670Sstevel@tonic-gate uintptr_t abw_thread; 18680Sstevel@tonic-gate size_t abw_nbufs; 18690Sstevel@tonic-gate size_t abw_size; 18700Sstevel@tonic-gate allocdby_bufctl_t *abw_buf; 18710Sstevel@tonic-gate size_t abw_ndx; 18720Sstevel@tonic-gate } allocdby_walk_t; 18730Sstevel@tonic-gate 18740Sstevel@tonic-gate int 18750Sstevel@tonic-gate allocdby_walk_bufctl(uintptr_t addr, const kmem_bufctl_audit_t *bcp, 18760Sstevel@tonic-gate allocdby_walk_t *abw) 18770Sstevel@tonic-gate { 18780Sstevel@tonic-gate if ((uintptr_t)bcp->bc_thread != abw->abw_thread) 18790Sstevel@tonic-gate return (WALK_NEXT); 18800Sstevel@tonic-gate 18810Sstevel@tonic-gate if (abw->abw_nbufs == abw->abw_size) { 18820Sstevel@tonic-gate allocdby_bufctl_t *buf; 18830Sstevel@tonic-gate size_t oldsize = sizeof (allocdby_bufctl_t) * abw->abw_size; 18840Sstevel@tonic-gate 18850Sstevel@tonic-gate buf = mdb_zalloc(oldsize << 1, UM_SLEEP); 18860Sstevel@tonic-gate 18870Sstevel@tonic-gate bcopy(abw->abw_buf, buf, oldsize); 18880Sstevel@tonic-gate mdb_free(abw->abw_buf, oldsize); 18890Sstevel@tonic-gate 18900Sstevel@tonic-gate abw->abw_size <<= 1; 18910Sstevel@tonic-gate abw->abw_buf = buf; 18920Sstevel@tonic-gate } 18930Sstevel@tonic-gate 18940Sstevel@tonic-gate abw->abw_buf[abw->abw_nbufs].abb_addr = addr; 18950Sstevel@tonic-gate abw->abw_buf[abw->abw_nbufs].abb_ts = 
bcp->bc_timestamp; 18960Sstevel@tonic-gate abw->abw_nbufs++; 18970Sstevel@tonic-gate 18980Sstevel@tonic-gate return (WALK_NEXT); 18990Sstevel@tonic-gate } 19000Sstevel@tonic-gate 19010Sstevel@tonic-gate /*ARGSUSED*/ 19020Sstevel@tonic-gate int 19030Sstevel@tonic-gate allocdby_walk_cache(uintptr_t addr, const kmem_cache_t *c, allocdby_walk_t *abw) 19040Sstevel@tonic-gate { 19050Sstevel@tonic-gate if (mdb_pwalk(abw->abw_walk, (mdb_walk_cb_t)allocdby_walk_bufctl, 19060Sstevel@tonic-gate abw, addr) == -1) { 19070Sstevel@tonic-gate mdb_warn("couldn't walk bufctl for cache %p", addr); 19080Sstevel@tonic-gate return (WALK_DONE); 19090Sstevel@tonic-gate } 19100Sstevel@tonic-gate 19110Sstevel@tonic-gate return (WALK_NEXT); 19120Sstevel@tonic-gate } 19130Sstevel@tonic-gate 19140Sstevel@tonic-gate static int 19150Sstevel@tonic-gate allocdby_cmp(const allocdby_bufctl_t *lhs, const allocdby_bufctl_t *rhs) 19160Sstevel@tonic-gate { 19170Sstevel@tonic-gate if (lhs->abb_ts < rhs->abb_ts) 19180Sstevel@tonic-gate return (1); 19190Sstevel@tonic-gate if (lhs->abb_ts > rhs->abb_ts) 19200Sstevel@tonic-gate return (-1); 19210Sstevel@tonic-gate return (0); 19220Sstevel@tonic-gate } 19230Sstevel@tonic-gate 19240Sstevel@tonic-gate static int 19250Sstevel@tonic-gate allocdby_walk_init_common(mdb_walk_state_t *wsp, const char *walk) 19260Sstevel@tonic-gate { 19270Sstevel@tonic-gate allocdby_walk_t *abw; 19280Sstevel@tonic-gate 19290Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 19300Sstevel@tonic-gate mdb_warn("allocdby walk doesn't support global walks\n"); 19310Sstevel@tonic-gate return (WALK_ERR); 19320Sstevel@tonic-gate } 19330Sstevel@tonic-gate 19340Sstevel@tonic-gate abw = mdb_zalloc(sizeof (allocdby_walk_t), UM_SLEEP); 19350Sstevel@tonic-gate 19360Sstevel@tonic-gate abw->abw_thread = wsp->walk_addr; 19370Sstevel@tonic-gate abw->abw_walk = walk; 19380Sstevel@tonic-gate abw->abw_size = 128; /* something reasonable */ 19390Sstevel@tonic-gate abw->abw_buf = 19400Sstevel@tonic-gate 
mdb_zalloc(abw->abw_size * sizeof (allocdby_bufctl_t), UM_SLEEP); 19410Sstevel@tonic-gate 19420Sstevel@tonic-gate wsp->walk_data = abw; 19430Sstevel@tonic-gate 19440Sstevel@tonic-gate if (mdb_walk("kmem_cache", 19450Sstevel@tonic-gate (mdb_walk_cb_t)allocdby_walk_cache, abw) == -1) { 19460Sstevel@tonic-gate mdb_warn("couldn't walk kmem_cache"); 19470Sstevel@tonic-gate allocdby_walk_fini(wsp); 19480Sstevel@tonic-gate return (WALK_ERR); 19490Sstevel@tonic-gate } 19500Sstevel@tonic-gate 19510Sstevel@tonic-gate qsort(abw->abw_buf, abw->abw_nbufs, sizeof (allocdby_bufctl_t), 19520Sstevel@tonic-gate (int(*)(const void *, const void *))allocdby_cmp); 19530Sstevel@tonic-gate 19540Sstevel@tonic-gate return (WALK_NEXT); 19550Sstevel@tonic-gate } 19560Sstevel@tonic-gate 19570Sstevel@tonic-gate int 19580Sstevel@tonic-gate allocdby_walk_init(mdb_walk_state_t *wsp) 19590Sstevel@tonic-gate { 19600Sstevel@tonic-gate return (allocdby_walk_init_common(wsp, "bufctl")); 19610Sstevel@tonic-gate } 19620Sstevel@tonic-gate 19630Sstevel@tonic-gate int 19640Sstevel@tonic-gate freedby_walk_init(mdb_walk_state_t *wsp) 19650Sstevel@tonic-gate { 19660Sstevel@tonic-gate return (allocdby_walk_init_common(wsp, "freectl")); 19670Sstevel@tonic-gate } 19680Sstevel@tonic-gate 19690Sstevel@tonic-gate int 19700Sstevel@tonic-gate allocdby_walk_step(mdb_walk_state_t *wsp) 19710Sstevel@tonic-gate { 19720Sstevel@tonic-gate allocdby_walk_t *abw = wsp->walk_data; 19730Sstevel@tonic-gate kmem_bufctl_audit_t bc; 19740Sstevel@tonic-gate uintptr_t addr; 19750Sstevel@tonic-gate 19760Sstevel@tonic-gate if (abw->abw_ndx == abw->abw_nbufs) 19770Sstevel@tonic-gate return (WALK_DONE); 19780Sstevel@tonic-gate 19790Sstevel@tonic-gate addr = abw->abw_buf[abw->abw_ndx++].abb_addr; 19800Sstevel@tonic-gate 19810Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), addr) == -1) { 19820Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr); 19830Sstevel@tonic-gate return (WALK_DONE); 19840Sstevel@tonic-gate } 
19850Sstevel@tonic-gate 19860Sstevel@tonic-gate return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata)); 19870Sstevel@tonic-gate } 19880Sstevel@tonic-gate 19890Sstevel@tonic-gate void 19900Sstevel@tonic-gate allocdby_walk_fini(mdb_walk_state_t *wsp) 19910Sstevel@tonic-gate { 19920Sstevel@tonic-gate allocdby_walk_t *abw = wsp->walk_data; 19930Sstevel@tonic-gate 19940Sstevel@tonic-gate mdb_free(abw->abw_buf, sizeof (allocdby_bufctl_t) * abw->abw_size); 19950Sstevel@tonic-gate mdb_free(abw, sizeof (allocdby_walk_t)); 19960Sstevel@tonic-gate } 19970Sstevel@tonic-gate 19980Sstevel@tonic-gate /*ARGSUSED*/ 19990Sstevel@tonic-gate int 20000Sstevel@tonic-gate allocdby_walk(uintptr_t addr, const kmem_bufctl_audit_t *bcp, void *ignored) 20010Sstevel@tonic-gate { 20020Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 20030Sstevel@tonic-gate GElf_Sym sym; 20040Sstevel@tonic-gate int i; 20050Sstevel@tonic-gate 20060Sstevel@tonic-gate mdb_printf("%0?p %12llx ", addr, bcp->bc_timestamp); 20070Sstevel@tonic-gate for (i = 0; i < bcp->bc_depth; i++) { 20080Sstevel@tonic-gate if (mdb_lookup_by_addr(bcp->bc_stack[i], 20090Sstevel@tonic-gate MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1) 20100Sstevel@tonic-gate continue; 20110Sstevel@tonic-gate if (strncmp(c, "kmem_", 5) == 0) 20120Sstevel@tonic-gate continue; 20130Sstevel@tonic-gate mdb_printf("%s+0x%lx", 20140Sstevel@tonic-gate c, bcp->bc_stack[i] - (uintptr_t)sym.st_value); 20150Sstevel@tonic-gate break; 20160Sstevel@tonic-gate } 20170Sstevel@tonic-gate mdb_printf("\n"); 20180Sstevel@tonic-gate 20190Sstevel@tonic-gate return (WALK_NEXT); 20200Sstevel@tonic-gate } 20210Sstevel@tonic-gate 20220Sstevel@tonic-gate static int 20230Sstevel@tonic-gate allocdby_common(uintptr_t addr, uint_t flags, const char *w) 20240Sstevel@tonic-gate { 20250Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 20260Sstevel@tonic-gate return (DCMD_USAGE); 20270Sstevel@tonic-gate 20280Sstevel@tonic-gate mdb_printf("%-?s %12s %s\n", "BUFCTL", "TIMESTAMP", "CALLER"); 
20290Sstevel@tonic-gate 20300Sstevel@tonic-gate if (mdb_pwalk(w, (mdb_walk_cb_t)allocdby_walk, NULL, addr) == -1) { 20310Sstevel@tonic-gate mdb_warn("can't walk '%s' for %p", w, addr); 20320Sstevel@tonic-gate return (DCMD_ERR); 20330Sstevel@tonic-gate } 20340Sstevel@tonic-gate 20350Sstevel@tonic-gate return (DCMD_OK); 20360Sstevel@tonic-gate } 20370Sstevel@tonic-gate 20380Sstevel@tonic-gate /*ARGSUSED*/ 20390Sstevel@tonic-gate int 20400Sstevel@tonic-gate allocdby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 20410Sstevel@tonic-gate { 20420Sstevel@tonic-gate return (allocdby_common(addr, flags, "allocdby")); 20430Sstevel@tonic-gate } 20440Sstevel@tonic-gate 20450Sstevel@tonic-gate /*ARGSUSED*/ 20460Sstevel@tonic-gate int 20470Sstevel@tonic-gate freedby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 20480Sstevel@tonic-gate { 20490Sstevel@tonic-gate return (allocdby_common(addr, flags, "freedby")); 20500Sstevel@tonic-gate } 20510Sstevel@tonic-gate 20520Sstevel@tonic-gate /* 20530Sstevel@tonic-gate * Return a string describing the address in relation to the given thread's 20540Sstevel@tonic-gate * stack. 20550Sstevel@tonic-gate * 20560Sstevel@tonic-gate * - If the thread state is TS_FREE, return " (inactive interrupt thread)". 20570Sstevel@tonic-gate * 20580Sstevel@tonic-gate * - If the address is above the stack pointer, return an empty string 20590Sstevel@tonic-gate * signifying that the address is active. 20600Sstevel@tonic-gate * 20610Sstevel@tonic-gate * - If the address is below the stack pointer, and the thread is not on proc, 20620Sstevel@tonic-gate * return " (below sp)". 20630Sstevel@tonic-gate * 20640Sstevel@tonic-gate * - If the address is below the stack pointer, and the thread is on proc, 20650Sstevel@tonic-gate * return " (possibly below sp)". Depending on context, we may or may not 20660Sstevel@tonic-gate * have an accurate t_sp. 
20670Sstevel@tonic-gate */ 20680Sstevel@tonic-gate static const char * 20690Sstevel@tonic-gate stack_active(const kthread_t *t, uintptr_t addr) 20700Sstevel@tonic-gate { 20710Sstevel@tonic-gate uintptr_t panicstk; 20720Sstevel@tonic-gate GElf_Sym sym; 20730Sstevel@tonic-gate 20740Sstevel@tonic-gate if (t->t_state == TS_FREE) 20750Sstevel@tonic-gate return (" (inactive interrupt thread)"); 20760Sstevel@tonic-gate 20770Sstevel@tonic-gate /* 20780Sstevel@tonic-gate * Check to see if we're on the panic stack. If so, ignore t_sp, as it 20790Sstevel@tonic-gate * no longer relates to the thread's real stack. 20800Sstevel@tonic-gate */ 20810Sstevel@tonic-gate if (mdb_lookup_by_name("panic_stack", &sym) == 0) { 20820Sstevel@tonic-gate panicstk = (uintptr_t)sym.st_value; 20830Sstevel@tonic-gate 20840Sstevel@tonic-gate if (t->t_sp >= panicstk && t->t_sp < panicstk + PANICSTKSIZE) 20850Sstevel@tonic-gate return (""); 20860Sstevel@tonic-gate } 20870Sstevel@tonic-gate 20880Sstevel@tonic-gate if (addr >= t->t_sp + STACK_BIAS) 20890Sstevel@tonic-gate return (""); 20900Sstevel@tonic-gate 20910Sstevel@tonic-gate if (t->t_state == TS_ONPROC) 20920Sstevel@tonic-gate return (" (possibly below sp)"); 20930Sstevel@tonic-gate 20940Sstevel@tonic-gate return (" (below sp)"); 20950Sstevel@tonic-gate } 20960Sstevel@tonic-gate 20970Sstevel@tonic-gate typedef struct whatis { 20980Sstevel@tonic-gate uintptr_t w_addr; 20990Sstevel@tonic-gate const kmem_cache_t *w_cache; 21000Sstevel@tonic-gate const vmem_t *w_vmem; 21010Sstevel@tonic-gate size_t w_slab_align; 21020Sstevel@tonic-gate int w_slab_found; 21030Sstevel@tonic-gate int w_found; 21040Sstevel@tonic-gate int w_kmem_lite_count; 21050Sstevel@tonic-gate uint_t w_verbose; 21060Sstevel@tonic-gate uint_t w_freemem; 21070Sstevel@tonic-gate uint_t w_all; 21080Sstevel@tonic-gate uint_t w_bufctl; 21090Sstevel@tonic-gate uint_t w_idspace; 21100Sstevel@tonic-gate } whatis_t; 21110Sstevel@tonic-gate 21120Sstevel@tonic-gate static void 
21130Sstevel@tonic-gate whatis_print_kmem(uintptr_t addr, uintptr_t baddr, whatis_t *w) 21140Sstevel@tonic-gate { 21150Sstevel@tonic-gate /* LINTED pointer cast may result in improper alignment */ 21160Sstevel@tonic-gate uintptr_t btaddr = (uintptr_t)KMEM_BUFTAG(w->w_cache, addr); 21170Sstevel@tonic-gate intptr_t stat; 21180Sstevel@tonic-gate int count = 0; 21190Sstevel@tonic-gate int i; 21200Sstevel@tonic-gate pc_t callers[16]; 21210Sstevel@tonic-gate 21220Sstevel@tonic-gate if (w->w_cache->cache_flags & KMF_REDZONE) { 21230Sstevel@tonic-gate kmem_buftag_t bt; 21240Sstevel@tonic-gate 21250Sstevel@tonic-gate if (mdb_vread(&bt, sizeof (bt), btaddr) == -1) 21260Sstevel@tonic-gate goto done; 21270Sstevel@tonic-gate 21280Sstevel@tonic-gate stat = (intptr_t)bt.bt_bufctl ^ bt.bt_bxstat; 21290Sstevel@tonic-gate 21300Sstevel@tonic-gate if (stat != KMEM_BUFTAG_ALLOC && stat != KMEM_BUFTAG_FREE) 21310Sstevel@tonic-gate goto done; 21320Sstevel@tonic-gate 21330Sstevel@tonic-gate /* 21340Sstevel@tonic-gate * provide the bufctl ptr if it has useful information 21350Sstevel@tonic-gate */ 21360Sstevel@tonic-gate if (baddr == 0 && (w->w_cache->cache_flags & KMF_AUDIT)) 21370Sstevel@tonic-gate baddr = (uintptr_t)bt.bt_bufctl; 21380Sstevel@tonic-gate 21390Sstevel@tonic-gate if (w->w_cache->cache_flags & KMF_LITE) { 21400Sstevel@tonic-gate count = w->w_kmem_lite_count; 21410Sstevel@tonic-gate 21420Sstevel@tonic-gate if (count * sizeof (pc_t) > sizeof (callers)) 21430Sstevel@tonic-gate count = 0; 21440Sstevel@tonic-gate 21450Sstevel@tonic-gate if (count > 0 && 21460Sstevel@tonic-gate mdb_vread(callers, count * sizeof (pc_t), 21470Sstevel@tonic-gate btaddr + 21480Sstevel@tonic-gate offsetof(kmem_buftag_lite_t, bt_history)) == -1) 21490Sstevel@tonic-gate count = 0; 21500Sstevel@tonic-gate 21510Sstevel@tonic-gate /* 21520Sstevel@tonic-gate * skip unused callers 21530Sstevel@tonic-gate */ 21540Sstevel@tonic-gate while (count > 0 && callers[count - 1] == 21550Sstevel@tonic-gate 
(pc_t)KMEM_UNINITIALIZED_PATTERN) 21560Sstevel@tonic-gate count--; 21570Sstevel@tonic-gate } 21580Sstevel@tonic-gate } 21590Sstevel@tonic-gate 21600Sstevel@tonic-gate done: 21610Sstevel@tonic-gate if (baddr == 0) 21620Sstevel@tonic-gate mdb_printf("%p is %p+%p, %s from %s\n", 21630Sstevel@tonic-gate w->w_addr, addr, w->w_addr - addr, 21640Sstevel@tonic-gate w->w_freemem == FALSE ? "allocated" : "freed", 21650Sstevel@tonic-gate w->w_cache->cache_name); 21660Sstevel@tonic-gate else 21670Sstevel@tonic-gate mdb_printf("%p is %p+%p, bufctl %p %s from %s\n", 21680Sstevel@tonic-gate w->w_addr, addr, w->w_addr - addr, baddr, 21690Sstevel@tonic-gate w->w_freemem == FALSE ? "allocated" : "freed", 21700Sstevel@tonic-gate w->w_cache->cache_name); 21710Sstevel@tonic-gate 21720Sstevel@tonic-gate if (count > 0) { 21730Sstevel@tonic-gate mdb_inc_indent(8); 21740Sstevel@tonic-gate mdb_printf("recent caller%s: %a%s", (count != 1)? "s":"", 21750Sstevel@tonic-gate callers[0], (count != 1)? ", ":"\n"); 21760Sstevel@tonic-gate for (i = 1; i < count; i++) 21770Sstevel@tonic-gate mdb_printf("%a%s", callers[i], 21780Sstevel@tonic-gate (i + 1 < count)? ", ":"\n"); 21790Sstevel@tonic-gate mdb_dec_indent(8); 21800Sstevel@tonic-gate } 21810Sstevel@tonic-gate } 21820Sstevel@tonic-gate 21830Sstevel@tonic-gate /*ARGSUSED*/ 21840Sstevel@tonic-gate static int 21850Sstevel@tonic-gate whatis_walk_kmem(uintptr_t addr, void *ignored, whatis_t *w) 21860Sstevel@tonic-gate { 21870Sstevel@tonic-gate if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize) 21880Sstevel@tonic-gate return (WALK_NEXT); 21890Sstevel@tonic-gate 21900Sstevel@tonic-gate whatis_print_kmem(addr, 0, w); 21910Sstevel@tonic-gate w->w_found++; 21920Sstevel@tonic-gate return (w->w_all == TRUE ? 
WALK_NEXT : WALK_DONE); 21930Sstevel@tonic-gate } 21940Sstevel@tonic-gate 21950Sstevel@tonic-gate static int 21960Sstevel@tonic-gate whatis_walk_seg(uintptr_t addr, const vmem_seg_t *vs, whatis_t *w) 21970Sstevel@tonic-gate { 21980Sstevel@tonic-gate if (w->w_addr < vs->vs_start || w->w_addr >= vs->vs_end) 21990Sstevel@tonic-gate return (WALK_NEXT); 22000Sstevel@tonic-gate 22010Sstevel@tonic-gate mdb_printf("%p is %p+%p ", w->w_addr, 22020Sstevel@tonic-gate vs->vs_start, w->w_addr - vs->vs_start); 22030Sstevel@tonic-gate 22040Sstevel@tonic-gate /* 22050Sstevel@tonic-gate * Always provide the vmem_seg pointer if it has a stack trace. 22060Sstevel@tonic-gate */ 22070Sstevel@tonic-gate if (w->w_bufctl == TRUE || 22080Sstevel@tonic-gate (vs->vs_type == VMEM_ALLOC && vs->vs_depth != 0)) { 22090Sstevel@tonic-gate mdb_printf("(vmem_seg %p) ", addr); 22100Sstevel@tonic-gate } 22110Sstevel@tonic-gate 22120Sstevel@tonic-gate mdb_printf("%sfrom %s vmem arena\n", w->w_freemem == TRUE ? 22130Sstevel@tonic-gate "freed " : "", w->w_vmem->vm_name); 22140Sstevel@tonic-gate 22150Sstevel@tonic-gate w->w_found++; 22160Sstevel@tonic-gate return (w->w_all == TRUE ? 
WALK_NEXT : WALK_DONE); 22170Sstevel@tonic-gate } 22180Sstevel@tonic-gate 22190Sstevel@tonic-gate static int 22200Sstevel@tonic-gate whatis_walk_vmem(uintptr_t addr, const vmem_t *vmem, whatis_t *w) 22210Sstevel@tonic-gate { 22220Sstevel@tonic-gate const char *nm = vmem->vm_name; 22230Sstevel@tonic-gate w->w_vmem = vmem; 22240Sstevel@tonic-gate w->w_freemem = FALSE; 22250Sstevel@tonic-gate 22260Sstevel@tonic-gate if (((vmem->vm_cflags & VMC_IDENTIFIER) != 0) ^ w->w_idspace) 22270Sstevel@tonic-gate return (WALK_NEXT); 22280Sstevel@tonic-gate 22290Sstevel@tonic-gate if (w->w_verbose) 22300Sstevel@tonic-gate mdb_printf("Searching vmem arena %s...\n", nm); 22310Sstevel@tonic-gate 22320Sstevel@tonic-gate if (mdb_pwalk("vmem_alloc", 22330Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) { 22340Sstevel@tonic-gate mdb_warn("can't walk vmem seg for %p", addr); 22350Sstevel@tonic-gate return (WALK_NEXT); 22360Sstevel@tonic-gate } 22370Sstevel@tonic-gate 22380Sstevel@tonic-gate if (w->w_found && w->w_all == FALSE) 22390Sstevel@tonic-gate return (WALK_DONE); 22400Sstevel@tonic-gate 22410Sstevel@tonic-gate if (w->w_verbose) 22420Sstevel@tonic-gate mdb_printf("Searching vmem arena %s for free virtual...\n", nm); 22430Sstevel@tonic-gate 22440Sstevel@tonic-gate w->w_freemem = TRUE; 22450Sstevel@tonic-gate 22460Sstevel@tonic-gate if (mdb_pwalk("vmem_free", 22470Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) { 22480Sstevel@tonic-gate mdb_warn("can't walk vmem seg for %p", addr); 22490Sstevel@tonic-gate return (WALK_NEXT); 22500Sstevel@tonic-gate } 22510Sstevel@tonic-gate 22520Sstevel@tonic-gate return (w->w_found && w->w_all == FALSE ? 
WALK_DONE : WALK_NEXT); 22530Sstevel@tonic-gate } 22540Sstevel@tonic-gate 22550Sstevel@tonic-gate /*ARGSUSED*/ 22560Sstevel@tonic-gate static int 22570Sstevel@tonic-gate whatis_walk_bufctl(uintptr_t baddr, const kmem_bufctl_t *bcp, whatis_t *w) 22580Sstevel@tonic-gate { 22590Sstevel@tonic-gate uintptr_t addr; 22600Sstevel@tonic-gate 22610Sstevel@tonic-gate if (bcp == NULL) 22620Sstevel@tonic-gate return (WALK_NEXT); 22630Sstevel@tonic-gate 22640Sstevel@tonic-gate addr = (uintptr_t)bcp->bc_addr; 22650Sstevel@tonic-gate 22660Sstevel@tonic-gate if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize) 22670Sstevel@tonic-gate return (WALK_NEXT); 22680Sstevel@tonic-gate 22690Sstevel@tonic-gate whatis_print_kmem(addr, baddr, w); 22700Sstevel@tonic-gate w->w_found++; 22710Sstevel@tonic-gate return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE); 22720Sstevel@tonic-gate } 22730Sstevel@tonic-gate 22740Sstevel@tonic-gate /*ARGSUSED*/ 22750Sstevel@tonic-gate static int 22760Sstevel@tonic-gate whatis_walk_slab(uintptr_t saddr, const kmem_slab_t *sp, whatis_t *w) 22770Sstevel@tonic-gate { 22780Sstevel@tonic-gate uintptr_t base = P2ALIGN((uintptr_t)sp->slab_base, w->w_slab_align); 22790Sstevel@tonic-gate 22800Sstevel@tonic-gate if ((w->w_addr - base) >= w->w_cache->cache_slabsize) 22810Sstevel@tonic-gate return (WALK_NEXT); 22820Sstevel@tonic-gate 22830Sstevel@tonic-gate w->w_slab_found++; 22840Sstevel@tonic-gate return (WALK_DONE); 22850Sstevel@tonic-gate } 22860Sstevel@tonic-gate 22870Sstevel@tonic-gate static int 22880Sstevel@tonic-gate whatis_walk_cache(uintptr_t addr, const kmem_cache_t *c, whatis_t *w) 22890Sstevel@tonic-gate { 22900Sstevel@tonic-gate char *walk, *freewalk; 22910Sstevel@tonic-gate mdb_walk_cb_t func; 22920Sstevel@tonic-gate vmem_t *vmp = c->cache_arena; 22930Sstevel@tonic-gate 22940Sstevel@tonic-gate if (((c->cache_flags & VMC_IDENTIFIER) != 0) ^ w->w_idspace) 22950Sstevel@tonic-gate return (WALK_NEXT); 22960Sstevel@tonic-gate 
22970Sstevel@tonic-gate if (w->w_bufctl == FALSE) { 22980Sstevel@tonic-gate walk = "kmem"; 22990Sstevel@tonic-gate freewalk = "freemem"; 23000Sstevel@tonic-gate func = (mdb_walk_cb_t)whatis_walk_kmem; 23010Sstevel@tonic-gate } else { 23020Sstevel@tonic-gate walk = "bufctl"; 23030Sstevel@tonic-gate freewalk = "freectl"; 23040Sstevel@tonic-gate func = (mdb_walk_cb_t)whatis_walk_bufctl; 23050Sstevel@tonic-gate } 23060Sstevel@tonic-gate 23070Sstevel@tonic-gate w->w_cache = c; 23080Sstevel@tonic-gate 23090Sstevel@tonic-gate if (w->w_verbose) 23100Sstevel@tonic-gate mdb_printf("Searching %s's slabs...\n", c->cache_name); 23110Sstevel@tonic-gate 23120Sstevel@tonic-gate /* 23130Sstevel@tonic-gate * Verify that the address is in one of the cache's slabs. If not, 23140Sstevel@tonic-gate * we can skip the more expensive walkers. (this is purely a 23150Sstevel@tonic-gate * heuristic -- as long as there are no false-negatives, we'll be fine) 23160Sstevel@tonic-gate * 23170Sstevel@tonic-gate * We try to get the cache's arena's quantum, since to accurately 23180Sstevel@tonic-gate * get the base of a slab, you have to align it to the quantum. If 23190Sstevel@tonic-gate * it doesn't look sensible, we fall back to not aligning. 
23200Sstevel@tonic-gate */ 23210Sstevel@tonic-gate if (mdb_vread(&w->w_slab_align, sizeof (w->w_slab_align), 23220Sstevel@tonic-gate (uintptr_t)&vmp->vm_quantum) == -1) { 23230Sstevel@tonic-gate mdb_warn("unable to read %p->cache_arena->vm_quantum", c); 23240Sstevel@tonic-gate w->w_slab_align = 1; 23250Sstevel@tonic-gate } 23260Sstevel@tonic-gate 23270Sstevel@tonic-gate if ((c->cache_slabsize < w->w_slab_align) || w->w_slab_align == 0 || 23280Sstevel@tonic-gate (w->w_slab_align & (w->w_slab_align - 1))) { 23290Sstevel@tonic-gate mdb_warn("%p's arena has invalid quantum (0x%p)\n", c, 23300Sstevel@tonic-gate w->w_slab_align); 23310Sstevel@tonic-gate w->w_slab_align = 1; 23320Sstevel@tonic-gate } 23330Sstevel@tonic-gate 23340Sstevel@tonic-gate w->w_slab_found = 0; 23350Sstevel@tonic-gate if (mdb_pwalk("kmem_slab", (mdb_walk_cb_t)whatis_walk_slab, w, 23360Sstevel@tonic-gate addr) == -1) { 23370Sstevel@tonic-gate mdb_warn("can't find kmem_slab walker"); 23380Sstevel@tonic-gate return (WALK_DONE); 23390Sstevel@tonic-gate } 23400Sstevel@tonic-gate if (w->w_slab_found == 0) 23410Sstevel@tonic-gate return (WALK_NEXT); 23420Sstevel@tonic-gate 23430Sstevel@tonic-gate if (c->cache_flags & KMF_LITE) { 23440Sstevel@tonic-gate if (mdb_readvar(&w->w_kmem_lite_count, 23450Sstevel@tonic-gate "kmem_lite_count") == -1 || w->w_kmem_lite_count > 16) 23460Sstevel@tonic-gate w->w_kmem_lite_count = 0; 23470Sstevel@tonic-gate } 23480Sstevel@tonic-gate 23490Sstevel@tonic-gate if (w->w_verbose) 23500Sstevel@tonic-gate mdb_printf("Searching %s...\n", c->cache_name); 23510Sstevel@tonic-gate 23520Sstevel@tonic-gate w->w_freemem = FALSE; 23530Sstevel@tonic-gate 23540Sstevel@tonic-gate if (mdb_pwalk(walk, func, w, addr) == -1) { 23550Sstevel@tonic-gate mdb_warn("can't find %s walker", walk); 23560Sstevel@tonic-gate return (WALK_DONE); 23570Sstevel@tonic-gate } 23580Sstevel@tonic-gate 23590Sstevel@tonic-gate if (w->w_found && w->w_all == FALSE) 23600Sstevel@tonic-gate return (WALK_DONE); 
23610Sstevel@tonic-gate 23620Sstevel@tonic-gate /* 23630Sstevel@tonic-gate * We have searched for allocated memory; now search for freed memory. 23640Sstevel@tonic-gate */ 23650Sstevel@tonic-gate if (w->w_verbose) 23660Sstevel@tonic-gate mdb_printf("Searching %s for free memory...\n", c->cache_name); 23670Sstevel@tonic-gate 23680Sstevel@tonic-gate w->w_freemem = TRUE; 23690Sstevel@tonic-gate 23700Sstevel@tonic-gate if (mdb_pwalk(freewalk, func, w, addr) == -1) { 23710Sstevel@tonic-gate mdb_warn("can't find %s walker", freewalk); 23720Sstevel@tonic-gate return (WALK_DONE); 23730Sstevel@tonic-gate } 23740Sstevel@tonic-gate 23750Sstevel@tonic-gate return (w->w_found && w->w_all == FALSE ? WALK_DONE : WALK_NEXT); 23760Sstevel@tonic-gate } 23770Sstevel@tonic-gate 23780Sstevel@tonic-gate static int 23790Sstevel@tonic-gate whatis_walk_touch(uintptr_t addr, const kmem_cache_t *c, whatis_t *w) 23800Sstevel@tonic-gate { 23810Sstevel@tonic-gate if (c->cache_cflags & KMC_NOTOUCH) 23820Sstevel@tonic-gate return (WALK_NEXT); 23830Sstevel@tonic-gate 23840Sstevel@tonic-gate return (whatis_walk_cache(addr, c, w)); 23850Sstevel@tonic-gate } 23860Sstevel@tonic-gate 23870Sstevel@tonic-gate static int 23880Sstevel@tonic-gate whatis_walk_notouch(uintptr_t addr, const kmem_cache_t *c, whatis_t *w) 23890Sstevel@tonic-gate { 23900Sstevel@tonic-gate if (!(c->cache_cflags & KMC_NOTOUCH)) 23910Sstevel@tonic-gate return (WALK_NEXT); 23920Sstevel@tonic-gate 23930Sstevel@tonic-gate return (whatis_walk_cache(addr, c, w)); 23940Sstevel@tonic-gate } 23950Sstevel@tonic-gate 23960Sstevel@tonic-gate static int 23970Sstevel@tonic-gate whatis_walk_thread(uintptr_t addr, const kthread_t *t, whatis_t *w) 23980Sstevel@tonic-gate { 23990Sstevel@tonic-gate /* 24000Sstevel@tonic-gate * Often, one calls ::whatis on an address from a thread structure. 24010Sstevel@tonic-gate * We use this opportunity to short circuit this case... 
24020Sstevel@tonic-gate */ 24030Sstevel@tonic-gate if (w->w_addr >= addr && w->w_addr < addr + sizeof (kthread_t)) { 24040Sstevel@tonic-gate mdb_printf("%p is %p+%p, allocated as a thread structure\n", 24050Sstevel@tonic-gate w->w_addr, addr, w->w_addr - addr); 24060Sstevel@tonic-gate w->w_found++; 24070Sstevel@tonic-gate return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE); 24080Sstevel@tonic-gate } 24090Sstevel@tonic-gate 24100Sstevel@tonic-gate if (w->w_addr < (uintptr_t)t->t_stkbase || 24110Sstevel@tonic-gate w->w_addr > (uintptr_t)t->t_stk) 24120Sstevel@tonic-gate return (WALK_NEXT); 24130Sstevel@tonic-gate 24140Sstevel@tonic-gate if (t->t_stkbase == NULL) 24150Sstevel@tonic-gate return (WALK_NEXT); 24160Sstevel@tonic-gate 24170Sstevel@tonic-gate mdb_printf("%p is in thread %p's stack%s\n", w->w_addr, addr, 24180Sstevel@tonic-gate stack_active(t, w->w_addr)); 24190Sstevel@tonic-gate 24200Sstevel@tonic-gate w->w_found++; 24210Sstevel@tonic-gate return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE); 24220Sstevel@tonic-gate } 24230Sstevel@tonic-gate 24240Sstevel@tonic-gate static int 24250Sstevel@tonic-gate whatis_walk_modctl(uintptr_t addr, const struct modctl *m, whatis_t *w) 24260Sstevel@tonic-gate { 24270Sstevel@tonic-gate struct module mod; 24280Sstevel@tonic-gate char name[MODMAXNAMELEN], *where; 24290Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 24300Sstevel@tonic-gate Shdr shdr; 24310Sstevel@tonic-gate GElf_Sym sym; 24320Sstevel@tonic-gate 24330Sstevel@tonic-gate if (m->mod_mp == NULL) 24340Sstevel@tonic-gate return (WALK_NEXT); 24350Sstevel@tonic-gate 24360Sstevel@tonic-gate if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) { 24370Sstevel@tonic-gate mdb_warn("couldn't read modctl %p's module", addr); 24380Sstevel@tonic-gate return (WALK_NEXT); 24390Sstevel@tonic-gate } 24400Sstevel@tonic-gate 24410Sstevel@tonic-gate if (w->w_addr >= (uintptr_t)mod.text && 24420Sstevel@tonic-gate w->w_addr < (uintptr_t)mod.text + mod.text_size) { 
24430Sstevel@tonic-gate where = "text segment"; 24440Sstevel@tonic-gate goto found; 24450Sstevel@tonic-gate } 24460Sstevel@tonic-gate 24470Sstevel@tonic-gate if (w->w_addr >= (uintptr_t)mod.data && 24480Sstevel@tonic-gate w->w_addr < (uintptr_t)mod.data + mod.data_size) { 24490Sstevel@tonic-gate where = "data segment"; 24500Sstevel@tonic-gate goto found; 24510Sstevel@tonic-gate } 24520Sstevel@tonic-gate 24530Sstevel@tonic-gate if (w->w_addr >= (uintptr_t)mod.bss && 24540Sstevel@tonic-gate w->w_addr < (uintptr_t)mod.bss + mod.bss_size) { 24550Sstevel@tonic-gate where = "bss"; 24560Sstevel@tonic-gate goto found; 24570Sstevel@tonic-gate } 24580Sstevel@tonic-gate 24590Sstevel@tonic-gate if (mdb_vread(&shdr, sizeof (shdr), (uintptr_t)mod.symhdr) == -1) { 24600Sstevel@tonic-gate mdb_warn("couldn't read symbol header for %p's module", addr); 24610Sstevel@tonic-gate return (WALK_NEXT); 24620Sstevel@tonic-gate } 24630Sstevel@tonic-gate 24640Sstevel@tonic-gate if (w->w_addr >= (uintptr_t)mod.symtbl && w->w_addr < 24650Sstevel@tonic-gate (uintptr_t)mod.symtbl + (uintptr_t)mod.nsyms * shdr.sh_entsize) { 24660Sstevel@tonic-gate where = "symtab"; 24670Sstevel@tonic-gate goto found; 24680Sstevel@tonic-gate } 24690Sstevel@tonic-gate 24700Sstevel@tonic-gate if (w->w_addr >= (uintptr_t)mod.symspace && 24710Sstevel@tonic-gate w->w_addr < (uintptr_t)mod.symspace + (uintptr_t)mod.symsize) { 24720Sstevel@tonic-gate where = "symspace"; 24730Sstevel@tonic-gate goto found; 24740Sstevel@tonic-gate } 24750Sstevel@tonic-gate 24760Sstevel@tonic-gate return (WALK_NEXT); 24770Sstevel@tonic-gate 24780Sstevel@tonic-gate found: 24790Sstevel@tonic-gate if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1) 24800Sstevel@tonic-gate (void) mdb_snprintf(name, sizeof (name), "0x%p", addr); 24810Sstevel@tonic-gate 24820Sstevel@tonic-gate mdb_printf("%p is ", w->w_addr); 24830Sstevel@tonic-gate 24840Sstevel@tonic-gate /* 24850Sstevel@tonic-gate * If we found this address in a module, then 
there's a chance that 24860Sstevel@tonic-gate * it's actually a named symbol. Try the symbol lookup. 24870Sstevel@tonic-gate */ 24880Sstevel@tonic-gate if (mdb_lookup_by_addr(w->w_addr, MDB_SYM_FUZZY, c, sizeof (c), 24890Sstevel@tonic-gate &sym) != -1 && w->w_addr >= (uintptr_t)sym.st_value && 24900Sstevel@tonic-gate w->w_addr < (uintptr_t)sym.st_value + sym.st_size) { 24910Sstevel@tonic-gate mdb_printf("%s+%lx ", c, w->w_addr - (uintptr_t)sym.st_value); 24920Sstevel@tonic-gate } 24930Sstevel@tonic-gate 24940Sstevel@tonic-gate mdb_printf("in %s's %s\n", name, where); 24950Sstevel@tonic-gate 24960Sstevel@tonic-gate w->w_found++; 24970Sstevel@tonic-gate return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE); 24980Sstevel@tonic-gate } 24990Sstevel@tonic-gate 25000Sstevel@tonic-gate /*ARGSUSED*/ 25010Sstevel@tonic-gate static int 25020Sstevel@tonic-gate whatis_walk_page(uintptr_t addr, const void *ignored, whatis_t *w) 25030Sstevel@tonic-gate { 25040Sstevel@tonic-gate static int machsize = 0; 25050Sstevel@tonic-gate mdb_ctf_id_t id; 25060Sstevel@tonic-gate 25070Sstevel@tonic-gate if (machsize == 0) { 25080Sstevel@tonic-gate if (mdb_ctf_lookup_by_name("unix`page_t", &id) == 0) 25090Sstevel@tonic-gate machsize = mdb_ctf_type_size(id); 25100Sstevel@tonic-gate else { 25110Sstevel@tonic-gate mdb_warn("could not get size of page_t"); 25120Sstevel@tonic-gate machsize = sizeof (page_t); 25130Sstevel@tonic-gate } 25140Sstevel@tonic-gate } 25150Sstevel@tonic-gate 25160Sstevel@tonic-gate if (w->w_addr < addr || w->w_addr >= addr + machsize) 25170Sstevel@tonic-gate return (WALK_NEXT); 25180Sstevel@tonic-gate 25190Sstevel@tonic-gate mdb_printf("%p is %p+%p, allocated as a page structure\n", 25200Sstevel@tonic-gate w->w_addr, addr, w->w_addr - addr); 25210Sstevel@tonic-gate 25220Sstevel@tonic-gate w->w_found++; 25230Sstevel@tonic-gate return (w->w_all == TRUE ? 
WALK_NEXT : WALK_DONE); 25240Sstevel@tonic-gate } 25250Sstevel@tonic-gate 25260Sstevel@tonic-gate int 25270Sstevel@tonic-gate whatis(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 25280Sstevel@tonic-gate { 25290Sstevel@tonic-gate whatis_t w; 25300Sstevel@tonic-gate 25310Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 25320Sstevel@tonic-gate return (DCMD_USAGE); 25330Sstevel@tonic-gate 25340Sstevel@tonic-gate w.w_verbose = FALSE; 25350Sstevel@tonic-gate w.w_bufctl = FALSE; 25360Sstevel@tonic-gate w.w_all = FALSE; 25370Sstevel@tonic-gate w.w_idspace = FALSE; 25380Sstevel@tonic-gate 25390Sstevel@tonic-gate if (mdb_getopts(argc, argv, 25400Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &w.w_verbose, 25410Sstevel@tonic-gate 'a', MDB_OPT_SETBITS, TRUE, &w.w_all, 25420Sstevel@tonic-gate 'i', MDB_OPT_SETBITS, TRUE, &w.w_idspace, 25430Sstevel@tonic-gate 'b', MDB_OPT_SETBITS, TRUE, &w.w_bufctl, NULL) != argc) 25440Sstevel@tonic-gate return (DCMD_USAGE); 25450Sstevel@tonic-gate 25460Sstevel@tonic-gate w.w_addr = addr; 25470Sstevel@tonic-gate w.w_found = 0; 25480Sstevel@tonic-gate 25490Sstevel@tonic-gate if (w.w_verbose) 25500Sstevel@tonic-gate mdb_printf("Searching modules...\n"); 25510Sstevel@tonic-gate 25520Sstevel@tonic-gate if (!w.w_idspace) { 25530Sstevel@tonic-gate if (mdb_walk("modctl", (mdb_walk_cb_t)whatis_walk_modctl, &w) 25540Sstevel@tonic-gate == -1) { 25550Sstevel@tonic-gate mdb_warn("couldn't find modctl walker"); 25560Sstevel@tonic-gate return (DCMD_ERR); 25570Sstevel@tonic-gate } 25580Sstevel@tonic-gate 25590Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 25600Sstevel@tonic-gate return (DCMD_OK); 25610Sstevel@tonic-gate 25620Sstevel@tonic-gate /* 25630Sstevel@tonic-gate * Now search all thread stacks. Yes, this is a little weak; we 25640Sstevel@tonic-gate * can save a lot of work by first checking to see if the 25650Sstevel@tonic-gate * address is in segkp vs. segkmem. But hey, computers are 25660Sstevel@tonic-gate * fast. 
25670Sstevel@tonic-gate */ 25680Sstevel@tonic-gate if (w.w_verbose) 25690Sstevel@tonic-gate mdb_printf("Searching threads...\n"); 25700Sstevel@tonic-gate 25710Sstevel@tonic-gate if (mdb_walk("thread", (mdb_walk_cb_t)whatis_walk_thread, &w) 25720Sstevel@tonic-gate == -1) { 25730Sstevel@tonic-gate mdb_warn("couldn't find thread walker"); 25740Sstevel@tonic-gate return (DCMD_ERR); 25750Sstevel@tonic-gate } 25760Sstevel@tonic-gate 25770Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 25780Sstevel@tonic-gate return (DCMD_OK); 25790Sstevel@tonic-gate 25800Sstevel@tonic-gate if (w.w_verbose) 25810Sstevel@tonic-gate mdb_printf("Searching page structures...\n"); 25820Sstevel@tonic-gate 25830Sstevel@tonic-gate if (mdb_walk("page", (mdb_walk_cb_t)whatis_walk_page, &w) 25840Sstevel@tonic-gate == -1) { 25850Sstevel@tonic-gate mdb_warn("couldn't find page walker"); 25860Sstevel@tonic-gate return (DCMD_ERR); 25870Sstevel@tonic-gate } 25880Sstevel@tonic-gate 25890Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 25900Sstevel@tonic-gate return (DCMD_OK); 25910Sstevel@tonic-gate } 25920Sstevel@tonic-gate 25930Sstevel@tonic-gate if (mdb_walk("kmem_cache", 25940Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_touch, &w) == -1) { 25950Sstevel@tonic-gate mdb_warn("couldn't find kmem_cache walker"); 25960Sstevel@tonic-gate return (DCMD_ERR); 25970Sstevel@tonic-gate } 25980Sstevel@tonic-gate 25990Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 26000Sstevel@tonic-gate return (DCMD_OK); 26010Sstevel@tonic-gate 26020Sstevel@tonic-gate if (mdb_walk("kmem_cache", 26030Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_notouch, &w) == -1) { 26040Sstevel@tonic-gate mdb_warn("couldn't find kmem_cache walker"); 26050Sstevel@tonic-gate return (DCMD_ERR); 26060Sstevel@tonic-gate } 26070Sstevel@tonic-gate 26080Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 26090Sstevel@tonic-gate return (DCMD_OK); 26100Sstevel@tonic-gate 26110Sstevel@tonic-gate if (mdb_walk("vmem_postfix", 
26120Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_vmem, &w) == -1) { 26130Sstevel@tonic-gate mdb_warn("couldn't find vmem_postfix walker"); 26140Sstevel@tonic-gate return (DCMD_ERR); 26150Sstevel@tonic-gate } 26160Sstevel@tonic-gate 26170Sstevel@tonic-gate if (w.w_found == 0) 26180Sstevel@tonic-gate mdb_printf("%p is unknown\n", addr); 26190Sstevel@tonic-gate 26200Sstevel@tonic-gate return (DCMD_OK); 26210Sstevel@tonic-gate } 26220Sstevel@tonic-gate 26230Sstevel@tonic-gate void 26240Sstevel@tonic-gate whatis_help(void) 26250Sstevel@tonic-gate { 26260Sstevel@tonic-gate mdb_printf( 26270Sstevel@tonic-gate "Given a virtual address, attempt to determine where it came\n" 26280Sstevel@tonic-gate "from.\n" 26290Sstevel@tonic-gate "\n" 26300Sstevel@tonic-gate "\t-v\tVerbose output; display caches/arenas/etc as they are\n" 26310Sstevel@tonic-gate "\t\tsearched\n" 26320Sstevel@tonic-gate "\t-a\tFind all possible sources. Default behavior is to stop at\n" 26330Sstevel@tonic-gate "\t\tthe first (most specific) source.\n" 26340Sstevel@tonic-gate "\t-i\tSearch only identifier arenas and caches. By default\n" 26350Sstevel@tonic-gate "\t\tthese are ignored.\n" 26360Sstevel@tonic-gate "\t-b\tReport bufctls and vmem_segs for matches in kmem and vmem,\n" 26370Sstevel@tonic-gate "\t\trespectively. 
Warning: if the buffer exists, but does not\n" 26380Sstevel@tonic-gate "\t\thave a bufctl, it will not be reported.\n"); 26390Sstevel@tonic-gate } 26400Sstevel@tonic-gate 26410Sstevel@tonic-gate typedef struct kmem_log_cpu { 26420Sstevel@tonic-gate uintptr_t kmc_low; 26430Sstevel@tonic-gate uintptr_t kmc_high; 26440Sstevel@tonic-gate } kmem_log_cpu_t; 26450Sstevel@tonic-gate 26460Sstevel@tonic-gate typedef struct kmem_log_data { 26470Sstevel@tonic-gate uintptr_t kmd_addr; 26480Sstevel@tonic-gate kmem_log_cpu_t *kmd_cpu; 26490Sstevel@tonic-gate } kmem_log_data_t; 26500Sstevel@tonic-gate 26510Sstevel@tonic-gate int 26520Sstevel@tonic-gate kmem_log_walk(uintptr_t addr, const kmem_bufctl_audit_t *b, 26530Sstevel@tonic-gate kmem_log_data_t *kmd) 26540Sstevel@tonic-gate { 26550Sstevel@tonic-gate int i; 26560Sstevel@tonic-gate kmem_log_cpu_t *kmc = kmd->kmd_cpu; 26570Sstevel@tonic-gate size_t bufsize; 26580Sstevel@tonic-gate 26590Sstevel@tonic-gate for (i = 0; i < NCPU; i++) { 26600Sstevel@tonic-gate if (addr >= kmc[i].kmc_low && addr < kmc[i].kmc_high) 26610Sstevel@tonic-gate break; 26620Sstevel@tonic-gate } 26630Sstevel@tonic-gate 26640Sstevel@tonic-gate if (kmd->kmd_addr) { 26650Sstevel@tonic-gate if (b->bc_cache == NULL) 26660Sstevel@tonic-gate return (WALK_NEXT); 26670Sstevel@tonic-gate 26680Sstevel@tonic-gate if (mdb_vread(&bufsize, sizeof (bufsize), 26690Sstevel@tonic-gate (uintptr_t)&b->bc_cache->cache_bufsize) == -1) { 26700Sstevel@tonic-gate mdb_warn( 26710Sstevel@tonic-gate "failed to read cache_bufsize for cache at %p", 26720Sstevel@tonic-gate b->bc_cache); 26730Sstevel@tonic-gate return (WALK_ERR); 26740Sstevel@tonic-gate } 26750Sstevel@tonic-gate 26760Sstevel@tonic-gate if (kmd->kmd_addr < (uintptr_t)b->bc_addr || 26770Sstevel@tonic-gate kmd->kmd_addr >= (uintptr_t)b->bc_addr + bufsize) 26780Sstevel@tonic-gate return (WALK_NEXT); 26790Sstevel@tonic-gate } 26800Sstevel@tonic-gate 26810Sstevel@tonic-gate if (i == NCPU) 26820Sstevel@tonic-gate mdb_printf(" "); 
26830Sstevel@tonic-gate else 26840Sstevel@tonic-gate mdb_printf("%3d", i); 26850Sstevel@tonic-gate 26860Sstevel@tonic-gate mdb_printf(" %0?p %0?p %16llx %0?p\n", addr, b->bc_addr, 26870Sstevel@tonic-gate b->bc_timestamp, b->bc_thread); 26880Sstevel@tonic-gate 26890Sstevel@tonic-gate return (WALK_NEXT); 26900Sstevel@tonic-gate } 26910Sstevel@tonic-gate 26920Sstevel@tonic-gate /*ARGSUSED*/ 26930Sstevel@tonic-gate int 26940Sstevel@tonic-gate kmem_log(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 26950Sstevel@tonic-gate { 26960Sstevel@tonic-gate kmem_log_header_t lh; 26970Sstevel@tonic-gate kmem_cpu_log_header_t clh; 26980Sstevel@tonic-gate uintptr_t lhp, clhp; 26990Sstevel@tonic-gate int ncpus; 27000Sstevel@tonic-gate uintptr_t *cpu; 27010Sstevel@tonic-gate GElf_Sym sym; 27020Sstevel@tonic-gate kmem_log_cpu_t *kmc; 27030Sstevel@tonic-gate int i; 27040Sstevel@tonic-gate kmem_log_data_t kmd; 27050Sstevel@tonic-gate uint_t opt_b = FALSE; 27060Sstevel@tonic-gate 27070Sstevel@tonic-gate if (mdb_getopts(argc, argv, 27080Sstevel@tonic-gate 'b', MDB_OPT_SETBITS, TRUE, &opt_b, NULL) != argc) 27090Sstevel@tonic-gate return (DCMD_USAGE); 27100Sstevel@tonic-gate 27110Sstevel@tonic-gate if (mdb_readvar(&lhp, "kmem_transaction_log") == -1) { 27120Sstevel@tonic-gate mdb_warn("failed to read 'kmem_transaction_log'"); 27130Sstevel@tonic-gate return (DCMD_ERR); 27140Sstevel@tonic-gate } 27150Sstevel@tonic-gate 27160Sstevel@tonic-gate if (lhp == NULL) { 27170Sstevel@tonic-gate mdb_warn("no kmem transaction log\n"); 27180Sstevel@tonic-gate return (DCMD_ERR); 27190Sstevel@tonic-gate } 27200Sstevel@tonic-gate 27210Sstevel@tonic-gate mdb_readvar(&ncpus, "ncpus"); 27220Sstevel@tonic-gate 27230Sstevel@tonic-gate if (mdb_vread(&lh, sizeof (kmem_log_header_t), lhp) == -1) { 27240Sstevel@tonic-gate mdb_warn("failed to read log header at %p", lhp); 27250Sstevel@tonic-gate return (DCMD_ERR); 27260Sstevel@tonic-gate } 27270Sstevel@tonic-gate 27280Sstevel@tonic-gate clhp = lhp + 
((uintptr_t)&lh.lh_cpu[0] - (uintptr_t)&lh); 27290Sstevel@tonic-gate 27300Sstevel@tonic-gate cpu = mdb_alloc(sizeof (uintptr_t) * NCPU, UM_SLEEP | UM_GC); 27310Sstevel@tonic-gate 27320Sstevel@tonic-gate if (mdb_lookup_by_name("cpu", &sym) == -1) { 27330Sstevel@tonic-gate mdb_warn("couldn't find 'cpu' array"); 27340Sstevel@tonic-gate return (DCMD_ERR); 27350Sstevel@tonic-gate } 27360Sstevel@tonic-gate 27370Sstevel@tonic-gate if (sym.st_size != NCPU * sizeof (uintptr_t)) { 27380Sstevel@tonic-gate mdb_warn("expected 'cpu' to be of size %d; found %d\n", 27390Sstevel@tonic-gate NCPU * sizeof (uintptr_t), sym.st_size); 27400Sstevel@tonic-gate return (DCMD_ERR); 27410Sstevel@tonic-gate } 27420Sstevel@tonic-gate 27430Sstevel@tonic-gate if (mdb_vread(cpu, sym.st_size, (uintptr_t)sym.st_value) == -1) { 27440Sstevel@tonic-gate mdb_warn("failed to read cpu array at %p", sym.st_value); 27450Sstevel@tonic-gate return (DCMD_ERR); 27460Sstevel@tonic-gate } 27470Sstevel@tonic-gate 27480Sstevel@tonic-gate kmc = mdb_zalloc(sizeof (kmem_log_cpu_t) * NCPU, UM_SLEEP | UM_GC); 27490Sstevel@tonic-gate kmd.kmd_addr = NULL; 27500Sstevel@tonic-gate kmd.kmd_cpu = kmc; 27510Sstevel@tonic-gate 27520Sstevel@tonic-gate for (i = 0; i < NCPU; i++) { 27530Sstevel@tonic-gate 27540Sstevel@tonic-gate if (cpu[i] == NULL) 27550Sstevel@tonic-gate continue; 27560Sstevel@tonic-gate 27570Sstevel@tonic-gate if (mdb_vread(&clh, sizeof (clh), clhp) == -1) { 27580Sstevel@tonic-gate mdb_warn("cannot read cpu %d's log header at %p", 27590Sstevel@tonic-gate i, clhp); 27600Sstevel@tonic-gate return (DCMD_ERR); 27610Sstevel@tonic-gate } 27620Sstevel@tonic-gate 27630Sstevel@tonic-gate kmc[i].kmc_low = clh.clh_chunk * lh.lh_chunksize + 27640Sstevel@tonic-gate (uintptr_t)lh.lh_base; 27650Sstevel@tonic-gate kmc[i].kmc_high = (uintptr_t)clh.clh_current; 27660Sstevel@tonic-gate 27670Sstevel@tonic-gate clhp += sizeof (kmem_cpu_log_header_t); 27680Sstevel@tonic-gate } 27690Sstevel@tonic-gate 27700Sstevel@tonic-gate 
mdb_printf("%3s %-?s %-?s %16s %-?s\n", "CPU", "ADDR", "BUFADDR", 27710Sstevel@tonic-gate "TIMESTAMP", "THREAD"); 27720Sstevel@tonic-gate 27730Sstevel@tonic-gate /* 27740Sstevel@tonic-gate * If we have been passed an address, print out only log entries 27750Sstevel@tonic-gate * corresponding to that address. If opt_b is specified, then interpret 27760Sstevel@tonic-gate * the address as a bufctl. 27770Sstevel@tonic-gate */ 27780Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 27790Sstevel@tonic-gate kmem_bufctl_audit_t b; 27800Sstevel@tonic-gate 27810Sstevel@tonic-gate if (opt_b) { 27820Sstevel@tonic-gate kmd.kmd_addr = addr; 27830Sstevel@tonic-gate } else { 27840Sstevel@tonic-gate if (mdb_vread(&b, 27850Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t), addr) == -1) { 27860Sstevel@tonic-gate mdb_warn("failed to read bufctl at %p", addr); 27870Sstevel@tonic-gate return (DCMD_ERR); 27880Sstevel@tonic-gate } 27890Sstevel@tonic-gate 27900Sstevel@tonic-gate (void) kmem_log_walk(addr, &b, &kmd); 27910Sstevel@tonic-gate 27920Sstevel@tonic-gate return (DCMD_OK); 27930Sstevel@tonic-gate } 27940Sstevel@tonic-gate } 27950Sstevel@tonic-gate 27960Sstevel@tonic-gate if (mdb_walk("kmem_log", (mdb_walk_cb_t)kmem_log_walk, &kmd) == -1) { 27970Sstevel@tonic-gate mdb_warn("can't find kmem log walker"); 27980Sstevel@tonic-gate return (DCMD_ERR); 27990Sstevel@tonic-gate } 28000Sstevel@tonic-gate 28010Sstevel@tonic-gate return (DCMD_OK); 28020Sstevel@tonic-gate } 28030Sstevel@tonic-gate 28040Sstevel@tonic-gate typedef struct bufctl_history_cb { 28050Sstevel@tonic-gate int bhc_flags; 28060Sstevel@tonic-gate int bhc_argc; 28070Sstevel@tonic-gate const mdb_arg_t *bhc_argv; 28080Sstevel@tonic-gate int bhc_ret; 28090Sstevel@tonic-gate } bufctl_history_cb_t; 28100Sstevel@tonic-gate 28110Sstevel@tonic-gate /*ARGSUSED*/ 28120Sstevel@tonic-gate static int 28130Sstevel@tonic-gate bufctl_history_callback(uintptr_t addr, const void *ign, void *arg) 28140Sstevel@tonic-gate { 28150Sstevel@tonic-gate 
bufctl_history_cb_t *bhc = arg; 28160Sstevel@tonic-gate 28170Sstevel@tonic-gate bhc->bhc_ret = 28180Sstevel@tonic-gate bufctl(addr, bhc->bhc_flags, bhc->bhc_argc, bhc->bhc_argv); 28190Sstevel@tonic-gate 28200Sstevel@tonic-gate bhc->bhc_flags &= ~DCMD_LOOPFIRST; 28210Sstevel@tonic-gate 28220Sstevel@tonic-gate return ((bhc->bhc_ret == DCMD_OK)? WALK_NEXT : WALK_DONE); 28230Sstevel@tonic-gate } 28240Sstevel@tonic-gate 28250Sstevel@tonic-gate void 28260Sstevel@tonic-gate bufctl_help(void) 28270Sstevel@tonic-gate { 28286712Stomee mdb_printf("%s", 28296712Stomee "Display the contents of kmem_bufctl_audit_ts, with optional filtering.\n\n"); 28300Sstevel@tonic-gate mdb_dec_indent(2); 28310Sstevel@tonic-gate mdb_printf("%<b>OPTIONS%</b>\n"); 28320Sstevel@tonic-gate mdb_inc_indent(2); 28330Sstevel@tonic-gate mdb_printf("%s", 28340Sstevel@tonic-gate " -v Display the full content of the bufctl, including its stack trace\n" 28350Sstevel@tonic-gate " -h retrieve the bufctl's transaction history, if available\n" 28360Sstevel@tonic-gate " -a addr\n" 28370Sstevel@tonic-gate " filter out bufctls not involving the buffer at addr\n" 28380Sstevel@tonic-gate " -c caller\n" 28390Sstevel@tonic-gate " filter out bufctls without the function/PC in their stack trace\n" 28400Sstevel@tonic-gate " -e earliest\n" 28410Sstevel@tonic-gate " filter out bufctls timestamped before earliest\n" 28420Sstevel@tonic-gate " -l latest\n" 28430Sstevel@tonic-gate " filter out bufctls timestamped after latest\n" 28440Sstevel@tonic-gate " -t thread\n" 28450Sstevel@tonic-gate " filter out bufctls not involving thread\n"); 28460Sstevel@tonic-gate } 28470Sstevel@tonic-gate 28480Sstevel@tonic-gate int 28490Sstevel@tonic-gate bufctl(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 28500Sstevel@tonic-gate { 28510Sstevel@tonic-gate kmem_bufctl_audit_t bc; 28520Sstevel@tonic-gate uint_t verbose = FALSE; 28530Sstevel@tonic-gate uint_t history = FALSE; 28540Sstevel@tonic-gate uint_t in_history = FALSE; 
28550Sstevel@tonic-gate uintptr_t caller = NULL, thread = NULL; 28560Sstevel@tonic-gate uintptr_t laddr, haddr, baddr = NULL; 28570Sstevel@tonic-gate hrtime_t earliest = 0, latest = 0; 28580Sstevel@tonic-gate int i, depth; 28590Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 28600Sstevel@tonic-gate GElf_Sym sym; 28610Sstevel@tonic-gate 28620Sstevel@tonic-gate if (mdb_getopts(argc, argv, 28630Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &verbose, 28640Sstevel@tonic-gate 'h', MDB_OPT_SETBITS, TRUE, &history, 28650Sstevel@tonic-gate 'H', MDB_OPT_SETBITS, TRUE, &in_history, /* internal */ 28660Sstevel@tonic-gate 'c', MDB_OPT_UINTPTR, &caller, 28670Sstevel@tonic-gate 't', MDB_OPT_UINTPTR, &thread, 28680Sstevel@tonic-gate 'e', MDB_OPT_UINT64, &earliest, 28690Sstevel@tonic-gate 'l', MDB_OPT_UINT64, &latest, 28700Sstevel@tonic-gate 'a', MDB_OPT_UINTPTR, &baddr, NULL) != argc) 28710Sstevel@tonic-gate return (DCMD_USAGE); 28720Sstevel@tonic-gate 28730Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 28740Sstevel@tonic-gate return (DCMD_USAGE); 28750Sstevel@tonic-gate 28760Sstevel@tonic-gate if (in_history && !history) 28770Sstevel@tonic-gate return (DCMD_USAGE); 28780Sstevel@tonic-gate 28790Sstevel@tonic-gate if (history && !in_history) { 28800Sstevel@tonic-gate mdb_arg_t *nargv = mdb_zalloc(sizeof (*nargv) * (argc + 1), 28810Sstevel@tonic-gate UM_SLEEP | UM_GC); 28820Sstevel@tonic-gate bufctl_history_cb_t bhc; 28830Sstevel@tonic-gate 28840Sstevel@tonic-gate nargv[0].a_type = MDB_TYPE_STRING; 28850Sstevel@tonic-gate nargv[0].a_un.a_str = "-H"; /* prevent recursion */ 28860Sstevel@tonic-gate 28870Sstevel@tonic-gate for (i = 0; i < argc; i++) 28880Sstevel@tonic-gate nargv[i + 1] = argv[i]; 28890Sstevel@tonic-gate 28900Sstevel@tonic-gate /* 28910Sstevel@tonic-gate * When in history mode, we treat each element as if it 28920Sstevel@tonic-gate * were in a seperate loop, so that the headers group 28930Sstevel@tonic-gate * bufctls with similar histories. 
28940Sstevel@tonic-gate */ 28950Sstevel@tonic-gate bhc.bhc_flags = flags | DCMD_LOOP | DCMD_LOOPFIRST; 28960Sstevel@tonic-gate bhc.bhc_argc = argc + 1; 28970Sstevel@tonic-gate bhc.bhc_argv = nargv; 28980Sstevel@tonic-gate bhc.bhc_ret = DCMD_OK; 28990Sstevel@tonic-gate 29000Sstevel@tonic-gate if (mdb_pwalk("bufctl_history", bufctl_history_callback, &bhc, 29010Sstevel@tonic-gate addr) == -1) { 29020Sstevel@tonic-gate mdb_warn("unable to walk bufctl_history"); 29030Sstevel@tonic-gate return (DCMD_ERR); 29040Sstevel@tonic-gate } 29050Sstevel@tonic-gate 29060Sstevel@tonic-gate if (bhc.bhc_ret == DCMD_OK && !(flags & DCMD_PIPE_OUT)) 29070Sstevel@tonic-gate mdb_printf("\n"); 29080Sstevel@tonic-gate 29090Sstevel@tonic-gate return (bhc.bhc_ret); 29100Sstevel@tonic-gate } 29110Sstevel@tonic-gate 29120Sstevel@tonic-gate if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) { 29130Sstevel@tonic-gate if (verbose) { 29140Sstevel@tonic-gate mdb_printf("%16s %16s %16s %16s\n" 29150Sstevel@tonic-gate "%<u>%16s %16s %16s %16s%</u>\n", 29160Sstevel@tonic-gate "ADDR", "BUFADDR", "TIMESTAMP", "THREAD", 29170Sstevel@tonic-gate "", "CACHE", "LASTLOG", "CONTENTS"); 29180Sstevel@tonic-gate } else { 29190Sstevel@tonic-gate mdb_printf("%<u>%-?s %-?s %-12s %-?s %s%</u>\n", 29200Sstevel@tonic-gate "ADDR", "BUFADDR", "TIMESTAMP", "THREAD", "CALLER"); 29210Sstevel@tonic-gate } 29220Sstevel@tonic-gate } 29230Sstevel@tonic-gate 29240Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), addr) == -1) { 29250Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr); 29260Sstevel@tonic-gate return (DCMD_ERR); 29270Sstevel@tonic-gate } 29280Sstevel@tonic-gate 29290Sstevel@tonic-gate /* 29300Sstevel@tonic-gate * Guard against bogus bc_depth in case the bufctl is corrupt or 29310Sstevel@tonic-gate * the address does not really refer to a bufctl. 
29320Sstevel@tonic-gate */ 29330Sstevel@tonic-gate depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH); 29340Sstevel@tonic-gate 29350Sstevel@tonic-gate if (caller != NULL) { 29360Sstevel@tonic-gate laddr = caller; 29370Sstevel@tonic-gate haddr = caller + sizeof (caller); 29380Sstevel@tonic-gate 29390Sstevel@tonic-gate if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, sizeof (c), 29400Sstevel@tonic-gate &sym) != -1 && caller == (uintptr_t)sym.st_value) { 29410Sstevel@tonic-gate /* 29420Sstevel@tonic-gate * We were provided an exact symbol value; any 29430Sstevel@tonic-gate * address in the function is valid. 29440Sstevel@tonic-gate */ 29450Sstevel@tonic-gate laddr = (uintptr_t)sym.st_value; 29460Sstevel@tonic-gate haddr = (uintptr_t)sym.st_value + sym.st_size; 29470Sstevel@tonic-gate } 29480Sstevel@tonic-gate 29490Sstevel@tonic-gate for (i = 0; i < depth; i++) 29500Sstevel@tonic-gate if (bc.bc_stack[i] >= laddr && bc.bc_stack[i] < haddr) 29510Sstevel@tonic-gate break; 29520Sstevel@tonic-gate 29530Sstevel@tonic-gate if (i == depth) 29540Sstevel@tonic-gate return (DCMD_OK); 29550Sstevel@tonic-gate } 29560Sstevel@tonic-gate 29570Sstevel@tonic-gate if (thread != NULL && (uintptr_t)bc.bc_thread != thread) 29580Sstevel@tonic-gate return (DCMD_OK); 29590Sstevel@tonic-gate 29600Sstevel@tonic-gate if (earliest != 0 && bc.bc_timestamp < earliest) 29610Sstevel@tonic-gate return (DCMD_OK); 29620Sstevel@tonic-gate 29630Sstevel@tonic-gate if (latest != 0 && bc.bc_timestamp > latest) 29640Sstevel@tonic-gate return (DCMD_OK); 29650Sstevel@tonic-gate 29660Sstevel@tonic-gate if (baddr != 0 && (uintptr_t)bc.bc_addr != baddr) 29670Sstevel@tonic-gate return (DCMD_OK); 29680Sstevel@tonic-gate 29690Sstevel@tonic-gate if (flags & DCMD_PIPE_OUT) { 29700Sstevel@tonic-gate mdb_printf("%#lr\n", addr); 29710Sstevel@tonic-gate return (DCMD_OK); 29720Sstevel@tonic-gate } 29730Sstevel@tonic-gate 29740Sstevel@tonic-gate if (verbose) { 29750Sstevel@tonic-gate mdb_printf( 29760Sstevel@tonic-gate "%<b>%16p%</b> 
%16p %16llx %16p\n" 29770Sstevel@tonic-gate "%16s %16p %16p %16p\n", 29780Sstevel@tonic-gate addr, bc.bc_addr, bc.bc_timestamp, bc.bc_thread, 29790Sstevel@tonic-gate "", bc.bc_cache, bc.bc_lastlog, bc.bc_contents); 29800Sstevel@tonic-gate 29810Sstevel@tonic-gate mdb_inc_indent(17); 29820Sstevel@tonic-gate for (i = 0; i < depth; i++) 29830Sstevel@tonic-gate mdb_printf("%a\n", bc.bc_stack[i]); 29840Sstevel@tonic-gate mdb_dec_indent(17); 29850Sstevel@tonic-gate mdb_printf("\n"); 29860Sstevel@tonic-gate } else { 29870Sstevel@tonic-gate mdb_printf("%0?p %0?p %12llx %0?p", addr, bc.bc_addr, 29880Sstevel@tonic-gate bc.bc_timestamp, bc.bc_thread); 29890Sstevel@tonic-gate 29900Sstevel@tonic-gate for (i = 0; i < depth; i++) { 29910Sstevel@tonic-gate if (mdb_lookup_by_addr(bc.bc_stack[i], 29920Sstevel@tonic-gate MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1) 29930Sstevel@tonic-gate continue; 29940Sstevel@tonic-gate if (strncmp(c, "kmem_", 5) == 0) 29950Sstevel@tonic-gate continue; 29960Sstevel@tonic-gate mdb_printf(" %a\n", bc.bc_stack[i]); 29970Sstevel@tonic-gate break; 29980Sstevel@tonic-gate } 29990Sstevel@tonic-gate 30000Sstevel@tonic-gate if (i >= depth) 30010Sstevel@tonic-gate mdb_printf("\n"); 30020Sstevel@tonic-gate } 30030Sstevel@tonic-gate 30040Sstevel@tonic-gate return (DCMD_OK); 30050Sstevel@tonic-gate } 30060Sstevel@tonic-gate 30070Sstevel@tonic-gate typedef struct kmem_verify { 30080Sstevel@tonic-gate uint64_t *kmv_buf; /* buffer to read cache contents into */ 30090Sstevel@tonic-gate size_t kmv_size; /* number of bytes in kmv_buf */ 30100Sstevel@tonic-gate int kmv_corruption; /* > 0 if corruption found. 
*/ 30110Sstevel@tonic-gate int kmv_besilent; /* report actual corruption sites */ 30120Sstevel@tonic-gate struct kmem_cache kmv_cache; /* the cache we're operating on */ 30130Sstevel@tonic-gate } kmem_verify_t; 30140Sstevel@tonic-gate 30150Sstevel@tonic-gate /* 30160Sstevel@tonic-gate * verify_pattern() 30170Sstevel@tonic-gate * verify that buf is filled with the pattern pat. 30180Sstevel@tonic-gate */ 30190Sstevel@tonic-gate static int64_t 30200Sstevel@tonic-gate verify_pattern(uint64_t *buf_arg, size_t size, uint64_t pat) 30210Sstevel@tonic-gate { 30220Sstevel@tonic-gate /*LINTED*/ 30230Sstevel@tonic-gate uint64_t *bufend = (uint64_t *)((char *)buf_arg + size); 30240Sstevel@tonic-gate uint64_t *buf; 30250Sstevel@tonic-gate 30260Sstevel@tonic-gate for (buf = buf_arg; buf < bufend; buf++) 30270Sstevel@tonic-gate if (*buf != pat) 30280Sstevel@tonic-gate return ((uintptr_t)buf - (uintptr_t)buf_arg); 30290Sstevel@tonic-gate return (-1); 30300Sstevel@tonic-gate } 30310Sstevel@tonic-gate 30320Sstevel@tonic-gate /* 30330Sstevel@tonic-gate * verify_buftag() 30340Sstevel@tonic-gate * verify that btp->bt_bxstat == (bcp ^ pat) 30350Sstevel@tonic-gate */ 30360Sstevel@tonic-gate static int 30370Sstevel@tonic-gate verify_buftag(kmem_buftag_t *btp, uintptr_t pat) 30380Sstevel@tonic-gate { 30390Sstevel@tonic-gate return (btp->bt_bxstat == ((intptr_t)btp->bt_bufctl ^ pat) ? 0 : -1); 30400Sstevel@tonic-gate } 30410Sstevel@tonic-gate 30420Sstevel@tonic-gate /* 30430Sstevel@tonic-gate * verify_free() 30440Sstevel@tonic-gate * verify the integrity of a free block of memory by checking 30450Sstevel@tonic-gate * that it is filled with 0xdeadbeef and that its buftag is sane. 
30460Sstevel@tonic-gate */ 30470Sstevel@tonic-gate /*ARGSUSED1*/ 30480Sstevel@tonic-gate static int 30490Sstevel@tonic-gate verify_free(uintptr_t addr, const void *data, void *private) 30500Sstevel@tonic-gate { 30510Sstevel@tonic-gate kmem_verify_t *kmv = (kmem_verify_t *)private; 30520Sstevel@tonic-gate uint64_t *buf = kmv->kmv_buf; /* buf to validate */ 30530Sstevel@tonic-gate int64_t corrupt; /* corruption offset */ 30540Sstevel@tonic-gate kmem_buftag_t *buftagp; /* ptr to buftag */ 30550Sstevel@tonic-gate kmem_cache_t *cp = &kmv->kmv_cache; 30560Sstevel@tonic-gate int besilent = kmv->kmv_besilent; 30570Sstevel@tonic-gate 30580Sstevel@tonic-gate /*LINTED*/ 30590Sstevel@tonic-gate buftagp = KMEM_BUFTAG(cp, buf); 30600Sstevel@tonic-gate 30610Sstevel@tonic-gate /* 30620Sstevel@tonic-gate * Read the buffer to check. 30630Sstevel@tonic-gate */ 30640Sstevel@tonic-gate if (mdb_vread(buf, kmv->kmv_size, addr) == -1) { 30650Sstevel@tonic-gate if (!besilent) 30660Sstevel@tonic-gate mdb_warn("couldn't read %p", addr); 30670Sstevel@tonic-gate return (WALK_NEXT); 30680Sstevel@tonic-gate } 30690Sstevel@tonic-gate 30700Sstevel@tonic-gate if ((corrupt = verify_pattern(buf, cp->cache_verify, 30710Sstevel@tonic-gate KMEM_FREE_PATTERN)) >= 0) { 30720Sstevel@tonic-gate if (!besilent) 30730Sstevel@tonic-gate mdb_printf("buffer %p (free) seems corrupted, at %p\n", 30740Sstevel@tonic-gate addr, (uintptr_t)addr + corrupt); 30750Sstevel@tonic-gate goto corrupt; 30760Sstevel@tonic-gate } 30770Sstevel@tonic-gate /* 30780Sstevel@tonic-gate * When KMF_LITE is set, buftagp->bt_redzone is used to hold 30790Sstevel@tonic-gate * the first bytes of the buffer, hence we cannot check for red 30800Sstevel@tonic-gate * zone corruption. 
30810Sstevel@tonic-gate */ 30820Sstevel@tonic-gate if ((cp->cache_flags & (KMF_HASH | KMF_LITE)) == KMF_HASH && 30830Sstevel@tonic-gate buftagp->bt_redzone != KMEM_REDZONE_PATTERN) { 30840Sstevel@tonic-gate if (!besilent) 30850Sstevel@tonic-gate mdb_printf("buffer %p (free) seems to " 30860Sstevel@tonic-gate "have a corrupt redzone pattern\n", addr); 30870Sstevel@tonic-gate goto corrupt; 30880Sstevel@tonic-gate } 30890Sstevel@tonic-gate 30900Sstevel@tonic-gate /* 30910Sstevel@tonic-gate * confirm bufctl pointer integrity. 30920Sstevel@tonic-gate */ 30930Sstevel@tonic-gate if (verify_buftag(buftagp, KMEM_BUFTAG_FREE) == -1) { 30940Sstevel@tonic-gate if (!besilent) 30950Sstevel@tonic-gate mdb_printf("buffer %p (free) has a corrupt " 30960Sstevel@tonic-gate "buftag\n", addr); 30970Sstevel@tonic-gate goto corrupt; 30980Sstevel@tonic-gate } 30990Sstevel@tonic-gate 31000Sstevel@tonic-gate return (WALK_NEXT); 31010Sstevel@tonic-gate corrupt: 31020Sstevel@tonic-gate kmv->kmv_corruption++; 31030Sstevel@tonic-gate return (WALK_NEXT); 31040Sstevel@tonic-gate } 31050Sstevel@tonic-gate 31060Sstevel@tonic-gate /* 31070Sstevel@tonic-gate * verify_alloc() 31080Sstevel@tonic-gate * Verify that the buftag of an allocated buffer makes sense with respect 31090Sstevel@tonic-gate * to the buffer. 
31100Sstevel@tonic-gate */ 31110Sstevel@tonic-gate /*ARGSUSED1*/ 31120Sstevel@tonic-gate static int 31130Sstevel@tonic-gate verify_alloc(uintptr_t addr, const void *data, void *private) 31140Sstevel@tonic-gate { 31150Sstevel@tonic-gate kmem_verify_t *kmv = (kmem_verify_t *)private; 31160Sstevel@tonic-gate kmem_cache_t *cp = &kmv->kmv_cache; 31170Sstevel@tonic-gate uint64_t *buf = kmv->kmv_buf; /* buf to validate */ 31180Sstevel@tonic-gate /*LINTED*/ 31190Sstevel@tonic-gate kmem_buftag_t *buftagp = KMEM_BUFTAG(cp, buf); 31200Sstevel@tonic-gate uint32_t *ip = (uint32_t *)buftagp; 31210Sstevel@tonic-gate uint8_t *bp = (uint8_t *)buf; 31220Sstevel@tonic-gate int looks_ok = 0, size_ok = 1; /* flags for finding corruption */ 31230Sstevel@tonic-gate int besilent = kmv->kmv_besilent; 31240Sstevel@tonic-gate 31250Sstevel@tonic-gate /* 31260Sstevel@tonic-gate * Read the buffer to check. 31270Sstevel@tonic-gate */ 31280Sstevel@tonic-gate if (mdb_vread(buf, kmv->kmv_size, addr) == -1) { 31290Sstevel@tonic-gate if (!besilent) 31300Sstevel@tonic-gate mdb_warn("couldn't read %p", addr); 31310Sstevel@tonic-gate return (WALK_NEXT); 31320Sstevel@tonic-gate } 31330Sstevel@tonic-gate 31340Sstevel@tonic-gate /* 31350Sstevel@tonic-gate * There are two cases to handle: 31360Sstevel@tonic-gate * 1. If the buf was alloc'd using kmem_cache_alloc, it will have 31370Sstevel@tonic-gate * 0xfeedfacefeedface at the end of it 31380Sstevel@tonic-gate * 2. If the buf was alloc'd using kmem_alloc, it will have 31390Sstevel@tonic-gate * 0xbb just past the end of the region in use. At the buftag, 31400Sstevel@tonic-gate * it will have 0xfeedface (or, if the whole buffer is in use, 31410Sstevel@tonic-gate * 0xfeedface & bb000000 or 0xfeedfacf & 000000bb depending on 31420Sstevel@tonic-gate * endianness), followed by 32 bits containing the offset of the 31430Sstevel@tonic-gate * 0xbb byte in the buffer. 
31440Sstevel@tonic-gate * 31450Sstevel@tonic-gate * Finally, the two 32-bit words that comprise the second half of the 31460Sstevel@tonic-gate * buftag should xor to KMEM_BUFTAG_ALLOC 31470Sstevel@tonic-gate */ 31480Sstevel@tonic-gate 31490Sstevel@tonic-gate if (buftagp->bt_redzone == KMEM_REDZONE_PATTERN) 31500Sstevel@tonic-gate looks_ok = 1; 31510Sstevel@tonic-gate else if (!KMEM_SIZE_VALID(ip[1])) 31520Sstevel@tonic-gate size_ok = 0; 31530Sstevel@tonic-gate else if (bp[KMEM_SIZE_DECODE(ip[1])] == KMEM_REDZONE_BYTE) 31540Sstevel@tonic-gate looks_ok = 1; 31550Sstevel@tonic-gate else 31560Sstevel@tonic-gate size_ok = 0; 31570Sstevel@tonic-gate 31580Sstevel@tonic-gate if (!size_ok) { 31590Sstevel@tonic-gate if (!besilent) 31600Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a corrupt " 31610Sstevel@tonic-gate "redzone size encoding\n", addr); 31620Sstevel@tonic-gate goto corrupt; 31630Sstevel@tonic-gate } 31640Sstevel@tonic-gate 31650Sstevel@tonic-gate if (!looks_ok) { 31660Sstevel@tonic-gate if (!besilent) 31670Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a corrupt " 31680Sstevel@tonic-gate "redzone signature\n", addr); 31690Sstevel@tonic-gate goto corrupt; 31700Sstevel@tonic-gate } 31710Sstevel@tonic-gate 31720Sstevel@tonic-gate if (verify_buftag(buftagp, KMEM_BUFTAG_ALLOC) == -1) { 31730Sstevel@tonic-gate if (!besilent) 31740Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a " 31750Sstevel@tonic-gate "corrupt buftag\n", addr); 31760Sstevel@tonic-gate goto corrupt; 31770Sstevel@tonic-gate } 31780Sstevel@tonic-gate 31790Sstevel@tonic-gate return (WALK_NEXT); 31800Sstevel@tonic-gate corrupt: 31810Sstevel@tonic-gate kmv->kmv_corruption++; 31820Sstevel@tonic-gate return (WALK_NEXT); 31830Sstevel@tonic-gate } 31840Sstevel@tonic-gate 31850Sstevel@tonic-gate /*ARGSUSED2*/ 31860Sstevel@tonic-gate int 31870Sstevel@tonic-gate kmem_verify(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 31880Sstevel@tonic-gate { 
31890Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 31900Sstevel@tonic-gate int check_alloc = 0, check_free = 0; 31910Sstevel@tonic-gate kmem_verify_t kmv; 31920Sstevel@tonic-gate 31930Sstevel@tonic-gate if (mdb_vread(&kmv.kmv_cache, sizeof (kmv.kmv_cache), 31940Sstevel@tonic-gate addr) == -1) { 31950Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache %p", addr); 31960Sstevel@tonic-gate return (DCMD_ERR); 31970Sstevel@tonic-gate } 31980Sstevel@tonic-gate 31990Sstevel@tonic-gate kmv.kmv_size = kmv.kmv_cache.cache_buftag + 32000Sstevel@tonic-gate sizeof (kmem_buftag_t); 32010Sstevel@tonic-gate kmv.kmv_buf = mdb_alloc(kmv.kmv_size, UM_SLEEP | UM_GC); 32020Sstevel@tonic-gate kmv.kmv_corruption = 0; 32030Sstevel@tonic-gate 32040Sstevel@tonic-gate if ((kmv.kmv_cache.cache_flags & KMF_REDZONE)) { 32050Sstevel@tonic-gate check_alloc = 1; 32060Sstevel@tonic-gate if (kmv.kmv_cache.cache_flags & KMF_DEADBEEF) 32070Sstevel@tonic-gate check_free = 1; 32080Sstevel@tonic-gate } else { 32090Sstevel@tonic-gate if (!(flags & DCMD_LOOP)) { 32100Sstevel@tonic-gate mdb_warn("cache %p (%s) does not have " 32110Sstevel@tonic-gate "redzone checking enabled\n", addr, 32120Sstevel@tonic-gate kmv.kmv_cache.cache_name); 32130Sstevel@tonic-gate } 32140Sstevel@tonic-gate return (DCMD_ERR); 32150Sstevel@tonic-gate } 32160Sstevel@tonic-gate 32170Sstevel@tonic-gate if (flags & DCMD_LOOP) { 32180Sstevel@tonic-gate /* 32190Sstevel@tonic-gate * table mode, don't print out every corrupt buffer 32200Sstevel@tonic-gate */ 32210Sstevel@tonic-gate kmv.kmv_besilent = 1; 32220Sstevel@tonic-gate } else { 32230Sstevel@tonic-gate mdb_printf("Summary for cache '%s'\n", 32240Sstevel@tonic-gate kmv.kmv_cache.cache_name); 32250Sstevel@tonic-gate mdb_inc_indent(2); 32260Sstevel@tonic-gate kmv.kmv_besilent = 0; 32270Sstevel@tonic-gate } 32280Sstevel@tonic-gate 32290Sstevel@tonic-gate if (check_alloc) 32300Sstevel@tonic-gate (void) mdb_pwalk("kmem", verify_alloc, &kmv, addr); 32310Sstevel@tonic-gate if 
(check_free) 32320Sstevel@tonic-gate (void) mdb_pwalk("freemem", verify_free, &kmv, addr); 32330Sstevel@tonic-gate 32340Sstevel@tonic-gate if (flags & DCMD_LOOP) { 32350Sstevel@tonic-gate if (kmv.kmv_corruption == 0) { 32360Sstevel@tonic-gate mdb_printf("%-*s %?p clean\n", 32370Sstevel@tonic-gate KMEM_CACHE_NAMELEN, 32380Sstevel@tonic-gate kmv.kmv_cache.cache_name, addr); 32390Sstevel@tonic-gate } else { 32400Sstevel@tonic-gate char *s = ""; /* optional s in "buffer[s]" */ 32410Sstevel@tonic-gate if (kmv.kmv_corruption > 1) 32420Sstevel@tonic-gate s = "s"; 32430Sstevel@tonic-gate 32440Sstevel@tonic-gate mdb_printf("%-*s %?p %d corrupt buffer%s\n", 32450Sstevel@tonic-gate KMEM_CACHE_NAMELEN, 32460Sstevel@tonic-gate kmv.kmv_cache.cache_name, addr, 32470Sstevel@tonic-gate kmv.kmv_corruption, s); 32480Sstevel@tonic-gate } 32490Sstevel@tonic-gate } else { 32500Sstevel@tonic-gate /* 32510Sstevel@tonic-gate * This is the more verbose mode, when the user has 32520Sstevel@tonic-gate * type addr::kmem_verify. If the cache was clean, 32530Sstevel@tonic-gate * nothing will have yet been printed. So say something. 32540Sstevel@tonic-gate */ 32550Sstevel@tonic-gate if (kmv.kmv_corruption == 0) 32560Sstevel@tonic-gate mdb_printf("clean\n"); 32570Sstevel@tonic-gate 32580Sstevel@tonic-gate mdb_dec_indent(2); 32590Sstevel@tonic-gate } 32600Sstevel@tonic-gate } else { 32610Sstevel@tonic-gate /* 32620Sstevel@tonic-gate * If the user didn't specify a cache to verify, we'll walk all 32630Sstevel@tonic-gate * kmem_cache's, specifying ourself as a callback for each... 
32640Sstevel@tonic-gate * this is the equivalent of '::walk kmem_cache .::kmem_verify' 32650Sstevel@tonic-gate */ 32660Sstevel@tonic-gate mdb_printf("%<u>%-*s %-?s %-20s%</b>\n", KMEM_CACHE_NAMELEN, 32670Sstevel@tonic-gate "Cache Name", "Addr", "Cache Integrity"); 32680Sstevel@tonic-gate (void) (mdb_walk_dcmd("kmem_cache", "kmem_verify", 0, NULL)); 32690Sstevel@tonic-gate } 32700Sstevel@tonic-gate 32710Sstevel@tonic-gate return (DCMD_OK); 32720Sstevel@tonic-gate } 32730Sstevel@tonic-gate 32740Sstevel@tonic-gate typedef struct vmem_node { 32750Sstevel@tonic-gate struct vmem_node *vn_next; 32760Sstevel@tonic-gate struct vmem_node *vn_parent; 32770Sstevel@tonic-gate struct vmem_node *vn_sibling; 32780Sstevel@tonic-gate struct vmem_node *vn_children; 32790Sstevel@tonic-gate uintptr_t vn_addr; 32800Sstevel@tonic-gate int vn_marked; 32810Sstevel@tonic-gate vmem_t vn_vmem; 32820Sstevel@tonic-gate } vmem_node_t; 32830Sstevel@tonic-gate 32840Sstevel@tonic-gate typedef struct vmem_walk { 32850Sstevel@tonic-gate vmem_node_t *vw_root; 32860Sstevel@tonic-gate vmem_node_t *vw_current; 32870Sstevel@tonic-gate } vmem_walk_t; 32880Sstevel@tonic-gate 32890Sstevel@tonic-gate int 32900Sstevel@tonic-gate vmem_walk_init(mdb_walk_state_t *wsp) 32910Sstevel@tonic-gate { 32920Sstevel@tonic-gate uintptr_t vaddr, paddr; 32930Sstevel@tonic-gate vmem_node_t *head = NULL, *root = NULL, *current = NULL, *parent, *vp; 32940Sstevel@tonic-gate vmem_walk_t *vw; 32950Sstevel@tonic-gate 32960Sstevel@tonic-gate if (mdb_readvar(&vaddr, "vmem_list") == -1) { 32970Sstevel@tonic-gate mdb_warn("couldn't read 'vmem_list'"); 32980Sstevel@tonic-gate return (WALK_ERR); 32990Sstevel@tonic-gate } 33000Sstevel@tonic-gate 33010Sstevel@tonic-gate while (vaddr != NULL) { 33020Sstevel@tonic-gate vp = mdb_zalloc(sizeof (vmem_node_t), UM_SLEEP); 33030Sstevel@tonic-gate vp->vn_addr = vaddr; 33040Sstevel@tonic-gate vp->vn_next = head; 33050Sstevel@tonic-gate head = vp; 33060Sstevel@tonic-gate 33070Sstevel@tonic-gate if 
(vaddr == wsp->walk_addr) 33080Sstevel@tonic-gate current = vp; 33090Sstevel@tonic-gate 33100Sstevel@tonic-gate if (mdb_vread(&vp->vn_vmem, sizeof (vmem_t), vaddr) == -1) { 33110Sstevel@tonic-gate mdb_warn("couldn't read vmem_t at %p", vaddr); 33120Sstevel@tonic-gate goto err; 33130Sstevel@tonic-gate } 33140Sstevel@tonic-gate 33150Sstevel@tonic-gate vaddr = (uintptr_t)vp->vn_vmem.vm_next; 33160Sstevel@tonic-gate } 33170Sstevel@tonic-gate 33180Sstevel@tonic-gate for (vp = head; vp != NULL; vp = vp->vn_next) { 33190Sstevel@tonic-gate 33200Sstevel@tonic-gate if ((paddr = (uintptr_t)vp->vn_vmem.vm_source) == NULL) { 33210Sstevel@tonic-gate vp->vn_sibling = root; 33220Sstevel@tonic-gate root = vp; 33230Sstevel@tonic-gate continue; 33240Sstevel@tonic-gate } 33250Sstevel@tonic-gate 33260Sstevel@tonic-gate for (parent = head; parent != NULL; parent = parent->vn_next) { 33270Sstevel@tonic-gate if (parent->vn_addr != paddr) 33280Sstevel@tonic-gate continue; 33290Sstevel@tonic-gate vp->vn_sibling = parent->vn_children; 33300Sstevel@tonic-gate parent->vn_children = vp; 33310Sstevel@tonic-gate vp->vn_parent = parent; 33320Sstevel@tonic-gate break; 33330Sstevel@tonic-gate } 33340Sstevel@tonic-gate 33350Sstevel@tonic-gate if (parent == NULL) { 33360Sstevel@tonic-gate mdb_warn("couldn't find %p's parent (%p)\n", 33370Sstevel@tonic-gate vp->vn_addr, paddr); 33380Sstevel@tonic-gate goto err; 33390Sstevel@tonic-gate } 33400Sstevel@tonic-gate } 33410Sstevel@tonic-gate 33420Sstevel@tonic-gate vw = mdb_zalloc(sizeof (vmem_walk_t), UM_SLEEP); 33430Sstevel@tonic-gate vw->vw_root = root; 33440Sstevel@tonic-gate 33450Sstevel@tonic-gate if (current != NULL) 33460Sstevel@tonic-gate vw->vw_current = current; 33470Sstevel@tonic-gate else 33480Sstevel@tonic-gate vw->vw_current = root; 33490Sstevel@tonic-gate 33500Sstevel@tonic-gate wsp->walk_data = vw; 33510Sstevel@tonic-gate return (WALK_NEXT); 33520Sstevel@tonic-gate err: 33530Sstevel@tonic-gate for (vp = head; head != NULL; vp = head) { 
33540Sstevel@tonic-gate head = vp->vn_next; 33550Sstevel@tonic-gate mdb_free(vp, sizeof (vmem_node_t)); 33560Sstevel@tonic-gate } 33570Sstevel@tonic-gate 33580Sstevel@tonic-gate return (WALK_ERR); 33590Sstevel@tonic-gate } 33600Sstevel@tonic-gate 33610Sstevel@tonic-gate int 33620Sstevel@tonic-gate vmem_walk_step(mdb_walk_state_t *wsp) 33630Sstevel@tonic-gate { 33640Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data; 33650Sstevel@tonic-gate vmem_node_t *vp; 33660Sstevel@tonic-gate int rval; 33670Sstevel@tonic-gate 33680Sstevel@tonic-gate if ((vp = vw->vw_current) == NULL) 33690Sstevel@tonic-gate return (WALK_DONE); 33700Sstevel@tonic-gate 33710Sstevel@tonic-gate rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata); 33720Sstevel@tonic-gate 33730Sstevel@tonic-gate if (vp->vn_children != NULL) { 33740Sstevel@tonic-gate vw->vw_current = vp->vn_children; 33750Sstevel@tonic-gate return (rval); 33760Sstevel@tonic-gate } 33770Sstevel@tonic-gate 33780Sstevel@tonic-gate do { 33790Sstevel@tonic-gate vw->vw_current = vp->vn_sibling; 33800Sstevel@tonic-gate vp = vp->vn_parent; 33810Sstevel@tonic-gate } while (vw->vw_current == NULL && vp != NULL); 33820Sstevel@tonic-gate 33830Sstevel@tonic-gate return (rval); 33840Sstevel@tonic-gate } 33850Sstevel@tonic-gate 33860Sstevel@tonic-gate /* 33870Sstevel@tonic-gate * The "vmem_postfix" walk walks the vmem arenas in post-fix order; all 33880Sstevel@tonic-gate * children are visited before their parent. We perform the postfix walk 33890Sstevel@tonic-gate * iteratively (rather than recursively) to allow mdb to regain control 33900Sstevel@tonic-gate * after each callback. 
33910Sstevel@tonic-gate */ 33920Sstevel@tonic-gate int 33930Sstevel@tonic-gate vmem_postfix_walk_step(mdb_walk_state_t *wsp) 33940Sstevel@tonic-gate { 33950Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data; 33960Sstevel@tonic-gate vmem_node_t *vp = vw->vw_current; 33970Sstevel@tonic-gate int rval; 33980Sstevel@tonic-gate 33990Sstevel@tonic-gate /* 34000Sstevel@tonic-gate * If this node is marked, then we know that we have already visited 34010Sstevel@tonic-gate * all of its children. If the node has any siblings, they need to 34020Sstevel@tonic-gate * be visited next; otherwise, we need to visit the parent. Note 34030Sstevel@tonic-gate * that vp->vn_marked will only be zero on the first invocation of 34040Sstevel@tonic-gate * the step function. 34050Sstevel@tonic-gate */ 34060Sstevel@tonic-gate if (vp->vn_marked) { 34070Sstevel@tonic-gate if (vp->vn_sibling != NULL) 34080Sstevel@tonic-gate vp = vp->vn_sibling; 34090Sstevel@tonic-gate else if (vp->vn_parent != NULL) 34100Sstevel@tonic-gate vp = vp->vn_parent; 34110Sstevel@tonic-gate else { 34120Sstevel@tonic-gate /* 34130Sstevel@tonic-gate * We have neither a parent, nor a sibling, and we 34140Sstevel@tonic-gate * have already been visited; we're done. 34150Sstevel@tonic-gate */ 34160Sstevel@tonic-gate return (WALK_DONE); 34170Sstevel@tonic-gate } 34180Sstevel@tonic-gate } 34190Sstevel@tonic-gate 34200Sstevel@tonic-gate /* 34210Sstevel@tonic-gate * Before we visit this node, visit its children. 
34220Sstevel@tonic-gate */ 34230Sstevel@tonic-gate while (vp->vn_children != NULL && !vp->vn_children->vn_marked) 34240Sstevel@tonic-gate vp = vp->vn_children; 34250Sstevel@tonic-gate 34260Sstevel@tonic-gate vp->vn_marked = 1; 34270Sstevel@tonic-gate vw->vw_current = vp; 34280Sstevel@tonic-gate rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata); 34290Sstevel@tonic-gate 34300Sstevel@tonic-gate return (rval); 34310Sstevel@tonic-gate } 34320Sstevel@tonic-gate 34330Sstevel@tonic-gate void 34340Sstevel@tonic-gate vmem_walk_fini(mdb_walk_state_t *wsp) 34350Sstevel@tonic-gate { 34360Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data; 34370Sstevel@tonic-gate vmem_node_t *root = vw->vw_root; 34380Sstevel@tonic-gate int done; 34390Sstevel@tonic-gate 34400Sstevel@tonic-gate if (root == NULL) 34410Sstevel@tonic-gate return; 34420Sstevel@tonic-gate 34430Sstevel@tonic-gate if ((vw->vw_root = root->vn_children) != NULL) 34440Sstevel@tonic-gate vmem_walk_fini(wsp); 34450Sstevel@tonic-gate 34460Sstevel@tonic-gate vw->vw_root = root->vn_sibling; 34470Sstevel@tonic-gate done = (root->vn_sibling == NULL && root->vn_parent == NULL); 34480Sstevel@tonic-gate mdb_free(root, sizeof (vmem_node_t)); 34490Sstevel@tonic-gate 34500Sstevel@tonic-gate if (done) { 34510Sstevel@tonic-gate mdb_free(vw, sizeof (vmem_walk_t)); 34520Sstevel@tonic-gate } else { 34530Sstevel@tonic-gate vmem_walk_fini(wsp); 34540Sstevel@tonic-gate } 34550Sstevel@tonic-gate } 34560Sstevel@tonic-gate 34570Sstevel@tonic-gate typedef struct vmem_seg_walk { 34580Sstevel@tonic-gate uint8_t vsw_type; 34590Sstevel@tonic-gate uintptr_t vsw_start; 34600Sstevel@tonic-gate uintptr_t vsw_current; 34610Sstevel@tonic-gate } vmem_seg_walk_t; 34620Sstevel@tonic-gate 34630Sstevel@tonic-gate /*ARGSUSED*/ 34640Sstevel@tonic-gate int 34650Sstevel@tonic-gate vmem_seg_walk_common_init(mdb_walk_state_t *wsp, uint8_t type, char *name) 34660Sstevel@tonic-gate { 34670Sstevel@tonic-gate vmem_seg_walk_t *vsw; 
34680Sstevel@tonic-gate 34690Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 34700Sstevel@tonic-gate mdb_warn("vmem_%s does not support global walks\n", name); 34710Sstevel@tonic-gate return (WALK_ERR); 34720Sstevel@tonic-gate } 34730Sstevel@tonic-gate 34740Sstevel@tonic-gate wsp->walk_data = vsw = mdb_alloc(sizeof (vmem_seg_walk_t), UM_SLEEP); 34750Sstevel@tonic-gate 34760Sstevel@tonic-gate vsw->vsw_type = type; 34770Sstevel@tonic-gate vsw->vsw_start = wsp->walk_addr + offsetof(vmem_t, vm_seg0); 34780Sstevel@tonic-gate vsw->vsw_current = vsw->vsw_start; 34790Sstevel@tonic-gate 34800Sstevel@tonic-gate return (WALK_NEXT); 34810Sstevel@tonic-gate } 34820Sstevel@tonic-gate 34830Sstevel@tonic-gate /* 34840Sstevel@tonic-gate * vmem segments can't have type 0 (this should be added to vmem_impl.h). 34850Sstevel@tonic-gate */ 34860Sstevel@tonic-gate #define VMEM_NONE 0 34870Sstevel@tonic-gate 34880Sstevel@tonic-gate int 34890Sstevel@tonic-gate vmem_alloc_walk_init(mdb_walk_state_t *wsp) 34900Sstevel@tonic-gate { 34910Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_ALLOC, "alloc")); 34920Sstevel@tonic-gate } 34930Sstevel@tonic-gate 34940Sstevel@tonic-gate int 34950Sstevel@tonic-gate vmem_free_walk_init(mdb_walk_state_t *wsp) 34960Sstevel@tonic-gate { 34970Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_FREE, "free")); 34980Sstevel@tonic-gate } 34990Sstevel@tonic-gate 35000Sstevel@tonic-gate int 35010Sstevel@tonic-gate vmem_span_walk_init(mdb_walk_state_t *wsp) 35020Sstevel@tonic-gate { 35030Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_SPAN, "span")); 35040Sstevel@tonic-gate } 35050Sstevel@tonic-gate 35060Sstevel@tonic-gate int 35070Sstevel@tonic-gate vmem_seg_walk_init(mdb_walk_state_t *wsp) 35080Sstevel@tonic-gate { 35090Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_NONE, "seg")); 35100Sstevel@tonic-gate } 35110Sstevel@tonic-gate 35120Sstevel@tonic-gate int 35130Sstevel@tonic-gate 
vmem_seg_walk_step(mdb_walk_state_t *wsp) 35140Sstevel@tonic-gate { 35150Sstevel@tonic-gate vmem_seg_t seg; 35160Sstevel@tonic-gate vmem_seg_walk_t *vsw = wsp->walk_data; 35170Sstevel@tonic-gate uintptr_t addr = vsw->vsw_current; 35180Sstevel@tonic-gate static size_t seg_size = 0; 35190Sstevel@tonic-gate int rval; 35200Sstevel@tonic-gate 35210Sstevel@tonic-gate if (!seg_size) { 35220Sstevel@tonic-gate if (mdb_readvar(&seg_size, "vmem_seg_size") == -1) { 35230Sstevel@tonic-gate mdb_warn("failed to read 'vmem_seg_size'"); 35240Sstevel@tonic-gate seg_size = sizeof (vmem_seg_t); 35250Sstevel@tonic-gate } 35260Sstevel@tonic-gate } 35270Sstevel@tonic-gate 35280Sstevel@tonic-gate if (seg_size < sizeof (seg)) 35290Sstevel@tonic-gate bzero((caddr_t)&seg + seg_size, sizeof (seg) - seg_size); 35300Sstevel@tonic-gate 35310Sstevel@tonic-gate if (mdb_vread(&seg, seg_size, addr) == -1) { 35320Sstevel@tonic-gate mdb_warn("couldn't read vmem_seg at %p", addr); 35330Sstevel@tonic-gate return (WALK_ERR); 35340Sstevel@tonic-gate } 35350Sstevel@tonic-gate 35360Sstevel@tonic-gate vsw->vsw_current = (uintptr_t)seg.vs_anext; 35370Sstevel@tonic-gate if (vsw->vsw_type != VMEM_NONE && seg.vs_type != vsw->vsw_type) { 35380Sstevel@tonic-gate rval = WALK_NEXT; 35390Sstevel@tonic-gate } else { 35400Sstevel@tonic-gate rval = wsp->walk_callback(addr, &seg, wsp->walk_cbdata); 35410Sstevel@tonic-gate } 35420Sstevel@tonic-gate 35430Sstevel@tonic-gate if (vsw->vsw_current == vsw->vsw_start) 35440Sstevel@tonic-gate return (WALK_DONE); 35450Sstevel@tonic-gate 35460Sstevel@tonic-gate return (rval); 35470Sstevel@tonic-gate } 35480Sstevel@tonic-gate 35490Sstevel@tonic-gate void 35500Sstevel@tonic-gate vmem_seg_walk_fini(mdb_walk_state_t *wsp) 35510Sstevel@tonic-gate { 35520Sstevel@tonic-gate vmem_seg_walk_t *vsw = wsp->walk_data; 35530Sstevel@tonic-gate 35540Sstevel@tonic-gate mdb_free(vsw, sizeof (vmem_seg_walk_t)); 35550Sstevel@tonic-gate } 35560Sstevel@tonic-gate 35570Sstevel@tonic-gate #define 
VMEM_NAMEWIDTH 22 35580Sstevel@tonic-gate 35590Sstevel@tonic-gate int 35600Sstevel@tonic-gate vmem(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 35610Sstevel@tonic-gate { 35620Sstevel@tonic-gate vmem_t v, parent; 35630Sstevel@tonic-gate vmem_kstat_t *vkp = &v.vm_kstat; 35640Sstevel@tonic-gate uintptr_t paddr; 35650Sstevel@tonic-gate int ident = 0; 35660Sstevel@tonic-gate char c[VMEM_NAMEWIDTH]; 35670Sstevel@tonic-gate 35680Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) { 35690Sstevel@tonic-gate if (mdb_walk_dcmd("vmem", "vmem", argc, argv) == -1) { 35700Sstevel@tonic-gate mdb_warn("can't walk vmem"); 35710Sstevel@tonic-gate return (DCMD_ERR); 35720Sstevel@tonic-gate } 35730Sstevel@tonic-gate return (DCMD_OK); 35740Sstevel@tonic-gate } 35750Sstevel@tonic-gate 35760Sstevel@tonic-gate if (DCMD_HDRSPEC(flags)) 35770Sstevel@tonic-gate mdb_printf("%-?s %-*s %10s %12s %9s %5s\n", 35780Sstevel@tonic-gate "ADDR", VMEM_NAMEWIDTH, "NAME", "INUSE", 35790Sstevel@tonic-gate "TOTAL", "SUCCEED", "FAIL"); 35800Sstevel@tonic-gate 35810Sstevel@tonic-gate if (mdb_vread(&v, sizeof (v), addr) == -1) { 35820Sstevel@tonic-gate mdb_warn("couldn't read vmem at %p", addr); 35830Sstevel@tonic-gate return (DCMD_ERR); 35840Sstevel@tonic-gate } 35850Sstevel@tonic-gate 35860Sstevel@tonic-gate for (paddr = (uintptr_t)v.vm_source; paddr != NULL; ident += 2) { 35870Sstevel@tonic-gate if (mdb_vread(&parent, sizeof (parent), paddr) == -1) { 35880Sstevel@tonic-gate mdb_warn("couldn't trace %p's ancestry", addr); 35890Sstevel@tonic-gate ident = 0; 35900Sstevel@tonic-gate break; 35910Sstevel@tonic-gate } 35920Sstevel@tonic-gate paddr = (uintptr_t)parent.vm_source; 35930Sstevel@tonic-gate } 35940Sstevel@tonic-gate 35950Sstevel@tonic-gate (void) mdb_snprintf(c, VMEM_NAMEWIDTH, "%*s%s", ident, "", v.vm_name); 35960Sstevel@tonic-gate 35970Sstevel@tonic-gate mdb_printf("%0?p %-*s %10llu %12llu %9llu %5llu\n", 35980Sstevel@tonic-gate addr, VMEM_NAMEWIDTH, c, 35990Sstevel@tonic-gate 
vkp->vk_mem_inuse.value.ui64, vkp->vk_mem_total.value.ui64, 36000Sstevel@tonic-gate vkp->vk_alloc.value.ui64, vkp->vk_fail.value.ui64); 36010Sstevel@tonic-gate 36020Sstevel@tonic-gate return (DCMD_OK); 36030Sstevel@tonic-gate } 36040Sstevel@tonic-gate 36050Sstevel@tonic-gate void 36060Sstevel@tonic-gate vmem_seg_help(void) 36070Sstevel@tonic-gate { 36086712Stomee mdb_printf("%s", 36096712Stomee "Display the contents of vmem_seg_ts, with optional filtering.\n\n" 36100Sstevel@tonic-gate "\n" 36110Sstevel@tonic-gate "A vmem_seg_t represents a range of addresses (or arbitrary numbers),\n" 36120Sstevel@tonic-gate "representing a single chunk of data. Only ALLOC segments have debugging\n" 36130Sstevel@tonic-gate "information.\n"); 36140Sstevel@tonic-gate mdb_dec_indent(2); 36150Sstevel@tonic-gate mdb_printf("%<b>OPTIONS%</b>\n"); 36160Sstevel@tonic-gate mdb_inc_indent(2); 36170Sstevel@tonic-gate mdb_printf("%s", 36180Sstevel@tonic-gate " -v Display the full content of the vmem_seg, including its stack trace\n" 36190Sstevel@tonic-gate " -s report the size of the segment, instead of the end address\n" 36200Sstevel@tonic-gate " -c caller\n" 36210Sstevel@tonic-gate " filter out segments without the function/PC in their stack trace\n" 36220Sstevel@tonic-gate " -e earliest\n" 36230Sstevel@tonic-gate " filter out segments timestamped before earliest\n" 36240Sstevel@tonic-gate " -l latest\n" 36250Sstevel@tonic-gate " filter out segments timestamped after latest\n" 36260Sstevel@tonic-gate " -m minsize\n" 36270Sstevel@tonic-gate " filer out segments smaller than minsize\n" 36280Sstevel@tonic-gate " -M maxsize\n" 36290Sstevel@tonic-gate " filer out segments larger than maxsize\n" 36300Sstevel@tonic-gate " -t thread\n" 36310Sstevel@tonic-gate " filter out segments not involving thread\n" 36320Sstevel@tonic-gate " -T type\n" 36330Sstevel@tonic-gate " filter out segments not of type 'type'\n" 36340Sstevel@tonic-gate " type is one of: ALLOC/FREE/SPAN/ROTOR/WALKER\n"); 
36350Sstevel@tonic-gate } 36360Sstevel@tonic-gate 36370Sstevel@tonic-gate /*ARGSUSED*/ 36380Sstevel@tonic-gate int 36390Sstevel@tonic-gate vmem_seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 36400Sstevel@tonic-gate { 36410Sstevel@tonic-gate vmem_seg_t vs; 36420Sstevel@tonic-gate pc_t *stk = vs.vs_stack; 36430Sstevel@tonic-gate uintptr_t sz; 36440Sstevel@tonic-gate uint8_t t; 36450Sstevel@tonic-gate const char *type = NULL; 36460Sstevel@tonic-gate GElf_Sym sym; 36470Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 36480Sstevel@tonic-gate int no_debug; 36490Sstevel@tonic-gate int i; 36500Sstevel@tonic-gate int depth; 36510Sstevel@tonic-gate uintptr_t laddr, haddr; 36520Sstevel@tonic-gate 36530Sstevel@tonic-gate uintptr_t caller = NULL, thread = NULL; 36540Sstevel@tonic-gate uintptr_t minsize = 0, maxsize = 0; 36550Sstevel@tonic-gate 36560Sstevel@tonic-gate hrtime_t earliest = 0, latest = 0; 36570Sstevel@tonic-gate 36580Sstevel@tonic-gate uint_t size = 0; 36590Sstevel@tonic-gate uint_t verbose = 0; 36600Sstevel@tonic-gate 36610Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 36620Sstevel@tonic-gate return (DCMD_USAGE); 36630Sstevel@tonic-gate 36640Sstevel@tonic-gate if (mdb_getopts(argc, argv, 36650Sstevel@tonic-gate 'c', MDB_OPT_UINTPTR, &caller, 36660Sstevel@tonic-gate 'e', MDB_OPT_UINT64, &earliest, 36670Sstevel@tonic-gate 'l', MDB_OPT_UINT64, &latest, 36680Sstevel@tonic-gate 's', MDB_OPT_SETBITS, TRUE, &size, 36690Sstevel@tonic-gate 'm', MDB_OPT_UINTPTR, &minsize, 36700Sstevel@tonic-gate 'M', MDB_OPT_UINTPTR, &maxsize, 36710Sstevel@tonic-gate 't', MDB_OPT_UINTPTR, &thread, 36720Sstevel@tonic-gate 'T', MDB_OPT_STR, &type, 36730Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &verbose, 36740Sstevel@tonic-gate NULL) != argc) 36750Sstevel@tonic-gate return (DCMD_USAGE); 36760Sstevel@tonic-gate 36770Sstevel@tonic-gate if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) { 36780Sstevel@tonic-gate if (verbose) { 36790Sstevel@tonic-gate mdb_printf("%16s %4s 
%16s %16s %16s\n" 36800Sstevel@tonic-gate "%<u>%16s %4s %16s %16s %16s%</u>\n", 36810Sstevel@tonic-gate "ADDR", "TYPE", "START", "END", "SIZE", 36820Sstevel@tonic-gate "", "", "THREAD", "TIMESTAMP", ""); 36830Sstevel@tonic-gate } else { 36840Sstevel@tonic-gate mdb_printf("%?s %4s %?s %?s %s\n", "ADDR", "TYPE", 36850Sstevel@tonic-gate "START", size? "SIZE" : "END", "WHO"); 36860Sstevel@tonic-gate } 36870Sstevel@tonic-gate } 36880Sstevel@tonic-gate 36890Sstevel@tonic-gate if (mdb_vread(&vs, sizeof (vs), addr) == -1) { 36900Sstevel@tonic-gate mdb_warn("couldn't read vmem_seg at %p", addr); 36910Sstevel@tonic-gate return (DCMD_ERR); 36920Sstevel@tonic-gate } 36930Sstevel@tonic-gate 36940Sstevel@tonic-gate if (type != NULL) { 36950Sstevel@tonic-gate if (strcmp(type, "ALLC") == 0 || strcmp(type, "ALLOC") == 0) 36960Sstevel@tonic-gate t = VMEM_ALLOC; 36970Sstevel@tonic-gate else if (strcmp(type, "FREE") == 0) 36980Sstevel@tonic-gate t = VMEM_FREE; 36990Sstevel@tonic-gate else if (strcmp(type, "SPAN") == 0) 37000Sstevel@tonic-gate t = VMEM_SPAN; 37010Sstevel@tonic-gate else if (strcmp(type, "ROTR") == 0 || 37020Sstevel@tonic-gate strcmp(type, "ROTOR") == 0) 37030Sstevel@tonic-gate t = VMEM_ROTOR; 37040Sstevel@tonic-gate else if (strcmp(type, "WLKR") == 0 || 37050Sstevel@tonic-gate strcmp(type, "WALKER") == 0) 37060Sstevel@tonic-gate t = VMEM_WALKER; 37070Sstevel@tonic-gate else { 37080Sstevel@tonic-gate mdb_warn("\"%s\" is not a recognized vmem_seg type\n", 37090Sstevel@tonic-gate type); 37100Sstevel@tonic-gate return (DCMD_ERR); 37110Sstevel@tonic-gate } 37120Sstevel@tonic-gate 37130Sstevel@tonic-gate if (vs.vs_type != t) 37140Sstevel@tonic-gate return (DCMD_OK); 37150Sstevel@tonic-gate } 37160Sstevel@tonic-gate 37170Sstevel@tonic-gate sz = vs.vs_end - vs.vs_start; 37180Sstevel@tonic-gate 37190Sstevel@tonic-gate if (minsize != 0 && sz < minsize) 37200Sstevel@tonic-gate return (DCMD_OK); 37210Sstevel@tonic-gate 37220Sstevel@tonic-gate if (maxsize != 0 && sz > maxsize) 
37230Sstevel@tonic-gate return (DCMD_OK); 37240Sstevel@tonic-gate 37250Sstevel@tonic-gate t = vs.vs_type; 37260Sstevel@tonic-gate depth = vs.vs_depth; 37270Sstevel@tonic-gate 37280Sstevel@tonic-gate /* 37290Sstevel@tonic-gate * debug info, when present, is only accurate for VMEM_ALLOC segments 37300Sstevel@tonic-gate */ 37310Sstevel@tonic-gate no_debug = (t != VMEM_ALLOC) || 37320Sstevel@tonic-gate (depth == 0 || depth > VMEM_STACK_DEPTH); 37330Sstevel@tonic-gate 37340Sstevel@tonic-gate if (no_debug) { 37350Sstevel@tonic-gate if (caller != NULL || thread != NULL || earliest != 0 || 37360Sstevel@tonic-gate latest != 0) 37370Sstevel@tonic-gate return (DCMD_OK); /* not enough info */ 37380Sstevel@tonic-gate } else { 37390Sstevel@tonic-gate if (caller != NULL) { 37400Sstevel@tonic-gate laddr = caller; 37410Sstevel@tonic-gate haddr = caller + sizeof (caller); 37420Sstevel@tonic-gate 37430Sstevel@tonic-gate if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, 37440Sstevel@tonic-gate sizeof (c), &sym) != -1 && 37450Sstevel@tonic-gate caller == (uintptr_t)sym.st_value) { 37460Sstevel@tonic-gate /* 37470Sstevel@tonic-gate * We were provided an exact symbol value; any 37480Sstevel@tonic-gate * address in the function is valid. 
37490Sstevel@tonic-gate */ 37500Sstevel@tonic-gate laddr = (uintptr_t)sym.st_value; 37510Sstevel@tonic-gate haddr = (uintptr_t)sym.st_value + sym.st_size; 37520Sstevel@tonic-gate } 37530Sstevel@tonic-gate 37540Sstevel@tonic-gate for (i = 0; i < depth; i++) 37550Sstevel@tonic-gate if (vs.vs_stack[i] >= laddr && 37560Sstevel@tonic-gate vs.vs_stack[i] < haddr) 37570Sstevel@tonic-gate break; 37580Sstevel@tonic-gate 37590Sstevel@tonic-gate if (i == depth) 37600Sstevel@tonic-gate return (DCMD_OK); 37610Sstevel@tonic-gate } 37620Sstevel@tonic-gate 37630Sstevel@tonic-gate if (thread != NULL && (uintptr_t)vs.vs_thread != thread) 37640Sstevel@tonic-gate return (DCMD_OK); 37650Sstevel@tonic-gate 37660Sstevel@tonic-gate if (earliest != 0 && vs.vs_timestamp < earliest) 37670Sstevel@tonic-gate return (DCMD_OK); 37680Sstevel@tonic-gate 37690Sstevel@tonic-gate if (latest != 0 && vs.vs_timestamp > latest) 37700Sstevel@tonic-gate return (DCMD_OK); 37710Sstevel@tonic-gate } 37720Sstevel@tonic-gate 37730Sstevel@tonic-gate type = (t == VMEM_ALLOC ? "ALLC" : 37740Sstevel@tonic-gate t == VMEM_FREE ? "FREE" : 37750Sstevel@tonic-gate t == VMEM_SPAN ? "SPAN" : 37760Sstevel@tonic-gate t == VMEM_ROTOR ? "ROTR" : 37770Sstevel@tonic-gate t == VMEM_WALKER ? 
"WLKR" : 37780Sstevel@tonic-gate "????"); 37790Sstevel@tonic-gate 37800Sstevel@tonic-gate if (flags & DCMD_PIPE_OUT) { 37810Sstevel@tonic-gate mdb_printf("%#lr\n", addr); 37820Sstevel@tonic-gate return (DCMD_OK); 37830Sstevel@tonic-gate } 37840Sstevel@tonic-gate 37850Sstevel@tonic-gate if (verbose) { 37860Sstevel@tonic-gate mdb_printf("%<b>%16p%</b> %4s %16p %16p %16d\n", 37870Sstevel@tonic-gate addr, type, vs.vs_start, vs.vs_end, sz); 37880Sstevel@tonic-gate 37890Sstevel@tonic-gate if (no_debug) 37900Sstevel@tonic-gate return (DCMD_OK); 37910Sstevel@tonic-gate 37920Sstevel@tonic-gate mdb_printf("%16s %4s %16p %16llx\n", 37930Sstevel@tonic-gate "", "", vs.vs_thread, vs.vs_timestamp); 37940Sstevel@tonic-gate 37950Sstevel@tonic-gate mdb_inc_indent(17); 37960Sstevel@tonic-gate for (i = 0; i < depth; i++) { 37970Sstevel@tonic-gate mdb_printf("%a\n", stk[i]); 37980Sstevel@tonic-gate } 37990Sstevel@tonic-gate mdb_dec_indent(17); 38000Sstevel@tonic-gate mdb_printf("\n"); 38010Sstevel@tonic-gate } else { 38020Sstevel@tonic-gate mdb_printf("%0?p %4s %0?p %0?p", addr, type, 38030Sstevel@tonic-gate vs.vs_start, size? 
sz : vs.vs_end); 38040Sstevel@tonic-gate 38050Sstevel@tonic-gate if (no_debug) { 38060Sstevel@tonic-gate mdb_printf("\n"); 38070Sstevel@tonic-gate return (DCMD_OK); 38080Sstevel@tonic-gate } 38090Sstevel@tonic-gate 38100Sstevel@tonic-gate for (i = 0; i < depth; i++) { 38110Sstevel@tonic-gate if (mdb_lookup_by_addr(stk[i], MDB_SYM_FUZZY, 38120Sstevel@tonic-gate c, sizeof (c), &sym) == -1) 38130Sstevel@tonic-gate continue; 38140Sstevel@tonic-gate if (strncmp(c, "vmem_", 5) == 0) 38150Sstevel@tonic-gate continue; 38160Sstevel@tonic-gate break; 38170Sstevel@tonic-gate } 38180Sstevel@tonic-gate mdb_printf(" %a\n", stk[i]); 38190Sstevel@tonic-gate } 38200Sstevel@tonic-gate return (DCMD_OK); 38210Sstevel@tonic-gate } 38220Sstevel@tonic-gate 38230Sstevel@tonic-gate typedef struct kmalog_data { 38240Sstevel@tonic-gate uintptr_t kma_addr; 38250Sstevel@tonic-gate hrtime_t kma_newest; 38260Sstevel@tonic-gate } kmalog_data_t; 38270Sstevel@tonic-gate 38280Sstevel@tonic-gate /*ARGSUSED*/ 38290Sstevel@tonic-gate static int 38300Sstevel@tonic-gate showbc(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmalog_data_t *kma) 38310Sstevel@tonic-gate { 38320Sstevel@tonic-gate char name[KMEM_CACHE_NAMELEN + 1]; 38330Sstevel@tonic-gate hrtime_t delta; 38340Sstevel@tonic-gate int i, depth; 38350Sstevel@tonic-gate size_t bufsize; 38360Sstevel@tonic-gate 38370Sstevel@tonic-gate if (bcp->bc_timestamp == 0) 38380Sstevel@tonic-gate return (WALK_DONE); 38390Sstevel@tonic-gate 38400Sstevel@tonic-gate if (kma->kma_newest == 0) 38410Sstevel@tonic-gate kma->kma_newest = bcp->bc_timestamp; 38420Sstevel@tonic-gate 38430Sstevel@tonic-gate if (kma->kma_addr) { 38440Sstevel@tonic-gate if (mdb_vread(&bufsize, sizeof (bufsize), 38450Sstevel@tonic-gate (uintptr_t)&bcp->bc_cache->cache_bufsize) == -1) { 38460Sstevel@tonic-gate mdb_warn( 38470Sstevel@tonic-gate "failed to read cache_bufsize for cache at %p", 38480Sstevel@tonic-gate bcp->bc_cache); 38490Sstevel@tonic-gate return (WALK_ERR); 
38500Sstevel@tonic-gate } 38510Sstevel@tonic-gate 38520Sstevel@tonic-gate if (kma->kma_addr < (uintptr_t)bcp->bc_addr || 38530Sstevel@tonic-gate kma->kma_addr >= (uintptr_t)bcp->bc_addr + bufsize) 38540Sstevel@tonic-gate return (WALK_NEXT); 38550Sstevel@tonic-gate } 38560Sstevel@tonic-gate 38570Sstevel@tonic-gate delta = kma->kma_newest - bcp->bc_timestamp; 38580Sstevel@tonic-gate depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH); 38590Sstevel@tonic-gate 38600Sstevel@tonic-gate if (mdb_readstr(name, sizeof (name), (uintptr_t) 38610Sstevel@tonic-gate &bcp->bc_cache->cache_name) <= 0) 38620Sstevel@tonic-gate (void) mdb_snprintf(name, sizeof (name), "%a", bcp->bc_cache); 38630Sstevel@tonic-gate 38640Sstevel@tonic-gate mdb_printf("\nT-%lld.%09lld addr=%p %s\n", 38650Sstevel@tonic-gate delta / NANOSEC, delta % NANOSEC, bcp->bc_addr, name); 38660Sstevel@tonic-gate 38670Sstevel@tonic-gate for (i = 0; i < depth; i++) 38680Sstevel@tonic-gate mdb_printf("\t %a\n", bcp->bc_stack[i]); 38690Sstevel@tonic-gate 38700Sstevel@tonic-gate return (WALK_NEXT); 38710Sstevel@tonic-gate } 38720Sstevel@tonic-gate 38730Sstevel@tonic-gate int 38740Sstevel@tonic-gate kmalog(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 38750Sstevel@tonic-gate { 38760Sstevel@tonic-gate const char *logname = "kmem_transaction_log"; 38770Sstevel@tonic-gate kmalog_data_t kma; 38780Sstevel@tonic-gate 38790Sstevel@tonic-gate if (argc > 1) 38800Sstevel@tonic-gate return (DCMD_USAGE); 38810Sstevel@tonic-gate 38820Sstevel@tonic-gate kma.kma_newest = 0; 38830Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) 38840Sstevel@tonic-gate kma.kma_addr = addr; 38850Sstevel@tonic-gate else 38860Sstevel@tonic-gate kma.kma_addr = NULL; 38870Sstevel@tonic-gate 38880Sstevel@tonic-gate if (argc > 0) { 38890Sstevel@tonic-gate if (argv->a_type != MDB_TYPE_STRING) 38900Sstevel@tonic-gate return (DCMD_USAGE); 38910Sstevel@tonic-gate if (strcmp(argv->a_un.a_str, "fail") == 0) 38920Sstevel@tonic-gate logname = "kmem_failure_log"; 
38930Sstevel@tonic-gate else if (strcmp(argv->a_un.a_str, "slab") == 0) 38940Sstevel@tonic-gate logname = "kmem_slab_log"; 38950Sstevel@tonic-gate else 38960Sstevel@tonic-gate return (DCMD_USAGE); 38970Sstevel@tonic-gate } 38980Sstevel@tonic-gate 38990Sstevel@tonic-gate if (mdb_readvar(&addr, logname) == -1) { 39000Sstevel@tonic-gate mdb_warn("failed to read %s log header pointer"); 39010Sstevel@tonic-gate return (DCMD_ERR); 39020Sstevel@tonic-gate } 39030Sstevel@tonic-gate 39040Sstevel@tonic-gate if (mdb_pwalk("kmem_log", (mdb_walk_cb_t)showbc, &kma, addr) == -1) { 39050Sstevel@tonic-gate mdb_warn("failed to walk kmem log"); 39060Sstevel@tonic-gate return (DCMD_ERR); 39070Sstevel@tonic-gate } 39080Sstevel@tonic-gate 39090Sstevel@tonic-gate return (DCMD_OK); 39100Sstevel@tonic-gate } 39110Sstevel@tonic-gate 39120Sstevel@tonic-gate /* 39130Sstevel@tonic-gate * As the final lure for die-hard crash(1M) users, we provide ::kmausers here. 39140Sstevel@tonic-gate * The first piece is a structure which we use to accumulate kmem_cache_t 39150Sstevel@tonic-gate * addresses of interest. The kmc_add is used as a callback for the kmem_cache 39160Sstevel@tonic-gate * walker; we either add all caches, or ones named explicitly as arguments. 
39170Sstevel@tonic-gate */ 39180Sstevel@tonic-gate 39190Sstevel@tonic-gate typedef struct kmclist { 39200Sstevel@tonic-gate const char *kmc_name; /* Name to match (or NULL) */ 39210Sstevel@tonic-gate uintptr_t *kmc_caches; /* List of kmem_cache_t addrs */ 39220Sstevel@tonic-gate int kmc_nelems; /* Num entries in kmc_caches */ 39230Sstevel@tonic-gate int kmc_size; /* Size of kmc_caches array */ 39240Sstevel@tonic-gate } kmclist_t; 39250Sstevel@tonic-gate 39260Sstevel@tonic-gate static int 39270Sstevel@tonic-gate kmc_add(uintptr_t addr, const kmem_cache_t *cp, kmclist_t *kmc) 39280Sstevel@tonic-gate { 39290Sstevel@tonic-gate void *p; 39300Sstevel@tonic-gate int s; 39310Sstevel@tonic-gate 39320Sstevel@tonic-gate if (kmc->kmc_name == NULL || 39330Sstevel@tonic-gate strcmp(cp->cache_name, kmc->kmc_name) == 0) { 39340Sstevel@tonic-gate /* 39350Sstevel@tonic-gate * If we have a match, grow our array (if necessary), and then 39360Sstevel@tonic-gate * add the virtual address of the matching cache to our list. 39370Sstevel@tonic-gate */ 39380Sstevel@tonic-gate if (kmc->kmc_nelems >= kmc->kmc_size) { 39390Sstevel@tonic-gate s = kmc->kmc_size ? kmc->kmc_size * 2 : 256; 39400Sstevel@tonic-gate p = mdb_alloc(sizeof (uintptr_t) * s, UM_SLEEP | UM_GC); 39410Sstevel@tonic-gate 39420Sstevel@tonic-gate bcopy(kmc->kmc_caches, p, 39430Sstevel@tonic-gate sizeof (uintptr_t) * kmc->kmc_size); 39440Sstevel@tonic-gate 39450Sstevel@tonic-gate kmc->kmc_caches = p; 39460Sstevel@tonic-gate kmc->kmc_size = s; 39470Sstevel@tonic-gate } 39480Sstevel@tonic-gate 39490Sstevel@tonic-gate kmc->kmc_caches[kmc->kmc_nelems++] = addr; 39500Sstevel@tonic-gate return (kmc->kmc_name ? WALK_DONE : WALK_NEXT); 39510Sstevel@tonic-gate } 39520Sstevel@tonic-gate 39530Sstevel@tonic-gate return (WALK_NEXT); 39540Sstevel@tonic-gate } 39550Sstevel@tonic-gate 39560Sstevel@tonic-gate /* 39570Sstevel@tonic-gate * The second piece of ::kmausers is a hash table of allocations. 
Each 39580Sstevel@tonic-gate * allocation owner is identified by its stack trace and data_size. We then 39590Sstevel@tonic-gate * track the total bytes of all such allocations, and the number of allocations 39600Sstevel@tonic-gate * to report at the end. Once we have a list of caches, we walk through the 39610Sstevel@tonic-gate * allocated bufctls of each, and update our hash table accordingly. 39620Sstevel@tonic-gate */ 39630Sstevel@tonic-gate 39640Sstevel@tonic-gate typedef struct kmowner { 39650Sstevel@tonic-gate struct kmowner *kmo_head; /* First hash elt in bucket */ 39660Sstevel@tonic-gate struct kmowner *kmo_next; /* Next hash elt in chain */ 39670Sstevel@tonic-gate size_t kmo_signature; /* Hash table signature */ 39680Sstevel@tonic-gate uint_t kmo_num; /* Number of allocations */ 39690Sstevel@tonic-gate size_t kmo_data_size; /* Size of each allocation */ 39700Sstevel@tonic-gate size_t kmo_total_size; /* Total bytes of allocation */ 39710Sstevel@tonic-gate int kmo_depth; /* Depth of stack trace */ 39720Sstevel@tonic-gate uintptr_t kmo_stack[KMEM_STACK_DEPTH]; /* Stack trace */ 39730Sstevel@tonic-gate } kmowner_t; 39740Sstevel@tonic-gate 39750Sstevel@tonic-gate typedef struct kmusers { 39760Sstevel@tonic-gate uintptr_t kmu_addr; /* address of interest */ 39770Sstevel@tonic-gate const kmem_cache_t *kmu_cache; /* Current kmem cache */ 39780Sstevel@tonic-gate kmowner_t *kmu_hash; /* Hash table of owners */ 39790Sstevel@tonic-gate int kmu_nelems; /* Number of entries in use */ 39800Sstevel@tonic-gate int kmu_size; /* Total number of entries */ 39810Sstevel@tonic-gate } kmusers_t; 39820Sstevel@tonic-gate 39830Sstevel@tonic-gate static void 39840Sstevel@tonic-gate kmu_add(kmusers_t *kmu, const kmem_bufctl_audit_t *bcp, 39850Sstevel@tonic-gate size_t size, size_t data_size) 39860Sstevel@tonic-gate { 39870Sstevel@tonic-gate int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH); 39880Sstevel@tonic-gate size_t bucket, signature = data_size; 39890Sstevel@tonic-gate 
kmowner_t *kmo, *kmoend; 39900Sstevel@tonic-gate 39910Sstevel@tonic-gate /* 39920Sstevel@tonic-gate * If the hash table is full, double its size and rehash everything. 39930Sstevel@tonic-gate */ 39940Sstevel@tonic-gate if (kmu->kmu_nelems >= kmu->kmu_size) { 39950Sstevel@tonic-gate int s = kmu->kmu_size ? kmu->kmu_size * 2 : 1024; 39960Sstevel@tonic-gate 39970Sstevel@tonic-gate kmo = mdb_alloc(sizeof (kmowner_t) * s, UM_SLEEP | UM_GC); 39980Sstevel@tonic-gate bcopy(kmu->kmu_hash, kmo, sizeof (kmowner_t) * kmu->kmu_size); 39990Sstevel@tonic-gate kmu->kmu_hash = kmo; 40000Sstevel@tonic-gate kmu->kmu_size = s; 40010Sstevel@tonic-gate 40020Sstevel@tonic-gate kmoend = kmu->kmu_hash + kmu->kmu_size; 40030Sstevel@tonic-gate for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++) 40040Sstevel@tonic-gate kmo->kmo_head = NULL; 40050Sstevel@tonic-gate 40060Sstevel@tonic-gate kmoend = kmu->kmu_hash + kmu->kmu_nelems; 40070Sstevel@tonic-gate for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++) { 40080Sstevel@tonic-gate bucket = kmo->kmo_signature & (kmu->kmu_size - 1); 40090Sstevel@tonic-gate kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head; 40100Sstevel@tonic-gate kmu->kmu_hash[bucket].kmo_head = kmo; 40110Sstevel@tonic-gate } 40120Sstevel@tonic-gate } 40130Sstevel@tonic-gate 40140Sstevel@tonic-gate /* 40150Sstevel@tonic-gate * Finish computing the hash signature from the stack trace, and then 40160Sstevel@tonic-gate * see if the owner is in the hash table. If so, update our stats. 
40170Sstevel@tonic-gate */ 40180Sstevel@tonic-gate for (i = 0; i < depth; i++) 40190Sstevel@tonic-gate signature += bcp->bc_stack[i]; 40200Sstevel@tonic-gate 40210Sstevel@tonic-gate bucket = signature & (kmu->kmu_size - 1); 40220Sstevel@tonic-gate 40230Sstevel@tonic-gate for (kmo = kmu->kmu_hash[bucket].kmo_head; kmo; kmo = kmo->kmo_next) { 40240Sstevel@tonic-gate if (kmo->kmo_signature == signature) { 40250Sstevel@tonic-gate size_t difference = 0; 40260Sstevel@tonic-gate 40270Sstevel@tonic-gate difference |= kmo->kmo_data_size - data_size; 40280Sstevel@tonic-gate difference |= kmo->kmo_depth - depth; 40290Sstevel@tonic-gate 40300Sstevel@tonic-gate for (i = 0; i < depth; i++) { 40310Sstevel@tonic-gate difference |= kmo->kmo_stack[i] - 40320Sstevel@tonic-gate bcp->bc_stack[i]; 40330Sstevel@tonic-gate } 40340Sstevel@tonic-gate 40350Sstevel@tonic-gate if (difference == 0) { 40360Sstevel@tonic-gate kmo->kmo_total_size += size; 40370Sstevel@tonic-gate kmo->kmo_num++; 40380Sstevel@tonic-gate return; 40390Sstevel@tonic-gate } 40400Sstevel@tonic-gate } 40410Sstevel@tonic-gate } 40420Sstevel@tonic-gate 40430Sstevel@tonic-gate /* 40440Sstevel@tonic-gate * If the owner is not yet hashed, grab the next element and fill it 40450Sstevel@tonic-gate * in based on the allocation information. 
40460Sstevel@tonic-gate */ 40470Sstevel@tonic-gate kmo = &kmu->kmu_hash[kmu->kmu_nelems++]; 40480Sstevel@tonic-gate kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head; 40490Sstevel@tonic-gate kmu->kmu_hash[bucket].kmo_head = kmo; 40500Sstevel@tonic-gate 40510Sstevel@tonic-gate kmo->kmo_signature = signature; 40520Sstevel@tonic-gate kmo->kmo_num = 1; 40530Sstevel@tonic-gate kmo->kmo_data_size = data_size; 40540Sstevel@tonic-gate kmo->kmo_total_size = size; 40550Sstevel@tonic-gate kmo->kmo_depth = depth; 40560Sstevel@tonic-gate 40570Sstevel@tonic-gate for (i = 0; i < depth; i++) 40580Sstevel@tonic-gate kmo->kmo_stack[i] = bcp->bc_stack[i]; 40590Sstevel@tonic-gate } 40600Sstevel@tonic-gate 40610Sstevel@tonic-gate /* 40620Sstevel@tonic-gate * When ::kmausers is invoked without the -f flag, we simply update our hash 40630Sstevel@tonic-gate * table with the information from each allocated bufctl. 40640Sstevel@tonic-gate */ 40650Sstevel@tonic-gate /*ARGSUSED*/ 40660Sstevel@tonic-gate static int 40670Sstevel@tonic-gate kmause1(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu) 40680Sstevel@tonic-gate { 40690Sstevel@tonic-gate const kmem_cache_t *cp = kmu->kmu_cache; 40700Sstevel@tonic-gate 40710Sstevel@tonic-gate kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize); 40720Sstevel@tonic-gate return (WALK_NEXT); 40730Sstevel@tonic-gate } 40740Sstevel@tonic-gate 40750Sstevel@tonic-gate /* 40760Sstevel@tonic-gate * When ::kmausers is invoked with the -f flag, we print out the information 40770Sstevel@tonic-gate * for each bufctl as well as updating the hash table. 
40780Sstevel@tonic-gate */ 40790Sstevel@tonic-gate static int 40800Sstevel@tonic-gate kmause2(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu) 40810Sstevel@tonic-gate { 40820Sstevel@tonic-gate int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH); 40830Sstevel@tonic-gate const kmem_cache_t *cp = kmu->kmu_cache; 40840Sstevel@tonic-gate kmem_bufctl_t bufctl; 40850Sstevel@tonic-gate 40860Sstevel@tonic-gate if (kmu->kmu_addr) { 40870Sstevel@tonic-gate if (mdb_vread(&bufctl, sizeof (bufctl), addr) == -1) 40880Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr); 40890Sstevel@tonic-gate else if (kmu->kmu_addr < (uintptr_t)bufctl.bc_addr || 40900Sstevel@tonic-gate kmu->kmu_addr >= (uintptr_t)bufctl.bc_addr + 40910Sstevel@tonic-gate cp->cache_bufsize) 40920Sstevel@tonic-gate return (WALK_NEXT); 40930Sstevel@tonic-gate } 40940Sstevel@tonic-gate 40950Sstevel@tonic-gate mdb_printf("size %d, addr %p, thread %p, cache %s\n", 40960Sstevel@tonic-gate cp->cache_bufsize, addr, bcp->bc_thread, cp->cache_name); 40970Sstevel@tonic-gate 40980Sstevel@tonic-gate for (i = 0; i < depth; i++) 40990Sstevel@tonic-gate mdb_printf("\t %a\n", bcp->bc_stack[i]); 41000Sstevel@tonic-gate 41010Sstevel@tonic-gate kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize); 41020Sstevel@tonic-gate return (WALK_NEXT); 41030Sstevel@tonic-gate } 41040Sstevel@tonic-gate 41050Sstevel@tonic-gate /* 41060Sstevel@tonic-gate * We sort our results by allocation size before printing them. 
41070Sstevel@tonic-gate */ 41080Sstevel@tonic-gate static int 41090Sstevel@tonic-gate kmownercmp(const void *lp, const void *rp) 41100Sstevel@tonic-gate { 41110Sstevel@tonic-gate const kmowner_t *lhs = lp; 41120Sstevel@tonic-gate const kmowner_t *rhs = rp; 41130Sstevel@tonic-gate 41140Sstevel@tonic-gate return (rhs->kmo_total_size - lhs->kmo_total_size); 41150Sstevel@tonic-gate } 41160Sstevel@tonic-gate 41170Sstevel@tonic-gate /* 41180Sstevel@tonic-gate * The main engine of ::kmausers is relatively straightforward: First we 41190Sstevel@tonic-gate * accumulate our list of kmem_cache_t addresses into the kmclist_t. Next we 41200Sstevel@tonic-gate * iterate over the allocated bufctls of each cache in the list. Finally, 41210Sstevel@tonic-gate * we sort and print our results. 41220Sstevel@tonic-gate */ 41230Sstevel@tonic-gate /*ARGSUSED*/ 41240Sstevel@tonic-gate int 41250Sstevel@tonic-gate kmausers(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 41260Sstevel@tonic-gate { 41270Sstevel@tonic-gate int mem_threshold = 8192; /* Minimum # bytes for printing */ 41280Sstevel@tonic-gate int cnt_threshold = 100; /* Minimum # blocks for printing */ 41290Sstevel@tonic-gate int audited_caches = 0; /* Number of KMF_AUDIT caches found */ 41300Sstevel@tonic-gate int do_all_caches = 1; /* Do all caches (no arguments) */ 41310Sstevel@tonic-gate int opt_e = FALSE; /* Include "small" users */ 41320Sstevel@tonic-gate int opt_f = FALSE; /* Print stack traces */ 41330Sstevel@tonic-gate 41340Sstevel@tonic-gate mdb_walk_cb_t callback = (mdb_walk_cb_t)kmause1; 41350Sstevel@tonic-gate kmowner_t *kmo, *kmoend; 41360Sstevel@tonic-gate int i, oelems; 41370Sstevel@tonic-gate 41380Sstevel@tonic-gate kmclist_t kmc; 41390Sstevel@tonic-gate kmusers_t kmu; 41400Sstevel@tonic-gate 41410Sstevel@tonic-gate bzero(&kmc, sizeof (kmc)); 41420Sstevel@tonic-gate bzero(&kmu, sizeof (kmu)); 41430Sstevel@tonic-gate 41440Sstevel@tonic-gate while ((i = mdb_getopts(argc, argv, 41450Sstevel@tonic-gate 
'e', MDB_OPT_SETBITS, TRUE, &opt_e, 41460Sstevel@tonic-gate 'f', MDB_OPT_SETBITS, TRUE, &opt_f, NULL)) != argc) { 41470Sstevel@tonic-gate 41480Sstevel@tonic-gate argv += i; /* skip past options we just processed */ 41490Sstevel@tonic-gate argc -= i; /* adjust argc */ 41500Sstevel@tonic-gate 41510Sstevel@tonic-gate if (argv->a_type != MDB_TYPE_STRING || *argv->a_un.a_str == '-') 41520Sstevel@tonic-gate return (DCMD_USAGE); 41530Sstevel@tonic-gate 41540Sstevel@tonic-gate oelems = kmc.kmc_nelems; 41550Sstevel@tonic-gate kmc.kmc_name = argv->a_un.a_str; 41560Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc); 41570Sstevel@tonic-gate 41580Sstevel@tonic-gate if (kmc.kmc_nelems == oelems) { 41590Sstevel@tonic-gate mdb_warn("unknown kmem cache: %s\n", kmc.kmc_name); 41600Sstevel@tonic-gate return (DCMD_ERR); 41610Sstevel@tonic-gate } 41620Sstevel@tonic-gate 41630Sstevel@tonic-gate do_all_caches = 0; 41640Sstevel@tonic-gate argv++; 41650Sstevel@tonic-gate argc--; 41660Sstevel@tonic-gate } 41670Sstevel@tonic-gate 41680Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 41690Sstevel@tonic-gate opt_f = TRUE; 41700Sstevel@tonic-gate kmu.kmu_addr = addr; 41710Sstevel@tonic-gate } else { 41720Sstevel@tonic-gate kmu.kmu_addr = NULL; 41730Sstevel@tonic-gate } 41740Sstevel@tonic-gate 41750Sstevel@tonic-gate if (opt_e) 41760Sstevel@tonic-gate mem_threshold = cnt_threshold = 0; 41770Sstevel@tonic-gate 41780Sstevel@tonic-gate if (opt_f) 41790Sstevel@tonic-gate callback = (mdb_walk_cb_t)kmause2; 41800Sstevel@tonic-gate 41810Sstevel@tonic-gate if (do_all_caches) { 41820Sstevel@tonic-gate kmc.kmc_name = NULL; /* match all cache names */ 41830Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc); 41840Sstevel@tonic-gate } 41850Sstevel@tonic-gate 41860Sstevel@tonic-gate for (i = 0; i < kmc.kmc_nelems; i++) { 41870Sstevel@tonic-gate uintptr_t cp = kmc.kmc_caches[i]; 41880Sstevel@tonic-gate kmem_cache_t c; 41890Sstevel@tonic-gate 
41900Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), cp) == -1) { 41910Sstevel@tonic-gate mdb_warn("failed to read cache at %p", cp); 41920Sstevel@tonic-gate continue; 41930Sstevel@tonic-gate } 41940Sstevel@tonic-gate 41950Sstevel@tonic-gate if (!(c.cache_flags & KMF_AUDIT)) { 41960Sstevel@tonic-gate if (!do_all_caches) { 41970Sstevel@tonic-gate mdb_warn("KMF_AUDIT is not enabled for %s\n", 41980Sstevel@tonic-gate c.cache_name); 41990Sstevel@tonic-gate } 42000Sstevel@tonic-gate continue; 42010Sstevel@tonic-gate } 42020Sstevel@tonic-gate 42030Sstevel@tonic-gate kmu.kmu_cache = &c; 42040Sstevel@tonic-gate (void) mdb_pwalk("bufctl", callback, &kmu, cp); 42050Sstevel@tonic-gate audited_caches++; 42060Sstevel@tonic-gate } 42070Sstevel@tonic-gate 42080Sstevel@tonic-gate if (audited_caches == 0 && do_all_caches) { 42090Sstevel@tonic-gate mdb_warn("KMF_AUDIT is not enabled for any caches\n"); 42100Sstevel@tonic-gate return (DCMD_ERR); 42110Sstevel@tonic-gate } 42120Sstevel@tonic-gate 42130Sstevel@tonic-gate qsort(kmu.kmu_hash, kmu.kmu_nelems, sizeof (kmowner_t), kmownercmp); 42140Sstevel@tonic-gate kmoend = kmu.kmu_hash + kmu.kmu_nelems; 42150Sstevel@tonic-gate 42160Sstevel@tonic-gate for (kmo = kmu.kmu_hash; kmo < kmoend; kmo++) { 42170Sstevel@tonic-gate if (kmo->kmo_total_size < mem_threshold && 42180Sstevel@tonic-gate kmo->kmo_num < cnt_threshold) 42190Sstevel@tonic-gate continue; 42200Sstevel@tonic-gate mdb_printf("%lu bytes for %u allocations with data size %lu:\n", 42210Sstevel@tonic-gate kmo->kmo_total_size, kmo->kmo_num, kmo->kmo_data_size); 42220Sstevel@tonic-gate for (i = 0; i < kmo->kmo_depth; i++) 42230Sstevel@tonic-gate mdb_printf("\t %a\n", kmo->kmo_stack[i]); 42240Sstevel@tonic-gate } 42250Sstevel@tonic-gate 42260Sstevel@tonic-gate return (DCMD_OK); 42270Sstevel@tonic-gate } 42280Sstevel@tonic-gate 42290Sstevel@tonic-gate void 42300Sstevel@tonic-gate kmausers_help(void) 42310Sstevel@tonic-gate { 42320Sstevel@tonic-gate mdb_printf( 42330Sstevel@tonic-gate 
"Displays the largest users of the kmem allocator, sorted by \n" 42340Sstevel@tonic-gate "trace. If one or more caches is specified, only those caches\n" 42350Sstevel@tonic-gate "will be searched. By default, all caches are searched. If an\n" 42360Sstevel@tonic-gate "address is specified, then only those allocations which include\n" 42370Sstevel@tonic-gate "the given address are displayed. Specifying an address implies\n" 42380Sstevel@tonic-gate "-f.\n" 42390Sstevel@tonic-gate "\n" 42400Sstevel@tonic-gate "\t-e\tInclude all users, not just the largest\n" 42410Sstevel@tonic-gate "\t-f\tDisplay individual allocations. By default, users are\n" 42420Sstevel@tonic-gate "\t\tgrouped by stack\n"); 42430Sstevel@tonic-gate } 42440Sstevel@tonic-gate 42450Sstevel@tonic-gate static int 42460Sstevel@tonic-gate kmem_ready_check(void) 42470Sstevel@tonic-gate { 42480Sstevel@tonic-gate int ready; 42490Sstevel@tonic-gate 42500Sstevel@tonic-gate if (mdb_readvar(&ready, "kmem_ready") < 0) 42510Sstevel@tonic-gate return (-1); /* errno is set for us */ 42520Sstevel@tonic-gate 42530Sstevel@tonic-gate return (ready); 42540Sstevel@tonic-gate } 42550Sstevel@tonic-gate 42568721SJonathan.Adams@Sun.COM void 42578721SJonathan.Adams@Sun.COM kmem_statechange(void) 42580Sstevel@tonic-gate { 42591528Sjwadams static int been_ready = 0; 42601528Sjwadams 42611528Sjwadams if (been_ready) 42621528Sjwadams return; 42631528Sjwadams 42640Sstevel@tonic-gate if (kmem_ready_check() <= 0) 42650Sstevel@tonic-gate return; 42660Sstevel@tonic-gate 42671528Sjwadams been_ready = 1; 42680Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_init_walkers, NULL); 42690Sstevel@tonic-gate } 42700Sstevel@tonic-gate 42710Sstevel@tonic-gate void 42720Sstevel@tonic-gate kmem_init(void) 42730Sstevel@tonic-gate { 42740Sstevel@tonic-gate mdb_walker_t w = { 42750Sstevel@tonic-gate "kmem_cache", "walk list of kmem caches", kmem_cache_walk_init, 42766712Stomee list_walk_step, list_walk_fini 42770Sstevel@tonic-gate 
};
42780Sstevel@tonic-gate 
42790Sstevel@tonic-gate /*
42800Sstevel@tonic-gate * If kmem is ready, we'll need to invoke the kmem_cache walker
42810Sstevel@tonic-gate * immediately. Walkers in the linkage structure won't be ready until
42820Sstevel@tonic-gate * _mdb_init returns, so we'll need to add this one manually. If kmem
42830Sstevel@tonic-gate * is ready, we'll use the walker to initialize the caches. If kmem
42840Sstevel@tonic-gate * isn't ready, we'll register a callback that will allow us to defer
42850Sstevel@tonic-gate * cache walking until it is.
42860Sstevel@tonic-gate */
42870Sstevel@tonic-gate if (mdb_add_walker(&w) != 0) {
42880Sstevel@tonic-gate mdb_warn("failed to add kmem_cache walker");
42890Sstevel@tonic-gate return;
42900Sstevel@tonic-gate }
42910Sstevel@tonic-gate 
42928721SJonathan.Adams@Sun.COM kmem_statechange();
42930Sstevel@tonic-gate }
42940Sstevel@tonic-gate 
/*
 * Argument bundle for the ::whatthread thread walk: wt_target is the pointer
 * value being searched for, wt_verbose selects detailed per-match output.
 */
42950Sstevel@tonic-gate typedef struct whatthread {
42960Sstevel@tonic-gate uintptr_t wt_target;
42970Sstevel@tonic-gate int wt_verbose;
42980Sstevel@tonic-gate } whatthread_t;
42990Sstevel@tonic-gate 
/*
 * Thread-walk callback for ::whatthread: examine thread addr's stack for
 * w->wt_target (the word-by-word scan is in the remainder of this function,
 * below this chunk).
 */
43000Sstevel@tonic-gate static int
43010Sstevel@tonic-gate whatthread_walk_thread(uintptr_t addr, const kthread_t *t, whatthread_t *w)
43020Sstevel@tonic-gate {
43030Sstevel@tonic-gate uintptr_t current, data;
43040Sstevel@tonic-gate 
/* No stack base recorded — nothing to scan for this thread. */
43050Sstevel@tonic-gate if (t->t_stkbase == NULL)
43060Sstevel@tonic-gate return (WALK_NEXT);
43070Sstevel@tonic-gate 
43080Sstevel@tonic-gate /*
43090Sstevel@tonic-gate * Warn about swapped out threads, but drive on anyway
43100Sstevel@tonic-gate */
43110Sstevel@tonic-gate if (!(t->t_schedflag & TS_LOAD)) {
43120Sstevel@tonic-gate mdb_warn("thread %p's stack swapped out\n", addr);
43130Sstevel@tonic-gate return (WALK_NEXT);
43140Sstevel@tonic-gate }
43150Sstevel@tonic-gate 
43160Sstevel@tonic-gate /*
43170Sstevel@tonic-gate * Search the thread's stack for the given pointer.
Note that it would
43180Sstevel@tonic-gate * be more efficient to follow ::kgrep's lead and read in page-sized
43190Sstevel@tonic-gate * chunks, but this routine is already fast and simple.
43200Sstevel@tonic-gate */
/* Scan one pointer-sized word at a time, from t_stkbase up to t_stk. */
43210Sstevel@tonic-gate for (current = (uintptr_t)t->t_stkbase; current < (uintptr_t)t->t_stk;
43220Sstevel@tonic-gate current += sizeof (uintptr_t)) {
43230Sstevel@tonic-gate if (mdb_vread(&data, sizeof (data), current) == -1) {
43240Sstevel@tonic-gate mdb_warn("couldn't read thread %p's stack at %p",
43250Sstevel@tonic-gate addr, current);
43260Sstevel@tonic-gate return (WALK_ERR);
43270Sstevel@tonic-gate }
43280Sstevel@tonic-gate 
43290Sstevel@tonic-gate if (data == w->wt_target) {
/* Verbose: report every matching stack slot, with its location. */
43300Sstevel@tonic-gate if (w->wt_verbose) {
43310Sstevel@tonic-gate mdb_printf("%p in thread %p's stack%s\n",
43320Sstevel@tonic-gate current, addr, stack_active(t, current));
43330Sstevel@tonic-gate } else {
/*
 * Non-verbose: print the thread address once, then stop
 * scanning the rest of this thread's stack to avoid
 * duplicate output.
 */
43340Sstevel@tonic-gate mdb_printf("%#lr\n", addr);
43350Sstevel@tonic-gate return (WALK_NEXT);
43360Sstevel@tonic-gate }
43370Sstevel@tonic-gate }
43380Sstevel@tonic-gate }
43390Sstevel@tonic-gate 
43400Sstevel@tonic-gate return (WALK_NEXT);
43410Sstevel@tonic-gate }
43420Sstevel@tonic-gate 
/*
 * ::whatthread dcmd: given an address (DCMD_ADDRSPEC required), report which
 * threads have that value somewhere in their stacks.  With -v, print each
 * matching stack slot plus stack_active()'s annotation (presumably whether
 * the slot lies in the active portion of the stack — confirm against the
 * helper's definition).
 */
43430Sstevel@tonic-gate int
43440Sstevel@tonic-gate whatthread(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
43450Sstevel@tonic-gate {
43460Sstevel@tonic-gate whatthread_t w;
43470Sstevel@tonic-gate 
43480Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC))
43490Sstevel@tonic-gate return (DCMD_USAGE);
43500Sstevel@tonic-gate 
43510Sstevel@tonic-gate w.wt_verbose = FALSE;
43520Sstevel@tonic-gate w.wt_target = addr;
43530Sstevel@tonic-gate 
43540Sstevel@tonic-gate if (mdb_getopts(argc, argv,
43550Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &w.wt_verbose, NULL) != argc)
43560Sstevel@tonic-gate return (DCMD_USAGE);
43570Sstevel@tonic-gate 
43580Sstevel@tonic-gate if (mdb_walk("thread", (mdb_walk_cb_t)whatthread_walk_thread, &w)
43590Sstevel@tonic-gate == -1) {
43600Sstevel@tonic-gate mdb_warn("couldn't walk threads");
43610Sstevel@tonic-gate return (DCMD_ERR);
43620Sstevel@tonic-gate }
43630Sstevel@tonic-gate 
43640Sstevel@tonic-gate return (DCMD_OK);
43650Sstevel@tonic-gate }
4366