/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
240Sstevel@tonic-gate */ 250Sstevel@tonic-gate 260Sstevel@tonic-gate #include <mdb/mdb_param.h> 270Sstevel@tonic-gate #include <mdb/mdb_modapi.h> 280Sstevel@tonic-gate #include <mdb/mdb_ctf.h> 290Sstevel@tonic-gate #include <sys/cpuvar.h> 300Sstevel@tonic-gate #include <sys/kmem_impl.h> 310Sstevel@tonic-gate #include <sys/vmem_impl.h> 320Sstevel@tonic-gate #include <sys/machelf.h> 330Sstevel@tonic-gate #include <sys/modctl.h> 340Sstevel@tonic-gate #include <sys/kobj.h> 350Sstevel@tonic-gate #include <sys/panic.h> 360Sstevel@tonic-gate #include <sys/stack.h> 370Sstevel@tonic-gate #include <sys/sysmacros.h> 380Sstevel@tonic-gate #include <vm/page.h> 390Sstevel@tonic-gate 406712Stomee #include "avl.h" 416712Stomee #include "combined.h" 424798Stomee #include "dist.h" 430Sstevel@tonic-gate #include "kmem.h" 446712Stomee #include "list.h" 450Sstevel@tonic-gate 460Sstevel@tonic-gate #define dprintf(x) if (mdb_debug_level) { \ 470Sstevel@tonic-gate mdb_printf("kmem debug: "); \ 480Sstevel@tonic-gate /*CSTYLED*/\ 490Sstevel@tonic-gate mdb_printf x ;\ 500Sstevel@tonic-gate } 510Sstevel@tonic-gate 520Sstevel@tonic-gate #define KM_ALLOCATED 0x01 530Sstevel@tonic-gate #define KM_FREE 0x02 540Sstevel@tonic-gate #define KM_BUFCTL 0x04 550Sstevel@tonic-gate #define KM_CONSTRUCTED 0x08 /* only constructed free buffers */ 560Sstevel@tonic-gate #define KM_HASH 0x10 570Sstevel@tonic-gate 580Sstevel@tonic-gate static int mdb_debug_level = 0; 590Sstevel@tonic-gate 600Sstevel@tonic-gate /*ARGSUSED*/ 610Sstevel@tonic-gate static int 620Sstevel@tonic-gate kmem_init_walkers(uintptr_t addr, const kmem_cache_t *c, void *ignored) 630Sstevel@tonic-gate { 640Sstevel@tonic-gate mdb_walker_t w; 650Sstevel@tonic-gate char descr[64]; 660Sstevel@tonic-gate 670Sstevel@tonic-gate (void) mdb_snprintf(descr, sizeof (descr), 680Sstevel@tonic-gate "walk the %s cache", c->cache_name); 690Sstevel@tonic-gate 700Sstevel@tonic-gate w.walk_name = c->cache_name; 710Sstevel@tonic-gate w.walk_descr = descr; 
720Sstevel@tonic-gate w.walk_init = kmem_walk_init; 730Sstevel@tonic-gate w.walk_step = kmem_walk_step; 740Sstevel@tonic-gate w.walk_fini = kmem_walk_fini; 750Sstevel@tonic-gate w.walk_init_arg = (void *)addr; 760Sstevel@tonic-gate 770Sstevel@tonic-gate if (mdb_add_walker(&w) == -1) 780Sstevel@tonic-gate mdb_warn("failed to add %s walker", c->cache_name); 790Sstevel@tonic-gate 800Sstevel@tonic-gate return (WALK_NEXT); 810Sstevel@tonic-gate } 820Sstevel@tonic-gate 830Sstevel@tonic-gate /*ARGSUSED*/ 840Sstevel@tonic-gate int 850Sstevel@tonic-gate kmem_debug(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 860Sstevel@tonic-gate { 870Sstevel@tonic-gate mdb_debug_level ^= 1; 880Sstevel@tonic-gate 890Sstevel@tonic-gate mdb_printf("kmem: debugging is now %s\n", 900Sstevel@tonic-gate mdb_debug_level ? "on" : "off"); 910Sstevel@tonic-gate 920Sstevel@tonic-gate return (DCMD_OK); 930Sstevel@tonic-gate } 940Sstevel@tonic-gate 950Sstevel@tonic-gate int 960Sstevel@tonic-gate kmem_cache_walk_init(mdb_walk_state_t *wsp) 970Sstevel@tonic-gate { 980Sstevel@tonic-gate GElf_Sym sym; 990Sstevel@tonic-gate 1006712Stomee if (mdb_lookup_by_name("kmem_caches", &sym) == -1) { 1016712Stomee mdb_warn("couldn't find kmem_caches"); 1020Sstevel@tonic-gate return (WALK_ERR); 1030Sstevel@tonic-gate } 1040Sstevel@tonic-gate 1056712Stomee wsp->walk_addr = (uintptr_t)sym.st_value; 1066712Stomee 1076712Stomee return (list_walk_init_named(wsp, "cache list", "cache")); 1080Sstevel@tonic-gate } 1090Sstevel@tonic-gate 1100Sstevel@tonic-gate int 1110Sstevel@tonic-gate kmem_cpu_cache_walk_init(mdb_walk_state_t *wsp) 1120Sstevel@tonic-gate { 1130Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 1140Sstevel@tonic-gate mdb_warn("kmem_cpu_cache doesn't support global walks"); 1150Sstevel@tonic-gate return (WALK_ERR); 1160Sstevel@tonic-gate } 1170Sstevel@tonic-gate 1180Sstevel@tonic-gate if (mdb_layered_walk("cpu", wsp) == -1) { 1190Sstevel@tonic-gate mdb_warn("couldn't walk 'cpu'"); 
1200Sstevel@tonic-gate return (WALK_ERR); 1210Sstevel@tonic-gate } 1220Sstevel@tonic-gate 1230Sstevel@tonic-gate wsp->walk_data = (void *)wsp->walk_addr; 1240Sstevel@tonic-gate 1250Sstevel@tonic-gate return (WALK_NEXT); 1260Sstevel@tonic-gate } 1270Sstevel@tonic-gate 1280Sstevel@tonic-gate int 1290Sstevel@tonic-gate kmem_cpu_cache_walk_step(mdb_walk_state_t *wsp) 1300Sstevel@tonic-gate { 1310Sstevel@tonic-gate uintptr_t caddr = (uintptr_t)wsp->walk_data; 1320Sstevel@tonic-gate const cpu_t *cpu = wsp->walk_layer; 1330Sstevel@tonic-gate kmem_cpu_cache_t cc; 1340Sstevel@tonic-gate 1359019SMichael.Corcoran@Sun.COM caddr += OFFSETOF(kmem_cache_t, cache_cpu[cpu->cpu_seqid]); 1360Sstevel@tonic-gate 1370Sstevel@tonic-gate if (mdb_vread(&cc, sizeof (kmem_cpu_cache_t), caddr) == -1) { 1380Sstevel@tonic-gate mdb_warn("couldn't read kmem_cpu_cache at %p", caddr); 1390Sstevel@tonic-gate return (WALK_ERR); 1400Sstevel@tonic-gate } 1410Sstevel@tonic-gate 1420Sstevel@tonic-gate return (wsp->walk_callback(caddr, &cc, wsp->walk_cbdata)); 1430Sstevel@tonic-gate } 1440Sstevel@tonic-gate 1456712Stomee static int 1466712Stomee kmem_slab_check(void *p, uintptr_t saddr, void *arg) 1476712Stomee { 1486712Stomee kmem_slab_t *sp = p; 1496712Stomee uintptr_t caddr = (uintptr_t)arg; 1506712Stomee if ((uintptr_t)sp->slab_cache != caddr) { 1516712Stomee mdb_warn("slab %p isn't in cache %p (in cache %p)\n", 1526712Stomee saddr, caddr, sp->slab_cache); 1536712Stomee return (-1); 1546712Stomee } 1556712Stomee 1566712Stomee return (0); 1576712Stomee } 1586712Stomee 1596712Stomee static int 1606712Stomee kmem_partial_slab_check(void *p, uintptr_t saddr, void *arg) 1616712Stomee { 1626712Stomee kmem_slab_t *sp = p; 1636712Stomee 1646712Stomee int rc = kmem_slab_check(p, saddr, arg); 1656712Stomee if (rc != 0) { 1666712Stomee return (rc); 1676712Stomee } 1686712Stomee 1696712Stomee if (!KMEM_SLAB_IS_PARTIAL(sp)) { 1706712Stomee mdb_warn("slab %p is not a partial slab\n", saddr); 1716712Stomee return 
(-1); 1726712Stomee } 1736712Stomee 1746712Stomee return (0); 1756712Stomee } 1766712Stomee 1776712Stomee static int 1786712Stomee kmem_complete_slab_check(void *p, uintptr_t saddr, void *arg) 1796712Stomee { 1806712Stomee kmem_slab_t *sp = p; 1816712Stomee 1826712Stomee int rc = kmem_slab_check(p, saddr, arg); 1836712Stomee if (rc != 0) { 1846712Stomee return (rc); 1856712Stomee } 1866712Stomee 1876712Stomee if (!KMEM_SLAB_IS_ALL_USED(sp)) { 1886712Stomee mdb_warn("slab %p is not completely allocated\n", saddr); 1896712Stomee return (-1); 1906712Stomee } 1916712Stomee 1926712Stomee return (0); 1936712Stomee } 1946712Stomee 1956712Stomee typedef struct { 1966712Stomee uintptr_t kns_cache_addr; 1976712Stomee int kns_nslabs; 1986712Stomee } kmem_nth_slab_t; 1996712Stomee 2006712Stomee static int 2016712Stomee kmem_nth_slab_check(void *p, uintptr_t saddr, void *arg) 2026712Stomee { 2036712Stomee kmem_nth_slab_t *chkp = arg; 2046712Stomee 2056712Stomee int rc = kmem_slab_check(p, saddr, (void *)chkp->kns_cache_addr); 2066712Stomee if (rc != 0) { 2076712Stomee return (rc); 2086712Stomee } 2096712Stomee 2106712Stomee return (chkp->kns_nslabs-- == 0 ? 
1 : 0); 2116712Stomee } 2126712Stomee 2136712Stomee static int 2146712Stomee kmem_complete_slab_walk_init(mdb_walk_state_t *wsp) 2156712Stomee { 2166712Stomee uintptr_t caddr = wsp->walk_addr; 2176712Stomee 2186712Stomee wsp->walk_addr = (uintptr_t)(caddr + 2196712Stomee offsetof(kmem_cache_t, cache_complete_slabs)); 2206712Stomee 2216712Stomee return (list_walk_init_checked(wsp, "slab list", "slab", 2226712Stomee kmem_complete_slab_check, (void *)caddr)); 2236712Stomee } 2246712Stomee 2256712Stomee static int 2266712Stomee kmem_partial_slab_walk_init(mdb_walk_state_t *wsp) 2276712Stomee { 2286712Stomee uintptr_t caddr = wsp->walk_addr; 2296712Stomee 2306712Stomee wsp->walk_addr = (uintptr_t)(caddr + 2316712Stomee offsetof(kmem_cache_t, cache_partial_slabs)); 2326712Stomee 2336712Stomee return (avl_walk_init_checked(wsp, "slab list", "slab", 2346712Stomee kmem_partial_slab_check, (void *)caddr)); 2356712Stomee } 2366712Stomee 2370Sstevel@tonic-gate int 2380Sstevel@tonic-gate kmem_slab_walk_init(mdb_walk_state_t *wsp) 2390Sstevel@tonic-gate { 2400Sstevel@tonic-gate uintptr_t caddr = wsp->walk_addr; 2410Sstevel@tonic-gate 2420Sstevel@tonic-gate if (caddr == NULL) { 2430Sstevel@tonic-gate mdb_warn("kmem_slab doesn't support global walks\n"); 2440Sstevel@tonic-gate return (WALK_ERR); 2450Sstevel@tonic-gate } 2460Sstevel@tonic-gate 2476712Stomee combined_walk_init(wsp); 2486712Stomee combined_walk_add(wsp, 2496712Stomee kmem_complete_slab_walk_init, list_walk_step, list_walk_fini); 2506712Stomee combined_walk_add(wsp, 2516712Stomee kmem_partial_slab_walk_init, avl_walk_step, avl_walk_fini); 2520Sstevel@tonic-gate 2530Sstevel@tonic-gate return (WALK_NEXT); 2540Sstevel@tonic-gate } 2550Sstevel@tonic-gate 2566712Stomee static int 2576712Stomee kmem_first_complete_slab_walk_init(mdb_walk_state_t *wsp) 2586712Stomee { 2596712Stomee uintptr_t caddr = wsp->walk_addr; 2606712Stomee kmem_nth_slab_t *chk; 2616712Stomee 2626712Stomee chk = mdb_alloc(sizeof (kmem_nth_slab_t), 
2636712Stomee UM_SLEEP | UM_GC); 2646712Stomee chk->kns_cache_addr = caddr; 2656712Stomee chk->kns_nslabs = 1; 2666712Stomee wsp->walk_addr = (uintptr_t)(caddr + 2676712Stomee offsetof(kmem_cache_t, cache_complete_slabs)); 2686712Stomee 2696712Stomee return (list_walk_init_checked(wsp, "slab list", "slab", 2706712Stomee kmem_nth_slab_check, chk)); 2716712Stomee } 2726712Stomee 2730Sstevel@tonic-gate int 2740Sstevel@tonic-gate kmem_slab_walk_partial_init(mdb_walk_state_t *wsp) 2750Sstevel@tonic-gate { 2760Sstevel@tonic-gate uintptr_t caddr = wsp->walk_addr; 2770Sstevel@tonic-gate kmem_cache_t c; 2780Sstevel@tonic-gate 2790Sstevel@tonic-gate if (caddr == NULL) { 2800Sstevel@tonic-gate mdb_warn("kmem_slab_partial doesn't support global walks\n"); 2810Sstevel@tonic-gate return (WALK_ERR); 2820Sstevel@tonic-gate } 2830Sstevel@tonic-gate 2840Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), caddr) == -1) { 2850Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache at %p", caddr); 2860Sstevel@tonic-gate return (WALK_ERR); 2870Sstevel@tonic-gate } 2880Sstevel@tonic-gate 2896712Stomee combined_walk_init(wsp); 2900Sstevel@tonic-gate 2910Sstevel@tonic-gate /* 2920Sstevel@tonic-gate * Some consumers (umem_walk_step(), in particular) require at 2930Sstevel@tonic-gate * least one callback if there are any buffers in the cache. So 2946712Stomee * if there are *no* partial slabs, report the first full slab, if 2950Sstevel@tonic-gate * any. 2960Sstevel@tonic-gate * 2970Sstevel@tonic-gate * Yes, this is ugly, but it's cleaner than the other possibilities. 
2980Sstevel@tonic-gate */ 2996712Stomee if (c.cache_partial_slabs.avl_numnodes == 0) { 3006712Stomee combined_walk_add(wsp, kmem_first_complete_slab_walk_init, 3016712Stomee list_walk_step, list_walk_fini); 3026712Stomee } else { 3036712Stomee combined_walk_add(wsp, kmem_partial_slab_walk_init, 3046712Stomee avl_walk_step, avl_walk_fini); 3056712Stomee } 3060Sstevel@tonic-gate 3070Sstevel@tonic-gate return (WALK_NEXT); 3080Sstevel@tonic-gate } 3090Sstevel@tonic-gate 3100Sstevel@tonic-gate int 3110Sstevel@tonic-gate kmem_cache(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv) 3120Sstevel@tonic-gate { 3130Sstevel@tonic-gate kmem_cache_t c; 3146712Stomee const char *filter = NULL; 3156712Stomee 3166712Stomee if (mdb_getopts(ac, argv, 3176712Stomee 'n', MDB_OPT_STR, &filter, 3186712Stomee NULL) != ac) { 3196712Stomee return (DCMD_USAGE); 3206712Stomee } 3210Sstevel@tonic-gate 3220Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) { 3230Sstevel@tonic-gate if (mdb_walk_dcmd("kmem_cache", "kmem_cache", ac, argv) == -1) { 3240Sstevel@tonic-gate mdb_warn("can't walk kmem_cache"); 3250Sstevel@tonic-gate return (DCMD_ERR); 3260Sstevel@tonic-gate } 3270Sstevel@tonic-gate return (DCMD_OK); 3280Sstevel@tonic-gate } 3290Sstevel@tonic-gate 3300Sstevel@tonic-gate if (DCMD_HDRSPEC(flags)) 3310Sstevel@tonic-gate mdb_printf("%-?s %-25s %4s %6s %8s %8s\n", "ADDR", "NAME", 3320Sstevel@tonic-gate "FLAG", "CFLAG", "BUFSIZE", "BUFTOTL"); 3330Sstevel@tonic-gate 3340Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), addr) == -1) { 3350Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache at %p", addr); 3360Sstevel@tonic-gate return (DCMD_ERR); 3370Sstevel@tonic-gate } 3380Sstevel@tonic-gate 3396712Stomee if ((filter != NULL) && (strstr(c.cache_name, filter) == NULL)) 3406712Stomee return (DCMD_OK); 3416712Stomee 3420Sstevel@tonic-gate mdb_printf("%0?p %-25s %04x %06x %8ld %8lld\n", addr, c.cache_name, 3430Sstevel@tonic-gate c.cache_flags, c.cache_cflags, c.cache_bufsize, 
c.cache_buftotal); 3440Sstevel@tonic-gate 3450Sstevel@tonic-gate return (DCMD_OK); 3460Sstevel@tonic-gate } 3470Sstevel@tonic-gate 3486712Stomee void 3496712Stomee kmem_cache_help(void) 3506712Stomee { 3516712Stomee mdb_printf("%s", "Print kernel memory caches.\n\n"); 3526712Stomee mdb_dec_indent(2); 3536712Stomee mdb_printf("%<b>OPTIONS%</b>\n"); 3546712Stomee mdb_inc_indent(2); 3556712Stomee mdb_printf("%s", 3566712Stomee " -n name\n" 3576712Stomee " name of kmem cache (or matching partial name)\n" 3586712Stomee "\n" 3596712Stomee "Column\tDescription\n" 3606712Stomee "\n" 3616712Stomee "ADDR\t\taddress of kmem cache\n" 3626712Stomee "NAME\t\tname of kmem cache\n" 3636712Stomee "FLAG\t\tvarious cache state flags\n" 3646712Stomee "CFLAG\t\tcache creation flags\n" 3656712Stomee "BUFSIZE\tobject size in bytes\n" 3666712Stomee "BUFTOTL\tcurrent total buffers in cache (allocated and free)\n"); 3676712Stomee } 3684688Stomee 3694688Stomee #define LABEL_WIDTH 11 3704688Stomee static void 3714688Stomee kmem_slabs_print_dist(uint_t *ks_bucket, size_t buffers_per_slab, 3724688Stomee size_t maxbuckets, size_t minbucketsize) 3734688Stomee { 3744688Stomee uint64_t total; 3754688Stomee int buckets; 3764688Stomee int i; 3774688Stomee const int *distarray; 3784688Stomee int complete[2]; 3794688Stomee 3804688Stomee buckets = buffers_per_slab; 3814688Stomee 3824688Stomee total = 0; 3834688Stomee for (i = 0; i <= buffers_per_slab; i++) 3844688Stomee total += ks_bucket[i]; 3854688Stomee 3864688Stomee if (maxbuckets > 1) 3874688Stomee buckets = MIN(buckets, maxbuckets); 3884688Stomee 3894688Stomee if (minbucketsize > 1) { 3904688Stomee /* 3914688Stomee * minbucketsize does not apply to the first bucket reserved 3924688Stomee * for completely allocated slabs 3934688Stomee */ 3944688Stomee buckets = MIN(buckets, 1 + ((buffers_per_slab - 1) / 3954688Stomee minbucketsize)); 3964688Stomee if ((buckets < 2) && (buffers_per_slab > 1)) { 3974688Stomee buckets = 2; 3984688Stomee minbucketsize 
= (buffers_per_slab - 1); 3994688Stomee } 4004688Stomee } 4014688Stomee 4024688Stomee /* 4034688Stomee * The first printed bucket is reserved for completely allocated slabs. 4044688Stomee * Passing (buckets - 1) excludes that bucket from the generated 4054688Stomee * distribution, since we're handling it as a special case. 4064688Stomee */ 4074688Stomee complete[0] = buffers_per_slab; 4084688Stomee complete[1] = buffers_per_slab + 1; 4094798Stomee distarray = dist_linear(buckets - 1, 1, buffers_per_slab - 1); 4104688Stomee 4114688Stomee mdb_printf("%*s\n", LABEL_WIDTH, "Allocated"); 4124798Stomee dist_print_header("Buffers", LABEL_WIDTH, "Slabs"); 4134798Stomee 4144798Stomee dist_print_bucket(complete, 0, ks_bucket, total, LABEL_WIDTH); 4154688Stomee /* 4164688Stomee * Print bucket ranges in descending order after the first bucket for 4174688Stomee * completely allocated slabs, so a person can see immediately whether 4184688Stomee * or not there is fragmentation without having to scan possibly 4194688Stomee * multiple screens of output. Starting at (buckets - 2) excludes the 4204688Stomee * extra terminating bucket. 
4214688Stomee */ 4224688Stomee for (i = buckets - 2; i >= 0; i--) { 4234798Stomee dist_print_bucket(distarray, i, ks_bucket, total, LABEL_WIDTH); 4244688Stomee } 4254688Stomee mdb_printf("\n"); 4264688Stomee } 4274688Stomee #undef LABEL_WIDTH 4284688Stomee 4294688Stomee /*ARGSUSED*/ 4304688Stomee static int 4314688Stomee kmem_first_slab(uintptr_t addr, const kmem_slab_t *sp, boolean_t *is_slab) 4324688Stomee { 4334688Stomee *is_slab = B_TRUE; 4344688Stomee return (WALK_DONE); 4354688Stomee } 4364688Stomee 4374688Stomee /*ARGSUSED*/ 4384688Stomee static int 4394688Stomee kmem_first_partial_slab(uintptr_t addr, const kmem_slab_t *sp, 4404688Stomee boolean_t *is_slab) 4414688Stomee { 4424688Stomee /* 4436712Stomee * The "kmem_partial_slab" walker reports the first full slab if there 4444688Stomee * are no partial slabs (for the sake of consumers that require at least 4454688Stomee * one callback if there are any buffers in the cache). 4464688Stomee */ 4476712Stomee *is_slab = KMEM_SLAB_IS_PARTIAL(sp); 4484688Stomee return (WALK_DONE); 4494688Stomee } 4504688Stomee 4516712Stomee typedef struct kmem_slab_usage { 4526712Stomee int ksu_refcnt; /* count of allocated buffers on slab */ 4536712Stomee boolean_t ksu_nomove; /* slab marked non-reclaimable */ 4546712Stomee } kmem_slab_usage_t; 4556712Stomee 4566712Stomee typedef struct kmem_slab_stats { 4576712Stomee const kmem_cache_t *ks_cp; 4586712Stomee int ks_slabs; /* slabs in cache */ 4596712Stomee int ks_partial_slabs; /* partially allocated slabs in cache */ 4606712Stomee uint64_t ks_unused_buffers; /* total unused buffers in cache */ 4616712Stomee int ks_max_buffers_per_slab; /* max buffers per slab */ 4626712Stomee int ks_usage_len; /* ks_usage array length */ 4636712Stomee kmem_slab_usage_t *ks_usage; /* partial slab usage */ 4646712Stomee uint_t *ks_bucket; /* slab usage distribution */ 4656712Stomee } kmem_slab_stats_t; 4666712Stomee 4674688Stomee /*ARGSUSED*/ 4684688Stomee static int 4694688Stomee 
kmem_slablist_stat(uintptr_t addr, const kmem_slab_t *sp, 4704688Stomee kmem_slab_stats_t *ks) 4714688Stomee { 4724688Stomee kmem_slab_usage_t *ksu; 4734688Stomee long unused; 4744688Stomee 4754688Stomee ks->ks_slabs++; 4764688Stomee ks->ks_bucket[sp->slab_refcnt]++; 4774688Stomee 4784688Stomee unused = (sp->slab_chunks - sp->slab_refcnt); 4794688Stomee if (unused == 0) { 4804688Stomee return (WALK_NEXT); 4814688Stomee } 4824688Stomee 4834688Stomee ks->ks_partial_slabs++; 4844688Stomee ks->ks_unused_buffers += unused; 4854688Stomee 4864688Stomee if (ks->ks_partial_slabs > ks->ks_usage_len) { 4874688Stomee kmem_slab_usage_t *usage; 4884688Stomee int len = ks->ks_usage_len; 4894688Stomee 4904688Stomee len = (len == 0 ? 16 : len * 2); 4914688Stomee usage = mdb_zalloc(len * sizeof (kmem_slab_usage_t), UM_SLEEP); 4924688Stomee if (ks->ks_usage != NULL) { 4934688Stomee bcopy(ks->ks_usage, usage, 4944688Stomee ks->ks_usage_len * sizeof (kmem_slab_usage_t)); 4954688Stomee mdb_free(ks->ks_usage, 4964688Stomee ks->ks_usage_len * sizeof (kmem_slab_usage_t)); 4974688Stomee } 4984688Stomee ks->ks_usage = usage; 4994688Stomee ks->ks_usage_len = len; 5004688Stomee } 5014688Stomee 5024688Stomee ksu = &ks->ks_usage[ks->ks_partial_slabs - 1]; 5034688Stomee ksu->ksu_refcnt = sp->slab_refcnt; 5046712Stomee ksu->ksu_nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE); 5054688Stomee return (WALK_NEXT); 5064688Stomee } 5074688Stomee 5084688Stomee static void 5094688Stomee kmem_slabs_header() 5104688Stomee { 5114688Stomee mdb_printf("%-25s %8s %8s %9s %9s %6s\n", 5124688Stomee "", "", "Partial", "", "Unused", ""); 5134688Stomee mdb_printf("%-25s %8s %8s %9s %9s %6s\n", 5144688Stomee "Cache Name", "Slabs", "Slabs", "Buffers", "Buffers", "Waste"); 5154688Stomee mdb_printf("%-25s %8s %8s %9s %9s %6s\n", 5164688Stomee "-------------------------", "--------", "--------", "---------", 5174688Stomee "---------", "------"); 5184688Stomee } 5194688Stomee 5204688Stomee int 5214688Stomee 
kmem_slabs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 5224688Stomee { 5234688Stomee kmem_cache_t c; 5244688Stomee kmem_slab_stats_t stats; 5254688Stomee mdb_walk_cb_t cb; 5264688Stomee int pct; 5274688Stomee int tenths_pct; 5284688Stomee size_t maxbuckets = 1; 5294688Stomee size_t minbucketsize = 0; 5304688Stomee const char *filter = NULL; 5316712Stomee const char *name = NULL; 5324688Stomee uint_t opt_v = FALSE; 5336712Stomee boolean_t buckets = B_FALSE; 5344688Stomee boolean_t skip = B_FALSE; 5354688Stomee 5364688Stomee if (mdb_getopts(argc, argv, 5374688Stomee 'B', MDB_OPT_UINTPTR, &minbucketsize, 5384688Stomee 'b', MDB_OPT_UINTPTR, &maxbuckets, 5394688Stomee 'n', MDB_OPT_STR, &filter, 5406712Stomee 'N', MDB_OPT_STR, &name, 5414688Stomee 'v', MDB_OPT_SETBITS, TRUE, &opt_v, 5424688Stomee NULL) != argc) { 5434688Stomee return (DCMD_USAGE); 5444688Stomee } 5454688Stomee 5466712Stomee if ((maxbuckets != 1) || (minbucketsize != 0)) { 5476712Stomee buckets = B_TRUE; 5484688Stomee } 5494688Stomee 5504688Stomee if (!(flags & DCMD_ADDRSPEC)) { 5514688Stomee if (mdb_walk_dcmd("kmem_cache", "kmem_slabs", argc, 5524688Stomee argv) == -1) { 5534688Stomee mdb_warn("can't walk kmem_cache"); 5544688Stomee return (DCMD_ERR); 5554688Stomee } 5564688Stomee return (DCMD_OK); 5574688Stomee } 5584688Stomee 5594688Stomee if (mdb_vread(&c, sizeof (c), addr) == -1) { 5604688Stomee mdb_warn("couldn't read kmem_cache at %p", addr); 5614688Stomee return (DCMD_ERR); 5624688Stomee } 5634688Stomee 5646712Stomee if (name == NULL) { 5656712Stomee skip = ((filter != NULL) && 5666712Stomee (strstr(c.cache_name, filter) == NULL)); 5676712Stomee } else if (filter == NULL) { 5686712Stomee skip = (strcmp(c.cache_name, name) != 0); 5696712Stomee } else { 5706712Stomee /* match either -n or -N */ 5716712Stomee skip = ((strcmp(c.cache_name, name) != 0) && 5726712Stomee (strstr(c.cache_name, filter) == NULL)); 5734688Stomee } 5744688Stomee 5756712Stomee if (!(opt_v || buckets) && 
DCMD_HDRSPEC(flags)) { 5764688Stomee kmem_slabs_header(); 5776712Stomee } else if ((opt_v || buckets) && !skip) { 5784688Stomee if (DCMD_HDRSPEC(flags)) { 5794688Stomee kmem_slabs_header(); 5804688Stomee } else { 5814688Stomee boolean_t is_slab = B_FALSE; 5824688Stomee const char *walker_name; 5834688Stomee if (opt_v) { 5844688Stomee cb = (mdb_walk_cb_t)kmem_first_partial_slab; 5854688Stomee walker_name = "kmem_slab_partial"; 5864688Stomee } else { 5874688Stomee cb = (mdb_walk_cb_t)kmem_first_slab; 5884688Stomee walker_name = "kmem_slab"; 5894688Stomee } 5904688Stomee (void) mdb_pwalk(walker_name, cb, &is_slab, addr); 5914688Stomee if (is_slab) { 5924688Stomee kmem_slabs_header(); 5934688Stomee } 5944688Stomee } 5954688Stomee } 5964688Stomee 5974688Stomee if (skip) { 5984688Stomee return (DCMD_OK); 5994688Stomee } 6004688Stomee 6014688Stomee bzero(&stats, sizeof (kmem_slab_stats_t)); 6026712Stomee stats.ks_cp = &c; 6036712Stomee stats.ks_max_buffers_per_slab = c.cache_maxchunks; 6046712Stomee /* +1 to include a zero bucket */ 6056712Stomee stats.ks_bucket = mdb_zalloc((stats.ks_max_buffers_per_slab + 1) * 6066712Stomee sizeof (*stats.ks_bucket), UM_SLEEP); 6074688Stomee cb = (mdb_walk_cb_t)kmem_slablist_stat; 6084688Stomee (void) mdb_pwalk("kmem_slab", cb, &stats, addr); 6094688Stomee 6104688Stomee if (c.cache_buftotal == 0) { 6114688Stomee pct = 0; 6124688Stomee tenths_pct = 0; 6134688Stomee } else { 6144688Stomee uint64_t n = stats.ks_unused_buffers * 10000; 6154688Stomee pct = (int)(n / c.cache_buftotal); 6164688Stomee tenths_pct = pct - ((pct / 100) * 100); 6174688Stomee tenths_pct = (tenths_pct + 5) / 10; /* round nearest tenth */ 6184688Stomee if (tenths_pct == 10) { 6194688Stomee pct += 100; 6204688Stomee tenths_pct = 0; 6214688Stomee } 6224688Stomee } 6234688Stomee 6244688Stomee pct /= 100; 6254688Stomee mdb_printf("%-25s %8d %8d %9lld %9lld %3d.%1d%%\n", c.cache_name, 6264688Stomee stats.ks_slabs, stats.ks_partial_slabs, c.cache_buftotal, 6274688Stomee 
stats.ks_unused_buffers, pct, tenths_pct); 6284688Stomee 6294688Stomee if (maxbuckets == 0) { 6306712Stomee maxbuckets = stats.ks_max_buffers_per_slab; 6314688Stomee } 6324688Stomee 6334688Stomee if (((maxbuckets > 1) || (minbucketsize > 0)) && 6344688Stomee (stats.ks_slabs > 0)) { 6354688Stomee mdb_printf("\n"); 6364688Stomee kmem_slabs_print_dist(stats.ks_bucket, 6376712Stomee stats.ks_max_buffers_per_slab, maxbuckets, minbucketsize); 6386712Stomee } 6396712Stomee 6406712Stomee mdb_free(stats.ks_bucket, (stats.ks_max_buffers_per_slab + 1) * 6416712Stomee sizeof (*stats.ks_bucket)); 6426712Stomee 6436712Stomee if (!opt_v) { 6446712Stomee return (DCMD_OK); 6454688Stomee } 6464688Stomee 6474688Stomee if (opt_v && (stats.ks_partial_slabs > 0)) { 6484688Stomee int i; 6494688Stomee kmem_slab_usage_t *ksu; 6504688Stomee 65110217STom.Erickson@Sun.COM mdb_printf(" %d complete (%d), %d partial:", 6524688Stomee (stats.ks_slabs - stats.ks_partial_slabs), 65310217STom.Erickson@Sun.COM stats.ks_max_buffers_per_slab, 6544688Stomee stats.ks_partial_slabs); 65510217STom.Erickson@Sun.COM 6564688Stomee for (i = 0; i < stats.ks_partial_slabs; i++) { 6574688Stomee ksu = &stats.ks_usage[i]; 65810217STom.Erickson@Sun.COM mdb_printf(" %d%s", ksu->ksu_refcnt, 65910217STom.Erickson@Sun.COM (ksu->ksu_nomove ? 
"*" : "")); 6604688Stomee } 6614688Stomee mdb_printf("\n\n"); 6624688Stomee } 6634688Stomee 6644688Stomee if (stats.ks_usage_len > 0) { 6654688Stomee mdb_free(stats.ks_usage, 6664688Stomee stats.ks_usage_len * sizeof (kmem_slab_usage_t)); 6674688Stomee } 6684688Stomee 6694688Stomee return (DCMD_OK); 6704688Stomee } 6714688Stomee 6724688Stomee void 6734688Stomee kmem_slabs_help(void) 6744688Stomee { 6756712Stomee mdb_printf("%s", 6766712Stomee "Display slab usage per kmem cache.\n\n"); 6774688Stomee mdb_dec_indent(2); 6784688Stomee mdb_printf("%<b>OPTIONS%</b>\n"); 6794688Stomee mdb_inc_indent(2); 6804688Stomee mdb_printf("%s", 6814688Stomee " -n name\n" 6824688Stomee " name of kmem cache (or matching partial name)\n" 6836712Stomee " -N name\n" 6846712Stomee " exact name of kmem cache\n" 6854688Stomee " -b maxbins\n" 6864688Stomee " Print a distribution of allocated buffers per slab using at\n" 6874688Stomee " most maxbins bins. The first bin is reserved for completely\n" 6884688Stomee " allocated slabs. Setting maxbins to zero (-b 0) has the same\n" 6894688Stomee " effect as specifying the maximum allocated buffers per slab\n" 6904688Stomee " or setting minbinsize to 1 (-B 1).\n" 6914688Stomee " -B minbinsize\n" 6924688Stomee " Print a distribution of allocated buffers per slab, making\n" 6934688Stomee " all bins (except the first, reserved for completely allocated\n" 6944688Stomee " slabs) at least minbinsize buffers apart.\n" 6954688Stomee " -v verbose output: List the allocated buffer count of each partial\n" 6964688Stomee " slab on the free list in order from front to back to show how\n" 6974688Stomee " closely the slabs are ordered by usage. 
For example\n" 6984688Stomee "\n" 6994688Stomee " 10 complete, 3 partial (8): 7 3 1\n" 7004688Stomee "\n" 7014688Stomee " means there are thirteen slabs with eight buffers each, including\n" 7024688Stomee " three partially allocated slabs with less than all eight buffers\n" 7034688Stomee " allocated.\n" 7044688Stomee "\n" 7054688Stomee " Buffer allocations are always from the front of the partial slab\n" 7064688Stomee " list. When a buffer is freed from a completely used slab, that\n" 7074688Stomee " slab is added to the front of the partial slab list. Assuming\n" 7084688Stomee " that all buffers are equally likely to be freed soon, the\n" 7094688Stomee " desired order of partial slabs is most-used at the front of the\n" 7104688Stomee " list and least-used at the back (as in the example above).\n" 7114688Stomee " However, if a slab contains an allocated buffer that will not\n" 7124688Stomee " soon be freed, it would be better for that slab to be at the\n" 7136712Stomee " front where all of its buffers can be allocated. Taking a slab\n" 7146712Stomee " off the partial slab list (either with all buffers freed or all\n" 7156712Stomee " buffers allocated) reduces cache fragmentation.\n" 7166712Stomee "\n" 7176712Stomee " A slab's allocated buffer count representing a partial slab (9 in\n" 7186712Stomee " the example below) may be marked as follows:\n" 7196712Stomee "\n" 7206712Stomee " 9* An asterisk indicates that kmem has marked the slab non-\n" 7216712Stomee " reclaimable because the kmem client refused to move one of the\n" 7226712Stomee " slab's buffers. Since kmem does not expect to completely free the\n" 7236712Stomee " slab, it moves it to the front of the list in the hope of\n" 7246712Stomee " completely allocating it instead. 
A slab marked with an asterisk\n" 7256712Stomee " stays marked for as long as it remains on the partial slab list.\n" 7264688Stomee "\n" 7274688Stomee "Column\t\tDescription\n" 7284688Stomee "\n" 7294688Stomee "Cache Name\t\tname of kmem cache\n" 7304688Stomee "Slabs\t\t\ttotal slab count\n" 7314688Stomee "Partial Slabs\t\tcount of partially allocated slabs on the free list\n" 7324688Stomee "Buffers\t\ttotal buffer count (Slabs * (buffers per slab))\n" 7334688Stomee "Unused Buffers\tcount of unallocated buffers across all partial slabs\n" 7344688Stomee "Waste\t\t\t(Unused Buffers / Buffers) does not include space\n" 7354688Stomee "\t\t\t for accounting structures (debug mode), slab\n" 7364688Stomee "\t\t\t coloring (incremental small offsets to stagger\n" 7374688Stomee "\t\t\t buffer alignment), or the per-CPU magazine layer\n"); 7384688Stomee } 7394688Stomee 7400Sstevel@tonic-gate static int 7410Sstevel@tonic-gate addrcmp(const void *lhs, const void *rhs) 7420Sstevel@tonic-gate { 7430Sstevel@tonic-gate uintptr_t p1 = *((uintptr_t *)lhs); 7440Sstevel@tonic-gate uintptr_t p2 = *((uintptr_t *)rhs); 7450Sstevel@tonic-gate 7460Sstevel@tonic-gate if (p1 < p2) 7470Sstevel@tonic-gate return (-1); 7480Sstevel@tonic-gate if (p1 > p2) 7490Sstevel@tonic-gate return (1); 7500Sstevel@tonic-gate return (0); 7510Sstevel@tonic-gate } 7520Sstevel@tonic-gate 7530Sstevel@tonic-gate static int 7540Sstevel@tonic-gate bufctlcmp(const kmem_bufctl_audit_t **lhs, const kmem_bufctl_audit_t **rhs) 7550Sstevel@tonic-gate { 7560Sstevel@tonic-gate const kmem_bufctl_audit_t *bcp1 = *lhs; 7570Sstevel@tonic-gate const kmem_bufctl_audit_t *bcp2 = *rhs; 7580Sstevel@tonic-gate 7590Sstevel@tonic-gate if (bcp1->bc_timestamp > bcp2->bc_timestamp) 7600Sstevel@tonic-gate return (-1); 7610Sstevel@tonic-gate 7620Sstevel@tonic-gate if (bcp1->bc_timestamp < bcp2->bc_timestamp) 7630Sstevel@tonic-gate return (1); 7640Sstevel@tonic-gate 7650Sstevel@tonic-gate return (0); 7660Sstevel@tonic-gate } 
7670Sstevel@tonic-gate 7680Sstevel@tonic-gate typedef struct kmem_hash_walk { 7690Sstevel@tonic-gate uintptr_t *kmhw_table; 7700Sstevel@tonic-gate size_t kmhw_nelems; 7710Sstevel@tonic-gate size_t kmhw_pos; 7720Sstevel@tonic-gate kmem_bufctl_t kmhw_cur; 7730Sstevel@tonic-gate } kmem_hash_walk_t; 7740Sstevel@tonic-gate 7750Sstevel@tonic-gate int 7760Sstevel@tonic-gate kmem_hash_walk_init(mdb_walk_state_t *wsp) 7770Sstevel@tonic-gate { 7780Sstevel@tonic-gate kmem_hash_walk_t *kmhw; 7790Sstevel@tonic-gate uintptr_t *hash; 7800Sstevel@tonic-gate kmem_cache_t c; 7810Sstevel@tonic-gate uintptr_t haddr, addr = wsp->walk_addr; 7820Sstevel@tonic-gate size_t nelems; 7830Sstevel@tonic-gate size_t hsize; 7840Sstevel@tonic-gate 7850Sstevel@tonic-gate if (addr == NULL) { 7860Sstevel@tonic-gate mdb_warn("kmem_hash doesn't support global walks\n"); 7870Sstevel@tonic-gate return (WALK_ERR); 7880Sstevel@tonic-gate } 7890Sstevel@tonic-gate 7900Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), addr) == -1) { 7910Sstevel@tonic-gate mdb_warn("couldn't read cache at addr %p", addr); 7920Sstevel@tonic-gate return (WALK_ERR); 7930Sstevel@tonic-gate } 7940Sstevel@tonic-gate 7950Sstevel@tonic-gate if (!(c.cache_flags & KMF_HASH)) { 7960Sstevel@tonic-gate mdb_warn("cache %p doesn't have a hash table\n", addr); 7970Sstevel@tonic-gate return (WALK_DONE); /* nothing to do */ 7980Sstevel@tonic-gate } 7990Sstevel@tonic-gate 8000Sstevel@tonic-gate kmhw = mdb_zalloc(sizeof (kmem_hash_walk_t), UM_SLEEP); 8010Sstevel@tonic-gate kmhw->kmhw_cur.bc_next = NULL; 8020Sstevel@tonic-gate kmhw->kmhw_pos = 0; 8030Sstevel@tonic-gate 8040Sstevel@tonic-gate kmhw->kmhw_nelems = nelems = c.cache_hash_mask + 1; 8050Sstevel@tonic-gate hsize = nelems * sizeof (uintptr_t); 8060Sstevel@tonic-gate haddr = (uintptr_t)c.cache_hash_table; 8070Sstevel@tonic-gate 8080Sstevel@tonic-gate kmhw->kmhw_table = hash = mdb_alloc(hsize, UM_SLEEP); 8090Sstevel@tonic-gate if (mdb_vread(hash, hsize, haddr) == -1) { 8100Sstevel@tonic-gate 
mdb_warn("failed to read hash table at %p", haddr); 8110Sstevel@tonic-gate mdb_free(hash, hsize); 8120Sstevel@tonic-gate mdb_free(kmhw, sizeof (kmem_hash_walk_t)); 8130Sstevel@tonic-gate return (WALK_ERR); 8140Sstevel@tonic-gate } 8150Sstevel@tonic-gate 8160Sstevel@tonic-gate wsp->walk_data = kmhw; 8170Sstevel@tonic-gate 8180Sstevel@tonic-gate return (WALK_NEXT); 8190Sstevel@tonic-gate } 8200Sstevel@tonic-gate 8210Sstevel@tonic-gate int 8220Sstevel@tonic-gate kmem_hash_walk_step(mdb_walk_state_t *wsp) 8230Sstevel@tonic-gate { 8240Sstevel@tonic-gate kmem_hash_walk_t *kmhw = wsp->walk_data; 8250Sstevel@tonic-gate uintptr_t addr = NULL; 8260Sstevel@tonic-gate 8270Sstevel@tonic-gate if ((addr = (uintptr_t)kmhw->kmhw_cur.bc_next) == NULL) { 8280Sstevel@tonic-gate while (kmhw->kmhw_pos < kmhw->kmhw_nelems) { 8290Sstevel@tonic-gate if ((addr = kmhw->kmhw_table[kmhw->kmhw_pos++]) != NULL) 8300Sstevel@tonic-gate break; 8310Sstevel@tonic-gate } 8320Sstevel@tonic-gate } 8330Sstevel@tonic-gate if (addr == NULL) 8340Sstevel@tonic-gate return (WALK_DONE); 8350Sstevel@tonic-gate 8360Sstevel@tonic-gate if (mdb_vread(&kmhw->kmhw_cur, sizeof (kmem_bufctl_t), addr) == -1) { 8370Sstevel@tonic-gate mdb_warn("couldn't read kmem_bufctl_t at addr %p", addr); 8380Sstevel@tonic-gate return (WALK_ERR); 8390Sstevel@tonic-gate } 8400Sstevel@tonic-gate 8410Sstevel@tonic-gate return (wsp->walk_callback(addr, &kmhw->kmhw_cur, wsp->walk_cbdata)); 8420Sstevel@tonic-gate } 8430Sstevel@tonic-gate 8440Sstevel@tonic-gate void 8450Sstevel@tonic-gate kmem_hash_walk_fini(mdb_walk_state_t *wsp) 8460Sstevel@tonic-gate { 8470Sstevel@tonic-gate kmem_hash_walk_t *kmhw = wsp->walk_data; 8480Sstevel@tonic-gate 8490Sstevel@tonic-gate if (kmhw == NULL) 8500Sstevel@tonic-gate return; 8510Sstevel@tonic-gate 8520Sstevel@tonic-gate mdb_free(kmhw->kmhw_table, kmhw->kmhw_nelems * sizeof (uintptr_t)); 8530Sstevel@tonic-gate mdb_free(kmhw, sizeof (kmem_hash_walk_t)); 8540Sstevel@tonic-gate } 8550Sstevel@tonic-gate 
8560Sstevel@tonic-gate /* 8570Sstevel@tonic-gate * Find the address of the bufctl structure for the address 'buf' in cache 8580Sstevel@tonic-gate * 'cp', which is at address caddr, and place it in *out. 8590Sstevel@tonic-gate */ 8600Sstevel@tonic-gate static int 8610Sstevel@tonic-gate kmem_hash_lookup(kmem_cache_t *cp, uintptr_t caddr, void *buf, uintptr_t *out) 8620Sstevel@tonic-gate { 8630Sstevel@tonic-gate uintptr_t bucket = (uintptr_t)KMEM_HASH(cp, buf); 8640Sstevel@tonic-gate kmem_bufctl_t *bcp; 8650Sstevel@tonic-gate kmem_bufctl_t bc; 8660Sstevel@tonic-gate 8670Sstevel@tonic-gate if (mdb_vread(&bcp, sizeof (kmem_bufctl_t *), bucket) == -1) { 8680Sstevel@tonic-gate mdb_warn("unable to read hash bucket for %p in cache %p", 8690Sstevel@tonic-gate buf, caddr); 8700Sstevel@tonic-gate return (-1); 8710Sstevel@tonic-gate } 8720Sstevel@tonic-gate 8730Sstevel@tonic-gate while (bcp != NULL) { 8740Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (kmem_bufctl_t), 8750Sstevel@tonic-gate (uintptr_t)bcp) == -1) { 8760Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", bcp); 8770Sstevel@tonic-gate return (-1); 8780Sstevel@tonic-gate } 8790Sstevel@tonic-gate if (bc.bc_addr == buf) { 8800Sstevel@tonic-gate *out = (uintptr_t)bcp; 8810Sstevel@tonic-gate return (0); 8820Sstevel@tonic-gate } 8830Sstevel@tonic-gate bcp = bc.bc_next; 8840Sstevel@tonic-gate } 8850Sstevel@tonic-gate 8860Sstevel@tonic-gate mdb_warn("unable to find bufctl for %p in cache %p\n", buf, caddr); 8870Sstevel@tonic-gate return (-1); 8880Sstevel@tonic-gate } 8890Sstevel@tonic-gate 8900Sstevel@tonic-gate int 8910Sstevel@tonic-gate kmem_get_magsize(const kmem_cache_t *cp) 8920Sstevel@tonic-gate { 8930Sstevel@tonic-gate uintptr_t addr = (uintptr_t)cp->cache_magtype; 8940Sstevel@tonic-gate GElf_Sym mt_sym; 8950Sstevel@tonic-gate kmem_magtype_t mt; 8960Sstevel@tonic-gate int res; 8970Sstevel@tonic-gate 8980Sstevel@tonic-gate /* 8990Sstevel@tonic-gate * if cpu 0 has a non-zero magsize, it must be correct. 
caches 9000Sstevel@tonic-gate * with KMF_NOMAGAZINE have disabled their magazine layers, so 9010Sstevel@tonic-gate * it is okay to return 0 for them. 9020Sstevel@tonic-gate */ 9030Sstevel@tonic-gate if ((res = cp->cache_cpu[0].cc_magsize) != 0 || 9040Sstevel@tonic-gate (cp->cache_flags & KMF_NOMAGAZINE)) 9050Sstevel@tonic-gate return (res); 9060Sstevel@tonic-gate 9070Sstevel@tonic-gate if (mdb_lookup_by_name("kmem_magtype", &mt_sym) == -1) { 9080Sstevel@tonic-gate mdb_warn("unable to read 'kmem_magtype'"); 9090Sstevel@tonic-gate } else if (addr < mt_sym.st_value || 9100Sstevel@tonic-gate addr + sizeof (mt) - 1 > mt_sym.st_value + mt_sym.st_size - 1 || 9110Sstevel@tonic-gate ((addr - mt_sym.st_value) % sizeof (mt)) != 0) { 9120Sstevel@tonic-gate mdb_warn("cache '%s' has invalid magtype pointer (%p)\n", 9130Sstevel@tonic-gate cp->cache_name, addr); 9140Sstevel@tonic-gate return (0); 9150Sstevel@tonic-gate } 9160Sstevel@tonic-gate if (mdb_vread(&mt, sizeof (mt), addr) == -1) { 9170Sstevel@tonic-gate mdb_warn("unable to read magtype at %a", addr); 9180Sstevel@tonic-gate return (0); 9190Sstevel@tonic-gate } 9200Sstevel@tonic-gate return (mt.mt_magsize); 9210Sstevel@tonic-gate } 9220Sstevel@tonic-gate 9230Sstevel@tonic-gate /*ARGSUSED*/ 9240Sstevel@tonic-gate static int 9250Sstevel@tonic-gate kmem_estimate_slab(uintptr_t addr, const kmem_slab_t *sp, size_t *est) 9260Sstevel@tonic-gate { 9270Sstevel@tonic-gate *est -= (sp->slab_chunks - sp->slab_refcnt); 9280Sstevel@tonic-gate 9290Sstevel@tonic-gate return (WALK_NEXT); 9300Sstevel@tonic-gate } 9310Sstevel@tonic-gate 9320Sstevel@tonic-gate /* 9330Sstevel@tonic-gate * Returns an upper bound on the number of allocated buffers in a given 9340Sstevel@tonic-gate * cache. 
9350Sstevel@tonic-gate */ 9360Sstevel@tonic-gate size_t 9370Sstevel@tonic-gate kmem_estimate_allocated(uintptr_t addr, const kmem_cache_t *cp) 9380Sstevel@tonic-gate { 9390Sstevel@tonic-gate int magsize; 9400Sstevel@tonic-gate size_t cache_est; 9410Sstevel@tonic-gate 9420Sstevel@tonic-gate cache_est = cp->cache_buftotal; 9430Sstevel@tonic-gate 9440Sstevel@tonic-gate (void) mdb_pwalk("kmem_slab_partial", 9450Sstevel@tonic-gate (mdb_walk_cb_t)kmem_estimate_slab, &cache_est, addr); 9460Sstevel@tonic-gate 9470Sstevel@tonic-gate if ((magsize = kmem_get_magsize(cp)) != 0) { 9480Sstevel@tonic-gate size_t mag_est = cp->cache_full.ml_total * magsize; 9490Sstevel@tonic-gate 9500Sstevel@tonic-gate if (cache_est >= mag_est) { 9510Sstevel@tonic-gate cache_est -= mag_est; 9520Sstevel@tonic-gate } else { 9530Sstevel@tonic-gate mdb_warn("cache %p's magazine layer holds more buffers " 9540Sstevel@tonic-gate "than the slab layer.\n", addr); 9550Sstevel@tonic-gate } 9560Sstevel@tonic-gate } 9570Sstevel@tonic-gate return (cache_est); 9580Sstevel@tonic-gate } 9590Sstevel@tonic-gate 9600Sstevel@tonic-gate #define READMAG_ROUNDS(rounds) { \ 9610Sstevel@tonic-gate if (mdb_vread(mp, magbsize, (uintptr_t)kmp) == -1) { \ 9620Sstevel@tonic-gate mdb_warn("couldn't read magazine at %p", kmp); \ 9630Sstevel@tonic-gate goto fail; \ 9640Sstevel@tonic-gate } \ 9650Sstevel@tonic-gate for (i = 0; i < rounds; i++) { \ 9660Sstevel@tonic-gate maglist[magcnt++] = mp->mag_round[i]; \ 9670Sstevel@tonic-gate if (magcnt == magmax) { \ 9680Sstevel@tonic-gate mdb_warn("%d magazines exceeds fudge factor\n", \ 9690Sstevel@tonic-gate magcnt); \ 9700Sstevel@tonic-gate goto fail; \ 9710Sstevel@tonic-gate } \ 9720Sstevel@tonic-gate } \ 9730Sstevel@tonic-gate } 9740Sstevel@tonic-gate 9750Sstevel@tonic-gate int 9760Sstevel@tonic-gate kmem_read_magazines(kmem_cache_t *cp, uintptr_t addr, int ncpus, 9770Sstevel@tonic-gate void ***maglistp, size_t *magcntp, size_t *magmaxp, int alloc_flags) 9780Sstevel@tonic-gate { 
9790Sstevel@tonic-gate kmem_magazine_t *kmp, *mp; 9800Sstevel@tonic-gate void **maglist = NULL; 9810Sstevel@tonic-gate int i, cpu; 9820Sstevel@tonic-gate size_t magsize, magmax, magbsize; 9830Sstevel@tonic-gate size_t magcnt = 0; 9840Sstevel@tonic-gate 9850Sstevel@tonic-gate /* 9860Sstevel@tonic-gate * Read the magtype out of the cache, after verifying the pointer's 9870Sstevel@tonic-gate * correctness. 9880Sstevel@tonic-gate */ 9890Sstevel@tonic-gate magsize = kmem_get_magsize(cp); 9901528Sjwadams if (magsize == 0) { 9911528Sjwadams *maglistp = NULL; 9921528Sjwadams *magcntp = 0; 9931528Sjwadams *magmaxp = 0; 9941528Sjwadams return (WALK_NEXT); 9951528Sjwadams } 9960Sstevel@tonic-gate 9970Sstevel@tonic-gate /* 9980Sstevel@tonic-gate * There are several places where we need to go buffer hunting: 9990Sstevel@tonic-gate * the per-CPU loaded magazine, the per-CPU spare full magazine, 10000Sstevel@tonic-gate * and the full magazine list in the depot. 10010Sstevel@tonic-gate * 10020Sstevel@tonic-gate * For an upper bound on the number of buffers in the magazine 10030Sstevel@tonic-gate * layer, we have the number of magazines on the cache_full 10040Sstevel@tonic-gate * list plus at most two magazines per CPU (the loaded and the 10050Sstevel@tonic-gate * spare). Toss in 100 magazines as a fudge factor in case this 10060Sstevel@tonic-gate * is live (the number "100" comes from the same fudge factor in 10070Sstevel@tonic-gate * crash(1M)). 
10080Sstevel@tonic-gate */ 10090Sstevel@tonic-gate magmax = (cp->cache_full.ml_total + 2 * ncpus + 100) * magsize; 10100Sstevel@tonic-gate magbsize = offsetof(kmem_magazine_t, mag_round[magsize]); 10110Sstevel@tonic-gate 10120Sstevel@tonic-gate if (magbsize >= PAGESIZE / 2) { 10130Sstevel@tonic-gate mdb_warn("magazine size for cache %p unreasonable (%x)\n", 10140Sstevel@tonic-gate addr, magbsize); 10151528Sjwadams return (WALK_ERR); 10160Sstevel@tonic-gate } 10170Sstevel@tonic-gate 10180Sstevel@tonic-gate maglist = mdb_alloc(magmax * sizeof (void *), alloc_flags); 10190Sstevel@tonic-gate mp = mdb_alloc(magbsize, alloc_flags); 10200Sstevel@tonic-gate if (mp == NULL || maglist == NULL) 10210Sstevel@tonic-gate goto fail; 10220Sstevel@tonic-gate 10230Sstevel@tonic-gate /* 10240Sstevel@tonic-gate * First up: the magazines in the depot (i.e. on the cache_full list). 10250Sstevel@tonic-gate */ 10260Sstevel@tonic-gate for (kmp = cp->cache_full.ml_list; kmp != NULL; ) { 10270Sstevel@tonic-gate READMAG_ROUNDS(magsize); 10280Sstevel@tonic-gate kmp = mp->mag_next; 10290Sstevel@tonic-gate 10300Sstevel@tonic-gate if (kmp == cp->cache_full.ml_list) 10310Sstevel@tonic-gate break; /* cache_full list loop detected */ 10320Sstevel@tonic-gate } 10330Sstevel@tonic-gate 10340Sstevel@tonic-gate dprintf(("cache_full list done\n")); 10350Sstevel@tonic-gate 10360Sstevel@tonic-gate /* 10370Sstevel@tonic-gate * Now whip through the CPUs, snagging the loaded magazines 10380Sstevel@tonic-gate * and full spares. 
10390Sstevel@tonic-gate */ 10400Sstevel@tonic-gate for (cpu = 0; cpu < ncpus; cpu++) { 10410Sstevel@tonic-gate kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu]; 10420Sstevel@tonic-gate 10430Sstevel@tonic-gate dprintf(("reading cpu cache %p\n", 10440Sstevel@tonic-gate (uintptr_t)ccp - (uintptr_t)cp + addr)); 10450Sstevel@tonic-gate 10460Sstevel@tonic-gate if (ccp->cc_rounds > 0 && 10470Sstevel@tonic-gate (kmp = ccp->cc_loaded) != NULL) { 10480Sstevel@tonic-gate dprintf(("reading %d loaded rounds\n", ccp->cc_rounds)); 10490Sstevel@tonic-gate READMAG_ROUNDS(ccp->cc_rounds); 10500Sstevel@tonic-gate } 10510Sstevel@tonic-gate 10520Sstevel@tonic-gate if (ccp->cc_prounds > 0 && 10530Sstevel@tonic-gate (kmp = ccp->cc_ploaded) != NULL) { 10540Sstevel@tonic-gate dprintf(("reading %d previously loaded rounds\n", 10550Sstevel@tonic-gate ccp->cc_prounds)); 10560Sstevel@tonic-gate READMAG_ROUNDS(ccp->cc_prounds); 10570Sstevel@tonic-gate } 10580Sstevel@tonic-gate } 10590Sstevel@tonic-gate 10600Sstevel@tonic-gate dprintf(("magazine layer: %d buffers\n", magcnt)); 10610Sstevel@tonic-gate 10620Sstevel@tonic-gate if (!(alloc_flags & UM_GC)) 10630Sstevel@tonic-gate mdb_free(mp, magbsize); 10640Sstevel@tonic-gate 10650Sstevel@tonic-gate *maglistp = maglist; 10660Sstevel@tonic-gate *magcntp = magcnt; 10670Sstevel@tonic-gate *magmaxp = magmax; 10680Sstevel@tonic-gate 10690Sstevel@tonic-gate return (WALK_NEXT); 10700Sstevel@tonic-gate 10710Sstevel@tonic-gate fail: 10720Sstevel@tonic-gate if (!(alloc_flags & UM_GC)) { 10730Sstevel@tonic-gate if (mp) 10740Sstevel@tonic-gate mdb_free(mp, magbsize); 10750Sstevel@tonic-gate if (maglist) 10760Sstevel@tonic-gate mdb_free(maglist, magmax * sizeof (void *)); 10770Sstevel@tonic-gate } 10780Sstevel@tonic-gate return (WALK_ERR); 10790Sstevel@tonic-gate } 10800Sstevel@tonic-gate 10810Sstevel@tonic-gate static int 10820Sstevel@tonic-gate kmem_walk_callback(mdb_walk_state_t *wsp, uintptr_t buf) 10830Sstevel@tonic-gate { 10840Sstevel@tonic-gate return 
(wsp->walk_callback(buf, NULL, wsp->walk_cbdata)); 10850Sstevel@tonic-gate } 10860Sstevel@tonic-gate 10870Sstevel@tonic-gate static int 10880Sstevel@tonic-gate bufctl_walk_callback(kmem_cache_t *cp, mdb_walk_state_t *wsp, uintptr_t buf) 10890Sstevel@tonic-gate { 10900Sstevel@tonic-gate kmem_bufctl_audit_t b; 10910Sstevel@tonic-gate 10920Sstevel@tonic-gate /* 10930Sstevel@tonic-gate * if KMF_AUDIT is not set, we know that we're looking at a 10940Sstevel@tonic-gate * kmem_bufctl_t. 10950Sstevel@tonic-gate */ 10960Sstevel@tonic-gate if (!(cp->cache_flags & KMF_AUDIT) || 10970Sstevel@tonic-gate mdb_vread(&b, sizeof (kmem_bufctl_audit_t), buf) == -1) { 10980Sstevel@tonic-gate (void) memset(&b, 0, sizeof (b)); 10990Sstevel@tonic-gate if (mdb_vread(&b, sizeof (kmem_bufctl_t), buf) == -1) { 11000Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", buf); 11010Sstevel@tonic-gate return (WALK_ERR); 11020Sstevel@tonic-gate } 11030Sstevel@tonic-gate } 11040Sstevel@tonic-gate 11050Sstevel@tonic-gate return (wsp->walk_callback(buf, &b, wsp->walk_cbdata)); 11060Sstevel@tonic-gate } 11070Sstevel@tonic-gate 11080Sstevel@tonic-gate typedef struct kmem_walk { 11090Sstevel@tonic-gate int kmw_type; 11100Sstevel@tonic-gate 11110Sstevel@tonic-gate int kmw_addr; /* cache address */ 11120Sstevel@tonic-gate kmem_cache_t *kmw_cp; 11130Sstevel@tonic-gate size_t kmw_csize; 11140Sstevel@tonic-gate 11150Sstevel@tonic-gate /* 11160Sstevel@tonic-gate * magazine layer 11170Sstevel@tonic-gate */ 11180Sstevel@tonic-gate void **kmw_maglist; 11190Sstevel@tonic-gate size_t kmw_max; 11200Sstevel@tonic-gate size_t kmw_count; 11210Sstevel@tonic-gate size_t kmw_pos; 11220Sstevel@tonic-gate 11230Sstevel@tonic-gate /* 11240Sstevel@tonic-gate * slab layer 11250Sstevel@tonic-gate */ 11260Sstevel@tonic-gate char *kmw_valid; /* to keep track of freed buffers */ 11270Sstevel@tonic-gate char *kmw_ubase; /* buffer for slab data */ 11280Sstevel@tonic-gate } kmem_walk_t; 11290Sstevel@tonic-gate 
11300Sstevel@tonic-gate static int 11310Sstevel@tonic-gate kmem_walk_init_common(mdb_walk_state_t *wsp, int type) 11320Sstevel@tonic-gate { 11330Sstevel@tonic-gate kmem_walk_t *kmw; 11340Sstevel@tonic-gate int ncpus, csize; 11350Sstevel@tonic-gate kmem_cache_t *cp; 11361528Sjwadams size_t vm_quantum; 11370Sstevel@tonic-gate 11380Sstevel@tonic-gate size_t magmax, magcnt; 11390Sstevel@tonic-gate void **maglist = NULL; 11400Sstevel@tonic-gate uint_t chunksize, slabsize; 11410Sstevel@tonic-gate int status = WALK_ERR; 11420Sstevel@tonic-gate uintptr_t addr = wsp->walk_addr; 11430Sstevel@tonic-gate const char *layered; 11440Sstevel@tonic-gate 11450Sstevel@tonic-gate type &= ~KM_HASH; 11460Sstevel@tonic-gate 11470Sstevel@tonic-gate if (addr == NULL) { 11480Sstevel@tonic-gate mdb_warn("kmem walk doesn't support global walks\n"); 11490Sstevel@tonic-gate return (WALK_ERR); 11500Sstevel@tonic-gate } 11510Sstevel@tonic-gate 11520Sstevel@tonic-gate dprintf(("walking %p\n", addr)); 11530Sstevel@tonic-gate 11540Sstevel@tonic-gate /* 11550Sstevel@tonic-gate * First we need to figure out how many CPUs are configured in the 11560Sstevel@tonic-gate * system to know how much to slurp out. 11570Sstevel@tonic-gate */ 11580Sstevel@tonic-gate mdb_readvar(&ncpus, "max_ncpus"); 11590Sstevel@tonic-gate 11600Sstevel@tonic-gate csize = KMEM_CACHE_SIZE(ncpus); 11610Sstevel@tonic-gate cp = mdb_alloc(csize, UM_SLEEP); 11620Sstevel@tonic-gate 11630Sstevel@tonic-gate if (mdb_vread(cp, csize, addr) == -1) { 11640Sstevel@tonic-gate mdb_warn("couldn't read cache at addr %p", addr); 11650Sstevel@tonic-gate goto out2; 11660Sstevel@tonic-gate } 11670Sstevel@tonic-gate 11681528Sjwadams /* 11691528Sjwadams * It's easy for someone to hand us an invalid cache address. 11701528Sjwadams * Unfortunately, it is hard for this walker to survive an 11711528Sjwadams * invalid cache cleanly. So we make sure that: 11721528Sjwadams * 11731528Sjwadams * 1. the vmem arena for the cache is readable, 11741528Sjwadams * 2. 
the vmem arena's quantum is a power of 2, 11751528Sjwadams * 3. our slabsize is a multiple of the quantum, and 11761528Sjwadams * 4. our chunksize is >0 and less than our slabsize. 11771528Sjwadams */ 11781528Sjwadams if (mdb_vread(&vm_quantum, sizeof (vm_quantum), 11791528Sjwadams (uintptr_t)&cp->cache_arena->vm_quantum) == -1 || 11801528Sjwadams vm_quantum == 0 || 11811528Sjwadams (vm_quantum & (vm_quantum - 1)) != 0 || 11821528Sjwadams cp->cache_slabsize < vm_quantum || 11831528Sjwadams P2PHASE(cp->cache_slabsize, vm_quantum) != 0 || 11841528Sjwadams cp->cache_chunksize == 0 || 11851528Sjwadams cp->cache_chunksize > cp->cache_slabsize) { 11861528Sjwadams mdb_warn("%p is not a valid kmem_cache_t\n", addr); 11871528Sjwadams goto out2; 11881528Sjwadams } 11891528Sjwadams 11900Sstevel@tonic-gate dprintf(("buf total is %d\n", cp->cache_buftotal)); 11910Sstevel@tonic-gate 11920Sstevel@tonic-gate if (cp->cache_buftotal == 0) { 11930Sstevel@tonic-gate mdb_free(cp, csize); 11940Sstevel@tonic-gate return (WALK_DONE); 11950Sstevel@tonic-gate } 11960Sstevel@tonic-gate 11970Sstevel@tonic-gate /* 11980Sstevel@tonic-gate * If they ask for bufctls, but it's a small-slab cache, 11990Sstevel@tonic-gate * there is nothing to report. 12000Sstevel@tonic-gate */ 12010Sstevel@tonic-gate if ((type & KM_BUFCTL) && !(cp->cache_flags & KMF_HASH)) { 12020Sstevel@tonic-gate dprintf(("bufctl requested, not KMF_HASH (flags: %p)\n", 12030Sstevel@tonic-gate cp->cache_flags)); 12040Sstevel@tonic-gate mdb_free(cp, csize); 12050Sstevel@tonic-gate return (WALK_DONE); 12060Sstevel@tonic-gate } 12070Sstevel@tonic-gate 12080Sstevel@tonic-gate /* 12090Sstevel@tonic-gate * If they want constructed buffers, but there's no constructor or 12100Sstevel@tonic-gate * the cache has DEADBEEF checking enabled, there is nothing to report. 
12110Sstevel@tonic-gate */ 12120Sstevel@tonic-gate if ((type & KM_CONSTRUCTED) && (!(type & KM_FREE) || 12130Sstevel@tonic-gate cp->cache_constructor == NULL || 12140Sstevel@tonic-gate (cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) == KMF_DEADBEEF)) { 12150Sstevel@tonic-gate mdb_free(cp, csize); 12160Sstevel@tonic-gate return (WALK_DONE); 12170Sstevel@tonic-gate } 12180Sstevel@tonic-gate 12190Sstevel@tonic-gate /* 12200Sstevel@tonic-gate * Read in the contents of the magazine layer 12210Sstevel@tonic-gate */ 12220Sstevel@tonic-gate if (kmem_read_magazines(cp, addr, ncpus, &maglist, &magcnt, 12230Sstevel@tonic-gate &magmax, UM_SLEEP) == WALK_ERR) 12240Sstevel@tonic-gate goto out2; 12250Sstevel@tonic-gate 12260Sstevel@tonic-gate /* 12270Sstevel@tonic-gate * We have all of the buffers from the magazines; if we are walking 12280Sstevel@tonic-gate * allocated buffers, sort them so we can bsearch them later. 12290Sstevel@tonic-gate */ 12300Sstevel@tonic-gate if (type & KM_ALLOCATED) 12310Sstevel@tonic-gate qsort(maglist, magcnt, sizeof (void *), addrcmp); 12320Sstevel@tonic-gate 12330Sstevel@tonic-gate wsp->walk_data = kmw = mdb_zalloc(sizeof (kmem_walk_t), UM_SLEEP); 12340Sstevel@tonic-gate 12350Sstevel@tonic-gate kmw->kmw_type = type; 12360Sstevel@tonic-gate kmw->kmw_addr = addr; 12370Sstevel@tonic-gate kmw->kmw_cp = cp; 12380Sstevel@tonic-gate kmw->kmw_csize = csize; 12390Sstevel@tonic-gate kmw->kmw_maglist = maglist; 12400Sstevel@tonic-gate kmw->kmw_max = magmax; 12410Sstevel@tonic-gate kmw->kmw_count = magcnt; 12420Sstevel@tonic-gate kmw->kmw_pos = 0; 12430Sstevel@tonic-gate 12440Sstevel@tonic-gate /* 12450Sstevel@tonic-gate * When walking allocated buffers in a KMF_HASH cache, we walk the 12460Sstevel@tonic-gate * hash table instead of the slab layer. 
12470Sstevel@tonic-gate */ 12480Sstevel@tonic-gate if ((cp->cache_flags & KMF_HASH) && (type & KM_ALLOCATED)) { 12490Sstevel@tonic-gate layered = "kmem_hash"; 12500Sstevel@tonic-gate 12510Sstevel@tonic-gate kmw->kmw_type |= KM_HASH; 12520Sstevel@tonic-gate } else { 12530Sstevel@tonic-gate /* 12540Sstevel@tonic-gate * If we are walking freed buffers, we only need the 12550Sstevel@tonic-gate * magazine layer plus the partially allocated slabs. 12560Sstevel@tonic-gate * To walk allocated buffers, we need all of the slabs. 12570Sstevel@tonic-gate */ 12580Sstevel@tonic-gate if (type & KM_ALLOCATED) 12590Sstevel@tonic-gate layered = "kmem_slab"; 12600Sstevel@tonic-gate else 12610Sstevel@tonic-gate layered = "kmem_slab_partial"; 12620Sstevel@tonic-gate 12630Sstevel@tonic-gate /* 12640Sstevel@tonic-gate * for small-slab caches, we read in the entire slab. For 12650Sstevel@tonic-gate * freed buffers, we can just walk the freelist. For 12660Sstevel@tonic-gate * allocated buffers, we use a 'valid' array to track 12670Sstevel@tonic-gate * the freed buffers. 
12680Sstevel@tonic-gate */ 12690Sstevel@tonic-gate if (!(cp->cache_flags & KMF_HASH)) { 12700Sstevel@tonic-gate chunksize = cp->cache_chunksize; 12710Sstevel@tonic-gate slabsize = cp->cache_slabsize; 12720Sstevel@tonic-gate 12730Sstevel@tonic-gate kmw->kmw_ubase = mdb_alloc(slabsize + 12740Sstevel@tonic-gate sizeof (kmem_bufctl_t), UM_SLEEP); 12750Sstevel@tonic-gate 12760Sstevel@tonic-gate if (type & KM_ALLOCATED) 12770Sstevel@tonic-gate kmw->kmw_valid = 12780Sstevel@tonic-gate mdb_alloc(slabsize / chunksize, UM_SLEEP); 12790Sstevel@tonic-gate } 12800Sstevel@tonic-gate } 12810Sstevel@tonic-gate 12820Sstevel@tonic-gate status = WALK_NEXT; 12830Sstevel@tonic-gate 12840Sstevel@tonic-gate if (mdb_layered_walk(layered, wsp) == -1) { 12850Sstevel@tonic-gate mdb_warn("unable to start layered '%s' walk", layered); 12860Sstevel@tonic-gate status = WALK_ERR; 12870Sstevel@tonic-gate } 12880Sstevel@tonic-gate 12890Sstevel@tonic-gate out1: 12900Sstevel@tonic-gate if (status == WALK_ERR) { 12910Sstevel@tonic-gate if (kmw->kmw_valid) 12920Sstevel@tonic-gate mdb_free(kmw->kmw_valid, slabsize / chunksize); 12930Sstevel@tonic-gate 12940Sstevel@tonic-gate if (kmw->kmw_ubase) 12950Sstevel@tonic-gate mdb_free(kmw->kmw_ubase, slabsize + 12960Sstevel@tonic-gate sizeof (kmem_bufctl_t)); 12970Sstevel@tonic-gate 12981528Sjwadams if (kmw->kmw_maglist) 12991528Sjwadams mdb_free(kmw->kmw_maglist, 13001528Sjwadams kmw->kmw_max * sizeof (uintptr_t)); 13011528Sjwadams 13020Sstevel@tonic-gate mdb_free(kmw, sizeof (kmem_walk_t)); 13030Sstevel@tonic-gate wsp->walk_data = NULL; 13040Sstevel@tonic-gate } 13050Sstevel@tonic-gate 13060Sstevel@tonic-gate out2: 13070Sstevel@tonic-gate if (status == WALK_ERR) 13080Sstevel@tonic-gate mdb_free(cp, csize); 13090Sstevel@tonic-gate 13100Sstevel@tonic-gate return (status); 13110Sstevel@tonic-gate } 13120Sstevel@tonic-gate 13130Sstevel@tonic-gate int 13140Sstevel@tonic-gate kmem_walk_step(mdb_walk_state_t *wsp) 13150Sstevel@tonic-gate { 13160Sstevel@tonic-gate 
kmem_walk_t *kmw = wsp->walk_data; 13170Sstevel@tonic-gate int type = kmw->kmw_type; 13180Sstevel@tonic-gate kmem_cache_t *cp = kmw->kmw_cp; 13190Sstevel@tonic-gate 13200Sstevel@tonic-gate void **maglist = kmw->kmw_maglist; 13210Sstevel@tonic-gate int magcnt = kmw->kmw_count; 13220Sstevel@tonic-gate 13230Sstevel@tonic-gate uintptr_t chunksize, slabsize; 13240Sstevel@tonic-gate uintptr_t addr; 13250Sstevel@tonic-gate const kmem_slab_t *sp; 13260Sstevel@tonic-gate const kmem_bufctl_t *bcp; 13270Sstevel@tonic-gate kmem_bufctl_t bc; 13280Sstevel@tonic-gate 13290Sstevel@tonic-gate int chunks; 13300Sstevel@tonic-gate char *kbase; 13310Sstevel@tonic-gate void *buf; 13320Sstevel@tonic-gate int i, ret; 13330Sstevel@tonic-gate 13340Sstevel@tonic-gate char *valid, *ubase; 13350Sstevel@tonic-gate 13360Sstevel@tonic-gate /* 13370Sstevel@tonic-gate * first, handle the 'kmem_hash' layered walk case 13380Sstevel@tonic-gate */ 13390Sstevel@tonic-gate if (type & KM_HASH) { 13400Sstevel@tonic-gate /* 13410Sstevel@tonic-gate * We have a buffer which has been allocated out of the 13420Sstevel@tonic-gate * global layer. We need to make sure that it's not 13430Sstevel@tonic-gate * actually sitting in a magazine before we report it as 13440Sstevel@tonic-gate * an allocated buffer. 
13450Sstevel@tonic-gate */ 13460Sstevel@tonic-gate buf = ((const kmem_bufctl_t *)wsp->walk_layer)->bc_addr; 13470Sstevel@tonic-gate 13480Sstevel@tonic-gate if (magcnt > 0 && 13490Sstevel@tonic-gate bsearch(&buf, maglist, magcnt, sizeof (void *), 13500Sstevel@tonic-gate addrcmp) != NULL) 13510Sstevel@tonic-gate return (WALK_NEXT); 13520Sstevel@tonic-gate 13530Sstevel@tonic-gate if (type & KM_BUFCTL) 13540Sstevel@tonic-gate return (bufctl_walk_callback(cp, wsp, wsp->walk_addr)); 13550Sstevel@tonic-gate 13560Sstevel@tonic-gate return (kmem_walk_callback(wsp, (uintptr_t)buf)); 13570Sstevel@tonic-gate } 13580Sstevel@tonic-gate 13590Sstevel@tonic-gate ret = WALK_NEXT; 13600Sstevel@tonic-gate 13610Sstevel@tonic-gate addr = kmw->kmw_addr; 13620Sstevel@tonic-gate 13630Sstevel@tonic-gate /* 13640Sstevel@tonic-gate * If we're walking freed buffers, report everything in the 13650Sstevel@tonic-gate * magazine layer before processing the first slab. 13660Sstevel@tonic-gate */ 13670Sstevel@tonic-gate if ((type & KM_FREE) && magcnt != 0) { 13680Sstevel@tonic-gate kmw->kmw_count = 0; /* only do this once */ 13690Sstevel@tonic-gate for (i = 0; i < magcnt; i++) { 13700Sstevel@tonic-gate buf = maglist[i]; 13710Sstevel@tonic-gate 13720Sstevel@tonic-gate if (type & KM_BUFCTL) { 13730Sstevel@tonic-gate uintptr_t out; 13740Sstevel@tonic-gate 13750Sstevel@tonic-gate if (cp->cache_flags & KMF_BUFTAG) { 13760Sstevel@tonic-gate kmem_buftag_t *btp; 13770Sstevel@tonic-gate kmem_buftag_t tag; 13780Sstevel@tonic-gate 13790Sstevel@tonic-gate /* LINTED - alignment */ 13800Sstevel@tonic-gate btp = KMEM_BUFTAG(cp, buf); 13810Sstevel@tonic-gate if (mdb_vread(&tag, sizeof (tag), 13820Sstevel@tonic-gate (uintptr_t)btp) == -1) { 13830Sstevel@tonic-gate mdb_warn("reading buftag for " 13840Sstevel@tonic-gate "%p at %p", buf, btp); 13850Sstevel@tonic-gate continue; 13860Sstevel@tonic-gate } 13870Sstevel@tonic-gate out = (uintptr_t)tag.bt_bufctl; 13880Sstevel@tonic-gate } else { 13890Sstevel@tonic-gate if 
(kmem_hash_lookup(cp, addr, buf, 13900Sstevel@tonic-gate &out) == -1) 13910Sstevel@tonic-gate continue; 13920Sstevel@tonic-gate } 13930Sstevel@tonic-gate ret = bufctl_walk_callback(cp, wsp, out); 13940Sstevel@tonic-gate } else { 13950Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf); 13960Sstevel@tonic-gate } 13970Sstevel@tonic-gate 13980Sstevel@tonic-gate if (ret != WALK_NEXT) 13990Sstevel@tonic-gate return (ret); 14000Sstevel@tonic-gate } 14010Sstevel@tonic-gate } 14020Sstevel@tonic-gate 14030Sstevel@tonic-gate /* 14040Sstevel@tonic-gate * If they want constructed buffers, we're finished, since the 14050Sstevel@tonic-gate * magazine layer holds them all. 14060Sstevel@tonic-gate */ 14070Sstevel@tonic-gate if (type & KM_CONSTRUCTED) 14080Sstevel@tonic-gate return (WALK_DONE); 14090Sstevel@tonic-gate 14100Sstevel@tonic-gate /* 14110Sstevel@tonic-gate * Handle the buffers in the current slab 14120Sstevel@tonic-gate */ 14130Sstevel@tonic-gate chunksize = cp->cache_chunksize; 14140Sstevel@tonic-gate slabsize = cp->cache_slabsize; 14150Sstevel@tonic-gate 14160Sstevel@tonic-gate sp = wsp->walk_layer; 14170Sstevel@tonic-gate chunks = sp->slab_chunks; 14180Sstevel@tonic-gate kbase = sp->slab_base; 14190Sstevel@tonic-gate 14200Sstevel@tonic-gate dprintf(("kbase is %p\n", kbase)); 14210Sstevel@tonic-gate 14220Sstevel@tonic-gate if (!(cp->cache_flags & KMF_HASH)) { 14230Sstevel@tonic-gate valid = kmw->kmw_valid; 14240Sstevel@tonic-gate ubase = kmw->kmw_ubase; 14250Sstevel@tonic-gate 14260Sstevel@tonic-gate if (mdb_vread(ubase, chunks * chunksize, 14270Sstevel@tonic-gate (uintptr_t)kbase) == -1) { 14280Sstevel@tonic-gate mdb_warn("failed to read slab contents at %p", kbase); 14290Sstevel@tonic-gate return (WALK_ERR); 14300Sstevel@tonic-gate } 14310Sstevel@tonic-gate 14320Sstevel@tonic-gate /* 14330Sstevel@tonic-gate * Set up the valid map as fully allocated -- we'll punch 14340Sstevel@tonic-gate * out the freelist. 
14350Sstevel@tonic-gate */ 14360Sstevel@tonic-gate if (type & KM_ALLOCATED) 14370Sstevel@tonic-gate (void) memset(valid, 1, chunks); 14380Sstevel@tonic-gate } else { 14390Sstevel@tonic-gate valid = NULL; 14400Sstevel@tonic-gate ubase = NULL; 14410Sstevel@tonic-gate } 14420Sstevel@tonic-gate 14430Sstevel@tonic-gate /* 14440Sstevel@tonic-gate * walk the slab's freelist 14450Sstevel@tonic-gate */ 14460Sstevel@tonic-gate bcp = sp->slab_head; 14470Sstevel@tonic-gate 14480Sstevel@tonic-gate dprintf(("refcnt is %d; chunks is %d\n", sp->slab_refcnt, chunks)); 14490Sstevel@tonic-gate 14500Sstevel@tonic-gate /* 14510Sstevel@tonic-gate * since we could be in the middle of allocating a buffer, 14520Sstevel@tonic-gate * our refcnt could be one higher than it aught. So we 14530Sstevel@tonic-gate * check one further on the freelist than the count allows. 14540Sstevel@tonic-gate */ 14550Sstevel@tonic-gate for (i = sp->slab_refcnt; i <= chunks; i++) { 14560Sstevel@tonic-gate uint_t ndx; 14570Sstevel@tonic-gate 14580Sstevel@tonic-gate dprintf(("bcp is %p\n", bcp)); 14590Sstevel@tonic-gate 14600Sstevel@tonic-gate if (bcp == NULL) { 14610Sstevel@tonic-gate if (i == chunks) 14620Sstevel@tonic-gate break; 14630Sstevel@tonic-gate mdb_warn( 14640Sstevel@tonic-gate "slab %p in cache %p freelist too short by %d\n", 14650Sstevel@tonic-gate sp, addr, chunks - i); 14660Sstevel@tonic-gate break; 14670Sstevel@tonic-gate } 14680Sstevel@tonic-gate 14690Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) { 14700Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), (uintptr_t)bcp) == -1) { 14710Sstevel@tonic-gate mdb_warn("failed to read bufctl ptr at %p", 14720Sstevel@tonic-gate bcp); 14730Sstevel@tonic-gate break; 14740Sstevel@tonic-gate } 14750Sstevel@tonic-gate buf = bc.bc_addr; 14760Sstevel@tonic-gate } else { 14770Sstevel@tonic-gate /* 14780Sstevel@tonic-gate * Otherwise the buffer is in the slab which 14790Sstevel@tonic-gate * we've read in; we just need to determine 14800Sstevel@tonic-gate * its 
offset in the slab to find the 14810Sstevel@tonic-gate * kmem_bufctl_t. 14820Sstevel@tonic-gate */ 14830Sstevel@tonic-gate bc = *((kmem_bufctl_t *) 14840Sstevel@tonic-gate ((uintptr_t)bcp - (uintptr_t)kbase + 14850Sstevel@tonic-gate (uintptr_t)ubase)); 14860Sstevel@tonic-gate 14870Sstevel@tonic-gate buf = KMEM_BUF(cp, bcp); 14880Sstevel@tonic-gate } 14890Sstevel@tonic-gate 14900Sstevel@tonic-gate ndx = ((uintptr_t)buf - (uintptr_t)kbase) / chunksize; 14910Sstevel@tonic-gate 14920Sstevel@tonic-gate if (ndx > slabsize / cp->cache_bufsize) { 14930Sstevel@tonic-gate /* 14940Sstevel@tonic-gate * This is very wrong; we have managed to find 14950Sstevel@tonic-gate * a buffer in the slab which shouldn't 14960Sstevel@tonic-gate * actually be here. Emit a warning, and 14970Sstevel@tonic-gate * try to continue. 14980Sstevel@tonic-gate */ 14990Sstevel@tonic-gate mdb_warn("buf %p is out of range for " 15000Sstevel@tonic-gate "slab %p, cache %p\n", buf, sp, addr); 15010Sstevel@tonic-gate } else if (type & KM_ALLOCATED) { 15020Sstevel@tonic-gate /* 15030Sstevel@tonic-gate * we have found a buffer on the slab's freelist; 15040Sstevel@tonic-gate * clear its entry 15050Sstevel@tonic-gate */ 15060Sstevel@tonic-gate valid[ndx] = 0; 15070Sstevel@tonic-gate } else { 15080Sstevel@tonic-gate /* 15090Sstevel@tonic-gate * Report this freed buffer 15100Sstevel@tonic-gate */ 15110Sstevel@tonic-gate if (type & KM_BUFCTL) { 15120Sstevel@tonic-gate ret = bufctl_walk_callback(cp, wsp, 15130Sstevel@tonic-gate (uintptr_t)bcp); 15140Sstevel@tonic-gate } else { 15150Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf); 15160Sstevel@tonic-gate } 15170Sstevel@tonic-gate if (ret != WALK_NEXT) 15180Sstevel@tonic-gate return (ret); 15190Sstevel@tonic-gate } 15200Sstevel@tonic-gate 15210Sstevel@tonic-gate bcp = bc.bc_next; 15220Sstevel@tonic-gate } 15230Sstevel@tonic-gate 15240Sstevel@tonic-gate if (bcp != NULL) { 15250Sstevel@tonic-gate dprintf(("slab %p in cache %p freelist too long (%p)\n", 
		    sp, addr, bcp));
	}

	/*
	 * If we are walking freed buffers, the loop above handled reporting
	 * them.
	 */
	if (type & KM_FREE)
		return (WALK_NEXT);

	if (type & KM_BUFCTL) {
		mdb_warn("impossible situation: small-slab KM_BUFCTL walk for "
		    "cache %p\n", addr);
		return (WALK_ERR);
	}

	/*
	 * Report allocated buffers, skipping buffers in the magazine layer.
	 * We only get this far for small-slab caches.
	 */
	for (i = 0; ret == WALK_NEXT && i < chunks; i++) {
		buf = (char *)kbase + i * chunksize;

		if (!valid[i])
			continue;		/* on slab freelist */

		if (magcnt > 0 &&
		    bsearch(&buf, maglist, magcnt, sizeof (void *),
		    addrcmp) != NULL)
			continue;		/* in magazine layer */

		ret = kmem_walk_callback(wsp, (uintptr_t)buf);
	}
	return (ret);
}

/*
 * Tear down the per-cache kmem walk state hung off wsp->walk_data.
 * Each field is freed only if it was actually allocated, so this is
 * safe to call even if walker initialization failed partway through.
 */
void
kmem_walk_fini(mdb_walk_state_t *wsp)
{
	kmem_walk_t *kmw = wsp->walk_data;
	uintptr_t chunksize;
	uintptr_t slabsize;

	if (kmw == NULL)
		return;

	if (kmw->kmw_maglist != NULL)
		mdb_free(kmw->kmw_maglist, kmw->kmw_max * sizeof (void *));

	chunksize = kmw->kmw_cp->cache_chunksize;
	slabsize = kmw->kmw_cp->cache_slabsize;

	if (kmw->kmw_valid != NULL)
		mdb_free(kmw->kmw_valid, slabsize / chunksize);
	if (kmw->kmw_ubase != NULL)
		mdb_free(kmw->kmw_ubase, slabsize + sizeof (kmem_bufctl_t));

	mdb_free(kmw->kmw_cp, kmw->kmw_csize);
	mdb_free(kmw, sizeof (kmem_walk_t));
}

/*
 * Per-cache callback used when walking every kmem cache: re-invokes the
 * named walker (in wsp->walk_data) on each individual cache address.
 */
/*ARGSUSED*/
static int
kmem_walk_all(uintptr_t addr, const kmem_cache_t *c, mdb_walk_state_t *wsp)
{
	/*
	 * Buffers allocated from NOTOUCH caches can also show up as freed
	 * memory in other caches.  This can be a little confusing, so we
	 * don't walk NOTOUCH caches when walking all caches (thereby assuring
	 * that "::walk kmem" and "::walk freemem" yield disjoint output).
	 */
	if (c->cache_cflags & KMC_NOTOUCH)
		return (WALK_NEXT);

	if (mdb_pwalk(wsp->walk_data, wsp->walk_callback,
	    wsp->walk_cbdata, addr) == -1)
		return (WALK_DONE);

	return (WALK_NEXT);
}

/*
 * Run the named per-cache walker over every kmem cache.  NOTE: this macro
 * expands to return statements, so it may only be used from the body of a
 * walker init function.
 */
#define	KMEM_WALK_ALL(name, wsp) { \
	wsp->walk_data = (name); \
	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_walk_all, wsp) == -1) \
		return (WALK_ERR); \
	return (WALK_DONE); \
}

int
kmem_walk_init(mdb_walk_state_t *wsp)
{
	/* an explicit walk_arg (if any) overrides the walk address */
	if (wsp->walk_arg != NULL)
		wsp->walk_addr = (uintptr_t)wsp->walk_arg;

	/* no cache specified: walk every cache's allocated buffers */
	if (wsp->walk_addr == NULL)
		KMEM_WALK_ALL("kmem", wsp);
	return (kmem_walk_init_common(wsp, KM_ALLOCATED));
}

int
bufctl_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL)
		KMEM_WALK_ALL("bufctl", wsp);
	return (kmem_walk_init_common(wsp, KM_ALLOCATED | KM_BUFCTL));
}

int
freemem_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL)
		KMEM_WALK_ALL("freemem", wsp);
	return (kmem_walk_init_common(wsp, KM_FREE));
}

int
freemem_constructed_walk_init(mdb_walk_state_t *wsp)
{
	/* only report free buffers whose constructor has been applied */
	if (wsp->walk_addr == NULL)
		KMEM_WALK_ALL("freemem_constructed", wsp);
	return (kmem_walk_init_common(wsp, KM_FREE | KM_CONSTRUCTED));
}

int
freectl_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL)
		KMEM_WALK_ALL("freectl", wsp);
	return (kmem_walk_init_common(wsp, KM_FREE | KM_BUFCTL));
}

int
freectl_constructed_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL)
		KMEM_WALK_ALL("freectl_constructed", wsp);
	return (kmem_walk_init_common(wsp,
	    KM_FREE | KM_BUFCTL | KM_CONSTRUCTED));
}

/*
 * State for walking the audit-log history chain of a single bufctl.
 */
typedef struct bufctl_history_walk {
	void		*bhw_next;	/* next log entry to visit */
	kmem_cache_t	*bhw_cache;	/* cache of the base bufctl */
	kmem_slab_t	*bhw_slab;	/* slab of the base bufctl */
	hrtime_t	bhw_timestamp;	/* timestamp of last entry visited */
} bufctl_history_walk_t;

int
bufctl_history_walk_init(mdb_walk_state_t *wsp)
{
	bufctl_history_walk_t *bhw;
	kmem_bufctl_audit_t bc;
	kmem_bufctl_audit_t bcn;

	if (wsp->walk_addr == NULL) {
		mdb_warn("bufctl_history walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	if (mdb_vread(&bc, sizeof (bc), wsp->walk_addr) == -1) {
		mdb_warn("unable to read bufctl at %p", wsp->walk_addr);
		return (WALK_ERR);
	}

	bhw = mdb_zalloc(sizeof (*bhw), UM_SLEEP);
	bhw->bhw_timestamp = 0;
	bhw->bhw_cache = bc.bc_cache;
	bhw->bhw_slab = bc.bc_slab;

	/*
	 * sometimes the first log entry matches the base bufctl; in that
	 * case, skip the base bufctl.
	 */
	if (bc.bc_lastlog != NULL &&
	    mdb_vread(&bcn, sizeof (bcn), (uintptr_t)bc.bc_lastlog) != -1 &&
	    bc.bc_addr == bcn.bc_addr &&
	    bc.bc_cache == bcn.bc_cache &&
	    bc.bc_slab == bcn.bc_slab &&
	    bc.bc_timestamp == bcn.bc_timestamp &&
	    bc.bc_thread == bcn.bc_thread)
		bhw->bhw_next = bc.bc_lastlog;
	else
		bhw->bhw_next = (void *)wsp->walk_addr;

	wsp->walk_addr = (uintptr_t)bc.bc_addr;
	wsp->walk_data = bhw;

	return (WALK_NEXT);
}

int
bufctl_history_walk_step(mdb_walk_state_t *wsp)
{
	bufctl_history_walk_t *bhw = wsp->walk_data;
	uintptr_t addr = (uintptr_t)bhw->bhw_next;
	uintptr_t baseaddr = wsp->walk_addr;
	kmem_bufctl_audit_t bc;

	if (addr == NULL)
		return (WALK_DONE);

	if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
		mdb_warn("unable to read bufctl at %p", bhw->bhw_next);
		return (WALK_ERR);
	}

	/*
	 * The bufctl is only valid if the address, cache, and slab are
	 * correct.  We also check that the timestamp is decreasing, to
	 * prevent infinite loops.
	 */
	if ((uintptr_t)bc.bc_addr != baseaddr ||
	    bc.bc_cache != bhw->bhw_cache ||
	    bc.bc_slab != bhw->bhw_slab ||
	    (bhw->bhw_timestamp != 0 && bc.bc_timestamp >= bhw->bhw_timestamp))
		return (WALK_DONE);

	bhw->bhw_next = bc.bc_lastlog;
	bhw->bhw_timestamp = bc.bc_timestamp;

	return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata));
}

void
bufctl_history_walk_fini(mdb_walk_state_t *wsp)
{
	bufctl_history_walk_t *bhw = wsp->walk_data;

	mdb_free(bhw, sizeof (*bhw));
}

/*
 * State for walking the kmem transaction log: a local snapshot of the
 * whole log plus an array of pointers to its entries, sorted for
 * presentation (see kmem_log_walk_init).
 */
typedef struct kmem_log_walk {
	kmem_bufctl_audit_t *klw_base;		/* local copy of the log */
	kmem_bufctl_audit_t **klw_sorted;	/* sorted entry pointers */
	kmem_log_header_t klw_lh;		/* copy of the log header */
	size_t klw_size;			/* size of klw_base, bytes */
	size_t klw_maxndx;			/* number of sorted entries */
	size_t klw_ndx;				/* current walk position */
} kmem_log_walk_t;

int
kmem_log_walk_init(mdb_walk_state_t *wsp)
{
	uintptr_t lp = wsp->walk_addr;
	kmem_log_walk_t *klw;
	kmem_log_header_t *lhp;
	int maxndx, i, j, k;

	/*
	 * By default (global walk), walk the kmem_transaction_log.  Otherwise
	 * read the log whose kmem_log_header_t is stored at walk_addr.
	 */
	if (lp == NULL && mdb_readvar(&lp, "kmem_transaction_log") == -1) {
		mdb_warn("failed to read 'kmem_transaction_log'");
		return (WALK_ERR);
	}

	if (lp == NULL) {
		mdb_warn("log is disabled\n");
		return (WALK_ERR);
	}

	klw = mdb_zalloc(sizeof (kmem_log_walk_t), UM_SLEEP);
	lhp = &klw->klw_lh;

	if (mdb_vread(lhp, sizeof (kmem_log_header_t), lp) == -1) {
		mdb_warn("failed to read log header at %p", lp);
		mdb_free(klw, sizeof (kmem_log_walk_t));
		return (WALK_ERR);
	}

	/* snapshot the entire log into local memory */
	klw->klw_size = lhp->lh_chunksize * lhp->lh_nchunks;
	klw->klw_base = mdb_alloc(klw->klw_size, UM_SLEEP);
	maxndx = lhp->lh_chunksize / sizeof (kmem_bufctl_audit_t) - 1;

	if (mdb_vread(klw->klw_base, klw->klw_size,
	    (uintptr_t)lhp->lh_base) == -1) {
		mdb_warn("failed to read log at base %p", lhp->lh_base);
		mdb_free(klw->klw_base, klw->klw_size);
		mdb_free(klw, sizeof (kmem_log_walk_t));
		return (WALK_ERR);
	}

	/*
	 * Build an array of pointers to every entry in every chunk, then
	 * sort them (bufctlcmp) so the walk visits entries in order.
	 */
	klw->klw_sorted = mdb_alloc(maxndx * lhp->lh_nchunks *
	    sizeof (kmem_bufctl_audit_t *), UM_SLEEP);

	for (i = 0, k = 0; i < lhp->lh_nchunks; i++) {
		kmem_bufctl_audit_t *chunk = (kmem_bufctl_audit_t *)
		    ((uintptr_t)klw->klw_base + i * lhp->lh_chunksize);

		for (j = 0; j < maxndx; j++)
			klw->klw_sorted[k++] = &chunk[j];
	}

	qsort(klw->klw_sorted, k, sizeof (kmem_bufctl_audit_t *),
	    (int(*)(const void *, const void *))bufctlcmp);

	klw->klw_maxndx = k;
	wsp->walk_data = klw;

	return (WALK_NEXT);
}

int
kmem_log_walk_step(mdb_walk_state_t *wsp)
{
	kmem_log_walk_t *klw = wsp->walk_data;
	kmem_bufctl_audit_t *bcp;

	if (klw->klw_ndx == klw->klw_maxndx)
		return (WALK_DONE);

	bcp = klw->klw_sorted[klw->klw_ndx++];

	/*
	 * The callback gets the entry's address in the *target* (its offset
	 * into our local copy rebased onto lh_base), with our local copy
	 * passed as the data pointer.
	 */
	return (wsp->walk_callback((uintptr_t)bcp - (uintptr_t)klw->klw_base +
	    (uintptr_t)klw->klw_lh.lh_base, bcp, wsp->walk_cbdata));
}

void
kmem_log_walk_fini(mdb_walk_state_t *wsp)
{
	kmem_log_walk_t *klw = wsp->walk_data;

	mdb_free(klw->klw_base, klw->klw_size);
	mdb_free(klw->klw_sorted, klw->klw_maxndx *
	    sizeof (kmem_bufctl_audit_t *));
	mdb_free(klw, sizeof (kmem_log_walk_t));
}

/*
 * One recorded allocation/free: the bufctl's address and its timestamp.
 */
typedef struct allocdby_bufctl {
	uintptr_t abb_addr;		/* bufctl address in the target */
	hrtime_t abb_ts;		/* bc_timestamp of that bufctl */
} allocdby_bufctl_t;

/*
 * State for the allocdby/freedby walks: collect all bufctls touched by a
 * given thread, then replay them in timestamp order.
 */
typedef struct allocdby_walk {
	const char *abw_walk;		/* walker name: "bufctl"/"freectl" */
	uintptr_t abw_thread;		/* thread to match against bc_thread */
	size_t abw_nbufs;		/* entries collected so far */
	size_t abw_size;		/* capacity of abw_buf */
	allocdby_bufctl_t *abw_buf;	/* collected entries */
	size_t abw_ndx;			/* current walk position */
} allocdby_walk_t;

int
allocdby_walk_bufctl(uintptr_t addr, const kmem_bufctl_audit_t *bcp,
    allocdby_walk_t *abw)
{
	if ((uintptr_t)bcp->bc_thread != abw->abw_thread)
		return (WALK_NEXT);

	/* grow the array by doubling when it fills up */
	if (abw->abw_nbufs == abw->abw_size) {
		allocdby_bufctl_t *buf;
		size_t oldsize = sizeof (allocdby_bufctl_t) * abw->abw_size;

		buf = mdb_zalloc(oldsize << 1, UM_SLEEP);

		bcopy(abw->abw_buf, buf, oldsize);
		mdb_free(abw->abw_buf, oldsize);

		abw->abw_size <<= 1;
		abw->abw_buf = buf;
	}

	abw->abw_buf[abw->abw_nbufs].abb_addr = addr;
	abw->abw_buf[abw->abw_nbufs].abb_ts = bcp->bc_timestamp;
	abw->abw_nbufs++;

	return (WALK_NEXT);
}

/*ARGSUSED*/
int
allocdby_walk_cache(uintptr_t addr, const kmem_cache_t *c, allocdby_walk_t *abw)
{
	if (mdb_pwalk(abw->abw_walk, (mdb_walk_cb_t)allocdby_walk_bufctl,
	    abw, addr) == -1) {
		mdb_warn("couldn't walk bufctl for cache %p", addr);
		return (WALK_DONE);
	}

	return (WALK_NEXT);
}

/*
 * qsort comparator: descending timestamp order, i.e. newest entry first.
 */
static int
allocdby_cmp(const allocdby_bufctl_t *lhs, const allocdby_bufctl_t *rhs)
{
	if (lhs->abb_ts < rhs->abb_ts)
		return (1);
	if (lhs->abb_ts > rhs->abb_ts)
		return (-1);
	return (0);
}

static int
allocdby_walk_init_common(mdb_walk_state_t *wsp, const char *walk)
{
	allocdby_walk_t *abw;

	if (wsp->walk_addr == NULL) {
		mdb_warn("allocdby walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	abw = mdb_zalloc(sizeof (allocdby_walk_t), UM_SLEEP);

	abw->abw_thread = wsp->walk_addr;
	abw->abw_walk = walk;
	abw->abw_size = 128;	/* something reasonable */
	abw->abw_buf =
	    mdb_zalloc(abw->abw_size * sizeof (allocdby_bufctl_t), UM_SLEEP);

	wsp->walk_data = abw;

	/* collect matching bufctls from every cache, then sort by time */
	if (mdb_walk("kmem_cache",
	    (mdb_walk_cb_t)allocdby_walk_cache, abw) == -1) {
		mdb_warn("couldn't walk kmem_cache");
		allocdby_walk_fini(wsp);
		return (WALK_ERR);
	}

	qsort(abw->abw_buf, abw->abw_nbufs, sizeof (allocdby_bufctl_t),
	    (int(*)(const void *, const void *))allocdby_cmp);

	return (WALK_NEXT);
}

int
allocdby_walk_init(mdb_walk_state_t *wsp)
{
	return (allocdby_walk_init_common(wsp, "bufctl"));
}

int
freedby_walk_init(mdb_walk_state_t *wsp)
{
	return (allocdby_walk_init_common(wsp, "freectl"));
}

int
allocdby_walk_step(mdb_walk_state_t *wsp)
{
	allocdby_walk_t *abw = wsp->walk_data;
	kmem_bufctl_audit_t bc;
	uintptr_t addr;

	if (abw->abw_ndx == abw->abw_nbufs)
		return (WALK_DONE);

	addr = abw->abw_buf[abw->abw_ndx++].abb_addr;

	if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
		mdb_warn("couldn't read bufctl at %p", addr);
		return (WALK_DONE);
	}

	return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata));
}

void
allocdby_walk_fini(mdb_walk_state_t *wsp)
{
	allocdby_walk_t *abw = wsp->walk_data;

	mdb_free(abw->abw_buf, sizeof (allocdby_bufctl_t) * abw->abw_size);
	mdb_free(abw, sizeof (allocdby_walk_t));
}

/*
 * Print one line per bufctl: its address, timestamp, and the first stack
 * frame that is not an internal kmem_* function (the interesting caller).
 */
/*ARGSUSED*/
int
allocdby_walk(uintptr_t addr, const kmem_bufctl_audit_t *bcp, void *ignored)
{
	char c[MDB_SYM_NAMLEN];
	GElf_Sym sym;
	int i;

	mdb_printf("%0?p %12llx ", addr, bcp->bc_timestamp);
	for (i = 0; i < bcp->bc_depth; i++) {
		if (mdb_lookup_by_addr(bcp->bc_stack[i],
		    MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1)
			continue;
		if (strncmp(c, "kmem_", 5) == 0)
			continue;
		mdb_printf("%s+0x%lx",
		    c, bcp->bc_stack[i] - (uintptr_t)sym.st_value);
		break;
	}
	mdb_printf("\n");

	return (WALK_NEXT);
}

static int
allocdby_common(uintptr_t addr, uint_t flags, const char *w)
{
	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	mdb_printf("%-?s %12s %s\n", "BUFCTL", "TIMESTAMP", "CALLER");

	if (mdb_pwalk(w, (mdb_walk_cb_t)allocdby_walk, NULL, addr) == -1) {
		mdb_warn("can't walk '%s' for %p", w, addr);
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

/*ARGSUSED*/
int
allocdby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	return (allocdby_common(addr, flags, "allocdby"));
}

/*ARGSUSED*/
int
freedby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	return (allocdby_common(addr, flags, "freedby"));
}

/*
 * Return a string describing the address in relation to the given thread's
 * stack.
 *
 * - If the thread state is TS_FREE, return " (inactive interrupt thread)".
 *
 * - If the address is above the stack pointer, return an empty string
 *   signifying that the address is active.
 *
 * - If the address is below the stack pointer, and the thread is not on proc,
 *   return " (below sp)".
 *
 * - If the address is below the stack pointer, and the thread is on proc,
 *   return " (possibly below sp)".  Depending on context, we may or may not
 *   have an accurate t_sp.
 */
static const char *
stack_active(const kthread_t *t, uintptr_t addr)
{
	uintptr_t panicstk;
	GElf_Sym sym;

	if (t->t_state == TS_FREE)
		return (" (inactive interrupt thread)");

	/*
	 * Check to see if we're on the panic stack.  If so, ignore t_sp, as it
	 * no longer relates to the thread's real stack.
	 */
	if (mdb_lookup_by_name("panic_stack", &sym) == 0) {
		panicstk = (uintptr_t)sym.st_value;

		if (t->t_sp >= panicstk && t->t_sp < panicstk + PANICSTKSIZE)
			return ("");
	}

	if (addr >= t->t_sp + STACK_BIAS)
		return ("");

	if (t->t_state == TS_ONPROC)
		return (" (possibly below sp)");

	return (" (below sp)");
}

/*
 * State shared by the ::whatis search callbacks.
 */
typedef struct whatis {
	uintptr_t w_addr;		/* address being searched for */
	const kmem_cache_t *w_cache;	/* cache currently being searched */
	const vmem_t *w_vmem;		/* arena currently being searched */
	size_t w_slab_align;
	int w_slab_found;
	int w_found;			/* number of matches so far */
	int w_kmem_lite_count;		/* pcs recorded by KMF_LITE caches */
	uint_t w_all;			/* report all matches, not just first */
	uint_t w_bufctl;
	uint_t w_freemem;		/* current walk covers freed buffers */
	uint_t w_idspace;
	uint_t w_quiet;			/* suppress verbose reporting */
	uint_t w_verbose;		/* narrate the search itself */
} whatis_t;
/* nicely report pointers as offsets from a base */
static void
whatis_report_pointer(uintptr_t addr, uintptr_t base, const char *description)
{
	if (addr == base)
		mdb_printf("%p is %s",
		    addr, description);
	else
		mdb_printf("%p is %p+%p, %s",
		    addr, base, addr - base, description);
}

/* call one of our dcmd functions with "-v" and the provided address */
static void
whatis_call_printer(mdb_dcmd_f *dcmd, uintptr_t addr)
{
	mdb_arg_t a;
	a.a_type = MDB_TYPE_STRING;
	a.a_un.a_str = "-v";

	(void) (*dcmd)(addr, DCMD_ADDRSPEC, 1, &a);
}

/*
 * Report a match in a kmem cache buffer: whether it is allocated or freed,
 * which cache it came from, and (when the cache has debugging enabled) the
 * associated bufctl and/or recently recorded callers from the buftag.
 */
static void
whatis_print_kmem(uintptr_t addr, uintptr_t baddr, whatis_t *w)
{
	const kmem_cache_t *cp = w->w_cache;
	/* LINTED pointer cast may result in improper alignment */
	uintptr_t btaddr = (uintptr_t)KMEM_BUFTAG(cp, addr);
	intptr_t stat;
	int call_printer;
	int count = 0;
	int i;
	pc_t callers[16];

	if (cp->cache_flags & KMF_REDZONE) {
		kmem_buftag_t bt;

		if (mdb_vread(&bt, sizeof (bt), btaddr) == -1)
			goto done;

		/* decode the buftag's alloc/free state */
		stat = (intptr_t)bt.bt_bufctl ^ bt.bt_bxstat;

		if (stat != KMEM_BUFTAG_ALLOC && stat != KMEM_BUFTAG_FREE)
			goto done;

		/*
		 * provide the bufctl ptr if it has useful information
		 */
		if (baddr == 0 && (cp->cache_flags & KMF_AUDIT))
			baddr = (uintptr_t)bt.bt_bufctl;

		if (cp->cache_flags & KMF_LITE) {
			count = w->w_kmem_lite_count;

			if (count * sizeof (pc_t) > sizeof (callers))
				count = 0;

			if (count > 0 &&
			    mdb_vread(callers, count * sizeof (pc_t),
			    btaddr +
			    offsetof(kmem_buftag_lite_t, bt_history)) == -1)
				count = 0;

			/*
			 * skip unused callers
			 */
			while (count > 0 && callers[count - 1] ==
			    (pc_t)KMEM_UNINITIALIZED_PATTERN)
				count--;
		}
	}

done:
	/* a full ::bufctl -v report is only useful for audited caches */
	call_printer =
	    (!w->w_quiet && baddr != 0 && (cp->cache_flags & KMF_AUDIT));

	whatis_report_pointer(w->w_addr, addr, "");

	if (baddr != 0 && !call_printer)
		mdb_printf("bufctl %p ", baddr);

	mdb_printf("%s from %s%s\n",
	    (w->w_freemem == FALSE) ? "allocated" : "freed", cp->cache_name,
	    (call_printer || (!w->w_quiet && count > 0)) ? ":" : "");

	if (call_printer)
		whatis_call_printer(bufctl, baddr);

	if (!w->w_quiet && count > 0) {
		mdb_inc_indent(8);
		mdb_printf("recent caller%s: %a%s", (count != 1)? "s":"",
		    callers[0], (count != 1)? ", ":"\n");
		for (i = 1; i < count; i++)
			mdb_printf("%a%s", callers[i],
			    (i + 1 < count)? ", ":"\n");
		mdb_dec_indent(8);
	}
}

/*
 * Per-buffer callback: report the buffer if the searched-for address falls
 * within it; stop the walk after the first hit unless -a was given.
 */
/*ARGSUSED*/
static int
whatis_walk_kmem(uintptr_t addr, void *ignored, whatis_t *w)
{
	if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize)
		return (WALK_NEXT);

	whatis_print_kmem(addr, 0, w);
	w->w_found++;
	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
}

static int
whatis_walk_seg(uintptr_t addr, const vmem_seg_t *vs, whatis_t *w)
{
	if (w->w_addr < vs->vs_start || w->w_addr >= vs->vs_end)
		return (WALK_NEXT);

	whatis_report_pointer(w->w_addr, vs->vs_start, "");

	/*
	 * If we're not printing it separately, provide the vmem_seg
	 * pointer if it has a stack trace.
	 */
	if (w->w_quiet && (w->w_bufctl == TRUE ||
	    (vs->vs_type == VMEM_ALLOC && vs->vs_depth != 0))) {
		mdb_printf("vmem_seg %p ", addr);
	}

	mdb_printf("%s from %s vmem arena%s\n",
	    (w->w_freemem == FALSE) ? "allocated" : "freed", w->w_vmem->vm_name,
	    !w->w_quiet ? ":" : "");

	if (!w->w_quiet)
		whatis_call_printer(vmem_seg, addr);

	w->w_found++;
	return (w->w_all == TRUE ?
WALK_NEXT : WALK_DONE);
}

/*
 * vmem arena walker callback for ::whatis: search one arena's allocated
 * segments, then (if nothing was found or -a is set) its free segments.
 * Arenas whose VMC_IDENTIFIER flag doesn't match the -i option are skipped.
 */
static int
whatis_walk_vmem(uintptr_t addr, const vmem_t *vmem, whatis_t *w)
{
	const char *nm = vmem->vm_name;
	w->w_vmem = vmem;
	w->w_freemem = FALSE;

	/* honor -i: identifier arenas only with -i, non-identifier without */
	if (((vmem->vm_cflags & VMC_IDENTIFIER) != 0) ^ w->w_idspace)
		return (WALK_NEXT);

	if (w->w_verbose)
		mdb_printf("Searching vmem arena %s...\n", nm);

	if (mdb_pwalk("vmem_alloc",
	    (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) {
		mdb_warn("can't walk vmem seg for %p", addr);
		return (WALK_NEXT);
	}

	if (w->w_found && w->w_all == FALSE)
		return (WALK_DONE);

	if (w->w_verbose)
		mdb_printf("Searching vmem arena %s for free virtual...\n", nm);

	w->w_freemem = TRUE;

	if (mdb_pwalk("vmem_free",
	    (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) {
		mdb_warn("can't walk vmem seg for %p", addr);
		return (WALK_NEXT);
	}

	return (w->w_found && w->w_all == FALSE ?
WALK_DONE : WALK_NEXT);
}

/*
 * bufctl walker callback for ::whatis: like whatis_walk_kmem, but walks
 * kmem_bufctl_ts so a bufctl address can accompany the report.
 */
/*ARGSUSED*/
static int
whatis_walk_bufctl(uintptr_t baddr, const kmem_bufctl_t *bcp, whatis_t *w)
{
	uintptr_t addr;

	if (bcp == NULL)
		return (WALK_NEXT);

	addr = (uintptr_t)bcp->bc_addr;

	if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize)
		return (WALK_NEXT);

	whatis_print_kmem(addr, baddr, w);
	w->w_found++;
	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
}

/*
 * Slab walker callback: cheap containment test.  Sets w_slab_found and
 * terminates the walk as soon as any slab's aligned span covers w_addr.
 */
/*ARGSUSED*/
static int
whatis_walk_slab(uintptr_t saddr, const kmem_slab_t *sp, whatis_t *w)
{
	/* slab_base must be aligned down to the arena quantum first */
	uintptr_t base = P2ALIGN((uintptr_t)sp->slab_base, w->w_slab_align);

	if ((w->w_addr - base) >= w->w_cache->cache_slabsize)
		return (WALK_NEXT);

	w->w_slab_found++;
	return (WALK_DONE);
}

/*
 * Search a single kmem cache for w_addr: first a cheap slab containment
 * check, then the full buffer walk (allocated, then freed buffers).
 */
static int
whatis_walk_cache(uintptr_t addr, const kmem_cache_t *c, whatis_t *w)
{
	char *walk, *freewalk;
	mdb_walk_cb_t func;
	vmem_t *vmp = c->cache_arena;

	/* honor -i, as in whatis_walk_vmem */
	if (((c->cache_flags & KMC_IDENTIFIER) != 0) ^ w->w_idspace)
		return (WALK_NEXT);

	/* For caches with auditing info, we always walk the bufctls */
	if (w->w_bufctl || (c->cache_flags & KMF_AUDIT)) {
		walk = "bufctl";
		freewalk = "freectl";
		func = (mdb_walk_cb_t)whatis_walk_bufctl;
	} else {
		walk = "kmem";
		freewalk = "freemem";
		func = (mdb_walk_cb_t)whatis_walk_kmem;
	}

	w->w_cache = c;

	if (w->w_verbose)
		mdb_printf("Searching %s's slabs...\n", c->cache_name);

	/*
	 * Verify that the address is in one of the cache's slabs.  If not,
	 * we can skip the more expensive walkers.  (this is purely a
	 * heuristic -- as long as there are no false-negatives, we'll be fine)
	 *
	 * We try to get the cache's arena's quantum, since to accurately
	 * get the base of a slab, you have to align it to the quantum.  If
	 * it doesn't look sensible, we fall back to not aligning.
	 */
	if (mdb_vread(&w->w_slab_align, sizeof (w->w_slab_align),
	    (uintptr_t)&vmp->vm_quantum) == -1) {
		mdb_warn("unable to read %p->cache_arena->vm_quantum", c);
		w->w_slab_align = 1;
	}

	/* quantum must be a nonzero power of two no larger than the slab */
	if ((c->cache_slabsize < w->w_slab_align) || w->w_slab_align == 0 ||
	    (w->w_slab_align & (w->w_slab_align - 1))) {
		mdb_warn("%p's arena has invalid quantum (0x%p)\n", c,
		    w->w_slab_align);
		w->w_slab_align = 1;
	}

	w->w_slab_found = 0;
	if (mdb_pwalk("kmem_slab", (mdb_walk_cb_t)whatis_walk_slab, w,
	    addr) == -1) {
		mdb_warn("can't find kmem_slab walker");
		return (WALK_DONE);
	}
	if (w->w_slab_found == 0)
		return (WALK_NEXT);

	/* bound the KMF_LITE caller history; 16 is the sanity cap */
	if (c->cache_flags & KMF_LITE) {
		if (mdb_readvar(&w->w_kmem_lite_count,
		    "kmem_lite_count") == -1 || w->w_kmem_lite_count > 16)
			w->w_kmem_lite_count = 0;
	}

	if (w->w_verbose)
		mdb_printf("Searching %s...\n", c->cache_name);

	w->w_freemem = FALSE;

	if (mdb_pwalk(walk, func, w, addr) == -1) {
		mdb_warn("can't find %s walker", walk);
		return (WALK_DONE);
	}

	if (w->w_found && w->w_all == FALSE)
		return (WALK_DONE);

	/*
	 * We have searched for allocated memory; now search for freed memory.
	 */
	if (w->w_verbose)
		mdb_printf("Searching %s for free memory...\n", c->cache_name);

	w->w_freemem = TRUE;

	if (mdb_pwalk(freewalk, func, w, addr) == -1) {
		mdb_warn("can't find %s walker", freewalk);
		return (WALK_DONE);
	}

	return (w->w_found && w->w_all == FALSE ? WALK_DONE : WALK_NEXT);
}

/*
 * kmem_cache walker callback: search only "touched" caches (those
 * without KMC_NOTOUCH); ::whatis runs this pass first.
 */
static int
whatis_walk_touch(uintptr_t addr, const kmem_cache_t *c, whatis_t *w)
{
	if (c->cache_cflags & KMC_NOTOUCH)
		return (WALK_NEXT);

	return (whatis_walk_cache(addr, c, w));
}

/*
 * kmem_cache walker callback: the complementary pass over KMC_NOTOUCH
 * caches.
 */
static int
whatis_walk_notouch(uintptr_t addr, const kmem_cache_t *c, whatis_t *w)
{
	if (!(c->cache_cflags & KMC_NOTOUCH))
		return (WALK_NEXT);

	return (whatis_walk_cache(addr, c, w));
}

/*
 * Thread walker callback for ::whatis: report addresses that fall within
 * a kthread_t itself, or within the thread's stack [t_stkbase, t_stk].
 */
static int
whatis_walk_thread(uintptr_t addr, const kthread_t *t, whatis_t *w)
{
	/*
	 * Often, one calls ::whatis on an address from a thread structure.
	 * We use this opportunity to short circuit this case...
	 */
	if (w->w_addr >= addr && w->w_addr < addr + sizeof (kthread_t)) {
		whatis_report_pointer(w->w_addr, addr,
		    "allocated as a thread structure\n");
		w->w_found++;
		return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
	}

	if (w->w_addr < (uintptr_t)t->t_stkbase ||
	    w->w_addr > (uintptr_t)t->t_stk)
		return (WALK_NEXT);

	if (t->t_stkbase == NULL)
		return (WALK_NEXT);

	mdb_printf("%p is in thread %p's stack%s\n", w->w_addr, addr,
	    stack_active(t, w->w_addr));

	w->w_found++;
	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
}

/*
 * modctl walker callback for ::whatis: check whether the address falls in
 * one of the module's sections (text, data, bss, symtab, symspace) and,
 * if so, report which module and section -- and the symbol, when one
 * covers the address.
 */
static int
whatis_walk_modctl(uintptr_t addr, const struct modctl *m, whatis_t *w)
{
	struct module mod;
	char name[MODMAXNAMELEN], *where;
	Shdr shdr;
	GElf_Sym sym;

	if (m->mod_mp == NULL)
		return (WALK_NEXT);

	if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
		mdb_warn("couldn't read modctl %p's module", addr);
		return (WALK_NEXT);
	}

	if (w->w_addr >= (uintptr_t)mod.text &&
	    w->w_addr < (uintptr_t)mod.text + mod.text_size) {
		where = "text segment";
		goto found;
	}

	if (w->w_addr >= (uintptr_t)mod.data &&
	    w->w_addr < (uintptr_t)mod.data + mod.data_size) {
		where = "data segment";
		goto found;
	}

	if (w->w_addr >= (uintptr_t)mod.bss &&
	    w->w_addr < (uintptr_t)mod.bss + mod.bss_size) {
		where = "bss";
		goto found;
	}

	/* symtab bounds need the section header for the entry size */
	if (mdb_vread(&shdr, sizeof (shdr), (uintptr_t)mod.symhdr) == -1) {
		mdb_warn("couldn't read symbol header for %p's module", addr);
		return (WALK_NEXT);
	}

	if (w->w_addr >= (uintptr_t)mod.symtbl && w->w_addr <
	    (uintptr_t)mod.symtbl + (uintptr_t)mod.nsyms * shdr.sh_entsize) {
		where = "symtab";
		goto found;
	}

	if (w->w_addr >= (uintptr_t)mod.symspace &&
	    w->w_addr < (uintptr_t)mod.symspace + (uintptr_t)mod.symsize) {
		where = "symspace";
		goto found;
	}

	return (WALK_NEXT);

found:
	/* fall back to the modctl address if the name is unreadable */
	if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1)
		(void) mdb_snprintf(name, sizeof (name), "0x%p", addr);

	mdb_printf("%p is ", w->w_addr);

	/*
	 * If we found this address in a module, then there's a chance that
	 * it's actually a named symbol.  Try the symbol lookup.
	 */
	if (mdb_lookup_by_addr(w->w_addr, MDB_SYM_FUZZY, NULL, 0, &sym) != -1 &&
	    (w->w_addr - (uintptr_t)sym.st_value) < sym.st_size) {
		mdb_printf("%a, ", w->w_addr);
	}

	mdb_printf("in %s's %s\n", name, where);

	w->w_found++;
	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
}

/*
 * Page walker callback for ::whatis: report addresses that fall within a
 * page_t.  The machine-dependent page_t size is looked up once via CTF
 * and cached in a function-local static.
 */
/*ARGSUSED*/
static int
whatis_walk_page(uintptr_t addr, const void *ignored, whatis_t *w)
{
	static int machsize = 0;
	mdb_ctf_id_t id;

	if (machsize == 0) {
		if (mdb_ctf_lookup_by_name("unix`page_t", &id) == 0)
			machsize = mdb_ctf_type_size(id);
		else {
			mdb_warn("could not get size of page_t");
			machsize = sizeof (page_t);
		}
	}

	if (w->w_addr < addr || w->w_addr >= addr + machsize)
		return (WALK_NEXT);

	whatis_report_pointer(w->w_addr, addr,
	    "allocated as a page structure\n");

	w->w_found++;
	return (w->w_all == TRUE ?
WALK_NEXT : WALK_DONE);
}

/*
 * ::whatis dcmd: given a virtual address, search progressively more
 * expensive containers for it -- kernel modules (text/data/bss/symbols),
 * thread structures and stacks, page structures, kmem caches (touched,
 * then untouched), and finally vmem arenas -- reporting each match.
 * Stops at the first (most specific) match unless -a was given.
 */
int
whatis(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	whatis_t w;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	w.w_all = FALSE;
	w.w_bufctl = FALSE;
	w.w_idspace = FALSE;
	w.w_quiet = FALSE;
	w.w_verbose = FALSE;

	if (mdb_getopts(argc, argv,
	    'a', MDB_OPT_SETBITS, TRUE, &w.w_all,
	    'b', MDB_OPT_SETBITS, TRUE, &w.w_bufctl,
	    'i', MDB_OPT_SETBITS, TRUE, &w.w_idspace,
	    'q', MDB_OPT_SETBITS, TRUE, &w.w_quiet,
	    'v', MDB_OPT_SETBITS, TRUE, &w.w_verbose,
	    NULL) != argc)
		return (DCMD_USAGE);

	w.w_addr = addr;
	w.w_found = 0;

	if (w.w_verbose)
		mdb_printf("Searching modules...\n");

	/* modules/threads/pages are not identifier spaces; skip under -i */
	if (!w.w_idspace) {
		if (mdb_walk("modctl", (mdb_walk_cb_t)whatis_walk_modctl, &w)
		    == -1) {
			mdb_warn("couldn't find modctl walker");
			return (DCMD_ERR);
		}

		if (w.w_found && w.w_all == FALSE)
			return (DCMD_OK);

		/*
		 * Now search all thread stacks.  Yes, this is a little weak;
		 * we can save a lot of work by first checking to see if the
		 * address is in segkp vs. segkmem.  But hey, computers are
		 * fast.
		 */
		if (w.w_verbose)
			mdb_printf("Searching threads...\n");

		if (mdb_walk("thread", (mdb_walk_cb_t)whatis_walk_thread, &w)
		    == -1) {
			mdb_warn("couldn't find thread walker");
			return (DCMD_ERR);
		}

		if (w.w_found && w.w_all == FALSE)
			return (DCMD_OK);

		if (w.w_verbose)
			mdb_printf("Searching page structures...\n");

		if (mdb_walk("page", (mdb_walk_cb_t)whatis_walk_page, &w)
		    == -1) {
			mdb_warn("couldn't find page walker");
			return (DCMD_ERR);
		}

		if (w.w_found && w.w_all == FALSE)
			return (DCMD_OK);
	}

	if (mdb_walk("kmem_cache",
	    (mdb_walk_cb_t)whatis_walk_touch, &w) == -1) {
		mdb_warn("couldn't find kmem_cache walker");
		return (DCMD_ERR);
	}

	if (w.w_found && w.w_all == FALSE)
		return (DCMD_OK);

	if (mdb_walk("kmem_cache",
	    (mdb_walk_cb_t)whatis_walk_notouch, &w) == -1) {
		mdb_warn("couldn't find kmem_cache walker");
		return (DCMD_ERR);
	}

	if (w.w_found && w.w_all == FALSE)
		return (DCMD_OK);

	if (mdb_walk("vmem_postfix",
	    (mdb_walk_cb_t)whatis_walk_vmem, &w) == -1) {
		mdb_warn("couldn't find vmem_postfix walker");
		return (DCMD_ERR);
	}

	if (w.w_found == 0)
		mdb_printf("%p is unknown\n", addr);

	return (DCMD_OK);
}

/* Help text for ::whatis */
void
whatis_help(void)
{
	mdb_printf(
	    "Given a virtual address, attempt to determine where it came\n"
	    "from.\n"
	    "\n"
	    "\t-a\tFind all possible sources. Default behavior is to stop at\n"
	    "\t\tthe first (most specific) source.\n"
	    "\t-b\tReport bufctls and vmem_segs for matches in kmem and vmem,\n"
	    "\t\trespectively. Warning: if the buffer exists, but does not\n"
	    "\t\thave a bufctl, it will not be reported.\n"
	    "\t-i\tSearch only identifier arenas and caches. By default\n"
	    "\t\tthese are ignored.\n"
	    "\t-q\tDon't print multi-line reports (stack traces, etc.)\n"
	    "\t-v\tVerbose output; display caches/arenas/etc as they are\n"
	    "\t\tsearched\n");
}

/* per-CPU [kmc_low, kmc_high) address range of a transaction-log chunk */
typedef struct kmem_log_cpu {
	uintptr_t kmc_low;
	uintptr_t kmc_high;
} kmem_log_cpu_t;

/* state passed to kmem_log_walk: optional filter address + CPU ranges */
typedef struct kmem_log_data {
	uintptr_t kmd_addr;
	kmem_log_cpu_t *kmd_cpu;
} kmem_log_data_t;

/*
 * ::kmemlog walker callback: print one kmem_bufctl_audit_t record,
 * tagged with the CPU whose log chunk contains it.  If kmd_addr is
 * nonzero, only records whose buffer contains that address are printed.
 */
int
kmem_log_walk(uintptr_t addr, const kmem_bufctl_audit_t *b,
    kmem_log_data_t *kmd)
{
	int i;
	kmem_log_cpu_t *kmc = kmd->kmd_cpu;
	size_t bufsize;

	/* find which CPU's log chunk this record lives in, if any */
	for (i = 0; i < NCPU; i++) {
		if (addr >= kmc[i].kmc_low && addr < kmc[i].kmc_high)
			break;
	}

	if (kmd->kmd_addr) {
		if (b->bc_cache == NULL)
			return (WALK_NEXT);

		if (mdb_vread(&bufsize, sizeof (bufsize),
		    (uintptr_t)&b->bc_cache->cache_bufsize) == -1) {
			mdb_warn(
			    "failed to read cache_bufsize for cache at %p",
			    b->bc_cache);
			return (WALK_ERR);
		}

		if (kmd->kmd_addr < (uintptr_t)b->bc_addr ||
		    kmd->kmd_addr >=
(uintptr_t)b->bc_addr + bufsize) 27100Sstevel@tonic-gate return (WALK_NEXT); 27110Sstevel@tonic-gate } 27120Sstevel@tonic-gate 27130Sstevel@tonic-gate if (i == NCPU) 27140Sstevel@tonic-gate mdb_printf(" "); 27150Sstevel@tonic-gate else 27160Sstevel@tonic-gate mdb_printf("%3d", i); 27170Sstevel@tonic-gate 27180Sstevel@tonic-gate mdb_printf(" %0?p %0?p %16llx %0?p\n", addr, b->bc_addr, 27190Sstevel@tonic-gate b->bc_timestamp, b->bc_thread); 27200Sstevel@tonic-gate 27210Sstevel@tonic-gate return (WALK_NEXT); 27220Sstevel@tonic-gate } 27230Sstevel@tonic-gate 27240Sstevel@tonic-gate /*ARGSUSED*/ 27250Sstevel@tonic-gate int 27260Sstevel@tonic-gate kmem_log(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 27270Sstevel@tonic-gate { 27280Sstevel@tonic-gate kmem_log_header_t lh; 27290Sstevel@tonic-gate kmem_cpu_log_header_t clh; 27300Sstevel@tonic-gate uintptr_t lhp, clhp; 27310Sstevel@tonic-gate int ncpus; 27320Sstevel@tonic-gate uintptr_t *cpu; 27330Sstevel@tonic-gate GElf_Sym sym; 27340Sstevel@tonic-gate kmem_log_cpu_t *kmc; 27350Sstevel@tonic-gate int i; 27360Sstevel@tonic-gate kmem_log_data_t kmd; 27370Sstevel@tonic-gate uint_t opt_b = FALSE; 27380Sstevel@tonic-gate 27390Sstevel@tonic-gate if (mdb_getopts(argc, argv, 27400Sstevel@tonic-gate 'b', MDB_OPT_SETBITS, TRUE, &opt_b, NULL) != argc) 27410Sstevel@tonic-gate return (DCMD_USAGE); 27420Sstevel@tonic-gate 27430Sstevel@tonic-gate if (mdb_readvar(&lhp, "kmem_transaction_log") == -1) { 27440Sstevel@tonic-gate mdb_warn("failed to read 'kmem_transaction_log'"); 27450Sstevel@tonic-gate return (DCMD_ERR); 27460Sstevel@tonic-gate } 27470Sstevel@tonic-gate 27480Sstevel@tonic-gate if (lhp == NULL) { 27490Sstevel@tonic-gate mdb_warn("no kmem transaction log\n"); 27500Sstevel@tonic-gate return (DCMD_ERR); 27510Sstevel@tonic-gate } 27520Sstevel@tonic-gate 27530Sstevel@tonic-gate mdb_readvar(&ncpus, "ncpus"); 27540Sstevel@tonic-gate 27550Sstevel@tonic-gate if (mdb_vread(&lh, sizeof (kmem_log_header_t), lhp) == -1) { 
27560Sstevel@tonic-gate mdb_warn("failed to read log header at %p", lhp); 27570Sstevel@tonic-gate return (DCMD_ERR); 27580Sstevel@tonic-gate } 27590Sstevel@tonic-gate 27600Sstevel@tonic-gate clhp = lhp + ((uintptr_t)&lh.lh_cpu[0] - (uintptr_t)&lh); 27610Sstevel@tonic-gate 27620Sstevel@tonic-gate cpu = mdb_alloc(sizeof (uintptr_t) * NCPU, UM_SLEEP | UM_GC); 27630Sstevel@tonic-gate 27640Sstevel@tonic-gate if (mdb_lookup_by_name("cpu", &sym) == -1) { 27650Sstevel@tonic-gate mdb_warn("couldn't find 'cpu' array"); 27660Sstevel@tonic-gate return (DCMD_ERR); 27670Sstevel@tonic-gate } 27680Sstevel@tonic-gate 27690Sstevel@tonic-gate if (sym.st_size != NCPU * sizeof (uintptr_t)) { 27700Sstevel@tonic-gate mdb_warn("expected 'cpu' to be of size %d; found %d\n", 27710Sstevel@tonic-gate NCPU * sizeof (uintptr_t), sym.st_size); 27720Sstevel@tonic-gate return (DCMD_ERR); 27730Sstevel@tonic-gate } 27740Sstevel@tonic-gate 27750Sstevel@tonic-gate if (mdb_vread(cpu, sym.st_size, (uintptr_t)sym.st_value) == -1) { 27760Sstevel@tonic-gate mdb_warn("failed to read cpu array at %p", sym.st_value); 27770Sstevel@tonic-gate return (DCMD_ERR); 27780Sstevel@tonic-gate } 27790Sstevel@tonic-gate 27800Sstevel@tonic-gate kmc = mdb_zalloc(sizeof (kmem_log_cpu_t) * NCPU, UM_SLEEP | UM_GC); 27810Sstevel@tonic-gate kmd.kmd_addr = NULL; 27820Sstevel@tonic-gate kmd.kmd_cpu = kmc; 27830Sstevel@tonic-gate 27840Sstevel@tonic-gate for (i = 0; i < NCPU; i++) { 27850Sstevel@tonic-gate 27860Sstevel@tonic-gate if (cpu[i] == NULL) 27870Sstevel@tonic-gate continue; 27880Sstevel@tonic-gate 27890Sstevel@tonic-gate if (mdb_vread(&clh, sizeof (clh), clhp) == -1) { 27900Sstevel@tonic-gate mdb_warn("cannot read cpu %d's log header at %p", 27910Sstevel@tonic-gate i, clhp); 27920Sstevel@tonic-gate return (DCMD_ERR); 27930Sstevel@tonic-gate } 27940Sstevel@tonic-gate 27950Sstevel@tonic-gate kmc[i].kmc_low = clh.clh_chunk * lh.lh_chunksize + 27960Sstevel@tonic-gate (uintptr_t)lh.lh_base; 27970Sstevel@tonic-gate 
kmc[i].kmc_high = (uintptr_t)clh.clh_current; 27980Sstevel@tonic-gate 27990Sstevel@tonic-gate clhp += sizeof (kmem_cpu_log_header_t); 28000Sstevel@tonic-gate } 28010Sstevel@tonic-gate 28020Sstevel@tonic-gate mdb_printf("%3s %-?s %-?s %16s %-?s\n", "CPU", "ADDR", "BUFADDR", 28030Sstevel@tonic-gate "TIMESTAMP", "THREAD"); 28040Sstevel@tonic-gate 28050Sstevel@tonic-gate /* 28060Sstevel@tonic-gate * If we have been passed an address, print out only log entries 28070Sstevel@tonic-gate * corresponding to that address. If opt_b is specified, then interpret 28080Sstevel@tonic-gate * the address as a bufctl. 28090Sstevel@tonic-gate */ 28100Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 28110Sstevel@tonic-gate kmem_bufctl_audit_t b; 28120Sstevel@tonic-gate 28130Sstevel@tonic-gate if (opt_b) { 28140Sstevel@tonic-gate kmd.kmd_addr = addr; 28150Sstevel@tonic-gate } else { 28160Sstevel@tonic-gate if (mdb_vread(&b, 28170Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t), addr) == -1) { 28180Sstevel@tonic-gate mdb_warn("failed to read bufctl at %p", addr); 28190Sstevel@tonic-gate return (DCMD_ERR); 28200Sstevel@tonic-gate } 28210Sstevel@tonic-gate 28220Sstevel@tonic-gate (void) kmem_log_walk(addr, &b, &kmd); 28230Sstevel@tonic-gate 28240Sstevel@tonic-gate return (DCMD_OK); 28250Sstevel@tonic-gate } 28260Sstevel@tonic-gate } 28270Sstevel@tonic-gate 28280Sstevel@tonic-gate if (mdb_walk("kmem_log", (mdb_walk_cb_t)kmem_log_walk, &kmd) == -1) { 28290Sstevel@tonic-gate mdb_warn("can't find kmem log walker"); 28300Sstevel@tonic-gate return (DCMD_ERR); 28310Sstevel@tonic-gate } 28320Sstevel@tonic-gate 28330Sstevel@tonic-gate return (DCMD_OK); 28340Sstevel@tonic-gate } 28350Sstevel@tonic-gate 28360Sstevel@tonic-gate typedef struct bufctl_history_cb { 28370Sstevel@tonic-gate int bhc_flags; 28380Sstevel@tonic-gate int bhc_argc; 28390Sstevel@tonic-gate const mdb_arg_t *bhc_argv; 28400Sstevel@tonic-gate int bhc_ret; 28410Sstevel@tonic-gate } bufctl_history_cb_t; 28420Sstevel@tonic-gate 

/*
 * bufctl_history walker callback for ::bufctl -h: invoke the bufctl dcmd
 * on each historical bufctl, clearing DCMD_LOOPFIRST after the first so
 * only one header is printed per history group.  Stops on the first
 * non-DCMD_OK result.
 */
/*ARGSUSED*/
static int
bufctl_history_callback(uintptr_t addr, const void *ign, void *arg)
{
	bufctl_history_cb_t *bhc = arg;

	bhc->bhc_ret =
	    bufctl(addr, bhc->bhc_flags, bhc->bhc_argc, bhc->bhc_argv);

	bhc->bhc_flags &= ~DCMD_LOOPFIRST;

	return ((bhc->bhc_ret == DCMD_OK)? WALK_NEXT : WALK_DONE);
}

/* Help text for ::bufctl */
void
bufctl_help(void)
{
	mdb_printf("%s",
"Display the contents of kmem_bufctl_audit_ts, with optional filtering.\n\n");
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s",
" -v Display the full content of the bufctl, including its stack trace\n"
" -h retrieve the bufctl's transaction history, if available\n"
" -a addr\n"
" filter out bufctls not involving the buffer at addr\n"
" -c caller\n"
" filter out bufctls without the function/PC in their stack trace\n"
" -e earliest\n"
" filter out bufctls timestamped before earliest\n"
" -l latest\n"
" filter out bufctls timestamped after latest\n"
" -t thread\n"
" filter out bufctls not involving thread\n");
}

/*
 * ::bufctl dcmd: display (and filter) kmem_bufctl_audit_t records; see
 * bufctl_help() above for the option set.  (Body continues below.)
 */
int
bufctl(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
28820Sstevel@tonic-gate { 28830Sstevel@tonic-gate kmem_bufctl_audit_t bc; 28840Sstevel@tonic-gate uint_t verbose = FALSE; 28850Sstevel@tonic-gate uint_t history = FALSE; 28860Sstevel@tonic-gate uint_t in_history = FALSE; 28870Sstevel@tonic-gate uintptr_t caller = NULL, thread = NULL; 28880Sstevel@tonic-gate uintptr_t laddr, haddr, baddr = NULL; 28890Sstevel@tonic-gate hrtime_t earliest = 0, latest = 0; 28900Sstevel@tonic-gate int i, depth; 28910Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 28920Sstevel@tonic-gate GElf_Sym sym; 28930Sstevel@tonic-gate 28940Sstevel@tonic-gate if (mdb_getopts(argc, argv, 28950Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &verbose, 28960Sstevel@tonic-gate 'h', MDB_OPT_SETBITS, TRUE, &history, 28970Sstevel@tonic-gate 'H', MDB_OPT_SETBITS, TRUE, &in_history, /* internal */ 28980Sstevel@tonic-gate 'c', MDB_OPT_UINTPTR, &caller, 28990Sstevel@tonic-gate 't', MDB_OPT_UINTPTR, &thread, 29000Sstevel@tonic-gate 'e', MDB_OPT_UINT64, &earliest, 29010Sstevel@tonic-gate 'l', MDB_OPT_UINT64, &latest, 29020Sstevel@tonic-gate 'a', MDB_OPT_UINTPTR, &baddr, NULL) != argc) 29030Sstevel@tonic-gate return (DCMD_USAGE); 29040Sstevel@tonic-gate 29050Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 29060Sstevel@tonic-gate return (DCMD_USAGE); 29070Sstevel@tonic-gate 29080Sstevel@tonic-gate if (in_history && !history) 29090Sstevel@tonic-gate return (DCMD_USAGE); 29100Sstevel@tonic-gate 29110Sstevel@tonic-gate if (history && !in_history) { 29120Sstevel@tonic-gate mdb_arg_t *nargv = mdb_zalloc(sizeof (*nargv) * (argc + 1), 29130Sstevel@tonic-gate UM_SLEEP | UM_GC); 29140Sstevel@tonic-gate bufctl_history_cb_t bhc; 29150Sstevel@tonic-gate 29160Sstevel@tonic-gate nargv[0].a_type = MDB_TYPE_STRING; 29170Sstevel@tonic-gate nargv[0].a_un.a_str = "-H"; /* prevent recursion */ 29180Sstevel@tonic-gate 29190Sstevel@tonic-gate for (i = 0; i < argc; i++) 29200Sstevel@tonic-gate nargv[i + 1] = argv[i]; 29210Sstevel@tonic-gate 29220Sstevel@tonic-gate /* 29230Sstevel@tonic-gate 
* When in history mode, we treat each element as if it 29240Sstevel@tonic-gate * were in a seperate loop, so that the headers group 29250Sstevel@tonic-gate * bufctls with similar histories. 29260Sstevel@tonic-gate */ 29270Sstevel@tonic-gate bhc.bhc_flags = flags | DCMD_LOOP | DCMD_LOOPFIRST; 29280Sstevel@tonic-gate bhc.bhc_argc = argc + 1; 29290Sstevel@tonic-gate bhc.bhc_argv = nargv; 29300Sstevel@tonic-gate bhc.bhc_ret = DCMD_OK; 29310Sstevel@tonic-gate 29320Sstevel@tonic-gate if (mdb_pwalk("bufctl_history", bufctl_history_callback, &bhc, 29330Sstevel@tonic-gate addr) == -1) { 29340Sstevel@tonic-gate mdb_warn("unable to walk bufctl_history"); 29350Sstevel@tonic-gate return (DCMD_ERR); 29360Sstevel@tonic-gate } 29370Sstevel@tonic-gate 29380Sstevel@tonic-gate if (bhc.bhc_ret == DCMD_OK && !(flags & DCMD_PIPE_OUT)) 29390Sstevel@tonic-gate mdb_printf("\n"); 29400Sstevel@tonic-gate 29410Sstevel@tonic-gate return (bhc.bhc_ret); 29420Sstevel@tonic-gate } 29430Sstevel@tonic-gate 29440Sstevel@tonic-gate if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) { 29450Sstevel@tonic-gate if (verbose) { 29460Sstevel@tonic-gate mdb_printf("%16s %16s %16s %16s\n" 29470Sstevel@tonic-gate "%<u>%16s %16s %16s %16s%</u>\n", 29480Sstevel@tonic-gate "ADDR", "BUFADDR", "TIMESTAMP", "THREAD", 29490Sstevel@tonic-gate "", "CACHE", "LASTLOG", "CONTENTS"); 29500Sstevel@tonic-gate } else { 29510Sstevel@tonic-gate mdb_printf("%<u>%-?s %-?s %-12s %-?s %s%</u>\n", 29520Sstevel@tonic-gate "ADDR", "BUFADDR", "TIMESTAMP", "THREAD", "CALLER"); 29530Sstevel@tonic-gate } 29540Sstevel@tonic-gate } 29550Sstevel@tonic-gate 29560Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), addr) == -1) { 29570Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr); 29580Sstevel@tonic-gate return (DCMD_ERR); 29590Sstevel@tonic-gate } 29600Sstevel@tonic-gate 29610Sstevel@tonic-gate /* 29620Sstevel@tonic-gate * Guard against bogus bc_depth in case the bufctl is corrupt or 29630Sstevel@tonic-gate * the address does 
not really refer to a bufctl. 29640Sstevel@tonic-gate */ 29650Sstevel@tonic-gate depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH); 29660Sstevel@tonic-gate 29670Sstevel@tonic-gate if (caller != NULL) { 29680Sstevel@tonic-gate laddr = caller; 29690Sstevel@tonic-gate haddr = caller + sizeof (caller); 29700Sstevel@tonic-gate 29710Sstevel@tonic-gate if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, sizeof (c), 29720Sstevel@tonic-gate &sym) != -1 && caller == (uintptr_t)sym.st_value) { 29730Sstevel@tonic-gate /* 29740Sstevel@tonic-gate * We were provided an exact symbol value; any 29750Sstevel@tonic-gate * address in the function is valid. 29760Sstevel@tonic-gate */ 29770Sstevel@tonic-gate laddr = (uintptr_t)sym.st_value; 29780Sstevel@tonic-gate haddr = (uintptr_t)sym.st_value + sym.st_size; 29790Sstevel@tonic-gate } 29800Sstevel@tonic-gate 29810Sstevel@tonic-gate for (i = 0; i < depth; i++) 29820Sstevel@tonic-gate if (bc.bc_stack[i] >= laddr && bc.bc_stack[i] < haddr) 29830Sstevel@tonic-gate break; 29840Sstevel@tonic-gate 29850Sstevel@tonic-gate if (i == depth) 29860Sstevel@tonic-gate return (DCMD_OK); 29870Sstevel@tonic-gate } 29880Sstevel@tonic-gate 29890Sstevel@tonic-gate if (thread != NULL && (uintptr_t)bc.bc_thread != thread) 29900Sstevel@tonic-gate return (DCMD_OK); 29910Sstevel@tonic-gate 29920Sstevel@tonic-gate if (earliest != 0 && bc.bc_timestamp < earliest) 29930Sstevel@tonic-gate return (DCMD_OK); 29940Sstevel@tonic-gate 29950Sstevel@tonic-gate if (latest != 0 && bc.bc_timestamp > latest) 29960Sstevel@tonic-gate return (DCMD_OK); 29970Sstevel@tonic-gate 29980Sstevel@tonic-gate if (baddr != 0 && (uintptr_t)bc.bc_addr != baddr) 29990Sstevel@tonic-gate return (DCMD_OK); 30000Sstevel@tonic-gate 30010Sstevel@tonic-gate if (flags & DCMD_PIPE_OUT) { 30020Sstevel@tonic-gate mdb_printf("%#lr\n", addr); 30030Sstevel@tonic-gate return (DCMD_OK); 30040Sstevel@tonic-gate } 30050Sstevel@tonic-gate 30060Sstevel@tonic-gate if (verbose) { 30070Sstevel@tonic-gate mdb_printf( 
30080Sstevel@tonic-gate "%<b>%16p%</b> %16p %16llx %16p\n" 30090Sstevel@tonic-gate "%16s %16p %16p %16p\n", 30100Sstevel@tonic-gate addr, bc.bc_addr, bc.bc_timestamp, bc.bc_thread, 30110Sstevel@tonic-gate "", bc.bc_cache, bc.bc_lastlog, bc.bc_contents); 30120Sstevel@tonic-gate 30130Sstevel@tonic-gate mdb_inc_indent(17); 30140Sstevel@tonic-gate for (i = 0; i < depth; i++) 30150Sstevel@tonic-gate mdb_printf("%a\n", bc.bc_stack[i]); 30160Sstevel@tonic-gate mdb_dec_indent(17); 30170Sstevel@tonic-gate mdb_printf("\n"); 30180Sstevel@tonic-gate } else { 30190Sstevel@tonic-gate mdb_printf("%0?p %0?p %12llx %0?p", addr, bc.bc_addr, 30200Sstevel@tonic-gate bc.bc_timestamp, bc.bc_thread); 30210Sstevel@tonic-gate 30220Sstevel@tonic-gate for (i = 0; i < depth; i++) { 30230Sstevel@tonic-gate if (mdb_lookup_by_addr(bc.bc_stack[i], 30240Sstevel@tonic-gate MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1) 30250Sstevel@tonic-gate continue; 30260Sstevel@tonic-gate if (strncmp(c, "kmem_", 5) == 0) 30270Sstevel@tonic-gate continue; 30280Sstevel@tonic-gate mdb_printf(" %a\n", bc.bc_stack[i]); 30290Sstevel@tonic-gate break; 30300Sstevel@tonic-gate } 30310Sstevel@tonic-gate 30320Sstevel@tonic-gate if (i >= depth) 30330Sstevel@tonic-gate mdb_printf("\n"); 30340Sstevel@tonic-gate } 30350Sstevel@tonic-gate 30360Sstevel@tonic-gate return (DCMD_OK); 30370Sstevel@tonic-gate } 30380Sstevel@tonic-gate 30390Sstevel@tonic-gate typedef struct kmem_verify { 30400Sstevel@tonic-gate uint64_t *kmv_buf; /* buffer to read cache contents into */ 30410Sstevel@tonic-gate size_t kmv_size; /* number of bytes in kmv_buf */ 30420Sstevel@tonic-gate int kmv_corruption; /* > 0 if corruption found. 
*/ 30430Sstevel@tonic-gate int kmv_besilent; /* report actual corruption sites */ 30440Sstevel@tonic-gate struct kmem_cache kmv_cache; /* the cache we're operating on */ 30450Sstevel@tonic-gate } kmem_verify_t; 30460Sstevel@tonic-gate 30470Sstevel@tonic-gate /* 30480Sstevel@tonic-gate * verify_pattern() 30490Sstevel@tonic-gate * verify that buf is filled with the pattern pat. 30500Sstevel@tonic-gate */ 30510Sstevel@tonic-gate static int64_t 30520Sstevel@tonic-gate verify_pattern(uint64_t *buf_arg, size_t size, uint64_t pat) 30530Sstevel@tonic-gate { 30540Sstevel@tonic-gate /*LINTED*/ 30550Sstevel@tonic-gate uint64_t *bufend = (uint64_t *)((char *)buf_arg + size); 30560Sstevel@tonic-gate uint64_t *buf; 30570Sstevel@tonic-gate 30580Sstevel@tonic-gate for (buf = buf_arg; buf < bufend; buf++) 30590Sstevel@tonic-gate if (*buf != pat) 30600Sstevel@tonic-gate return ((uintptr_t)buf - (uintptr_t)buf_arg); 30610Sstevel@tonic-gate return (-1); 30620Sstevel@tonic-gate } 30630Sstevel@tonic-gate 30640Sstevel@tonic-gate /* 30650Sstevel@tonic-gate * verify_buftag() 30660Sstevel@tonic-gate * verify that btp->bt_bxstat == (bcp ^ pat) 30670Sstevel@tonic-gate */ 30680Sstevel@tonic-gate static int 30690Sstevel@tonic-gate verify_buftag(kmem_buftag_t *btp, uintptr_t pat) 30700Sstevel@tonic-gate { 30710Sstevel@tonic-gate return (btp->bt_bxstat == ((intptr_t)btp->bt_bufctl ^ pat) ? 0 : -1); 30720Sstevel@tonic-gate } 30730Sstevel@tonic-gate 30740Sstevel@tonic-gate /* 30750Sstevel@tonic-gate * verify_free() 30760Sstevel@tonic-gate * verify the integrity of a free block of memory by checking 30770Sstevel@tonic-gate * that it is filled with 0xdeadbeef and that its buftag is sane. 
30780Sstevel@tonic-gate */ 30790Sstevel@tonic-gate /*ARGSUSED1*/ 30800Sstevel@tonic-gate static int 30810Sstevel@tonic-gate verify_free(uintptr_t addr, const void *data, void *private) 30820Sstevel@tonic-gate { 30830Sstevel@tonic-gate kmem_verify_t *kmv = (kmem_verify_t *)private; 30840Sstevel@tonic-gate uint64_t *buf = kmv->kmv_buf; /* buf to validate */ 30850Sstevel@tonic-gate int64_t corrupt; /* corruption offset */ 30860Sstevel@tonic-gate kmem_buftag_t *buftagp; /* ptr to buftag */ 30870Sstevel@tonic-gate kmem_cache_t *cp = &kmv->kmv_cache; 30880Sstevel@tonic-gate int besilent = kmv->kmv_besilent; 30890Sstevel@tonic-gate 30900Sstevel@tonic-gate /*LINTED*/ 30910Sstevel@tonic-gate buftagp = KMEM_BUFTAG(cp, buf); 30920Sstevel@tonic-gate 30930Sstevel@tonic-gate /* 30940Sstevel@tonic-gate * Read the buffer to check. 30950Sstevel@tonic-gate */ 30960Sstevel@tonic-gate if (mdb_vread(buf, kmv->kmv_size, addr) == -1) { 30970Sstevel@tonic-gate if (!besilent) 30980Sstevel@tonic-gate mdb_warn("couldn't read %p", addr); 30990Sstevel@tonic-gate return (WALK_NEXT); 31000Sstevel@tonic-gate } 31010Sstevel@tonic-gate 31020Sstevel@tonic-gate if ((corrupt = verify_pattern(buf, cp->cache_verify, 31030Sstevel@tonic-gate KMEM_FREE_PATTERN)) >= 0) { 31040Sstevel@tonic-gate if (!besilent) 31050Sstevel@tonic-gate mdb_printf("buffer %p (free) seems corrupted, at %p\n", 31060Sstevel@tonic-gate addr, (uintptr_t)addr + corrupt); 31070Sstevel@tonic-gate goto corrupt; 31080Sstevel@tonic-gate } 31090Sstevel@tonic-gate /* 31100Sstevel@tonic-gate * When KMF_LITE is set, buftagp->bt_redzone is used to hold 31110Sstevel@tonic-gate * the first bytes of the buffer, hence we cannot check for red 31120Sstevel@tonic-gate * zone corruption. 
31130Sstevel@tonic-gate */ 31140Sstevel@tonic-gate if ((cp->cache_flags & (KMF_HASH | KMF_LITE)) == KMF_HASH && 31150Sstevel@tonic-gate buftagp->bt_redzone != KMEM_REDZONE_PATTERN) { 31160Sstevel@tonic-gate if (!besilent) 31170Sstevel@tonic-gate mdb_printf("buffer %p (free) seems to " 31180Sstevel@tonic-gate "have a corrupt redzone pattern\n", addr); 31190Sstevel@tonic-gate goto corrupt; 31200Sstevel@tonic-gate } 31210Sstevel@tonic-gate 31220Sstevel@tonic-gate /* 31230Sstevel@tonic-gate * confirm bufctl pointer integrity. 31240Sstevel@tonic-gate */ 31250Sstevel@tonic-gate if (verify_buftag(buftagp, KMEM_BUFTAG_FREE) == -1) { 31260Sstevel@tonic-gate if (!besilent) 31270Sstevel@tonic-gate mdb_printf("buffer %p (free) has a corrupt " 31280Sstevel@tonic-gate "buftag\n", addr); 31290Sstevel@tonic-gate goto corrupt; 31300Sstevel@tonic-gate } 31310Sstevel@tonic-gate 31320Sstevel@tonic-gate return (WALK_NEXT); 31330Sstevel@tonic-gate corrupt: 31340Sstevel@tonic-gate kmv->kmv_corruption++; 31350Sstevel@tonic-gate return (WALK_NEXT); 31360Sstevel@tonic-gate } 31370Sstevel@tonic-gate 31380Sstevel@tonic-gate /* 31390Sstevel@tonic-gate * verify_alloc() 31400Sstevel@tonic-gate * Verify that the buftag of an allocated buffer makes sense with respect 31410Sstevel@tonic-gate * to the buffer. 
31420Sstevel@tonic-gate */ 31430Sstevel@tonic-gate /*ARGSUSED1*/ 31440Sstevel@tonic-gate static int 31450Sstevel@tonic-gate verify_alloc(uintptr_t addr, const void *data, void *private) 31460Sstevel@tonic-gate { 31470Sstevel@tonic-gate kmem_verify_t *kmv = (kmem_verify_t *)private; 31480Sstevel@tonic-gate kmem_cache_t *cp = &kmv->kmv_cache; 31490Sstevel@tonic-gate uint64_t *buf = kmv->kmv_buf; /* buf to validate */ 31500Sstevel@tonic-gate /*LINTED*/ 31510Sstevel@tonic-gate kmem_buftag_t *buftagp = KMEM_BUFTAG(cp, buf); 31520Sstevel@tonic-gate uint32_t *ip = (uint32_t *)buftagp; 31530Sstevel@tonic-gate uint8_t *bp = (uint8_t *)buf; 31540Sstevel@tonic-gate int looks_ok = 0, size_ok = 1; /* flags for finding corruption */ 31550Sstevel@tonic-gate int besilent = kmv->kmv_besilent; 31560Sstevel@tonic-gate 31570Sstevel@tonic-gate /* 31580Sstevel@tonic-gate * Read the buffer to check. 31590Sstevel@tonic-gate */ 31600Sstevel@tonic-gate if (mdb_vread(buf, kmv->kmv_size, addr) == -1) { 31610Sstevel@tonic-gate if (!besilent) 31620Sstevel@tonic-gate mdb_warn("couldn't read %p", addr); 31630Sstevel@tonic-gate return (WALK_NEXT); 31640Sstevel@tonic-gate } 31650Sstevel@tonic-gate 31660Sstevel@tonic-gate /* 31670Sstevel@tonic-gate * There are two cases to handle: 31680Sstevel@tonic-gate * 1. If the buf was alloc'd using kmem_cache_alloc, it will have 31690Sstevel@tonic-gate * 0xfeedfacefeedface at the end of it 31700Sstevel@tonic-gate * 2. If the buf was alloc'd using kmem_alloc, it will have 31710Sstevel@tonic-gate * 0xbb just past the end of the region in use. At the buftag, 31720Sstevel@tonic-gate * it will have 0xfeedface (or, if the whole buffer is in use, 31730Sstevel@tonic-gate * 0xfeedface & bb000000 or 0xfeedfacf & 000000bb depending on 31740Sstevel@tonic-gate * endianness), followed by 32 bits containing the offset of the 31750Sstevel@tonic-gate * 0xbb byte in the buffer. 
31760Sstevel@tonic-gate * 31770Sstevel@tonic-gate * Finally, the two 32-bit words that comprise the second half of the 31780Sstevel@tonic-gate * buftag should xor to KMEM_BUFTAG_ALLOC 31790Sstevel@tonic-gate */ 31800Sstevel@tonic-gate 31810Sstevel@tonic-gate if (buftagp->bt_redzone == KMEM_REDZONE_PATTERN) 31820Sstevel@tonic-gate looks_ok = 1; 31830Sstevel@tonic-gate else if (!KMEM_SIZE_VALID(ip[1])) 31840Sstevel@tonic-gate size_ok = 0; 31850Sstevel@tonic-gate else if (bp[KMEM_SIZE_DECODE(ip[1])] == KMEM_REDZONE_BYTE) 31860Sstevel@tonic-gate looks_ok = 1; 31870Sstevel@tonic-gate else 31880Sstevel@tonic-gate size_ok = 0; 31890Sstevel@tonic-gate 31900Sstevel@tonic-gate if (!size_ok) { 31910Sstevel@tonic-gate if (!besilent) 31920Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a corrupt " 31930Sstevel@tonic-gate "redzone size encoding\n", addr); 31940Sstevel@tonic-gate goto corrupt; 31950Sstevel@tonic-gate } 31960Sstevel@tonic-gate 31970Sstevel@tonic-gate if (!looks_ok) { 31980Sstevel@tonic-gate if (!besilent) 31990Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a corrupt " 32000Sstevel@tonic-gate "redzone signature\n", addr); 32010Sstevel@tonic-gate goto corrupt; 32020Sstevel@tonic-gate } 32030Sstevel@tonic-gate 32040Sstevel@tonic-gate if (verify_buftag(buftagp, KMEM_BUFTAG_ALLOC) == -1) { 32050Sstevel@tonic-gate if (!besilent) 32060Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a " 32070Sstevel@tonic-gate "corrupt buftag\n", addr); 32080Sstevel@tonic-gate goto corrupt; 32090Sstevel@tonic-gate } 32100Sstevel@tonic-gate 32110Sstevel@tonic-gate return (WALK_NEXT); 32120Sstevel@tonic-gate corrupt: 32130Sstevel@tonic-gate kmv->kmv_corruption++; 32140Sstevel@tonic-gate return (WALK_NEXT); 32150Sstevel@tonic-gate } 32160Sstevel@tonic-gate 32170Sstevel@tonic-gate /*ARGSUSED2*/ 32180Sstevel@tonic-gate int 32190Sstevel@tonic-gate kmem_verify(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 32200Sstevel@tonic-gate { 
32210Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 32220Sstevel@tonic-gate int check_alloc = 0, check_free = 0; 32230Sstevel@tonic-gate kmem_verify_t kmv; 32240Sstevel@tonic-gate 32250Sstevel@tonic-gate if (mdb_vread(&kmv.kmv_cache, sizeof (kmv.kmv_cache), 32260Sstevel@tonic-gate addr) == -1) { 32270Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache %p", addr); 32280Sstevel@tonic-gate return (DCMD_ERR); 32290Sstevel@tonic-gate } 32300Sstevel@tonic-gate 32310Sstevel@tonic-gate kmv.kmv_size = kmv.kmv_cache.cache_buftag + 32320Sstevel@tonic-gate sizeof (kmem_buftag_t); 32330Sstevel@tonic-gate kmv.kmv_buf = mdb_alloc(kmv.kmv_size, UM_SLEEP | UM_GC); 32340Sstevel@tonic-gate kmv.kmv_corruption = 0; 32350Sstevel@tonic-gate 32360Sstevel@tonic-gate if ((kmv.kmv_cache.cache_flags & KMF_REDZONE)) { 32370Sstevel@tonic-gate check_alloc = 1; 32380Sstevel@tonic-gate if (kmv.kmv_cache.cache_flags & KMF_DEADBEEF) 32390Sstevel@tonic-gate check_free = 1; 32400Sstevel@tonic-gate } else { 32410Sstevel@tonic-gate if (!(flags & DCMD_LOOP)) { 32420Sstevel@tonic-gate mdb_warn("cache %p (%s) does not have " 32430Sstevel@tonic-gate "redzone checking enabled\n", addr, 32440Sstevel@tonic-gate kmv.kmv_cache.cache_name); 32450Sstevel@tonic-gate } 32460Sstevel@tonic-gate return (DCMD_ERR); 32470Sstevel@tonic-gate } 32480Sstevel@tonic-gate 32490Sstevel@tonic-gate if (flags & DCMD_LOOP) { 32500Sstevel@tonic-gate /* 32510Sstevel@tonic-gate * table mode, don't print out every corrupt buffer 32520Sstevel@tonic-gate */ 32530Sstevel@tonic-gate kmv.kmv_besilent = 1; 32540Sstevel@tonic-gate } else { 32550Sstevel@tonic-gate mdb_printf("Summary for cache '%s'\n", 32560Sstevel@tonic-gate kmv.kmv_cache.cache_name); 32570Sstevel@tonic-gate mdb_inc_indent(2); 32580Sstevel@tonic-gate kmv.kmv_besilent = 0; 32590Sstevel@tonic-gate } 32600Sstevel@tonic-gate 32610Sstevel@tonic-gate if (check_alloc) 32620Sstevel@tonic-gate (void) mdb_pwalk("kmem", verify_alloc, &kmv, addr); 32630Sstevel@tonic-gate if 
(check_free) 32640Sstevel@tonic-gate (void) mdb_pwalk("freemem", verify_free, &kmv, addr); 32650Sstevel@tonic-gate 32660Sstevel@tonic-gate if (flags & DCMD_LOOP) { 32670Sstevel@tonic-gate if (kmv.kmv_corruption == 0) { 32680Sstevel@tonic-gate mdb_printf("%-*s %?p clean\n", 32690Sstevel@tonic-gate KMEM_CACHE_NAMELEN, 32700Sstevel@tonic-gate kmv.kmv_cache.cache_name, addr); 32710Sstevel@tonic-gate } else { 32720Sstevel@tonic-gate char *s = ""; /* optional s in "buffer[s]" */ 32730Sstevel@tonic-gate if (kmv.kmv_corruption > 1) 32740Sstevel@tonic-gate s = "s"; 32750Sstevel@tonic-gate 32760Sstevel@tonic-gate mdb_printf("%-*s %?p %d corrupt buffer%s\n", 32770Sstevel@tonic-gate KMEM_CACHE_NAMELEN, 32780Sstevel@tonic-gate kmv.kmv_cache.cache_name, addr, 32790Sstevel@tonic-gate kmv.kmv_corruption, s); 32800Sstevel@tonic-gate } 32810Sstevel@tonic-gate } else { 32820Sstevel@tonic-gate /* 32830Sstevel@tonic-gate * This is the more verbose mode, when the user has 32840Sstevel@tonic-gate * type addr::kmem_verify. If the cache was clean, 32850Sstevel@tonic-gate * nothing will have yet been printed. So say something. 32860Sstevel@tonic-gate */ 32870Sstevel@tonic-gate if (kmv.kmv_corruption == 0) 32880Sstevel@tonic-gate mdb_printf("clean\n"); 32890Sstevel@tonic-gate 32900Sstevel@tonic-gate mdb_dec_indent(2); 32910Sstevel@tonic-gate } 32920Sstevel@tonic-gate } else { 32930Sstevel@tonic-gate /* 32940Sstevel@tonic-gate * If the user didn't specify a cache to verify, we'll walk all 32950Sstevel@tonic-gate * kmem_cache's, specifying ourself as a callback for each... 
32960Sstevel@tonic-gate * this is the equivalent of '::walk kmem_cache .::kmem_verify' 32970Sstevel@tonic-gate */ 32980Sstevel@tonic-gate mdb_printf("%<u>%-*s %-?s %-20s%</b>\n", KMEM_CACHE_NAMELEN, 32990Sstevel@tonic-gate "Cache Name", "Addr", "Cache Integrity"); 33000Sstevel@tonic-gate (void) (mdb_walk_dcmd("kmem_cache", "kmem_verify", 0, NULL)); 33010Sstevel@tonic-gate } 33020Sstevel@tonic-gate 33030Sstevel@tonic-gate return (DCMD_OK); 33040Sstevel@tonic-gate } 33050Sstevel@tonic-gate 33060Sstevel@tonic-gate typedef struct vmem_node { 33070Sstevel@tonic-gate struct vmem_node *vn_next; 33080Sstevel@tonic-gate struct vmem_node *vn_parent; 33090Sstevel@tonic-gate struct vmem_node *vn_sibling; 33100Sstevel@tonic-gate struct vmem_node *vn_children; 33110Sstevel@tonic-gate uintptr_t vn_addr; 33120Sstevel@tonic-gate int vn_marked; 33130Sstevel@tonic-gate vmem_t vn_vmem; 33140Sstevel@tonic-gate } vmem_node_t; 33150Sstevel@tonic-gate 33160Sstevel@tonic-gate typedef struct vmem_walk { 33170Sstevel@tonic-gate vmem_node_t *vw_root; 33180Sstevel@tonic-gate vmem_node_t *vw_current; 33190Sstevel@tonic-gate } vmem_walk_t; 33200Sstevel@tonic-gate 33210Sstevel@tonic-gate int 33220Sstevel@tonic-gate vmem_walk_init(mdb_walk_state_t *wsp) 33230Sstevel@tonic-gate { 33240Sstevel@tonic-gate uintptr_t vaddr, paddr; 33250Sstevel@tonic-gate vmem_node_t *head = NULL, *root = NULL, *current = NULL, *parent, *vp; 33260Sstevel@tonic-gate vmem_walk_t *vw; 33270Sstevel@tonic-gate 33280Sstevel@tonic-gate if (mdb_readvar(&vaddr, "vmem_list") == -1) { 33290Sstevel@tonic-gate mdb_warn("couldn't read 'vmem_list'"); 33300Sstevel@tonic-gate return (WALK_ERR); 33310Sstevel@tonic-gate } 33320Sstevel@tonic-gate 33330Sstevel@tonic-gate while (vaddr != NULL) { 33340Sstevel@tonic-gate vp = mdb_zalloc(sizeof (vmem_node_t), UM_SLEEP); 33350Sstevel@tonic-gate vp->vn_addr = vaddr; 33360Sstevel@tonic-gate vp->vn_next = head; 33370Sstevel@tonic-gate head = vp; 33380Sstevel@tonic-gate 33390Sstevel@tonic-gate if 
(vaddr == wsp->walk_addr) 33400Sstevel@tonic-gate current = vp; 33410Sstevel@tonic-gate 33420Sstevel@tonic-gate if (mdb_vread(&vp->vn_vmem, sizeof (vmem_t), vaddr) == -1) { 33430Sstevel@tonic-gate mdb_warn("couldn't read vmem_t at %p", vaddr); 33440Sstevel@tonic-gate goto err; 33450Sstevel@tonic-gate } 33460Sstevel@tonic-gate 33470Sstevel@tonic-gate vaddr = (uintptr_t)vp->vn_vmem.vm_next; 33480Sstevel@tonic-gate } 33490Sstevel@tonic-gate 33500Sstevel@tonic-gate for (vp = head; vp != NULL; vp = vp->vn_next) { 33510Sstevel@tonic-gate 33520Sstevel@tonic-gate if ((paddr = (uintptr_t)vp->vn_vmem.vm_source) == NULL) { 33530Sstevel@tonic-gate vp->vn_sibling = root; 33540Sstevel@tonic-gate root = vp; 33550Sstevel@tonic-gate continue; 33560Sstevel@tonic-gate } 33570Sstevel@tonic-gate 33580Sstevel@tonic-gate for (parent = head; parent != NULL; parent = parent->vn_next) { 33590Sstevel@tonic-gate if (parent->vn_addr != paddr) 33600Sstevel@tonic-gate continue; 33610Sstevel@tonic-gate vp->vn_sibling = parent->vn_children; 33620Sstevel@tonic-gate parent->vn_children = vp; 33630Sstevel@tonic-gate vp->vn_parent = parent; 33640Sstevel@tonic-gate break; 33650Sstevel@tonic-gate } 33660Sstevel@tonic-gate 33670Sstevel@tonic-gate if (parent == NULL) { 33680Sstevel@tonic-gate mdb_warn("couldn't find %p's parent (%p)\n", 33690Sstevel@tonic-gate vp->vn_addr, paddr); 33700Sstevel@tonic-gate goto err; 33710Sstevel@tonic-gate } 33720Sstevel@tonic-gate } 33730Sstevel@tonic-gate 33740Sstevel@tonic-gate vw = mdb_zalloc(sizeof (vmem_walk_t), UM_SLEEP); 33750Sstevel@tonic-gate vw->vw_root = root; 33760Sstevel@tonic-gate 33770Sstevel@tonic-gate if (current != NULL) 33780Sstevel@tonic-gate vw->vw_current = current; 33790Sstevel@tonic-gate else 33800Sstevel@tonic-gate vw->vw_current = root; 33810Sstevel@tonic-gate 33820Sstevel@tonic-gate wsp->walk_data = vw; 33830Sstevel@tonic-gate return (WALK_NEXT); 33840Sstevel@tonic-gate err: 33850Sstevel@tonic-gate for (vp = head; head != NULL; vp = head) { 
33860Sstevel@tonic-gate head = vp->vn_next; 33870Sstevel@tonic-gate mdb_free(vp, sizeof (vmem_node_t)); 33880Sstevel@tonic-gate } 33890Sstevel@tonic-gate 33900Sstevel@tonic-gate return (WALK_ERR); 33910Sstevel@tonic-gate } 33920Sstevel@tonic-gate 33930Sstevel@tonic-gate int 33940Sstevel@tonic-gate vmem_walk_step(mdb_walk_state_t *wsp) 33950Sstevel@tonic-gate { 33960Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data; 33970Sstevel@tonic-gate vmem_node_t *vp; 33980Sstevel@tonic-gate int rval; 33990Sstevel@tonic-gate 34000Sstevel@tonic-gate if ((vp = vw->vw_current) == NULL) 34010Sstevel@tonic-gate return (WALK_DONE); 34020Sstevel@tonic-gate 34030Sstevel@tonic-gate rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata); 34040Sstevel@tonic-gate 34050Sstevel@tonic-gate if (vp->vn_children != NULL) { 34060Sstevel@tonic-gate vw->vw_current = vp->vn_children; 34070Sstevel@tonic-gate return (rval); 34080Sstevel@tonic-gate } 34090Sstevel@tonic-gate 34100Sstevel@tonic-gate do { 34110Sstevel@tonic-gate vw->vw_current = vp->vn_sibling; 34120Sstevel@tonic-gate vp = vp->vn_parent; 34130Sstevel@tonic-gate } while (vw->vw_current == NULL && vp != NULL); 34140Sstevel@tonic-gate 34150Sstevel@tonic-gate return (rval); 34160Sstevel@tonic-gate } 34170Sstevel@tonic-gate 34180Sstevel@tonic-gate /* 34190Sstevel@tonic-gate * The "vmem_postfix" walk walks the vmem arenas in post-fix order; all 34200Sstevel@tonic-gate * children are visited before their parent. We perform the postfix walk 34210Sstevel@tonic-gate * iteratively (rather than recursively) to allow mdb to regain control 34220Sstevel@tonic-gate * after each callback. 
34230Sstevel@tonic-gate */ 34240Sstevel@tonic-gate int 34250Sstevel@tonic-gate vmem_postfix_walk_step(mdb_walk_state_t *wsp) 34260Sstevel@tonic-gate { 34270Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data; 34280Sstevel@tonic-gate vmem_node_t *vp = vw->vw_current; 34290Sstevel@tonic-gate int rval; 34300Sstevel@tonic-gate 34310Sstevel@tonic-gate /* 34320Sstevel@tonic-gate * If this node is marked, then we know that we have already visited 34330Sstevel@tonic-gate * all of its children. If the node has any siblings, they need to 34340Sstevel@tonic-gate * be visited next; otherwise, we need to visit the parent. Note 34350Sstevel@tonic-gate * that vp->vn_marked will only be zero on the first invocation of 34360Sstevel@tonic-gate * the step function. 34370Sstevel@tonic-gate */ 34380Sstevel@tonic-gate if (vp->vn_marked) { 34390Sstevel@tonic-gate if (vp->vn_sibling != NULL) 34400Sstevel@tonic-gate vp = vp->vn_sibling; 34410Sstevel@tonic-gate else if (vp->vn_parent != NULL) 34420Sstevel@tonic-gate vp = vp->vn_parent; 34430Sstevel@tonic-gate else { 34440Sstevel@tonic-gate /* 34450Sstevel@tonic-gate * We have neither a parent, nor a sibling, and we 34460Sstevel@tonic-gate * have already been visited; we're done. 34470Sstevel@tonic-gate */ 34480Sstevel@tonic-gate return (WALK_DONE); 34490Sstevel@tonic-gate } 34500Sstevel@tonic-gate } 34510Sstevel@tonic-gate 34520Sstevel@tonic-gate /* 34530Sstevel@tonic-gate * Before we visit this node, visit its children. 
34540Sstevel@tonic-gate */ 34550Sstevel@tonic-gate while (vp->vn_children != NULL && !vp->vn_children->vn_marked) 34560Sstevel@tonic-gate vp = vp->vn_children; 34570Sstevel@tonic-gate 34580Sstevel@tonic-gate vp->vn_marked = 1; 34590Sstevel@tonic-gate vw->vw_current = vp; 34600Sstevel@tonic-gate rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata); 34610Sstevel@tonic-gate 34620Sstevel@tonic-gate return (rval); 34630Sstevel@tonic-gate } 34640Sstevel@tonic-gate 34650Sstevel@tonic-gate void 34660Sstevel@tonic-gate vmem_walk_fini(mdb_walk_state_t *wsp) 34670Sstevel@tonic-gate { 34680Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data; 34690Sstevel@tonic-gate vmem_node_t *root = vw->vw_root; 34700Sstevel@tonic-gate int done; 34710Sstevel@tonic-gate 34720Sstevel@tonic-gate if (root == NULL) 34730Sstevel@tonic-gate return; 34740Sstevel@tonic-gate 34750Sstevel@tonic-gate if ((vw->vw_root = root->vn_children) != NULL) 34760Sstevel@tonic-gate vmem_walk_fini(wsp); 34770Sstevel@tonic-gate 34780Sstevel@tonic-gate vw->vw_root = root->vn_sibling; 34790Sstevel@tonic-gate done = (root->vn_sibling == NULL && root->vn_parent == NULL); 34800Sstevel@tonic-gate mdb_free(root, sizeof (vmem_node_t)); 34810Sstevel@tonic-gate 34820Sstevel@tonic-gate if (done) { 34830Sstevel@tonic-gate mdb_free(vw, sizeof (vmem_walk_t)); 34840Sstevel@tonic-gate } else { 34850Sstevel@tonic-gate vmem_walk_fini(wsp); 34860Sstevel@tonic-gate } 34870Sstevel@tonic-gate } 34880Sstevel@tonic-gate 34890Sstevel@tonic-gate typedef struct vmem_seg_walk { 34900Sstevel@tonic-gate uint8_t vsw_type; 34910Sstevel@tonic-gate uintptr_t vsw_start; 34920Sstevel@tonic-gate uintptr_t vsw_current; 34930Sstevel@tonic-gate } vmem_seg_walk_t; 34940Sstevel@tonic-gate 34950Sstevel@tonic-gate /*ARGSUSED*/ 34960Sstevel@tonic-gate int 34970Sstevel@tonic-gate vmem_seg_walk_common_init(mdb_walk_state_t *wsp, uint8_t type, char *name) 34980Sstevel@tonic-gate { 34990Sstevel@tonic-gate vmem_seg_walk_t *vsw; 
35000Sstevel@tonic-gate 35010Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 35020Sstevel@tonic-gate mdb_warn("vmem_%s does not support global walks\n", name); 35030Sstevel@tonic-gate return (WALK_ERR); 35040Sstevel@tonic-gate } 35050Sstevel@tonic-gate 35060Sstevel@tonic-gate wsp->walk_data = vsw = mdb_alloc(sizeof (vmem_seg_walk_t), UM_SLEEP); 35070Sstevel@tonic-gate 35080Sstevel@tonic-gate vsw->vsw_type = type; 35090Sstevel@tonic-gate vsw->vsw_start = wsp->walk_addr + offsetof(vmem_t, vm_seg0); 35100Sstevel@tonic-gate vsw->vsw_current = vsw->vsw_start; 35110Sstevel@tonic-gate 35120Sstevel@tonic-gate return (WALK_NEXT); 35130Sstevel@tonic-gate } 35140Sstevel@tonic-gate 35150Sstevel@tonic-gate /* 35160Sstevel@tonic-gate * vmem segments can't have type 0 (this should be added to vmem_impl.h). 35170Sstevel@tonic-gate */ 35180Sstevel@tonic-gate #define VMEM_NONE 0 35190Sstevel@tonic-gate 35200Sstevel@tonic-gate int 35210Sstevel@tonic-gate vmem_alloc_walk_init(mdb_walk_state_t *wsp) 35220Sstevel@tonic-gate { 35230Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_ALLOC, "alloc")); 35240Sstevel@tonic-gate } 35250Sstevel@tonic-gate 35260Sstevel@tonic-gate int 35270Sstevel@tonic-gate vmem_free_walk_init(mdb_walk_state_t *wsp) 35280Sstevel@tonic-gate { 35290Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_FREE, "free")); 35300Sstevel@tonic-gate } 35310Sstevel@tonic-gate 35320Sstevel@tonic-gate int 35330Sstevel@tonic-gate vmem_span_walk_init(mdb_walk_state_t *wsp) 35340Sstevel@tonic-gate { 35350Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_SPAN, "span")); 35360Sstevel@tonic-gate } 35370Sstevel@tonic-gate 35380Sstevel@tonic-gate int 35390Sstevel@tonic-gate vmem_seg_walk_init(mdb_walk_state_t *wsp) 35400Sstevel@tonic-gate { 35410Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_NONE, "seg")); 35420Sstevel@tonic-gate } 35430Sstevel@tonic-gate 35440Sstevel@tonic-gate int 35450Sstevel@tonic-gate 
vmem_seg_walk_step(mdb_walk_state_t *wsp) 35460Sstevel@tonic-gate { 35470Sstevel@tonic-gate vmem_seg_t seg; 35480Sstevel@tonic-gate vmem_seg_walk_t *vsw = wsp->walk_data; 35490Sstevel@tonic-gate uintptr_t addr = vsw->vsw_current; 35500Sstevel@tonic-gate static size_t seg_size = 0; 35510Sstevel@tonic-gate int rval; 35520Sstevel@tonic-gate 35530Sstevel@tonic-gate if (!seg_size) { 35540Sstevel@tonic-gate if (mdb_readvar(&seg_size, "vmem_seg_size") == -1) { 35550Sstevel@tonic-gate mdb_warn("failed to read 'vmem_seg_size'"); 35560Sstevel@tonic-gate seg_size = sizeof (vmem_seg_t); 35570Sstevel@tonic-gate } 35580Sstevel@tonic-gate } 35590Sstevel@tonic-gate 35600Sstevel@tonic-gate if (seg_size < sizeof (seg)) 35610Sstevel@tonic-gate bzero((caddr_t)&seg + seg_size, sizeof (seg) - seg_size); 35620Sstevel@tonic-gate 35630Sstevel@tonic-gate if (mdb_vread(&seg, seg_size, addr) == -1) { 35640Sstevel@tonic-gate mdb_warn("couldn't read vmem_seg at %p", addr); 35650Sstevel@tonic-gate return (WALK_ERR); 35660Sstevel@tonic-gate } 35670Sstevel@tonic-gate 35680Sstevel@tonic-gate vsw->vsw_current = (uintptr_t)seg.vs_anext; 35690Sstevel@tonic-gate if (vsw->vsw_type != VMEM_NONE && seg.vs_type != vsw->vsw_type) { 35700Sstevel@tonic-gate rval = WALK_NEXT; 35710Sstevel@tonic-gate } else { 35720Sstevel@tonic-gate rval = wsp->walk_callback(addr, &seg, wsp->walk_cbdata); 35730Sstevel@tonic-gate } 35740Sstevel@tonic-gate 35750Sstevel@tonic-gate if (vsw->vsw_current == vsw->vsw_start) 35760Sstevel@tonic-gate return (WALK_DONE); 35770Sstevel@tonic-gate 35780Sstevel@tonic-gate return (rval); 35790Sstevel@tonic-gate } 35800Sstevel@tonic-gate 35810Sstevel@tonic-gate void 35820Sstevel@tonic-gate vmem_seg_walk_fini(mdb_walk_state_t *wsp) 35830Sstevel@tonic-gate { 35840Sstevel@tonic-gate vmem_seg_walk_t *vsw = wsp->walk_data; 35850Sstevel@tonic-gate 35860Sstevel@tonic-gate mdb_free(vsw, sizeof (vmem_seg_walk_t)); 35870Sstevel@tonic-gate } 35880Sstevel@tonic-gate 35890Sstevel@tonic-gate #define 
VMEM_NAMEWIDTH 22 35900Sstevel@tonic-gate 35910Sstevel@tonic-gate int 35920Sstevel@tonic-gate vmem(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 35930Sstevel@tonic-gate { 35940Sstevel@tonic-gate vmem_t v, parent; 35950Sstevel@tonic-gate vmem_kstat_t *vkp = &v.vm_kstat; 35960Sstevel@tonic-gate uintptr_t paddr; 35970Sstevel@tonic-gate int ident = 0; 35980Sstevel@tonic-gate char c[VMEM_NAMEWIDTH]; 35990Sstevel@tonic-gate 36000Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) { 36010Sstevel@tonic-gate if (mdb_walk_dcmd("vmem", "vmem", argc, argv) == -1) { 36020Sstevel@tonic-gate mdb_warn("can't walk vmem"); 36030Sstevel@tonic-gate return (DCMD_ERR); 36040Sstevel@tonic-gate } 36050Sstevel@tonic-gate return (DCMD_OK); 36060Sstevel@tonic-gate } 36070Sstevel@tonic-gate 36080Sstevel@tonic-gate if (DCMD_HDRSPEC(flags)) 36090Sstevel@tonic-gate mdb_printf("%-?s %-*s %10s %12s %9s %5s\n", 36100Sstevel@tonic-gate "ADDR", VMEM_NAMEWIDTH, "NAME", "INUSE", 36110Sstevel@tonic-gate "TOTAL", "SUCCEED", "FAIL"); 36120Sstevel@tonic-gate 36130Sstevel@tonic-gate if (mdb_vread(&v, sizeof (v), addr) == -1) { 36140Sstevel@tonic-gate mdb_warn("couldn't read vmem at %p", addr); 36150Sstevel@tonic-gate return (DCMD_ERR); 36160Sstevel@tonic-gate } 36170Sstevel@tonic-gate 36180Sstevel@tonic-gate for (paddr = (uintptr_t)v.vm_source; paddr != NULL; ident += 2) { 36190Sstevel@tonic-gate if (mdb_vread(&parent, sizeof (parent), paddr) == -1) { 36200Sstevel@tonic-gate mdb_warn("couldn't trace %p's ancestry", addr); 36210Sstevel@tonic-gate ident = 0; 36220Sstevel@tonic-gate break; 36230Sstevel@tonic-gate } 36240Sstevel@tonic-gate paddr = (uintptr_t)parent.vm_source; 36250Sstevel@tonic-gate } 36260Sstevel@tonic-gate 36270Sstevel@tonic-gate (void) mdb_snprintf(c, VMEM_NAMEWIDTH, "%*s%s", ident, "", v.vm_name); 36280Sstevel@tonic-gate 36290Sstevel@tonic-gate mdb_printf("%0?p %-*s %10llu %12llu %9llu %5llu\n", 36300Sstevel@tonic-gate addr, VMEM_NAMEWIDTH, c, 36310Sstevel@tonic-gate 
vkp->vk_mem_inuse.value.ui64, vkp->vk_mem_total.value.ui64, 36320Sstevel@tonic-gate vkp->vk_alloc.value.ui64, vkp->vk_fail.value.ui64); 36330Sstevel@tonic-gate 36340Sstevel@tonic-gate return (DCMD_OK); 36350Sstevel@tonic-gate } 36360Sstevel@tonic-gate 36370Sstevel@tonic-gate void 36380Sstevel@tonic-gate vmem_seg_help(void) 36390Sstevel@tonic-gate { 36406712Stomee mdb_printf("%s", 36416712Stomee "Display the contents of vmem_seg_ts, with optional filtering.\n\n" 36420Sstevel@tonic-gate "\n" 36430Sstevel@tonic-gate "A vmem_seg_t represents a range of addresses (or arbitrary numbers),\n" 36440Sstevel@tonic-gate "representing a single chunk of data. Only ALLOC segments have debugging\n" 36450Sstevel@tonic-gate "information.\n"); 36460Sstevel@tonic-gate mdb_dec_indent(2); 36470Sstevel@tonic-gate mdb_printf("%<b>OPTIONS%</b>\n"); 36480Sstevel@tonic-gate mdb_inc_indent(2); 36490Sstevel@tonic-gate mdb_printf("%s", 36500Sstevel@tonic-gate " -v Display the full content of the vmem_seg, including its stack trace\n" 36510Sstevel@tonic-gate " -s report the size of the segment, instead of the end address\n" 36520Sstevel@tonic-gate " -c caller\n" 36530Sstevel@tonic-gate " filter out segments without the function/PC in their stack trace\n" 36540Sstevel@tonic-gate " -e earliest\n" 36550Sstevel@tonic-gate " filter out segments timestamped before earliest\n" 36560Sstevel@tonic-gate " -l latest\n" 36570Sstevel@tonic-gate " filter out segments timestamped after latest\n" 36580Sstevel@tonic-gate " -m minsize\n" 36590Sstevel@tonic-gate " filer out segments smaller than minsize\n" 36600Sstevel@tonic-gate " -M maxsize\n" 36610Sstevel@tonic-gate " filer out segments larger than maxsize\n" 36620Sstevel@tonic-gate " -t thread\n" 36630Sstevel@tonic-gate " filter out segments not involving thread\n" 36640Sstevel@tonic-gate " -T type\n" 36650Sstevel@tonic-gate " filter out segments not of type 'type'\n" 36660Sstevel@tonic-gate " type is one of: ALLOC/FREE/SPAN/ROTOR/WALKER\n"); 
36670Sstevel@tonic-gate } 36680Sstevel@tonic-gate 36690Sstevel@tonic-gate /*ARGSUSED*/ 36700Sstevel@tonic-gate int 36710Sstevel@tonic-gate vmem_seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 36720Sstevel@tonic-gate { 36730Sstevel@tonic-gate vmem_seg_t vs; 36740Sstevel@tonic-gate pc_t *stk = vs.vs_stack; 36750Sstevel@tonic-gate uintptr_t sz; 36760Sstevel@tonic-gate uint8_t t; 36770Sstevel@tonic-gate const char *type = NULL; 36780Sstevel@tonic-gate GElf_Sym sym; 36790Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 36800Sstevel@tonic-gate int no_debug; 36810Sstevel@tonic-gate int i; 36820Sstevel@tonic-gate int depth; 36830Sstevel@tonic-gate uintptr_t laddr, haddr; 36840Sstevel@tonic-gate 36850Sstevel@tonic-gate uintptr_t caller = NULL, thread = NULL; 36860Sstevel@tonic-gate uintptr_t minsize = 0, maxsize = 0; 36870Sstevel@tonic-gate 36880Sstevel@tonic-gate hrtime_t earliest = 0, latest = 0; 36890Sstevel@tonic-gate 36900Sstevel@tonic-gate uint_t size = 0; 36910Sstevel@tonic-gate uint_t verbose = 0; 36920Sstevel@tonic-gate 36930Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 36940Sstevel@tonic-gate return (DCMD_USAGE); 36950Sstevel@tonic-gate 36960Sstevel@tonic-gate if (mdb_getopts(argc, argv, 36970Sstevel@tonic-gate 'c', MDB_OPT_UINTPTR, &caller, 36980Sstevel@tonic-gate 'e', MDB_OPT_UINT64, &earliest, 36990Sstevel@tonic-gate 'l', MDB_OPT_UINT64, &latest, 37000Sstevel@tonic-gate 's', MDB_OPT_SETBITS, TRUE, &size, 37010Sstevel@tonic-gate 'm', MDB_OPT_UINTPTR, &minsize, 37020Sstevel@tonic-gate 'M', MDB_OPT_UINTPTR, &maxsize, 37030Sstevel@tonic-gate 't', MDB_OPT_UINTPTR, &thread, 37040Sstevel@tonic-gate 'T', MDB_OPT_STR, &type, 37050Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &verbose, 37060Sstevel@tonic-gate NULL) != argc) 37070Sstevel@tonic-gate return (DCMD_USAGE); 37080Sstevel@tonic-gate 37090Sstevel@tonic-gate if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) { 37100Sstevel@tonic-gate if (verbose) { 37110Sstevel@tonic-gate mdb_printf("%16s %4s 
%16s %16s %16s\n" 37120Sstevel@tonic-gate "%<u>%16s %4s %16s %16s %16s%</u>\n", 37130Sstevel@tonic-gate "ADDR", "TYPE", "START", "END", "SIZE", 37140Sstevel@tonic-gate "", "", "THREAD", "TIMESTAMP", ""); 37150Sstevel@tonic-gate } else { 37160Sstevel@tonic-gate mdb_printf("%?s %4s %?s %?s %s\n", "ADDR", "TYPE", 37170Sstevel@tonic-gate "START", size? "SIZE" : "END", "WHO"); 37180Sstevel@tonic-gate } 37190Sstevel@tonic-gate } 37200Sstevel@tonic-gate 37210Sstevel@tonic-gate if (mdb_vread(&vs, sizeof (vs), addr) == -1) { 37220Sstevel@tonic-gate mdb_warn("couldn't read vmem_seg at %p", addr); 37230Sstevel@tonic-gate return (DCMD_ERR); 37240Sstevel@tonic-gate } 37250Sstevel@tonic-gate 37260Sstevel@tonic-gate if (type != NULL) { 37270Sstevel@tonic-gate if (strcmp(type, "ALLC") == 0 || strcmp(type, "ALLOC") == 0) 37280Sstevel@tonic-gate t = VMEM_ALLOC; 37290Sstevel@tonic-gate else if (strcmp(type, "FREE") == 0) 37300Sstevel@tonic-gate t = VMEM_FREE; 37310Sstevel@tonic-gate else if (strcmp(type, "SPAN") == 0) 37320Sstevel@tonic-gate t = VMEM_SPAN; 37330Sstevel@tonic-gate else if (strcmp(type, "ROTR") == 0 || 37340Sstevel@tonic-gate strcmp(type, "ROTOR") == 0) 37350Sstevel@tonic-gate t = VMEM_ROTOR; 37360Sstevel@tonic-gate else if (strcmp(type, "WLKR") == 0 || 37370Sstevel@tonic-gate strcmp(type, "WALKER") == 0) 37380Sstevel@tonic-gate t = VMEM_WALKER; 37390Sstevel@tonic-gate else { 37400Sstevel@tonic-gate mdb_warn("\"%s\" is not a recognized vmem_seg type\n", 37410Sstevel@tonic-gate type); 37420Sstevel@tonic-gate return (DCMD_ERR); 37430Sstevel@tonic-gate } 37440Sstevel@tonic-gate 37450Sstevel@tonic-gate if (vs.vs_type != t) 37460Sstevel@tonic-gate return (DCMD_OK); 37470Sstevel@tonic-gate } 37480Sstevel@tonic-gate 37490Sstevel@tonic-gate sz = vs.vs_end - vs.vs_start; 37500Sstevel@tonic-gate 37510Sstevel@tonic-gate if (minsize != 0 && sz < minsize) 37520Sstevel@tonic-gate return (DCMD_OK); 37530Sstevel@tonic-gate 37540Sstevel@tonic-gate if (maxsize != 0 && sz > maxsize) 
37550Sstevel@tonic-gate return (DCMD_OK); 37560Sstevel@tonic-gate 37570Sstevel@tonic-gate t = vs.vs_type; 37580Sstevel@tonic-gate depth = vs.vs_depth; 37590Sstevel@tonic-gate 37600Sstevel@tonic-gate /* 37610Sstevel@tonic-gate * debug info, when present, is only accurate for VMEM_ALLOC segments 37620Sstevel@tonic-gate */ 37630Sstevel@tonic-gate no_debug = (t != VMEM_ALLOC) || 37640Sstevel@tonic-gate (depth == 0 || depth > VMEM_STACK_DEPTH); 37650Sstevel@tonic-gate 37660Sstevel@tonic-gate if (no_debug) { 37670Sstevel@tonic-gate if (caller != NULL || thread != NULL || earliest != 0 || 37680Sstevel@tonic-gate latest != 0) 37690Sstevel@tonic-gate return (DCMD_OK); /* not enough info */ 37700Sstevel@tonic-gate } else { 37710Sstevel@tonic-gate if (caller != NULL) { 37720Sstevel@tonic-gate laddr = caller; 37730Sstevel@tonic-gate haddr = caller + sizeof (caller); 37740Sstevel@tonic-gate 37750Sstevel@tonic-gate if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, 37760Sstevel@tonic-gate sizeof (c), &sym) != -1 && 37770Sstevel@tonic-gate caller == (uintptr_t)sym.st_value) { 37780Sstevel@tonic-gate /* 37790Sstevel@tonic-gate * We were provided an exact symbol value; any 37800Sstevel@tonic-gate * address in the function is valid. 
37810Sstevel@tonic-gate */ 37820Sstevel@tonic-gate laddr = (uintptr_t)sym.st_value; 37830Sstevel@tonic-gate haddr = (uintptr_t)sym.st_value + sym.st_size; 37840Sstevel@tonic-gate } 37850Sstevel@tonic-gate 37860Sstevel@tonic-gate for (i = 0; i < depth; i++) 37870Sstevel@tonic-gate if (vs.vs_stack[i] >= laddr && 37880Sstevel@tonic-gate vs.vs_stack[i] < haddr) 37890Sstevel@tonic-gate break; 37900Sstevel@tonic-gate 37910Sstevel@tonic-gate if (i == depth) 37920Sstevel@tonic-gate return (DCMD_OK); 37930Sstevel@tonic-gate } 37940Sstevel@tonic-gate 37950Sstevel@tonic-gate if (thread != NULL && (uintptr_t)vs.vs_thread != thread) 37960Sstevel@tonic-gate return (DCMD_OK); 37970Sstevel@tonic-gate 37980Sstevel@tonic-gate if (earliest != 0 && vs.vs_timestamp < earliest) 37990Sstevel@tonic-gate return (DCMD_OK); 38000Sstevel@tonic-gate 38010Sstevel@tonic-gate if (latest != 0 && vs.vs_timestamp > latest) 38020Sstevel@tonic-gate return (DCMD_OK); 38030Sstevel@tonic-gate } 38040Sstevel@tonic-gate 38050Sstevel@tonic-gate type = (t == VMEM_ALLOC ? "ALLC" : 38060Sstevel@tonic-gate t == VMEM_FREE ? "FREE" : 38070Sstevel@tonic-gate t == VMEM_SPAN ? "SPAN" : 38080Sstevel@tonic-gate t == VMEM_ROTOR ? "ROTR" : 38090Sstevel@tonic-gate t == VMEM_WALKER ? 
"WLKR" : 38100Sstevel@tonic-gate "????"); 38110Sstevel@tonic-gate 38120Sstevel@tonic-gate if (flags & DCMD_PIPE_OUT) { 38130Sstevel@tonic-gate mdb_printf("%#lr\n", addr); 38140Sstevel@tonic-gate return (DCMD_OK); 38150Sstevel@tonic-gate } 38160Sstevel@tonic-gate 38170Sstevel@tonic-gate if (verbose) { 38180Sstevel@tonic-gate mdb_printf("%<b>%16p%</b> %4s %16p %16p %16d\n", 38190Sstevel@tonic-gate addr, type, vs.vs_start, vs.vs_end, sz); 38200Sstevel@tonic-gate 38210Sstevel@tonic-gate if (no_debug) 38220Sstevel@tonic-gate return (DCMD_OK); 38230Sstevel@tonic-gate 38240Sstevel@tonic-gate mdb_printf("%16s %4s %16p %16llx\n", 38250Sstevel@tonic-gate "", "", vs.vs_thread, vs.vs_timestamp); 38260Sstevel@tonic-gate 38270Sstevel@tonic-gate mdb_inc_indent(17); 38280Sstevel@tonic-gate for (i = 0; i < depth; i++) { 38290Sstevel@tonic-gate mdb_printf("%a\n", stk[i]); 38300Sstevel@tonic-gate } 38310Sstevel@tonic-gate mdb_dec_indent(17); 38320Sstevel@tonic-gate mdb_printf("\n"); 38330Sstevel@tonic-gate } else { 38340Sstevel@tonic-gate mdb_printf("%0?p %4s %0?p %0?p", addr, type, 38350Sstevel@tonic-gate vs.vs_start, size? 
sz : vs.vs_end); 38360Sstevel@tonic-gate 38370Sstevel@tonic-gate if (no_debug) { 38380Sstevel@tonic-gate mdb_printf("\n"); 38390Sstevel@tonic-gate return (DCMD_OK); 38400Sstevel@tonic-gate } 38410Sstevel@tonic-gate 38420Sstevel@tonic-gate for (i = 0; i < depth; i++) { 38430Sstevel@tonic-gate if (mdb_lookup_by_addr(stk[i], MDB_SYM_FUZZY, 38440Sstevel@tonic-gate c, sizeof (c), &sym) == -1) 38450Sstevel@tonic-gate continue; 38460Sstevel@tonic-gate if (strncmp(c, "vmem_", 5) == 0) 38470Sstevel@tonic-gate continue; 38480Sstevel@tonic-gate break; 38490Sstevel@tonic-gate } 38500Sstevel@tonic-gate mdb_printf(" %a\n", stk[i]); 38510Sstevel@tonic-gate } 38520Sstevel@tonic-gate return (DCMD_OK); 38530Sstevel@tonic-gate } 38540Sstevel@tonic-gate 38550Sstevel@tonic-gate typedef struct kmalog_data { 38560Sstevel@tonic-gate uintptr_t kma_addr; 38570Sstevel@tonic-gate hrtime_t kma_newest; 38580Sstevel@tonic-gate } kmalog_data_t; 38590Sstevel@tonic-gate 38600Sstevel@tonic-gate /*ARGSUSED*/ 38610Sstevel@tonic-gate static int 38620Sstevel@tonic-gate showbc(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmalog_data_t *kma) 38630Sstevel@tonic-gate { 38640Sstevel@tonic-gate char name[KMEM_CACHE_NAMELEN + 1]; 38650Sstevel@tonic-gate hrtime_t delta; 38660Sstevel@tonic-gate int i, depth; 38670Sstevel@tonic-gate size_t bufsize; 38680Sstevel@tonic-gate 38690Sstevel@tonic-gate if (bcp->bc_timestamp == 0) 38700Sstevel@tonic-gate return (WALK_DONE); 38710Sstevel@tonic-gate 38720Sstevel@tonic-gate if (kma->kma_newest == 0) 38730Sstevel@tonic-gate kma->kma_newest = bcp->bc_timestamp; 38740Sstevel@tonic-gate 38750Sstevel@tonic-gate if (kma->kma_addr) { 38760Sstevel@tonic-gate if (mdb_vread(&bufsize, sizeof (bufsize), 38770Sstevel@tonic-gate (uintptr_t)&bcp->bc_cache->cache_bufsize) == -1) { 38780Sstevel@tonic-gate mdb_warn( 38790Sstevel@tonic-gate "failed to read cache_bufsize for cache at %p", 38800Sstevel@tonic-gate bcp->bc_cache); 38810Sstevel@tonic-gate return (WALK_ERR); 
38820Sstevel@tonic-gate } 38830Sstevel@tonic-gate 38840Sstevel@tonic-gate if (kma->kma_addr < (uintptr_t)bcp->bc_addr || 38850Sstevel@tonic-gate kma->kma_addr >= (uintptr_t)bcp->bc_addr + bufsize) 38860Sstevel@tonic-gate return (WALK_NEXT); 38870Sstevel@tonic-gate } 38880Sstevel@tonic-gate 38890Sstevel@tonic-gate delta = kma->kma_newest - bcp->bc_timestamp; 38900Sstevel@tonic-gate depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH); 38910Sstevel@tonic-gate 38920Sstevel@tonic-gate if (mdb_readstr(name, sizeof (name), (uintptr_t) 38930Sstevel@tonic-gate &bcp->bc_cache->cache_name) <= 0) 38940Sstevel@tonic-gate (void) mdb_snprintf(name, sizeof (name), "%a", bcp->bc_cache); 38950Sstevel@tonic-gate 38960Sstevel@tonic-gate mdb_printf("\nT-%lld.%09lld addr=%p %s\n", 38970Sstevel@tonic-gate delta / NANOSEC, delta % NANOSEC, bcp->bc_addr, name); 38980Sstevel@tonic-gate 38990Sstevel@tonic-gate for (i = 0; i < depth; i++) 39000Sstevel@tonic-gate mdb_printf("\t %a\n", bcp->bc_stack[i]); 39010Sstevel@tonic-gate 39020Sstevel@tonic-gate return (WALK_NEXT); 39030Sstevel@tonic-gate } 39040Sstevel@tonic-gate 39050Sstevel@tonic-gate int 39060Sstevel@tonic-gate kmalog(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 39070Sstevel@tonic-gate { 39080Sstevel@tonic-gate const char *logname = "kmem_transaction_log"; 39090Sstevel@tonic-gate kmalog_data_t kma; 39100Sstevel@tonic-gate 39110Sstevel@tonic-gate if (argc > 1) 39120Sstevel@tonic-gate return (DCMD_USAGE); 39130Sstevel@tonic-gate 39140Sstevel@tonic-gate kma.kma_newest = 0; 39150Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) 39160Sstevel@tonic-gate kma.kma_addr = addr; 39170Sstevel@tonic-gate else 39180Sstevel@tonic-gate kma.kma_addr = NULL; 39190Sstevel@tonic-gate 39200Sstevel@tonic-gate if (argc > 0) { 39210Sstevel@tonic-gate if (argv->a_type != MDB_TYPE_STRING) 39220Sstevel@tonic-gate return (DCMD_USAGE); 39230Sstevel@tonic-gate if (strcmp(argv->a_un.a_str, "fail") == 0) 39240Sstevel@tonic-gate logname = "kmem_failure_log"; 
39250Sstevel@tonic-gate else if (strcmp(argv->a_un.a_str, "slab") == 0) 39260Sstevel@tonic-gate logname = "kmem_slab_log"; 39270Sstevel@tonic-gate else 39280Sstevel@tonic-gate return (DCMD_USAGE); 39290Sstevel@tonic-gate } 39300Sstevel@tonic-gate 39310Sstevel@tonic-gate if (mdb_readvar(&addr, logname) == -1) { 39320Sstevel@tonic-gate mdb_warn("failed to read %s log header pointer"); 39330Sstevel@tonic-gate return (DCMD_ERR); 39340Sstevel@tonic-gate } 39350Sstevel@tonic-gate 39360Sstevel@tonic-gate if (mdb_pwalk("kmem_log", (mdb_walk_cb_t)showbc, &kma, addr) == -1) { 39370Sstevel@tonic-gate mdb_warn("failed to walk kmem log"); 39380Sstevel@tonic-gate return (DCMD_ERR); 39390Sstevel@tonic-gate } 39400Sstevel@tonic-gate 39410Sstevel@tonic-gate return (DCMD_OK); 39420Sstevel@tonic-gate } 39430Sstevel@tonic-gate 39440Sstevel@tonic-gate /* 39450Sstevel@tonic-gate * As the final lure for die-hard crash(1M) users, we provide ::kmausers here. 39460Sstevel@tonic-gate * The first piece is a structure which we use to accumulate kmem_cache_t 39470Sstevel@tonic-gate * addresses of interest. The kmc_add is used as a callback for the kmem_cache 39480Sstevel@tonic-gate * walker; we either add all caches, or ones named explicitly as arguments. 
39490Sstevel@tonic-gate */ 39500Sstevel@tonic-gate 39510Sstevel@tonic-gate typedef struct kmclist { 39520Sstevel@tonic-gate const char *kmc_name; /* Name to match (or NULL) */ 39530Sstevel@tonic-gate uintptr_t *kmc_caches; /* List of kmem_cache_t addrs */ 39540Sstevel@tonic-gate int kmc_nelems; /* Num entries in kmc_caches */ 39550Sstevel@tonic-gate int kmc_size; /* Size of kmc_caches array */ 39560Sstevel@tonic-gate } kmclist_t; 39570Sstevel@tonic-gate 39580Sstevel@tonic-gate static int 39590Sstevel@tonic-gate kmc_add(uintptr_t addr, const kmem_cache_t *cp, kmclist_t *kmc) 39600Sstevel@tonic-gate { 39610Sstevel@tonic-gate void *p; 39620Sstevel@tonic-gate int s; 39630Sstevel@tonic-gate 39640Sstevel@tonic-gate if (kmc->kmc_name == NULL || 39650Sstevel@tonic-gate strcmp(cp->cache_name, kmc->kmc_name) == 0) { 39660Sstevel@tonic-gate /* 39670Sstevel@tonic-gate * If we have a match, grow our array (if necessary), and then 39680Sstevel@tonic-gate * add the virtual address of the matching cache to our list. 39690Sstevel@tonic-gate */ 39700Sstevel@tonic-gate if (kmc->kmc_nelems >= kmc->kmc_size) { 39710Sstevel@tonic-gate s = kmc->kmc_size ? kmc->kmc_size * 2 : 256; 39720Sstevel@tonic-gate p = mdb_alloc(sizeof (uintptr_t) * s, UM_SLEEP | UM_GC); 39730Sstevel@tonic-gate 39740Sstevel@tonic-gate bcopy(kmc->kmc_caches, p, 39750Sstevel@tonic-gate sizeof (uintptr_t) * kmc->kmc_size); 39760Sstevel@tonic-gate 39770Sstevel@tonic-gate kmc->kmc_caches = p; 39780Sstevel@tonic-gate kmc->kmc_size = s; 39790Sstevel@tonic-gate } 39800Sstevel@tonic-gate 39810Sstevel@tonic-gate kmc->kmc_caches[kmc->kmc_nelems++] = addr; 39820Sstevel@tonic-gate return (kmc->kmc_name ? WALK_DONE : WALK_NEXT); 39830Sstevel@tonic-gate } 39840Sstevel@tonic-gate 39850Sstevel@tonic-gate return (WALK_NEXT); 39860Sstevel@tonic-gate } 39870Sstevel@tonic-gate 39880Sstevel@tonic-gate /* 39890Sstevel@tonic-gate * The second piece of ::kmausers is a hash table of allocations. 
Each 39900Sstevel@tonic-gate * allocation owner is identified by its stack trace and data_size. We then 39910Sstevel@tonic-gate * track the total bytes of all such allocations, and the number of allocations 39920Sstevel@tonic-gate * to report at the end. Once we have a list of caches, we walk through the 39930Sstevel@tonic-gate * allocated bufctls of each, and update our hash table accordingly. 39940Sstevel@tonic-gate */ 39950Sstevel@tonic-gate 39960Sstevel@tonic-gate typedef struct kmowner { 39970Sstevel@tonic-gate struct kmowner *kmo_head; /* First hash elt in bucket */ 39980Sstevel@tonic-gate struct kmowner *kmo_next; /* Next hash elt in chain */ 39990Sstevel@tonic-gate size_t kmo_signature; /* Hash table signature */ 40000Sstevel@tonic-gate uint_t kmo_num; /* Number of allocations */ 40010Sstevel@tonic-gate size_t kmo_data_size; /* Size of each allocation */ 40020Sstevel@tonic-gate size_t kmo_total_size; /* Total bytes of allocation */ 40030Sstevel@tonic-gate int kmo_depth; /* Depth of stack trace */ 40040Sstevel@tonic-gate uintptr_t kmo_stack[KMEM_STACK_DEPTH]; /* Stack trace */ 40050Sstevel@tonic-gate } kmowner_t; 40060Sstevel@tonic-gate 40070Sstevel@tonic-gate typedef struct kmusers { 40080Sstevel@tonic-gate uintptr_t kmu_addr; /* address of interest */ 40090Sstevel@tonic-gate const kmem_cache_t *kmu_cache; /* Current kmem cache */ 40100Sstevel@tonic-gate kmowner_t *kmu_hash; /* Hash table of owners */ 40110Sstevel@tonic-gate int kmu_nelems; /* Number of entries in use */ 40120Sstevel@tonic-gate int kmu_size; /* Total number of entries */ 40130Sstevel@tonic-gate } kmusers_t; 40140Sstevel@tonic-gate 40150Sstevel@tonic-gate static void 40160Sstevel@tonic-gate kmu_add(kmusers_t *kmu, const kmem_bufctl_audit_t *bcp, 40170Sstevel@tonic-gate size_t size, size_t data_size) 40180Sstevel@tonic-gate { 40190Sstevel@tonic-gate int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH); 40200Sstevel@tonic-gate size_t bucket, signature = data_size; 40210Sstevel@tonic-gate 
kmowner_t *kmo, *kmoend; 40220Sstevel@tonic-gate 40230Sstevel@tonic-gate /* 40240Sstevel@tonic-gate * If the hash table is full, double its size and rehash everything. 40250Sstevel@tonic-gate */ 40260Sstevel@tonic-gate if (kmu->kmu_nelems >= kmu->kmu_size) { 40270Sstevel@tonic-gate int s = kmu->kmu_size ? kmu->kmu_size * 2 : 1024; 40280Sstevel@tonic-gate 40290Sstevel@tonic-gate kmo = mdb_alloc(sizeof (kmowner_t) * s, UM_SLEEP | UM_GC); 40300Sstevel@tonic-gate bcopy(kmu->kmu_hash, kmo, sizeof (kmowner_t) * kmu->kmu_size); 40310Sstevel@tonic-gate kmu->kmu_hash = kmo; 40320Sstevel@tonic-gate kmu->kmu_size = s; 40330Sstevel@tonic-gate 40340Sstevel@tonic-gate kmoend = kmu->kmu_hash + kmu->kmu_size; 40350Sstevel@tonic-gate for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++) 40360Sstevel@tonic-gate kmo->kmo_head = NULL; 40370Sstevel@tonic-gate 40380Sstevel@tonic-gate kmoend = kmu->kmu_hash + kmu->kmu_nelems; 40390Sstevel@tonic-gate for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++) { 40400Sstevel@tonic-gate bucket = kmo->kmo_signature & (kmu->kmu_size - 1); 40410Sstevel@tonic-gate kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head; 40420Sstevel@tonic-gate kmu->kmu_hash[bucket].kmo_head = kmo; 40430Sstevel@tonic-gate } 40440Sstevel@tonic-gate } 40450Sstevel@tonic-gate 40460Sstevel@tonic-gate /* 40470Sstevel@tonic-gate * Finish computing the hash signature from the stack trace, and then 40480Sstevel@tonic-gate * see if the owner is in the hash table. If so, update our stats. 
40490Sstevel@tonic-gate */ 40500Sstevel@tonic-gate for (i = 0; i < depth; i++) 40510Sstevel@tonic-gate signature += bcp->bc_stack[i]; 40520Sstevel@tonic-gate 40530Sstevel@tonic-gate bucket = signature & (kmu->kmu_size - 1); 40540Sstevel@tonic-gate 40550Sstevel@tonic-gate for (kmo = kmu->kmu_hash[bucket].kmo_head; kmo; kmo = kmo->kmo_next) { 40560Sstevel@tonic-gate if (kmo->kmo_signature == signature) { 40570Sstevel@tonic-gate size_t difference = 0; 40580Sstevel@tonic-gate 40590Sstevel@tonic-gate difference |= kmo->kmo_data_size - data_size; 40600Sstevel@tonic-gate difference |= kmo->kmo_depth - depth; 40610Sstevel@tonic-gate 40620Sstevel@tonic-gate for (i = 0; i < depth; i++) { 40630Sstevel@tonic-gate difference |= kmo->kmo_stack[i] - 40640Sstevel@tonic-gate bcp->bc_stack[i]; 40650Sstevel@tonic-gate } 40660Sstevel@tonic-gate 40670Sstevel@tonic-gate if (difference == 0) { 40680Sstevel@tonic-gate kmo->kmo_total_size += size; 40690Sstevel@tonic-gate kmo->kmo_num++; 40700Sstevel@tonic-gate return; 40710Sstevel@tonic-gate } 40720Sstevel@tonic-gate } 40730Sstevel@tonic-gate } 40740Sstevel@tonic-gate 40750Sstevel@tonic-gate /* 40760Sstevel@tonic-gate * If the owner is not yet hashed, grab the next element and fill it 40770Sstevel@tonic-gate * in based on the allocation information. 
40780Sstevel@tonic-gate */ 40790Sstevel@tonic-gate kmo = &kmu->kmu_hash[kmu->kmu_nelems++]; 40800Sstevel@tonic-gate kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head; 40810Sstevel@tonic-gate kmu->kmu_hash[bucket].kmo_head = kmo; 40820Sstevel@tonic-gate 40830Sstevel@tonic-gate kmo->kmo_signature = signature; 40840Sstevel@tonic-gate kmo->kmo_num = 1; 40850Sstevel@tonic-gate kmo->kmo_data_size = data_size; 40860Sstevel@tonic-gate kmo->kmo_total_size = size; 40870Sstevel@tonic-gate kmo->kmo_depth = depth; 40880Sstevel@tonic-gate 40890Sstevel@tonic-gate for (i = 0; i < depth; i++) 40900Sstevel@tonic-gate kmo->kmo_stack[i] = bcp->bc_stack[i]; 40910Sstevel@tonic-gate } 40920Sstevel@tonic-gate 40930Sstevel@tonic-gate /* 40940Sstevel@tonic-gate * When ::kmausers is invoked without the -f flag, we simply update our hash 40950Sstevel@tonic-gate * table with the information from each allocated bufctl. 40960Sstevel@tonic-gate */ 40970Sstevel@tonic-gate /*ARGSUSED*/ 40980Sstevel@tonic-gate static int 40990Sstevel@tonic-gate kmause1(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu) 41000Sstevel@tonic-gate { 41010Sstevel@tonic-gate const kmem_cache_t *cp = kmu->kmu_cache; 41020Sstevel@tonic-gate 41030Sstevel@tonic-gate kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize); 41040Sstevel@tonic-gate return (WALK_NEXT); 41050Sstevel@tonic-gate } 41060Sstevel@tonic-gate 41070Sstevel@tonic-gate /* 41080Sstevel@tonic-gate * When ::kmausers is invoked with the -f flag, we print out the information 41090Sstevel@tonic-gate * for each bufctl as well as updating the hash table. 
41100Sstevel@tonic-gate */ 41110Sstevel@tonic-gate static int 41120Sstevel@tonic-gate kmause2(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu) 41130Sstevel@tonic-gate { 41140Sstevel@tonic-gate int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH); 41150Sstevel@tonic-gate const kmem_cache_t *cp = kmu->kmu_cache; 41160Sstevel@tonic-gate kmem_bufctl_t bufctl; 41170Sstevel@tonic-gate 41180Sstevel@tonic-gate if (kmu->kmu_addr) { 41190Sstevel@tonic-gate if (mdb_vread(&bufctl, sizeof (bufctl), addr) == -1) 41200Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr); 41210Sstevel@tonic-gate else if (kmu->kmu_addr < (uintptr_t)bufctl.bc_addr || 41220Sstevel@tonic-gate kmu->kmu_addr >= (uintptr_t)bufctl.bc_addr + 41230Sstevel@tonic-gate cp->cache_bufsize) 41240Sstevel@tonic-gate return (WALK_NEXT); 41250Sstevel@tonic-gate } 41260Sstevel@tonic-gate 41270Sstevel@tonic-gate mdb_printf("size %d, addr %p, thread %p, cache %s\n", 41280Sstevel@tonic-gate cp->cache_bufsize, addr, bcp->bc_thread, cp->cache_name); 41290Sstevel@tonic-gate 41300Sstevel@tonic-gate for (i = 0; i < depth; i++) 41310Sstevel@tonic-gate mdb_printf("\t %a\n", bcp->bc_stack[i]); 41320Sstevel@tonic-gate 41330Sstevel@tonic-gate kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize); 41340Sstevel@tonic-gate return (WALK_NEXT); 41350Sstevel@tonic-gate } 41360Sstevel@tonic-gate 41370Sstevel@tonic-gate /* 41380Sstevel@tonic-gate * We sort our results by allocation size before printing them. 
41390Sstevel@tonic-gate */ 41400Sstevel@tonic-gate static int 41410Sstevel@tonic-gate kmownercmp(const void *lp, const void *rp) 41420Sstevel@tonic-gate { 41430Sstevel@tonic-gate const kmowner_t *lhs = lp; 41440Sstevel@tonic-gate const kmowner_t *rhs = rp; 41450Sstevel@tonic-gate 41460Sstevel@tonic-gate return (rhs->kmo_total_size - lhs->kmo_total_size); 41470Sstevel@tonic-gate } 41480Sstevel@tonic-gate 41490Sstevel@tonic-gate /* 41500Sstevel@tonic-gate * The main engine of ::kmausers is relatively straightforward: First we 41510Sstevel@tonic-gate * accumulate our list of kmem_cache_t addresses into the kmclist_t. Next we 41520Sstevel@tonic-gate * iterate over the allocated bufctls of each cache in the list. Finally, 41530Sstevel@tonic-gate * we sort and print our results. 41540Sstevel@tonic-gate */ 41550Sstevel@tonic-gate /*ARGSUSED*/ 41560Sstevel@tonic-gate int 41570Sstevel@tonic-gate kmausers(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 41580Sstevel@tonic-gate { 41590Sstevel@tonic-gate int mem_threshold = 8192; /* Minimum # bytes for printing */ 41600Sstevel@tonic-gate int cnt_threshold = 100; /* Minimum # blocks for printing */ 41610Sstevel@tonic-gate int audited_caches = 0; /* Number of KMF_AUDIT caches found */ 41620Sstevel@tonic-gate int do_all_caches = 1; /* Do all caches (no arguments) */ 41630Sstevel@tonic-gate int opt_e = FALSE; /* Include "small" users */ 41640Sstevel@tonic-gate int opt_f = FALSE; /* Print stack traces */ 41650Sstevel@tonic-gate 41660Sstevel@tonic-gate mdb_walk_cb_t callback = (mdb_walk_cb_t)kmause1; 41670Sstevel@tonic-gate kmowner_t *kmo, *kmoend; 41680Sstevel@tonic-gate int i, oelems; 41690Sstevel@tonic-gate 41700Sstevel@tonic-gate kmclist_t kmc; 41710Sstevel@tonic-gate kmusers_t kmu; 41720Sstevel@tonic-gate 41730Sstevel@tonic-gate bzero(&kmc, sizeof (kmc)); 41740Sstevel@tonic-gate bzero(&kmu, sizeof (kmu)); 41750Sstevel@tonic-gate 41760Sstevel@tonic-gate while ((i = mdb_getopts(argc, argv, 41770Sstevel@tonic-gate 
'e', MDB_OPT_SETBITS, TRUE, &opt_e, 41780Sstevel@tonic-gate 'f', MDB_OPT_SETBITS, TRUE, &opt_f, NULL)) != argc) { 41790Sstevel@tonic-gate 41800Sstevel@tonic-gate argv += i; /* skip past options we just processed */ 41810Sstevel@tonic-gate argc -= i; /* adjust argc */ 41820Sstevel@tonic-gate 41830Sstevel@tonic-gate if (argv->a_type != MDB_TYPE_STRING || *argv->a_un.a_str == '-') 41840Sstevel@tonic-gate return (DCMD_USAGE); 41850Sstevel@tonic-gate 41860Sstevel@tonic-gate oelems = kmc.kmc_nelems; 41870Sstevel@tonic-gate kmc.kmc_name = argv->a_un.a_str; 41880Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc); 41890Sstevel@tonic-gate 41900Sstevel@tonic-gate if (kmc.kmc_nelems == oelems) { 41910Sstevel@tonic-gate mdb_warn("unknown kmem cache: %s\n", kmc.kmc_name); 41920Sstevel@tonic-gate return (DCMD_ERR); 41930Sstevel@tonic-gate } 41940Sstevel@tonic-gate 41950Sstevel@tonic-gate do_all_caches = 0; 41960Sstevel@tonic-gate argv++; 41970Sstevel@tonic-gate argc--; 41980Sstevel@tonic-gate } 41990Sstevel@tonic-gate 42000Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 42010Sstevel@tonic-gate opt_f = TRUE; 42020Sstevel@tonic-gate kmu.kmu_addr = addr; 42030Sstevel@tonic-gate } else { 42040Sstevel@tonic-gate kmu.kmu_addr = NULL; 42050Sstevel@tonic-gate } 42060Sstevel@tonic-gate 42070Sstevel@tonic-gate if (opt_e) 42080Sstevel@tonic-gate mem_threshold = cnt_threshold = 0; 42090Sstevel@tonic-gate 42100Sstevel@tonic-gate if (opt_f) 42110Sstevel@tonic-gate callback = (mdb_walk_cb_t)kmause2; 42120Sstevel@tonic-gate 42130Sstevel@tonic-gate if (do_all_caches) { 42140Sstevel@tonic-gate kmc.kmc_name = NULL; /* match all cache names */ 42150Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc); 42160Sstevel@tonic-gate } 42170Sstevel@tonic-gate 42180Sstevel@tonic-gate for (i = 0; i < kmc.kmc_nelems; i++) { 42190Sstevel@tonic-gate uintptr_t cp = kmc.kmc_caches[i]; 42200Sstevel@tonic-gate kmem_cache_t c; 42210Sstevel@tonic-gate 
42220Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), cp) == -1) { 42230Sstevel@tonic-gate mdb_warn("failed to read cache at %p", cp); 42240Sstevel@tonic-gate continue; 42250Sstevel@tonic-gate } 42260Sstevel@tonic-gate 42270Sstevel@tonic-gate if (!(c.cache_flags & KMF_AUDIT)) { 42280Sstevel@tonic-gate if (!do_all_caches) { 42290Sstevel@tonic-gate mdb_warn("KMF_AUDIT is not enabled for %s\n", 42300Sstevel@tonic-gate c.cache_name); 42310Sstevel@tonic-gate } 42320Sstevel@tonic-gate continue; 42330Sstevel@tonic-gate } 42340Sstevel@tonic-gate 42350Sstevel@tonic-gate kmu.kmu_cache = &c; 42360Sstevel@tonic-gate (void) mdb_pwalk("bufctl", callback, &kmu, cp); 42370Sstevel@tonic-gate audited_caches++; 42380Sstevel@tonic-gate } 42390Sstevel@tonic-gate 42400Sstevel@tonic-gate if (audited_caches == 0 && do_all_caches) { 42410Sstevel@tonic-gate mdb_warn("KMF_AUDIT is not enabled for any caches\n"); 42420Sstevel@tonic-gate return (DCMD_ERR); 42430Sstevel@tonic-gate } 42440Sstevel@tonic-gate 42450Sstevel@tonic-gate qsort(kmu.kmu_hash, kmu.kmu_nelems, sizeof (kmowner_t), kmownercmp); 42460Sstevel@tonic-gate kmoend = kmu.kmu_hash + kmu.kmu_nelems; 42470Sstevel@tonic-gate 42480Sstevel@tonic-gate for (kmo = kmu.kmu_hash; kmo < kmoend; kmo++) { 42490Sstevel@tonic-gate if (kmo->kmo_total_size < mem_threshold && 42500Sstevel@tonic-gate kmo->kmo_num < cnt_threshold) 42510Sstevel@tonic-gate continue; 42520Sstevel@tonic-gate mdb_printf("%lu bytes for %u allocations with data size %lu:\n", 42530Sstevel@tonic-gate kmo->kmo_total_size, kmo->kmo_num, kmo->kmo_data_size); 42540Sstevel@tonic-gate for (i = 0; i < kmo->kmo_depth; i++) 42550Sstevel@tonic-gate mdb_printf("\t %a\n", kmo->kmo_stack[i]); 42560Sstevel@tonic-gate } 42570Sstevel@tonic-gate 42580Sstevel@tonic-gate return (DCMD_OK); 42590Sstevel@tonic-gate } 42600Sstevel@tonic-gate 42610Sstevel@tonic-gate void 42620Sstevel@tonic-gate kmausers_help(void) 42630Sstevel@tonic-gate { 42640Sstevel@tonic-gate mdb_printf( 42650Sstevel@tonic-gate 
"Displays the largest users of the kmem allocator, sorted by \n" 42660Sstevel@tonic-gate "trace. If one or more caches is specified, only those caches\n" 42670Sstevel@tonic-gate "will be searched. By default, all caches are searched. If an\n" 42680Sstevel@tonic-gate "address is specified, then only those allocations which include\n" 42690Sstevel@tonic-gate "the given address are displayed. Specifying an address implies\n" 42700Sstevel@tonic-gate "-f.\n" 42710Sstevel@tonic-gate "\n" 42720Sstevel@tonic-gate "\t-e\tInclude all users, not just the largest\n" 42730Sstevel@tonic-gate "\t-f\tDisplay individual allocations. By default, users are\n" 42740Sstevel@tonic-gate "\t\tgrouped by stack\n"); 42750Sstevel@tonic-gate } 42760Sstevel@tonic-gate 42770Sstevel@tonic-gate static int 42780Sstevel@tonic-gate kmem_ready_check(void) 42790Sstevel@tonic-gate { 42800Sstevel@tonic-gate int ready; 42810Sstevel@tonic-gate 42820Sstevel@tonic-gate if (mdb_readvar(&ready, "kmem_ready") < 0) 42830Sstevel@tonic-gate return (-1); /* errno is set for us */ 42840Sstevel@tonic-gate 42850Sstevel@tonic-gate return (ready); 42860Sstevel@tonic-gate } 42870Sstevel@tonic-gate 42888721SJonathan.Adams@Sun.COM void 42898721SJonathan.Adams@Sun.COM kmem_statechange(void) 42900Sstevel@tonic-gate { 42911528Sjwadams static int been_ready = 0; 42921528Sjwadams 42931528Sjwadams if (been_ready) 42941528Sjwadams return; 42951528Sjwadams 42960Sstevel@tonic-gate if (kmem_ready_check() <= 0) 42970Sstevel@tonic-gate return; 42980Sstevel@tonic-gate 42991528Sjwadams been_ready = 1; 43000Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_init_walkers, NULL); 43010Sstevel@tonic-gate } 43020Sstevel@tonic-gate 43030Sstevel@tonic-gate void 43040Sstevel@tonic-gate kmem_init(void) 43050Sstevel@tonic-gate { 43060Sstevel@tonic-gate mdb_walker_t w = { 43070Sstevel@tonic-gate "kmem_cache", "walk list of kmem caches", kmem_cache_walk_init, 43086712Stomee list_walk_step, list_walk_fini 43090Sstevel@tonic-gate 
}; 43100Sstevel@tonic-gate 43110Sstevel@tonic-gate /* 43120Sstevel@tonic-gate * If kmem is ready, we'll need to invoke the kmem_cache walker 43130Sstevel@tonic-gate * immediately. Walkers in the linkage structure won't be ready until 43140Sstevel@tonic-gate * _mdb_init returns, so we'll need to add this one manually. If kmem 43150Sstevel@tonic-gate * is ready, we'll use the walker to initialize the caches. If kmem 43160Sstevel@tonic-gate * isn't ready, we'll register a callback that will allow us to defer 43170Sstevel@tonic-gate * cache walking until it is. 43180Sstevel@tonic-gate */ 43190Sstevel@tonic-gate if (mdb_add_walker(&w) != 0) { 43200Sstevel@tonic-gate mdb_warn("failed to add kmem_cache walker"); 43210Sstevel@tonic-gate return; 43220Sstevel@tonic-gate } 43230Sstevel@tonic-gate 43248721SJonathan.Adams@Sun.COM kmem_statechange(); 43250Sstevel@tonic-gate } 43260Sstevel@tonic-gate 43270Sstevel@tonic-gate typedef struct whatthread { 43280Sstevel@tonic-gate uintptr_t wt_target; 43290Sstevel@tonic-gate int wt_verbose; 43300Sstevel@tonic-gate } whatthread_t; 43310Sstevel@tonic-gate 43320Sstevel@tonic-gate static int 43330Sstevel@tonic-gate whatthread_walk_thread(uintptr_t addr, const kthread_t *t, whatthread_t *w) 43340Sstevel@tonic-gate { 43350Sstevel@tonic-gate uintptr_t current, data; 43360Sstevel@tonic-gate 43370Sstevel@tonic-gate if (t->t_stkbase == NULL) 43380Sstevel@tonic-gate return (WALK_NEXT); 43390Sstevel@tonic-gate 43400Sstevel@tonic-gate /* 43410Sstevel@tonic-gate * Warn about swapped out threads, but drive on anyway 43420Sstevel@tonic-gate */ 43430Sstevel@tonic-gate if (!(t->t_schedflag & TS_LOAD)) { 43440Sstevel@tonic-gate mdb_warn("thread %p's stack swapped out\n", addr); 43450Sstevel@tonic-gate return (WALK_NEXT); 43460Sstevel@tonic-gate } 43470Sstevel@tonic-gate 43480Sstevel@tonic-gate /* 43490Sstevel@tonic-gate * Search the thread's stack for the given pointer. 
Note that it would 43500Sstevel@tonic-gate * be more efficient to follow ::kgrep's lead and read in page-sized 43510Sstevel@tonic-gate * chunks, but this routine is already fast and simple. 43520Sstevel@tonic-gate */ 43530Sstevel@tonic-gate for (current = (uintptr_t)t->t_stkbase; current < (uintptr_t)t->t_stk; 43540Sstevel@tonic-gate current += sizeof (uintptr_t)) { 43550Sstevel@tonic-gate if (mdb_vread(&data, sizeof (data), current) == -1) { 43560Sstevel@tonic-gate mdb_warn("couldn't read thread %p's stack at %p", 43570Sstevel@tonic-gate addr, current); 43580Sstevel@tonic-gate return (WALK_ERR); 43590Sstevel@tonic-gate } 43600Sstevel@tonic-gate 43610Sstevel@tonic-gate if (data == w->wt_target) { 43620Sstevel@tonic-gate if (w->wt_verbose) { 43630Sstevel@tonic-gate mdb_printf("%p in thread %p's stack%s\n", 43640Sstevel@tonic-gate current, addr, stack_active(t, current)); 43650Sstevel@tonic-gate } else { 43660Sstevel@tonic-gate mdb_printf("%#lr\n", addr); 43670Sstevel@tonic-gate return (WALK_NEXT); 43680Sstevel@tonic-gate } 43690Sstevel@tonic-gate } 43700Sstevel@tonic-gate } 43710Sstevel@tonic-gate 43720Sstevel@tonic-gate return (WALK_NEXT); 43730Sstevel@tonic-gate } 43740Sstevel@tonic-gate 43750Sstevel@tonic-gate int 43760Sstevel@tonic-gate whatthread(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 43770Sstevel@tonic-gate { 43780Sstevel@tonic-gate whatthread_t w; 43790Sstevel@tonic-gate 43800Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 43810Sstevel@tonic-gate return (DCMD_USAGE); 43820Sstevel@tonic-gate 43830Sstevel@tonic-gate w.wt_verbose = FALSE; 43840Sstevel@tonic-gate w.wt_target = addr; 43850Sstevel@tonic-gate 43860Sstevel@tonic-gate if (mdb_getopts(argc, argv, 43870Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &w.wt_verbose, NULL) != argc) 43880Sstevel@tonic-gate return (DCMD_USAGE); 43890Sstevel@tonic-gate 43900Sstevel@tonic-gate if (mdb_walk("thread", (mdb_walk_cb_t)whatthread_walk_thread, &w) 43910Sstevel@tonic-gate == -1) { 
43920Sstevel@tonic-gate mdb_warn("couldn't walk threads"); 43930Sstevel@tonic-gate return (DCMD_ERR); 43940Sstevel@tonic-gate } 43950Sstevel@tonic-gate 43960Sstevel@tonic-gate return (DCMD_OK); 43970Sstevel@tonic-gate } 4398