10Sstevel@tonic-gate /*
20Sstevel@tonic-gate * CDDL HEADER START
30Sstevel@tonic-gate *
40Sstevel@tonic-gate * The contents of this file are subject to the terms of the
51528Sjwadams * Common Development and Distribution License (the "License").
61528Sjwadams * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate *
80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate * See the License for the specific language governing permissions
110Sstevel@tonic-gate * and limitations under the License.
120Sstevel@tonic-gate *
130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate *
190Sstevel@tonic-gate * CDDL HEADER END
200Sstevel@tonic-gate */
210Sstevel@tonic-gate /*
228721SJonathan.Adams@Sun.COM * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
230Sstevel@tonic-gate * Use is subject to license terms.
240Sstevel@tonic-gate */
250Sstevel@tonic-gate
260Sstevel@tonic-gate #include <mdb/mdb_param.h>
270Sstevel@tonic-gate #include <mdb/mdb_modapi.h>
280Sstevel@tonic-gate #include <mdb/mdb_ctf.h>
2910610SJonathan.Adams@Sun.COM #include <mdb/mdb_whatis.h>
300Sstevel@tonic-gate #include <sys/cpuvar.h>
310Sstevel@tonic-gate #include <sys/kmem_impl.h>
320Sstevel@tonic-gate #include <sys/vmem_impl.h>
330Sstevel@tonic-gate #include <sys/machelf.h>
340Sstevel@tonic-gate #include <sys/modctl.h>
350Sstevel@tonic-gate #include <sys/kobj.h>
360Sstevel@tonic-gate #include <sys/panic.h>
370Sstevel@tonic-gate #include <sys/stack.h>
380Sstevel@tonic-gate #include <sys/sysmacros.h>
390Sstevel@tonic-gate #include <vm/page.h>
400Sstevel@tonic-gate
416712Stomee #include "avl.h"
426712Stomee #include "combined.h"
434798Stomee #include "dist.h"
440Sstevel@tonic-gate #include "kmem.h"
456712Stomee #include "list.h"
460Sstevel@tonic-gate
470Sstevel@tonic-gate #define dprintf(x) if (mdb_debug_level) { \
480Sstevel@tonic-gate mdb_printf("kmem debug: "); \
490Sstevel@tonic-gate /*CSTYLED*/\
500Sstevel@tonic-gate mdb_printf x ;\
510Sstevel@tonic-gate }
520Sstevel@tonic-gate
530Sstevel@tonic-gate #define KM_ALLOCATED 0x01
540Sstevel@tonic-gate #define KM_FREE 0x02
550Sstevel@tonic-gate #define KM_BUFCTL 0x04
560Sstevel@tonic-gate #define KM_CONSTRUCTED 0x08 /* only constructed free buffers */
570Sstevel@tonic-gate #define KM_HASH 0x10
580Sstevel@tonic-gate
590Sstevel@tonic-gate static int mdb_debug_level = 0;
600Sstevel@tonic-gate
610Sstevel@tonic-gate /*ARGSUSED*/
620Sstevel@tonic-gate static int
kmem_init_walkers(uintptr_t addr,const kmem_cache_t * c,void * ignored)630Sstevel@tonic-gate kmem_init_walkers(uintptr_t addr, const kmem_cache_t *c, void *ignored)
640Sstevel@tonic-gate {
650Sstevel@tonic-gate mdb_walker_t w;
660Sstevel@tonic-gate char descr[64];
670Sstevel@tonic-gate
680Sstevel@tonic-gate (void) mdb_snprintf(descr, sizeof (descr),
690Sstevel@tonic-gate "walk the %s cache", c->cache_name);
700Sstevel@tonic-gate
710Sstevel@tonic-gate w.walk_name = c->cache_name;
720Sstevel@tonic-gate w.walk_descr = descr;
730Sstevel@tonic-gate w.walk_init = kmem_walk_init;
740Sstevel@tonic-gate w.walk_step = kmem_walk_step;
750Sstevel@tonic-gate w.walk_fini = kmem_walk_fini;
760Sstevel@tonic-gate w.walk_init_arg = (void *)addr;
770Sstevel@tonic-gate
780Sstevel@tonic-gate if (mdb_add_walker(&w) == -1)
790Sstevel@tonic-gate mdb_warn("failed to add %s walker", c->cache_name);
800Sstevel@tonic-gate
810Sstevel@tonic-gate return (WALK_NEXT);
820Sstevel@tonic-gate }
830Sstevel@tonic-gate
840Sstevel@tonic-gate /*ARGSUSED*/
850Sstevel@tonic-gate int
kmem_debug(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)860Sstevel@tonic-gate kmem_debug(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
870Sstevel@tonic-gate {
880Sstevel@tonic-gate mdb_debug_level ^= 1;
890Sstevel@tonic-gate
900Sstevel@tonic-gate mdb_printf("kmem: debugging is now %s\n",
910Sstevel@tonic-gate mdb_debug_level ? "on" : "off");
920Sstevel@tonic-gate
930Sstevel@tonic-gate return (DCMD_OK);
940Sstevel@tonic-gate }
950Sstevel@tonic-gate
960Sstevel@tonic-gate int
kmem_cache_walk_init(mdb_walk_state_t * wsp)970Sstevel@tonic-gate kmem_cache_walk_init(mdb_walk_state_t *wsp)
980Sstevel@tonic-gate {
990Sstevel@tonic-gate GElf_Sym sym;
1000Sstevel@tonic-gate
1016712Stomee if (mdb_lookup_by_name("kmem_caches", &sym) == -1) {
1026712Stomee mdb_warn("couldn't find kmem_caches");
1030Sstevel@tonic-gate return (WALK_ERR);
1040Sstevel@tonic-gate }
1050Sstevel@tonic-gate
1066712Stomee wsp->walk_addr = (uintptr_t)sym.st_value;
1076712Stomee
1086712Stomee return (list_walk_init_named(wsp, "cache list", "cache"));
1090Sstevel@tonic-gate }
1100Sstevel@tonic-gate
1110Sstevel@tonic-gate int
kmem_cpu_cache_walk_init(mdb_walk_state_t * wsp)1120Sstevel@tonic-gate kmem_cpu_cache_walk_init(mdb_walk_state_t *wsp)
1130Sstevel@tonic-gate {
1140Sstevel@tonic-gate if (wsp->walk_addr == NULL) {
1150Sstevel@tonic-gate mdb_warn("kmem_cpu_cache doesn't support global walks");
1160Sstevel@tonic-gate return (WALK_ERR);
1170Sstevel@tonic-gate }
1180Sstevel@tonic-gate
1190Sstevel@tonic-gate if (mdb_layered_walk("cpu", wsp) == -1) {
1200Sstevel@tonic-gate mdb_warn("couldn't walk 'cpu'");
1210Sstevel@tonic-gate return (WALK_ERR);
1220Sstevel@tonic-gate }
1230Sstevel@tonic-gate
1240Sstevel@tonic-gate wsp->walk_data = (void *)wsp->walk_addr;
1250Sstevel@tonic-gate
1260Sstevel@tonic-gate return (WALK_NEXT);
1270Sstevel@tonic-gate }
1280Sstevel@tonic-gate
1290Sstevel@tonic-gate int
kmem_cpu_cache_walk_step(mdb_walk_state_t * wsp)1300Sstevel@tonic-gate kmem_cpu_cache_walk_step(mdb_walk_state_t *wsp)
1310Sstevel@tonic-gate {
1320Sstevel@tonic-gate uintptr_t caddr = (uintptr_t)wsp->walk_data;
1330Sstevel@tonic-gate const cpu_t *cpu = wsp->walk_layer;
1340Sstevel@tonic-gate kmem_cpu_cache_t cc;
1350Sstevel@tonic-gate
1369019SMichael.Corcoran@Sun.COM caddr += OFFSETOF(kmem_cache_t, cache_cpu[cpu->cpu_seqid]);
1370Sstevel@tonic-gate
1380Sstevel@tonic-gate if (mdb_vread(&cc, sizeof (kmem_cpu_cache_t), caddr) == -1) {
1390Sstevel@tonic-gate mdb_warn("couldn't read kmem_cpu_cache at %p", caddr);
1400Sstevel@tonic-gate return (WALK_ERR);
1410Sstevel@tonic-gate }
1420Sstevel@tonic-gate
1430Sstevel@tonic-gate return (wsp->walk_callback(caddr, &cc, wsp->walk_cbdata));
1440Sstevel@tonic-gate }
1450Sstevel@tonic-gate
1466712Stomee static int
kmem_slab_check(void * p,uintptr_t saddr,void * arg)1476712Stomee kmem_slab_check(void *p, uintptr_t saddr, void *arg)
1486712Stomee {
1496712Stomee kmem_slab_t *sp = p;
1506712Stomee uintptr_t caddr = (uintptr_t)arg;
1516712Stomee if ((uintptr_t)sp->slab_cache != caddr) {
1526712Stomee mdb_warn("slab %p isn't in cache %p (in cache %p)\n",
1536712Stomee saddr, caddr, sp->slab_cache);
1546712Stomee return (-1);
1556712Stomee }
1566712Stomee
1576712Stomee return (0);
1586712Stomee }
1596712Stomee
1606712Stomee static int
kmem_partial_slab_check(void * p,uintptr_t saddr,void * arg)1616712Stomee kmem_partial_slab_check(void *p, uintptr_t saddr, void *arg)
1626712Stomee {
1636712Stomee kmem_slab_t *sp = p;
1646712Stomee
1656712Stomee int rc = kmem_slab_check(p, saddr, arg);
1666712Stomee if (rc != 0) {
1676712Stomee return (rc);
1686712Stomee }
1696712Stomee
1706712Stomee if (!KMEM_SLAB_IS_PARTIAL(sp)) {
1716712Stomee mdb_warn("slab %p is not a partial slab\n", saddr);
1726712Stomee return (-1);
1736712Stomee }
1746712Stomee
1756712Stomee return (0);
1766712Stomee }
1776712Stomee
1786712Stomee static int
kmem_complete_slab_check(void * p,uintptr_t saddr,void * arg)1796712Stomee kmem_complete_slab_check(void *p, uintptr_t saddr, void *arg)
1806712Stomee {
1816712Stomee kmem_slab_t *sp = p;
1826712Stomee
1836712Stomee int rc = kmem_slab_check(p, saddr, arg);
1846712Stomee if (rc != 0) {
1856712Stomee return (rc);
1866712Stomee }
1876712Stomee
1886712Stomee if (!KMEM_SLAB_IS_ALL_USED(sp)) {
1896712Stomee mdb_warn("slab %p is not completely allocated\n", saddr);
1906712Stomee return (-1);
1916712Stomee }
1926712Stomee
1936712Stomee return (0);
1946712Stomee }
1956712Stomee
1966712Stomee typedef struct {
1976712Stomee uintptr_t kns_cache_addr;
1986712Stomee int kns_nslabs;
1996712Stomee } kmem_nth_slab_t;
2006712Stomee
2016712Stomee static int
kmem_nth_slab_check(void * p,uintptr_t saddr,void * arg)2026712Stomee kmem_nth_slab_check(void *p, uintptr_t saddr, void *arg)
2036712Stomee {
2046712Stomee kmem_nth_slab_t *chkp = arg;
2056712Stomee
2066712Stomee int rc = kmem_slab_check(p, saddr, (void *)chkp->kns_cache_addr);
2076712Stomee if (rc != 0) {
2086712Stomee return (rc);
2096712Stomee }
2106712Stomee
2116712Stomee return (chkp->kns_nslabs-- == 0 ? 1 : 0);
2126712Stomee }
2136712Stomee
2146712Stomee static int
kmem_complete_slab_walk_init(mdb_walk_state_t * wsp)2156712Stomee kmem_complete_slab_walk_init(mdb_walk_state_t *wsp)
2166712Stomee {
2176712Stomee uintptr_t caddr = wsp->walk_addr;
2186712Stomee
2196712Stomee wsp->walk_addr = (uintptr_t)(caddr +
2206712Stomee offsetof(kmem_cache_t, cache_complete_slabs));
2216712Stomee
2226712Stomee return (list_walk_init_checked(wsp, "slab list", "slab",
2236712Stomee kmem_complete_slab_check, (void *)caddr));
2246712Stomee }
2256712Stomee
2266712Stomee static int
kmem_partial_slab_walk_init(mdb_walk_state_t * wsp)2276712Stomee kmem_partial_slab_walk_init(mdb_walk_state_t *wsp)
2286712Stomee {
2296712Stomee uintptr_t caddr = wsp->walk_addr;
2306712Stomee
2316712Stomee wsp->walk_addr = (uintptr_t)(caddr +
2326712Stomee offsetof(kmem_cache_t, cache_partial_slabs));
2336712Stomee
2346712Stomee return (avl_walk_init_checked(wsp, "slab list", "slab",
2356712Stomee kmem_partial_slab_check, (void *)caddr));
2366712Stomee }
2376712Stomee
2380Sstevel@tonic-gate int
kmem_slab_walk_init(mdb_walk_state_t * wsp)2390Sstevel@tonic-gate kmem_slab_walk_init(mdb_walk_state_t *wsp)
2400Sstevel@tonic-gate {
2410Sstevel@tonic-gate uintptr_t caddr = wsp->walk_addr;
2420Sstevel@tonic-gate
2430Sstevel@tonic-gate if (caddr == NULL) {
2440Sstevel@tonic-gate mdb_warn("kmem_slab doesn't support global walks\n");
2450Sstevel@tonic-gate return (WALK_ERR);
2460Sstevel@tonic-gate }
2470Sstevel@tonic-gate
2486712Stomee combined_walk_init(wsp);
2496712Stomee combined_walk_add(wsp,
2506712Stomee kmem_complete_slab_walk_init, list_walk_step, list_walk_fini);
2516712Stomee combined_walk_add(wsp,
2526712Stomee kmem_partial_slab_walk_init, avl_walk_step, avl_walk_fini);
2530Sstevel@tonic-gate
2540Sstevel@tonic-gate return (WALK_NEXT);
2550Sstevel@tonic-gate }
2560Sstevel@tonic-gate
2576712Stomee static int
kmem_first_complete_slab_walk_init(mdb_walk_state_t * wsp)2586712Stomee kmem_first_complete_slab_walk_init(mdb_walk_state_t *wsp)
2596712Stomee {
2606712Stomee uintptr_t caddr = wsp->walk_addr;
2616712Stomee kmem_nth_slab_t *chk;
2626712Stomee
2636712Stomee chk = mdb_alloc(sizeof (kmem_nth_slab_t),
2646712Stomee UM_SLEEP | UM_GC);
2656712Stomee chk->kns_cache_addr = caddr;
2666712Stomee chk->kns_nslabs = 1;
2676712Stomee wsp->walk_addr = (uintptr_t)(caddr +
2686712Stomee offsetof(kmem_cache_t, cache_complete_slabs));
2696712Stomee
2706712Stomee return (list_walk_init_checked(wsp, "slab list", "slab",
2716712Stomee kmem_nth_slab_check, chk));
2726712Stomee }
2736712Stomee
2740Sstevel@tonic-gate int
kmem_slab_walk_partial_init(mdb_walk_state_t * wsp)2750Sstevel@tonic-gate kmem_slab_walk_partial_init(mdb_walk_state_t *wsp)
2760Sstevel@tonic-gate {
2770Sstevel@tonic-gate uintptr_t caddr = wsp->walk_addr;
2780Sstevel@tonic-gate kmem_cache_t c;
2790Sstevel@tonic-gate
2800Sstevel@tonic-gate if (caddr == NULL) {
2810Sstevel@tonic-gate mdb_warn("kmem_slab_partial doesn't support global walks\n");
2820Sstevel@tonic-gate return (WALK_ERR);
2830Sstevel@tonic-gate }
2840Sstevel@tonic-gate
2850Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), caddr) == -1) {
2860Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache at %p", caddr);
2870Sstevel@tonic-gate return (WALK_ERR);
2880Sstevel@tonic-gate }
2890Sstevel@tonic-gate
2906712Stomee combined_walk_init(wsp);
2910Sstevel@tonic-gate
2920Sstevel@tonic-gate /*
2930Sstevel@tonic-gate * Some consumers (umem_walk_step(), in particular) require at
2940Sstevel@tonic-gate * least one callback if there are any buffers in the cache. So
2956712Stomee * if there are *no* partial slabs, report the first full slab, if
2960Sstevel@tonic-gate * any.
2970Sstevel@tonic-gate *
2980Sstevel@tonic-gate * Yes, this is ugly, but it's cleaner than the other possibilities.
2990Sstevel@tonic-gate */
3006712Stomee if (c.cache_partial_slabs.avl_numnodes == 0) {
3016712Stomee combined_walk_add(wsp, kmem_first_complete_slab_walk_init,
3026712Stomee list_walk_step, list_walk_fini);
3036712Stomee } else {
3046712Stomee combined_walk_add(wsp, kmem_partial_slab_walk_init,
3056712Stomee avl_walk_step, avl_walk_fini);
3066712Stomee }
3070Sstevel@tonic-gate
3080Sstevel@tonic-gate return (WALK_NEXT);
3090Sstevel@tonic-gate }
3100Sstevel@tonic-gate
3110Sstevel@tonic-gate int
kmem_cache(uintptr_t addr,uint_t flags,int ac,const mdb_arg_t * argv)3120Sstevel@tonic-gate kmem_cache(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv)
3130Sstevel@tonic-gate {
3140Sstevel@tonic-gate kmem_cache_t c;
3156712Stomee const char *filter = NULL;
3166712Stomee
3176712Stomee if (mdb_getopts(ac, argv,
3186712Stomee 'n', MDB_OPT_STR, &filter,
3196712Stomee NULL) != ac) {
3206712Stomee return (DCMD_USAGE);
3216712Stomee }
3220Sstevel@tonic-gate
3230Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) {
3240Sstevel@tonic-gate if (mdb_walk_dcmd("kmem_cache", "kmem_cache", ac, argv) == -1) {
3250Sstevel@tonic-gate mdb_warn("can't walk kmem_cache");
3260Sstevel@tonic-gate return (DCMD_ERR);
3270Sstevel@tonic-gate }
3280Sstevel@tonic-gate return (DCMD_OK);
3290Sstevel@tonic-gate }
3300Sstevel@tonic-gate
3310Sstevel@tonic-gate if (DCMD_HDRSPEC(flags))
3320Sstevel@tonic-gate mdb_printf("%-?s %-25s %4s %6s %8s %8s\n", "ADDR", "NAME",
3330Sstevel@tonic-gate "FLAG", "CFLAG", "BUFSIZE", "BUFTOTL");
3340Sstevel@tonic-gate
3350Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), addr) == -1) {
3360Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache at %p", addr);
3370Sstevel@tonic-gate return (DCMD_ERR);
3380Sstevel@tonic-gate }
3390Sstevel@tonic-gate
3406712Stomee if ((filter != NULL) && (strstr(c.cache_name, filter) == NULL))
3416712Stomee return (DCMD_OK);
3426712Stomee
3430Sstevel@tonic-gate mdb_printf("%0?p %-25s %04x %06x %8ld %8lld\n", addr, c.cache_name,
3440Sstevel@tonic-gate c.cache_flags, c.cache_cflags, c.cache_bufsize, c.cache_buftotal);
3450Sstevel@tonic-gate
3460Sstevel@tonic-gate return (DCMD_OK);
3470Sstevel@tonic-gate }
3480Sstevel@tonic-gate
3496712Stomee void
kmem_cache_help(void)3506712Stomee kmem_cache_help(void)
3516712Stomee {
3526712Stomee mdb_printf("%s", "Print kernel memory caches.\n\n");
3536712Stomee mdb_dec_indent(2);
3546712Stomee mdb_printf("%<b>OPTIONS%</b>\n");
3556712Stomee mdb_inc_indent(2);
3566712Stomee mdb_printf("%s",
3576712Stomee " -n name\n"
3586712Stomee " name of kmem cache (or matching partial name)\n"
3596712Stomee "\n"
3606712Stomee "Column\tDescription\n"
3616712Stomee "\n"
3626712Stomee "ADDR\t\taddress of kmem cache\n"
3636712Stomee "NAME\t\tname of kmem cache\n"
3646712Stomee "FLAG\t\tvarious cache state flags\n"
3656712Stomee "CFLAG\t\tcache creation flags\n"
3666712Stomee "BUFSIZE\tobject size in bytes\n"
3676712Stomee "BUFTOTL\tcurrent total buffers in cache (allocated and free)\n");
3686712Stomee }
3694688Stomee
3704688Stomee #define LABEL_WIDTH 11
3714688Stomee static void
kmem_slabs_print_dist(uint_t * ks_bucket,size_t buffers_per_slab,size_t maxbuckets,size_t minbucketsize)3724688Stomee kmem_slabs_print_dist(uint_t *ks_bucket, size_t buffers_per_slab,
3734688Stomee size_t maxbuckets, size_t minbucketsize)
3744688Stomee {
3754688Stomee uint64_t total;
3764688Stomee int buckets;
3774688Stomee int i;
3784688Stomee const int *distarray;
3794688Stomee int complete[2];
3804688Stomee
3814688Stomee buckets = buffers_per_slab;
3824688Stomee
3834688Stomee total = 0;
3844688Stomee for (i = 0; i <= buffers_per_slab; i++)
3854688Stomee total += ks_bucket[i];
3864688Stomee
3874688Stomee if (maxbuckets > 1)
3884688Stomee buckets = MIN(buckets, maxbuckets);
3894688Stomee
3904688Stomee if (minbucketsize > 1) {
3914688Stomee /*
3924688Stomee * minbucketsize does not apply to the first bucket reserved
3934688Stomee * for completely allocated slabs
3944688Stomee */
3954688Stomee buckets = MIN(buckets, 1 + ((buffers_per_slab - 1) /
3964688Stomee minbucketsize));
3974688Stomee if ((buckets < 2) && (buffers_per_slab > 1)) {
3984688Stomee buckets = 2;
3994688Stomee minbucketsize = (buffers_per_slab - 1);
4004688Stomee }
4014688Stomee }
4024688Stomee
4034688Stomee /*
4044688Stomee * The first printed bucket is reserved for completely allocated slabs.
4054688Stomee * Passing (buckets - 1) excludes that bucket from the generated
4064688Stomee * distribution, since we're handling it as a special case.
4074688Stomee */
4084688Stomee complete[0] = buffers_per_slab;
4094688Stomee complete[1] = buffers_per_slab + 1;
4104798Stomee distarray = dist_linear(buckets - 1, 1, buffers_per_slab - 1);
4114688Stomee
4124688Stomee mdb_printf("%*s\n", LABEL_WIDTH, "Allocated");
4134798Stomee dist_print_header("Buffers", LABEL_WIDTH, "Slabs");
4144798Stomee
4154798Stomee dist_print_bucket(complete, 0, ks_bucket, total, LABEL_WIDTH);
4164688Stomee /*
4174688Stomee * Print bucket ranges in descending order after the first bucket for
4184688Stomee * completely allocated slabs, so a person can see immediately whether
4194688Stomee * or not there is fragmentation without having to scan possibly
4204688Stomee * multiple screens of output. Starting at (buckets - 2) excludes the
4214688Stomee * extra terminating bucket.
4224688Stomee */
4234688Stomee for (i = buckets - 2; i >= 0; i--) {
4244798Stomee dist_print_bucket(distarray, i, ks_bucket, total, LABEL_WIDTH);
4254688Stomee }
4264688Stomee mdb_printf("\n");
4274688Stomee }
4284688Stomee #undef LABEL_WIDTH
4294688Stomee
4304688Stomee /*ARGSUSED*/
4314688Stomee static int
kmem_first_slab(uintptr_t addr,const kmem_slab_t * sp,boolean_t * is_slab)4324688Stomee kmem_first_slab(uintptr_t addr, const kmem_slab_t *sp, boolean_t *is_slab)
4334688Stomee {
4344688Stomee *is_slab = B_TRUE;
4354688Stomee return (WALK_DONE);
4364688Stomee }
4374688Stomee
4384688Stomee /*ARGSUSED*/
4394688Stomee static int
kmem_first_partial_slab(uintptr_t addr,const kmem_slab_t * sp,boolean_t * is_slab)4404688Stomee kmem_first_partial_slab(uintptr_t addr, const kmem_slab_t *sp,
4414688Stomee boolean_t *is_slab)
4424688Stomee {
4434688Stomee /*
4446712Stomee * The "kmem_partial_slab" walker reports the first full slab if there
4454688Stomee * are no partial slabs (for the sake of consumers that require at least
4464688Stomee * one callback if there are any buffers in the cache).
4474688Stomee */
4486712Stomee *is_slab = KMEM_SLAB_IS_PARTIAL(sp);
4494688Stomee return (WALK_DONE);
4504688Stomee }
4514688Stomee
4526712Stomee typedef struct kmem_slab_usage {
4536712Stomee int ksu_refcnt; /* count of allocated buffers on slab */
4546712Stomee boolean_t ksu_nomove; /* slab marked non-reclaimable */
4556712Stomee } kmem_slab_usage_t;
4566712Stomee
4576712Stomee typedef struct kmem_slab_stats {
4586712Stomee const kmem_cache_t *ks_cp;
4596712Stomee int ks_slabs; /* slabs in cache */
4606712Stomee int ks_partial_slabs; /* partially allocated slabs in cache */
4616712Stomee uint64_t ks_unused_buffers; /* total unused buffers in cache */
4626712Stomee int ks_max_buffers_per_slab; /* max buffers per slab */
4636712Stomee int ks_usage_len; /* ks_usage array length */
4646712Stomee kmem_slab_usage_t *ks_usage; /* partial slab usage */
4656712Stomee uint_t *ks_bucket; /* slab usage distribution */
4666712Stomee } kmem_slab_stats_t;
4676712Stomee
4684688Stomee /*ARGSUSED*/
4694688Stomee static int
kmem_slablist_stat(uintptr_t addr,const kmem_slab_t * sp,kmem_slab_stats_t * ks)4704688Stomee kmem_slablist_stat(uintptr_t addr, const kmem_slab_t *sp,
4714688Stomee kmem_slab_stats_t *ks)
4724688Stomee {
4734688Stomee kmem_slab_usage_t *ksu;
4744688Stomee long unused;
4754688Stomee
4764688Stomee ks->ks_slabs++;
4774688Stomee ks->ks_bucket[sp->slab_refcnt]++;
4784688Stomee
4794688Stomee unused = (sp->slab_chunks - sp->slab_refcnt);
4804688Stomee if (unused == 0) {
4814688Stomee return (WALK_NEXT);
4824688Stomee }
4834688Stomee
4844688Stomee ks->ks_partial_slabs++;
4854688Stomee ks->ks_unused_buffers += unused;
4864688Stomee
4874688Stomee if (ks->ks_partial_slabs > ks->ks_usage_len) {
4884688Stomee kmem_slab_usage_t *usage;
4894688Stomee int len = ks->ks_usage_len;
4904688Stomee
4914688Stomee len = (len == 0 ? 16 : len * 2);
4924688Stomee usage = mdb_zalloc(len * sizeof (kmem_slab_usage_t), UM_SLEEP);
4934688Stomee if (ks->ks_usage != NULL) {
4944688Stomee bcopy(ks->ks_usage, usage,
4954688Stomee ks->ks_usage_len * sizeof (kmem_slab_usage_t));
4964688Stomee mdb_free(ks->ks_usage,
4974688Stomee ks->ks_usage_len * sizeof (kmem_slab_usage_t));
4984688Stomee }
4994688Stomee ks->ks_usage = usage;
5004688Stomee ks->ks_usage_len = len;
5014688Stomee }
5024688Stomee
5034688Stomee ksu = &ks->ks_usage[ks->ks_partial_slabs - 1];
5044688Stomee ksu->ksu_refcnt = sp->slab_refcnt;
5056712Stomee ksu->ksu_nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
5064688Stomee return (WALK_NEXT);
5074688Stomee }
5084688Stomee
5094688Stomee static void
kmem_slabs_header()5104688Stomee kmem_slabs_header()
5114688Stomee {
5124688Stomee mdb_printf("%-25s %8s %8s %9s %9s %6s\n",
5134688Stomee "", "", "Partial", "", "Unused", "");
5144688Stomee mdb_printf("%-25s %8s %8s %9s %9s %6s\n",
5154688Stomee "Cache Name", "Slabs", "Slabs", "Buffers", "Buffers", "Waste");
5164688Stomee mdb_printf("%-25s %8s %8s %9s %9s %6s\n",
5174688Stomee "-------------------------", "--------", "--------", "---------",
5184688Stomee "---------", "------");
5194688Stomee }
5204688Stomee
5214688Stomee int
kmem_slabs(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)5224688Stomee kmem_slabs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
5234688Stomee {
5244688Stomee kmem_cache_t c;
5254688Stomee kmem_slab_stats_t stats;
5264688Stomee mdb_walk_cb_t cb;
5274688Stomee int pct;
5284688Stomee int tenths_pct;
5294688Stomee size_t maxbuckets = 1;
5304688Stomee size_t minbucketsize = 0;
5314688Stomee const char *filter = NULL;
5326712Stomee const char *name = NULL;
5334688Stomee uint_t opt_v = FALSE;
5346712Stomee boolean_t buckets = B_FALSE;
5354688Stomee boolean_t skip = B_FALSE;
5364688Stomee
5374688Stomee if (mdb_getopts(argc, argv,
5384688Stomee 'B', MDB_OPT_UINTPTR, &minbucketsize,
5394688Stomee 'b', MDB_OPT_UINTPTR, &maxbuckets,
5404688Stomee 'n', MDB_OPT_STR, &filter,
5416712Stomee 'N', MDB_OPT_STR, &name,
5424688Stomee 'v', MDB_OPT_SETBITS, TRUE, &opt_v,
5434688Stomee NULL) != argc) {
5444688Stomee return (DCMD_USAGE);
5454688Stomee }
5464688Stomee
5476712Stomee if ((maxbuckets != 1) || (minbucketsize != 0)) {
5486712Stomee buckets = B_TRUE;
5494688Stomee }
5504688Stomee
5514688Stomee if (!(flags & DCMD_ADDRSPEC)) {
5524688Stomee if (mdb_walk_dcmd("kmem_cache", "kmem_slabs", argc,
5534688Stomee argv) == -1) {
5544688Stomee mdb_warn("can't walk kmem_cache");
5554688Stomee return (DCMD_ERR);
5564688Stomee }
5574688Stomee return (DCMD_OK);
5584688Stomee }
5594688Stomee
5604688Stomee if (mdb_vread(&c, sizeof (c), addr) == -1) {
5614688Stomee mdb_warn("couldn't read kmem_cache at %p", addr);
5624688Stomee return (DCMD_ERR);
5634688Stomee }
5644688Stomee
5656712Stomee if (name == NULL) {
5666712Stomee skip = ((filter != NULL) &&
5676712Stomee (strstr(c.cache_name, filter) == NULL));
5686712Stomee } else if (filter == NULL) {
5696712Stomee skip = (strcmp(c.cache_name, name) != 0);
5706712Stomee } else {
5716712Stomee /* match either -n or -N */
5726712Stomee skip = ((strcmp(c.cache_name, name) != 0) &&
5736712Stomee (strstr(c.cache_name, filter) == NULL));
5744688Stomee }
5754688Stomee
5766712Stomee if (!(opt_v || buckets) && DCMD_HDRSPEC(flags)) {
5774688Stomee kmem_slabs_header();
5786712Stomee } else if ((opt_v || buckets) && !skip) {
5794688Stomee if (DCMD_HDRSPEC(flags)) {
5804688Stomee kmem_slabs_header();
5814688Stomee } else {
5824688Stomee boolean_t is_slab = B_FALSE;
5834688Stomee const char *walker_name;
5844688Stomee if (opt_v) {
5854688Stomee cb = (mdb_walk_cb_t)kmem_first_partial_slab;
5864688Stomee walker_name = "kmem_slab_partial";
5874688Stomee } else {
5884688Stomee cb = (mdb_walk_cb_t)kmem_first_slab;
5894688Stomee walker_name = "kmem_slab";
5904688Stomee }
5914688Stomee (void) mdb_pwalk(walker_name, cb, &is_slab, addr);
5924688Stomee if (is_slab) {
5934688Stomee kmem_slabs_header();
5944688Stomee }
5954688Stomee }
5964688Stomee }
5974688Stomee
5984688Stomee if (skip) {
5994688Stomee return (DCMD_OK);
6004688Stomee }
6014688Stomee
6024688Stomee bzero(&stats, sizeof (kmem_slab_stats_t));
6036712Stomee stats.ks_cp = &c;
6046712Stomee stats.ks_max_buffers_per_slab = c.cache_maxchunks;
6056712Stomee /* +1 to include a zero bucket */
6066712Stomee stats.ks_bucket = mdb_zalloc((stats.ks_max_buffers_per_slab + 1) *
6076712Stomee sizeof (*stats.ks_bucket), UM_SLEEP);
6084688Stomee cb = (mdb_walk_cb_t)kmem_slablist_stat;
6094688Stomee (void) mdb_pwalk("kmem_slab", cb, &stats, addr);
6104688Stomee
6114688Stomee if (c.cache_buftotal == 0) {
6124688Stomee pct = 0;
6134688Stomee tenths_pct = 0;
6144688Stomee } else {
6154688Stomee uint64_t n = stats.ks_unused_buffers * 10000;
6164688Stomee pct = (int)(n / c.cache_buftotal);
6174688Stomee tenths_pct = pct - ((pct / 100) * 100);
6184688Stomee tenths_pct = (tenths_pct + 5) / 10; /* round nearest tenth */
6194688Stomee if (tenths_pct == 10) {
6204688Stomee pct += 100;
6214688Stomee tenths_pct = 0;
6224688Stomee }
6234688Stomee }
6244688Stomee
6254688Stomee pct /= 100;
6264688Stomee mdb_printf("%-25s %8d %8d %9lld %9lld %3d.%1d%%\n", c.cache_name,
6274688Stomee stats.ks_slabs, stats.ks_partial_slabs, c.cache_buftotal,
6284688Stomee stats.ks_unused_buffers, pct, tenths_pct);
6294688Stomee
6304688Stomee if (maxbuckets == 0) {
6316712Stomee maxbuckets = stats.ks_max_buffers_per_slab;
6324688Stomee }
6334688Stomee
6344688Stomee if (((maxbuckets > 1) || (minbucketsize > 0)) &&
6354688Stomee (stats.ks_slabs > 0)) {
6364688Stomee mdb_printf("\n");
6374688Stomee kmem_slabs_print_dist(stats.ks_bucket,
6386712Stomee stats.ks_max_buffers_per_slab, maxbuckets, minbucketsize);
6396712Stomee }
6406712Stomee
6416712Stomee mdb_free(stats.ks_bucket, (stats.ks_max_buffers_per_slab + 1) *
6426712Stomee sizeof (*stats.ks_bucket));
6436712Stomee
6446712Stomee if (!opt_v) {
6456712Stomee return (DCMD_OK);
6464688Stomee }
6474688Stomee
6484688Stomee if (opt_v && (stats.ks_partial_slabs > 0)) {
6494688Stomee int i;
6504688Stomee kmem_slab_usage_t *ksu;
6514688Stomee
65210217STom.Erickson@Sun.COM mdb_printf(" %d complete (%d), %d partial:",
6534688Stomee (stats.ks_slabs - stats.ks_partial_slabs),
65410217STom.Erickson@Sun.COM stats.ks_max_buffers_per_slab,
6554688Stomee stats.ks_partial_slabs);
65610217STom.Erickson@Sun.COM
6574688Stomee for (i = 0; i < stats.ks_partial_slabs; i++) {
6584688Stomee ksu = &stats.ks_usage[i];
65910217STom.Erickson@Sun.COM mdb_printf(" %d%s", ksu->ksu_refcnt,
66010217STom.Erickson@Sun.COM (ksu->ksu_nomove ? "*" : ""));
6614688Stomee }
6624688Stomee mdb_printf("\n\n");
6634688Stomee }
6644688Stomee
6654688Stomee if (stats.ks_usage_len > 0) {
6664688Stomee mdb_free(stats.ks_usage,
6674688Stomee stats.ks_usage_len * sizeof (kmem_slab_usage_t));
6684688Stomee }
6694688Stomee
6704688Stomee return (DCMD_OK);
6714688Stomee }
6724688Stomee
6734688Stomee void
kmem_slabs_help(void)6744688Stomee kmem_slabs_help(void)
6754688Stomee {
6766712Stomee mdb_printf("%s",
6776712Stomee "Display slab usage per kmem cache.\n\n");
6784688Stomee mdb_dec_indent(2);
6794688Stomee mdb_printf("%<b>OPTIONS%</b>\n");
6804688Stomee mdb_inc_indent(2);
6814688Stomee mdb_printf("%s",
6824688Stomee " -n name\n"
6834688Stomee " name of kmem cache (or matching partial name)\n"
6846712Stomee " -N name\n"
6856712Stomee " exact name of kmem cache\n"
6864688Stomee " -b maxbins\n"
6874688Stomee " Print a distribution of allocated buffers per slab using at\n"
6884688Stomee " most maxbins bins. The first bin is reserved for completely\n"
6894688Stomee " allocated slabs. Setting maxbins to zero (-b 0) has the same\n"
6904688Stomee " effect as specifying the maximum allocated buffers per slab\n"
6914688Stomee " or setting minbinsize to 1 (-B 1).\n"
6924688Stomee " -B minbinsize\n"
6934688Stomee " Print a distribution of allocated buffers per slab, making\n"
6944688Stomee " all bins (except the first, reserved for completely allocated\n"
6954688Stomee " slabs) at least minbinsize buffers apart.\n"
6964688Stomee " -v verbose output: List the allocated buffer count of each partial\n"
6974688Stomee " slab on the free list in order from front to back to show how\n"
6984688Stomee " closely the slabs are ordered by usage. For example\n"
6994688Stomee "\n"
7004688Stomee " 10 complete, 3 partial (8): 7 3 1\n"
7014688Stomee "\n"
7024688Stomee " means there are thirteen slabs with eight buffers each, including\n"
7034688Stomee " three partially allocated slabs with less than all eight buffers\n"
7044688Stomee " allocated.\n"
7054688Stomee "\n"
7064688Stomee " Buffer allocations are always from the front of the partial slab\n"
7074688Stomee " list. When a buffer is freed from a completely used slab, that\n"
7084688Stomee " slab is added to the front of the partial slab list. Assuming\n"
7094688Stomee " that all buffers are equally likely to be freed soon, the\n"
7104688Stomee " desired order of partial slabs is most-used at the front of the\n"
7114688Stomee " list and least-used at the back (as in the example above).\n"
7124688Stomee " However, if a slab contains an allocated buffer that will not\n"
7134688Stomee " soon be freed, it would be better for that slab to be at the\n"
7146712Stomee " front where all of its buffers can be allocated. Taking a slab\n"
7156712Stomee " off the partial slab list (either with all buffers freed or all\n"
7166712Stomee " buffers allocated) reduces cache fragmentation.\n"
7176712Stomee "\n"
7186712Stomee " A slab's allocated buffer count representing a partial slab (9 in\n"
7196712Stomee " the example below) may be marked as follows:\n"
7206712Stomee "\n"
7216712Stomee " 9* An asterisk indicates that kmem has marked the slab non-\n"
7226712Stomee " reclaimable because the kmem client refused to move one of the\n"
7236712Stomee " slab's buffers. Since kmem does not expect to completely free the\n"
7246712Stomee " slab, it moves it to the front of the list in the hope of\n"
7256712Stomee " completely allocating it instead. A slab marked with an asterisk\n"
7266712Stomee " stays marked for as long as it remains on the partial slab list.\n"
7274688Stomee "\n"
7284688Stomee "Column\t\tDescription\n"
7294688Stomee "\n"
7304688Stomee "Cache Name\t\tname of kmem cache\n"
7314688Stomee "Slabs\t\t\ttotal slab count\n"
7324688Stomee "Partial Slabs\t\tcount of partially allocated slabs on the free list\n"
7334688Stomee "Buffers\t\ttotal buffer count (Slabs * (buffers per slab))\n"
7344688Stomee "Unused Buffers\tcount of unallocated buffers across all partial slabs\n"
7354688Stomee "Waste\t\t\t(Unused Buffers / Buffers) does not include space\n"
7364688Stomee "\t\t\t for accounting structures (debug mode), slab\n"
7374688Stomee "\t\t\t coloring (incremental small offsets to stagger\n"
7384688Stomee "\t\t\t buffer alignment), or the per-CPU magazine layer\n");
7394688Stomee }
7404688Stomee
/*
 * qsort(3C)/bsearch(3C) comparator for an array of uintptr_t addresses:
 * orders the pointed-to values ascending.
 */
static int
addrcmp(const void *lhs, const void *rhs)
{
	uintptr_t left = *((const uintptr_t *)lhs);
	uintptr_t right = *((const uintptr_t *)rhs);

	if (left == right)
		return (0);

	return (left < right ? -1 : 1);
}
7530Sstevel@tonic-gate
7540Sstevel@tonic-gate static int
bufctlcmp(const kmem_bufctl_audit_t ** lhs,const kmem_bufctl_audit_t ** rhs)7550Sstevel@tonic-gate bufctlcmp(const kmem_bufctl_audit_t **lhs, const kmem_bufctl_audit_t **rhs)
7560Sstevel@tonic-gate {
7570Sstevel@tonic-gate const kmem_bufctl_audit_t *bcp1 = *lhs;
7580Sstevel@tonic-gate const kmem_bufctl_audit_t *bcp2 = *rhs;
7590Sstevel@tonic-gate
7600Sstevel@tonic-gate if (bcp1->bc_timestamp > bcp2->bc_timestamp)
7610Sstevel@tonic-gate return (-1);
7620Sstevel@tonic-gate
7630Sstevel@tonic-gate if (bcp1->bc_timestamp < bcp2->bc_timestamp)
7640Sstevel@tonic-gate return (1);
7650Sstevel@tonic-gate
7660Sstevel@tonic-gate return (0);
7670Sstevel@tonic-gate }
7680Sstevel@tonic-gate
/*
 * State for the "kmem_hash" walker: a local snapshot of a cache's hash
 * table, plus our position within it.
 */
typedef struct kmem_hash_walk {
	uintptr_t *kmhw_table;	/* local copy of the cache's hash table */
	size_t kmhw_nelems;	/* number of buckets in kmhw_table */
	size_t kmhw_pos;	/* next bucket index to scan */
	kmem_bufctl_t kmhw_cur;	/* copy of the bufctl last visited */
} kmem_hash_walk_t;
7750Sstevel@tonic-gate
7760Sstevel@tonic-gate int
kmem_hash_walk_init(mdb_walk_state_t * wsp)7770Sstevel@tonic-gate kmem_hash_walk_init(mdb_walk_state_t *wsp)
7780Sstevel@tonic-gate {
7790Sstevel@tonic-gate kmem_hash_walk_t *kmhw;
7800Sstevel@tonic-gate uintptr_t *hash;
7810Sstevel@tonic-gate kmem_cache_t c;
7820Sstevel@tonic-gate uintptr_t haddr, addr = wsp->walk_addr;
7830Sstevel@tonic-gate size_t nelems;
7840Sstevel@tonic-gate size_t hsize;
7850Sstevel@tonic-gate
7860Sstevel@tonic-gate if (addr == NULL) {
7870Sstevel@tonic-gate mdb_warn("kmem_hash doesn't support global walks\n");
7880Sstevel@tonic-gate return (WALK_ERR);
7890Sstevel@tonic-gate }
7900Sstevel@tonic-gate
7910Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), addr) == -1) {
7920Sstevel@tonic-gate mdb_warn("couldn't read cache at addr %p", addr);
7930Sstevel@tonic-gate return (WALK_ERR);
7940Sstevel@tonic-gate }
7950Sstevel@tonic-gate
7960Sstevel@tonic-gate if (!(c.cache_flags & KMF_HASH)) {
7970Sstevel@tonic-gate mdb_warn("cache %p doesn't have a hash table\n", addr);
7980Sstevel@tonic-gate return (WALK_DONE); /* nothing to do */
7990Sstevel@tonic-gate }
8000Sstevel@tonic-gate
8010Sstevel@tonic-gate kmhw = mdb_zalloc(sizeof (kmem_hash_walk_t), UM_SLEEP);
8020Sstevel@tonic-gate kmhw->kmhw_cur.bc_next = NULL;
8030Sstevel@tonic-gate kmhw->kmhw_pos = 0;
8040Sstevel@tonic-gate
8050Sstevel@tonic-gate kmhw->kmhw_nelems = nelems = c.cache_hash_mask + 1;
8060Sstevel@tonic-gate hsize = nelems * sizeof (uintptr_t);
8070Sstevel@tonic-gate haddr = (uintptr_t)c.cache_hash_table;
8080Sstevel@tonic-gate
8090Sstevel@tonic-gate kmhw->kmhw_table = hash = mdb_alloc(hsize, UM_SLEEP);
8100Sstevel@tonic-gate if (mdb_vread(hash, hsize, haddr) == -1) {
8110Sstevel@tonic-gate mdb_warn("failed to read hash table at %p", haddr);
8120Sstevel@tonic-gate mdb_free(hash, hsize);
8130Sstevel@tonic-gate mdb_free(kmhw, sizeof (kmem_hash_walk_t));
8140Sstevel@tonic-gate return (WALK_ERR);
8150Sstevel@tonic-gate }
8160Sstevel@tonic-gate
8170Sstevel@tonic-gate wsp->walk_data = kmhw;
8180Sstevel@tonic-gate
8190Sstevel@tonic-gate return (WALK_NEXT);
8200Sstevel@tonic-gate }
8210Sstevel@tonic-gate
8220Sstevel@tonic-gate int
kmem_hash_walk_step(mdb_walk_state_t * wsp)8230Sstevel@tonic-gate kmem_hash_walk_step(mdb_walk_state_t *wsp)
8240Sstevel@tonic-gate {
8250Sstevel@tonic-gate kmem_hash_walk_t *kmhw = wsp->walk_data;
8260Sstevel@tonic-gate uintptr_t addr = NULL;
8270Sstevel@tonic-gate
8280Sstevel@tonic-gate if ((addr = (uintptr_t)kmhw->kmhw_cur.bc_next) == NULL) {
8290Sstevel@tonic-gate while (kmhw->kmhw_pos < kmhw->kmhw_nelems) {
8300Sstevel@tonic-gate if ((addr = kmhw->kmhw_table[kmhw->kmhw_pos++]) != NULL)
8310Sstevel@tonic-gate break;
8320Sstevel@tonic-gate }
8330Sstevel@tonic-gate }
8340Sstevel@tonic-gate if (addr == NULL)
8350Sstevel@tonic-gate return (WALK_DONE);
8360Sstevel@tonic-gate
8370Sstevel@tonic-gate if (mdb_vread(&kmhw->kmhw_cur, sizeof (kmem_bufctl_t), addr) == -1) {
8380Sstevel@tonic-gate mdb_warn("couldn't read kmem_bufctl_t at addr %p", addr);
8390Sstevel@tonic-gate return (WALK_ERR);
8400Sstevel@tonic-gate }
8410Sstevel@tonic-gate
8420Sstevel@tonic-gate return (wsp->walk_callback(addr, &kmhw->kmhw_cur, wsp->walk_cbdata));
8430Sstevel@tonic-gate }
8440Sstevel@tonic-gate
8450Sstevel@tonic-gate void
kmem_hash_walk_fini(mdb_walk_state_t * wsp)8460Sstevel@tonic-gate kmem_hash_walk_fini(mdb_walk_state_t *wsp)
8470Sstevel@tonic-gate {
8480Sstevel@tonic-gate kmem_hash_walk_t *kmhw = wsp->walk_data;
8490Sstevel@tonic-gate
8500Sstevel@tonic-gate if (kmhw == NULL)
8510Sstevel@tonic-gate return;
8520Sstevel@tonic-gate
8530Sstevel@tonic-gate mdb_free(kmhw->kmhw_table, kmhw->kmhw_nelems * sizeof (uintptr_t));
8540Sstevel@tonic-gate mdb_free(kmhw, sizeof (kmem_hash_walk_t));
8550Sstevel@tonic-gate }
8560Sstevel@tonic-gate
8570Sstevel@tonic-gate /*
8580Sstevel@tonic-gate * Find the address of the bufctl structure for the address 'buf' in cache
8590Sstevel@tonic-gate * 'cp', which is at address caddr, and place it in *out.
8600Sstevel@tonic-gate */
8610Sstevel@tonic-gate static int
kmem_hash_lookup(kmem_cache_t * cp,uintptr_t caddr,void * buf,uintptr_t * out)8620Sstevel@tonic-gate kmem_hash_lookup(kmem_cache_t *cp, uintptr_t caddr, void *buf, uintptr_t *out)
8630Sstevel@tonic-gate {
8640Sstevel@tonic-gate uintptr_t bucket = (uintptr_t)KMEM_HASH(cp, buf);
8650Sstevel@tonic-gate kmem_bufctl_t *bcp;
8660Sstevel@tonic-gate kmem_bufctl_t bc;
8670Sstevel@tonic-gate
8680Sstevel@tonic-gate if (mdb_vread(&bcp, sizeof (kmem_bufctl_t *), bucket) == -1) {
8690Sstevel@tonic-gate mdb_warn("unable to read hash bucket for %p in cache %p",
8700Sstevel@tonic-gate buf, caddr);
8710Sstevel@tonic-gate return (-1);
8720Sstevel@tonic-gate }
8730Sstevel@tonic-gate
8740Sstevel@tonic-gate while (bcp != NULL) {
8750Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (kmem_bufctl_t),
8760Sstevel@tonic-gate (uintptr_t)bcp) == -1) {
8770Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", bcp);
8780Sstevel@tonic-gate return (-1);
8790Sstevel@tonic-gate }
8800Sstevel@tonic-gate if (bc.bc_addr == buf) {
8810Sstevel@tonic-gate *out = (uintptr_t)bcp;
8820Sstevel@tonic-gate return (0);
8830Sstevel@tonic-gate }
8840Sstevel@tonic-gate bcp = bc.bc_next;
8850Sstevel@tonic-gate }
8860Sstevel@tonic-gate
8870Sstevel@tonic-gate mdb_warn("unable to find bufctl for %p in cache %p\n", buf, caddr);
8880Sstevel@tonic-gate return (-1);
8890Sstevel@tonic-gate }
8900Sstevel@tonic-gate
/*
 * Return the magazine size (rounds per magazine) for cache cp, or 0 when
 * the cache has no magazine layer or its magtype pointer fails validation.
 */
int
kmem_get_magsize(const kmem_cache_t *cp)
{
	uintptr_t addr = (uintptr_t)cp->cache_magtype;
	GElf_Sym mt_sym;
	kmem_magtype_t mt;
	int res;

	/*
	 * if cpu 0 has a non-zero magsize, it must be correct.  caches
	 * with KMF_NOMAGAZINE have disabled their magazine layers, so
	 * it is okay to return 0 for them.
	 */
	if ((res = cp->cache_cpu[0].cc_magsize) != 0 ||
	    (cp->cache_flags & KMF_NOMAGAZINE))
		return (res);

	/*
	 * Sanity-check that cache_magtype points at a properly aligned
	 * element of the target's kmem_magtype array.  Note the deliberate
	 * asymmetry: if the symbol lookup itself fails we only warn and
	 * fall through to attempt the mdb_vread() below anyway; only a
	 * demonstrably out-of-range pointer returns 0 early.
	 */
	if (mdb_lookup_by_name("kmem_magtype", &mt_sym) == -1) {
		mdb_warn("unable to read 'kmem_magtype'");
	} else if (addr < mt_sym.st_value ||
	    addr + sizeof (mt) - 1 > mt_sym.st_value + mt_sym.st_size - 1 ||
	    ((addr - mt_sym.st_value) % sizeof (mt)) != 0) {
		mdb_warn("cache '%s' has invalid magtype pointer (%p)\n",
		    cp->cache_name, addr);
		return (0);
	}
	if (mdb_vread(&mt, sizeof (mt), addr) == -1) {
		mdb_warn("unable to read magtype at %a", addr);
		return (0);
	}
	return (mt.mt_magsize);
}
9230Sstevel@tonic-gate
9240Sstevel@tonic-gate /*ARGSUSED*/
9250Sstevel@tonic-gate static int
kmem_estimate_slab(uintptr_t addr,const kmem_slab_t * sp,size_t * est)9260Sstevel@tonic-gate kmem_estimate_slab(uintptr_t addr, const kmem_slab_t *sp, size_t *est)
9270Sstevel@tonic-gate {
9280Sstevel@tonic-gate *est -= (sp->slab_chunks - sp->slab_refcnt);
9290Sstevel@tonic-gate
9300Sstevel@tonic-gate return (WALK_NEXT);
9310Sstevel@tonic-gate }
9320Sstevel@tonic-gate
9330Sstevel@tonic-gate /*
9340Sstevel@tonic-gate * Returns an upper bound on the number of allocated buffers in a given
9350Sstevel@tonic-gate * cache.
9360Sstevel@tonic-gate */
9370Sstevel@tonic-gate size_t
kmem_estimate_allocated(uintptr_t addr,const kmem_cache_t * cp)9380Sstevel@tonic-gate kmem_estimate_allocated(uintptr_t addr, const kmem_cache_t *cp)
9390Sstevel@tonic-gate {
9400Sstevel@tonic-gate int magsize;
9410Sstevel@tonic-gate size_t cache_est;
9420Sstevel@tonic-gate
9430Sstevel@tonic-gate cache_est = cp->cache_buftotal;
9440Sstevel@tonic-gate
9450Sstevel@tonic-gate (void) mdb_pwalk("kmem_slab_partial",
9460Sstevel@tonic-gate (mdb_walk_cb_t)kmem_estimate_slab, &cache_est, addr);
9470Sstevel@tonic-gate
9480Sstevel@tonic-gate if ((magsize = kmem_get_magsize(cp)) != 0) {
9490Sstevel@tonic-gate size_t mag_est = cp->cache_full.ml_total * magsize;
9500Sstevel@tonic-gate
9510Sstevel@tonic-gate if (cache_est >= mag_est) {
9520Sstevel@tonic-gate cache_est -= mag_est;
9530Sstevel@tonic-gate } else {
9540Sstevel@tonic-gate mdb_warn("cache %p's magazine layer holds more buffers "
9550Sstevel@tonic-gate "than the slab layer.\n", addr);
9560Sstevel@tonic-gate }
9570Sstevel@tonic-gate }
9580Sstevel@tonic-gate return (cache_est);
9590Sstevel@tonic-gate }
9600Sstevel@tonic-gate
/*
 * Copy the first 'rounds' buffer pointers out of the magazine at target
 * address kmp into maglist.  This macro relies on locals of the enclosing
 * function (kmem_read_magazines): mp (a magbsize-byte scratch buffer that
 * receives the magazine contents), maglist/magcnt/magmax, the index i,
 * and a 'fail' label to jump to on read failure or overflow.
 */
#define	READMAG_ROUNDS(rounds) { \
	if (mdb_vread(mp, magbsize, (uintptr_t)kmp) == -1) { \
		mdb_warn("couldn't read magazine at %p", kmp); \
		goto fail; \
	} \
	for (i = 0; i < rounds; i++) { \
		maglist[magcnt++] = mp->mag_round[i]; \
		if (magcnt == magmax) { \
			mdb_warn("%d magazines exceeds fudge factor\n", \
			    magcnt); \
			goto fail; \
		} \
	} \
}
9750Sstevel@tonic-gate
/*
 * Snapshot every buffer currently held in cp's magazine layer (depot full
 * list, per-CPU loaded magazines, and per-CPU previously-loaded spares).
 *
 * On success, *maglistp points to an array (allocated with alloc_flags)
 * of *magcntp buffer pointers; *magmaxp is the array's capacity, which
 * the caller needs in order to free it.  Returns WALK_NEXT on success
 * (including the no-magazine-layer case, with an empty result), WALK_ERR
 * on failure.  With UM_GC in alloc_flags, allocations are left for mdb's
 * garbage collection; otherwise they are freed on the failure path.
 */
int
kmem_read_magazines(kmem_cache_t *cp, uintptr_t addr, int ncpus,
    void ***maglistp, size_t *magcntp, size_t *magmaxp, int alloc_flags)
{
	kmem_magazine_t *kmp, *mp;
	void **maglist = NULL;
	int i, cpu;
	size_t magsize, magmax, magbsize;
	size_t magcnt = 0;

	/*
	 * Read the magtype out of the cache, after verifying the pointer's
	 * correctness.
	 */
	magsize = kmem_get_magsize(cp);
	if (magsize == 0) {
		/* no magazine layer (e.g. KMF_NOMAGAZINE): empty result */
		*maglistp = NULL;
		*magcntp = 0;
		*magmaxp = 0;
		return (WALK_NEXT);
	}

	/*
	 * There are several places where we need to go buffer hunting:
	 * the per-CPU loaded magazine, the per-CPU spare full magazine,
	 * and the full magazine list in the depot.
	 *
	 * For an upper bound on the number of buffers in the magazine
	 * layer, we have the number of magazines on the cache_full
	 * list plus at most two magazines per CPU (the loaded and the
	 * spare).  Toss in 100 magazines as a fudge factor in case this
	 * is live (the number "100" comes from the same fudge factor in
	 * crash(1M)).
	 */
	magmax = (cp->cache_full.ml_total + 2 * ncpus + 100) * magsize;
	magbsize = offsetof(kmem_magazine_t, mag_round[magsize]);

	if (magbsize >= PAGESIZE / 2) {
		mdb_warn("magazine size for cache %p unreasonable (%x)\n",
		    addr, magbsize);
		return (WALK_ERR);
	}

	maglist = mdb_alloc(magmax * sizeof (void *), alloc_flags);
	mp = mdb_alloc(magbsize, alloc_flags);
	if (mp == NULL || maglist == NULL)
		goto fail;

	/*
	 * First up: the magazines in the depot (i.e. on the cache_full list).
	 */
	for (kmp = cp->cache_full.ml_list; kmp != NULL; ) {
		READMAG_ROUNDS(magsize);
		kmp = mp->mag_next;	/* mp holds the magazine just read */

		if (kmp == cp->cache_full.ml_list)
			break; /* cache_full list loop detected */
	}

	dprintf(("cache_full list done\n"));

	/*
	 * Now whip through the CPUs, snagging the loaded magazines
	 * and full spares.
	 *
	 * In order to prevent inconsistent dumps, rounds and prounds
	 * are copied aside before dumping begins.
	 */
	for (cpu = 0; cpu < ncpus; cpu++) {
		kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu];
		short rounds, prounds;

		if (KMEM_DUMPCC(ccp)) {
			rounds = ccp->cc_dump_rounds;
			prounds = ccp->cc_dump_prounds;
		} else {
			rounds = ccp->cc_rounds;
			prounds = ccp->cc_prounds;
		}

		dprintf(("reading cpu cache %p\n",
		    (uintptr_t)ccp - (uintptr_t)cp + addr));

		if (rounds > 0 &&
		    (kmp = ccp->cc_loaded) != NULL) {
			dprintf(("reading %d loaded rounds\n", rounds));
			READMAG_ROUNDS(rounds);
		}

		if (prounds > 0 &&
		    (kmp = ccp->cc_ploaded) != NULL) {
			dprintf(("reading %d previously loaded rounds\n",
			    prounds));
			READMAG_ROUNDS(prounds);
		}
	}

	dprintf(("magazine layer: %d buffers\n", magcnt));

	/* UM_GC allocations are reclaimed by mdb itself; don't free them */
	if (!(alloc_flags & UM_GC))
		mdb_free(mp, magbsize);

	*maglistp = maglist;
	*magcntp = magcnt;
	*magmaxp = magmax;

	return (WALK_NEXT);

fail:
	if (!(alloc_flags & UM_GC)) {
		if (mp)
			mdb_free(mp, magbsize);
		if (maglist)
			mdb_free(maglist, magmax * sizeof (void *));
	}
	return (WALK_ERR);
}
10930Sstevel@tonic-gate
10940Sstevel@tonic-gate static int
kmem_walk_callback(mdb_walk_state_t * wsp,uintptr_t buf)10950Sstevel@tonic-gate kmem_walk_callback(mdb_walk_state_t *wsp, uintptr_t buf)
10960Sstevel@tonic-gate {
10970Sstevel@tonic-gate return (wsp->walk_callback(buf, NULL, wsp->walk_cbdata));
10980Sstevel@tonic-gate }
10990Sstevel@tonic-gate
11000Sstevel@tonic-gate static int
bufctl_walk_callback(kmem_cache_t * cp,mdb_walk_state_t * wsp,uintptr_t buf)11010Sstevel@tonic-gate bufctl_walk_callback(kmem_cache_t *cp, mdb_walk_state_t *wsp, uintptr_t buf)
11020Sstevel@tonic-gate {
11030Sstevel@tonic-gate kmem_bufctl_audit_t b;
11040Sstevel@tonic-gate
11050Sstevel@tonic-gate /*
11060Sstevel@tonic-gate * if KMF_AUDIT is not set, we know that we're looking at a
11070Sstevel@tonic-gate * kmem_bufctl_t.
11080Sstevel@tonic-gate */
11090Sstevel@tonic-gate if (!(cp->cache_flags & KMF_AUDIT) ||
11100Sstevel@tonic-gate mdb_vread(&b, sizeof (kmem_bufctl_audit_t), buf) == -1) {
11110Sstevel@tonic-gate (void) memset(&b, 0, sizeof (b));
11120Sstevel@tonic-gate if (mdb_vread(&b, sizeof (kmem_bufctl_t), buf) == -1) {
11130Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", buf);
11140Sstevel@tonic-gate return (WALK_ERR);
11150Sstevel@tonic-gate }
11160Sstevel@tonic-gate }
11170Sstevel@tonic-gate
11180Sstevel@tonic-gate return (wsp->walk_callback(buf, &b, wsp->walk_cbdata));
11190Sstevel@tonic-gate }
11200Sstevel@tonic-gate
11210Sstevel@tonic-gate typedef struct kmem_walk {
11220Sstevel@tonic-gate int kmw_type;
11230Sstevel@tonic-gate
11240Sstevel@tonic-gate int kmw_addr; /* cache address */
11250Sstevel@tonic-gate kmem_cache_t *kmw_cp;
11260Sstevel@tonic-gate size_t kmw_csize;
11270Sstevel@tonic-gate
11280Sstevel@tonic-gate /*
11290Sstevel@tonic-gate * magazine layer
11300Sstevel@tonic-gate */
11310Sstevel@tonic-gate void **kmw_maglist;
11320Sstevel@tonic-gate size_t kmw_max;
11330Sstevel@tonic-gate size_t kmw_count;
11340Sstevel@tonic-gate size_t kmw_pos;
11350Sstevel@tonic-gate
11360Sstevel@tonic-gate /*
11370Sstevel@tonic-gate * slab layer
11380Sstevel@tonic-gate */
11390Sstevel@tonic-gate char *kmw_valid; /* to keep track of freed buffers */
11400Sstevel@tonic-gate char *kmw_ubase; /* buffer for slab data */
11410Sstevel@tonic-gate } kmem_walk_t;
11420Sstevel@tonic-gate
/*
 * Common initialization for the kmem/bufctl family of walkers.  'type' is
 * a combination of KM_* flags (KM_ALLOCATED, KM_FREE, KM_BUFCTL,
 * KM_CONSTRUCTED) selecting what the walk reports.  Reads a local copy of
 * the cache, validates it, snapshots the magazine layer, and starts the
 * appropriate layered walk (hash table for allocated buffers of KMF_HASH
 * caches; otherwise the slab or partial-slab walker).
 */
static int
kmem_walk_init_common(mdb_walk_state_t *wsp, int type)
{
	kmem_walk_t *kmw;
	int ncpus, csize;
	kmem_cache_t *cp;
	size_t vm_quantum;

	size_t magmax, magcnt;
	void **maglist = NULL;
	uint_t chunksize, slabsize;
	int status = WALK_ERR;
	uintptr_t addr = wsp->walk_addr;
	const char *layered;

	/* KM_HASH is set internally below, never by the caller */
	type &= ~KM_HASH;

	if (addr == NULL) {
		mdb_warn("kmem walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	dprintf(("walking %p\n", addr));

	/*
	 * First we need to figure out how many CPUs are configured in the
	 * system to know how much to slurp out.
	 */
	mdb_readvar(&ncpus, "max_ncpus");

	csize = KMEM_CACHE_SIZE(ncpus);
	cp = mdb_alloc(csize, UM_SLEEP);

	if (mdb_vread(cp, csize, addr) == -1) {
		mdb_warn("couldn't read cache at addr %p", addr);
		goto out2;
	}

	/*
	 * It's easy for someone to hand us an invalid cache address.
	 * Unfortunately, it is hard for this walker to survive an
	 * invalid cache cleanly.  So we make sure that:
	 *
	 *	1. the vmem arena for the cache is readable,
	 *	2. the vmem arena's quantum is a power of 2,
	 *	3. our slabsize is a multiple of the quantum, and
	 *	4. our chunksize is >0 and less than our slabsize.
	 */
	if (mdb_vread(&vm_quantum, sizeof (vm_quantum),
	    (uintptr_t)&cp->cache_arena->vm_quantum) == -1 ||
	    vm_quantum == 0 ||
	    (vm_quantum & (vm_quantum - 1)) != 0 ||
	    cp->cache_slabsize < vm_quantum ||
	    P2PHASE(cp->cache_slabsize, vm_quantum) != 0 ||
	    cp->cache_chunksize == 0 ||
	    cp->cache_chunksize > cp->cache_slabsize) {
		mdb_warn("%p is not a valid kmem_cache_t\n", addr);
		goto out2;
	}

	dprintf(("buf total is %d\n", cp->cache_buftotal));

	if (cp->cache_buftotal == 0) {
		mdb_free(cp, csize);
		return (WALK_DONE);
	}

	/*
	 * If they ask for bufctls, but it's a small-slab cache,
	 * there is nothing to report.
	 */
	if ((type & KM_BUFCTL) && !(cp->cache_flags & KMF_HASH)) {
		dprintf(("bufctl requested, not KMF_HASH (flags: %p)\n",
		    cp->cache_flags));
		mdb_free(cp, csize);
		return (WALK_DONE);
	}

	/*
	 * If they want constructed buffers, but there's no constructor or
	 * the cache has DEADBEEF checking enabled, there is nothing to report.
	 */
	if ((type & KM_CONSTRUCTED) && (!(type & KM_FREE) ||
	    cp->cache_constructor == NULL ||
	    (cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) == KMF_DEADBEEF)) {
		mdb_free(cp, csize);
		return (WALK_DONE);
	}

	/*
	 * Read in the contents of the magazine layer
	 */
	if (kmem_read_magazines(cp, addr, ncpus, &maglist, &magcnt,
	    &magmax, UM_SLEEP) == WALK_ERR)
		goto out2;

	/*
	 * We have all of the buffers from the magazines; if we are walking
	 * allocated buffers, sort them so we can bsearch them later.
	 */
	if (type & KM_ALLOCATED)
		qsort(maglist, magcnt, sizeof (void *), addrcmp);

	wsp->walk_data = kmw = mdb_zalloc(sizeof (kmem_walk_t), UM_SLEEP);

	kmw->kmw_type = type;
	kmw->kmw_addr = addr;
	kmw->kmw_cp = cp;
	kmw->kmw_csize = csize;
	kmw->kmw_maglist = maglist;
	kmw->kmw_max = magmax;
	kmw->kmw_count = magcnt;
	kmw->kmw_pos = 0;

	/*
	 * When walking allocated buffers in a KMF_HASH cache, we walk the
	 * hash table instead of the slab layer.
	 */
	if ((cp->cache_flags & KMF_HASH) && (type & KM_ALLOCATED)) {
		layered = "kmem_hash";

		kmw->kmw_type |= KM_HASH;
	} else {
		/*
		 * If we are walking freed buffers, we only need the
		 * magazine layer plus the partially allocated slabs.
		 * To walk allocated buffers, we need all of the slabs.
		 */
		if (type & KM_ALLOCATED)
			layered = "kmem_slab";
		else
			layered = "kmem_slab_partial";

		/*
		 * for small-slab caches, we read in the entire slab.  For
		 * freed buffers, we can just walk the freelist.  For
		 * allocated buffers, we use a 'valid' array to track
		 * the freed buffers.
		 */
		if (!(cp->cache_flags & KMF_HASH)) {
			chunksize = cp->cache_chunksize;
			slabsize = cp->cache_slabsize;

			kmw->kmw_ubase = mdb_alloc(slabsize +
			    sizeof (kmem_bufctl_t), UM_SLEEP);

			if (type & KM_ALLOCATED)
				kmw->kmw_valid =
				    mdb_alloc(slabsize / chunksize, UM_SLEEP);
		}
	}

	status = WALK_NEXT;

	if (mdb_layered_walk(layered, wsp) == -1) {
		mdb_warn("unable to start layered '%s' walk", layered);
		status = WALK_ERR;
	}

	/*
	 * Cleanup on failure.  Note that chunksize/slabsize are only
	 * initialized on the small-slab path above, but kmw_valid and
	 * kmw_ubase are non-NULL only on that same path, so the guarded
	 * frees below never use them uninitialized.
	 */
out1:
	if (status == WALK_ERR) {
		if (kmw->kmw_valid)
			mdb_free(kmw->kmw_valid, slabsize / chunksize);

		if (kmw->kmw_ubase)
			mdb_free(kmw->kmw_ubase, slabsize +
			    sizeof (kmem_bufctl_t));

		if (kmw->kmw_maglist)
			mdb_free(kmw->kmw_maglist,
			    kmw->kmw_max * sizeof (uintptr_t));

		mdb_free(kmw, sizeof (kmem_walk_t));
		wsp->walk_data = NULL;
	}

out2:
	if (status == WALK_ERR)
		mdb_free(cp, csize);

	return (status);
}
13250Sstevel@tonic-gate
13260Sstevel@tonic-gate int
kmem_walk_step(mdb_walk_state_t * wsp)13270Sstevel@tonic-gate kmem_walk_step(mdb_walk_state_t *wsp)
13280Sstevel@tonic-gate {
13290Sstevel@tonic-gate kmem_walk_t *kmw = wsp->walk_data;
13300Sstevel@tonic-gate int type = kmw->kmw_type;
13310Sstevel@tonic-gate kmem_cache_t *cp = kmw->kmw_cp;
13320Sstevel@tonic-gate
13330Sstevel@tonic-gate void **maglist = kmw->kmw_maglist;
13340Sstevel@tonic-gate int magcnt = kmw->kmw_count;
13350Sstevel@tonic-gate
13360Sstevel@tonic-gate uintptr_t chunksize, slabsize;
13370Sstevel@tonic-gate uintptr_t addr;
13380Sstevel@tonic-gate const kmem_slab_t *sp;
13390Sstevel@tonic-gate const kmem_bufctl_t *bcp;
13400Sstevel@tonic-gate kmem_bufctl_t bc;
13410Sstevel@tonic-gate
13420Sstevel@tonic-gate int chunks;
13430Sstevel@tonic-gate char *kbase;
13440Sstevel@tonic-gate void *buf;
13450Sstevel@tonic-gate int i, ret;
13460Sstevel@tonic-gate
13470Sstevel@tonic-gate char *valid, *ubase;
13480Sstevel@tonic-gate
13490Sstevel@tonic-gate /*
13500Sstevel@tonic-gate * first, handle the 'kmem_hash' layered walk case
13510Sstevel@tonic-gate */
13520Sstevel@tonic-gate if (type & KM_HASH) {
13530Sstevel@tonic-gate /*
13540Sstevel@tonic-gate * We have a buffer which has been allocated out of the
13550Sstevel@tonic-gate * global layer. We need to make sure that it's not
13560Sstevel@tonic-gate * actually sitting in a magazine before we report it as
13570Sstevel@tonic-gate * an allocated buffer.
13580Sstevel@tonic-gate */
13590Sstevel@tonic-gate buf = ((const kmem_bufctl_t *)wsp->walk_layer)->bc_addr;
13600Sstevel@tonic-gate
13610Sstevel@tonic-gate if (magcnt > 0 &&
13620Sstevel@tonic-gate bsearch(&buf, maglist, magcnt, sizeof (void *),
13630Sstevel@tonic-gate addrcmp) != NULL)
13640Sstevel@tonic-gate return (WALK_NEXT);
13650Sstevel@tonic-gate
13660Sstevel@tonic-gate if (type & KM_BUFCTL)
13670Sstevel@tonic-gate return (bufctl_walk_callback(cp, wsp, wsp->walk_addr));
13680Sstevel@tonic-gate
13690Sstevel@tonic-gate return (kmem_walk_callback(wsp, (uintptr_t)buf));
13700Sstevel@tonic-gate }
13710Sstevel@tonic-gate
13720Sstevel@tonic-gate ret = WALK_NEXT;
13730Sstevel@tonic-gate
13740Sstevel@tonic-gate addr = kmw->kmw_addr;
13750Sstevel@tonic-gate
13760Sstevel@tonic-gate /*
13770Sstevel@tonic-gate * If we're walking freed buffers, report everything in the
13780Sstevel@tonic-gate * magazine layer before processing the first slab.
13790Sstevel@tonic-gate */
13800Sstevel@tonic-gate if ((type & KM_FREE) && magcnt != 0) {
13810Sstevel@tonic-gate kmw->kmw_count = 0; /* only do this once */
13820Sstevel@tonic-gate for (i = 0; i < magcnt; i++) {
13830Sstevel@tonic-gate buf = maglist[i];
13840Sstevel@tonic-gate
13850Sstevel@tonic-gate if (type & KM_BUFCTL) {
13860Sstevel@tonic-gate uintptr_t out;
13870Sstevel@tonic-gate
13880Sstevel@tonic-gate if (cp->cache_flags & KMF_BUFTAG) {
13890Sstevel@tonic-gate kmem_buftag_t *btp;
13900Sstevel@tonic-gate kmem_buftag_t tag;
13910Sstevel@tonic-gate
13920Sstevel@tonic-gate /* LINTED - alignment */
13930Sstevel@tonic-gate btp = KMEM_BUFTAG(cp, buf);
13940Sstevel@tonic-gate if (mdb_vread(&tag, sizeof (tag),
13950Sstevel@tonic-gate (uintptr_t)btp) == -1) {
13960Sstevel@tonic-gate mdb_warn("reading buftag for "
13970Sstevel@tonic-gate "%p at %p", buf, btp);
13980Sstevel@tonic-gate continue;
13990Sstevel@tonic-gate }
14000Sstevel@tonic-gate out = (uintptr_t)tag.bt_bufctl;
14010Sstevel@tonic-gate } else {
14020Sstevel@tonic-gate if (kmem_hash_lookup(cp, addr, buf,
14030Sstevel@tonic-gate &out) == -1)
14040Sstevel@tonic-gate continue;
14050Sstevel@tonic-gate }
14060Sstevel@tonic-gate ret = bufctl_walk_callback(cp, wsp, out);
14070Sstevel@tonic-gate } else {
14080Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf);
14090Sstevel@tonic-gate }
14100Sstevel@tonic-gate
14110Sstevel@tonic-gate if (ret != WALK_NEXT)
14120Sstevel@tonic-gate return (ret);
14130Sstevel@tonic-gate }
14140Sstevel@tonic-gate }
14150Sstevel@tonic-gate
14160Sstevel@tonic-gate /*
14170Sstevel@tonic-gate * If they want constructed buffers, we're finished, since the
14180Sstevel@tonic-gate * magazine layer holds them all.
14190Sstevel@tonic-gate */
14200Sstevel@tonic-gate if (type & KM_CONSTRUCTED)
14210Sstevel@tonic-gate return (WALK_DONE);
14220Sstevel@tonic-gate
14230Sstevel@tonic-gate /*
14240Sstevel@tonic-gate * Handle the buffers in the current slab
14250Sstevel@tonic-gate */
14260Sstevel@tonic-gate chunksize = cp->cache_chunksize;
14270Sstevel@tonic-gate slabsize = cp->cache_slabsize;
14280Sstevel@tonic-gate
14290Sstevel@tonic-gate sp = wsp->walk_layer;
14300Sstevel@tonic-gate chunks = sp->slab_chunks;
14310Sstevel@tonic-gate kbase = sp->slab_base;
14320Sstevel@tonic-gate
14330Sstevel@tonic-gate dprintf(("kbase is %p\n", kbase));
14340Sstevel@tonic-gate
14350Sstevel@tonic-gate if (!(cp->cache_flags & KMF_HASH)) {
14360Sstevel@tonic-gate valid = kmw->kmw_valid;
14370Sstevel@tonic-gate ubase = kmw->kmw_ubase;
14380Sstevel@tonic-gate
14390Sstevel@tonic-gate if (mdb_vread(ubase, chunks * chunksize,
14400Sstevel@tonic-gate (uintptr_t)kbase) == -1) {
14410Sstevel@tonic-gate mdb_warn("failed to read slab contents at %p", kbase);
14420Sstevel@tonic-gate return (WALK_ERR);
14430Sstevel@tonic-gate }
14440Sstevel@tonic-gate
14450Sstevel@tonic-gate /*
14460Sstevel@tonic-gate * Set up the valid map as fully allocated -- we'll punch
14470Sstevel@tonic-gate * out the freelist.
14480Sstevel@tonic-gate */
14490Sstevel@tonic-gate if (type & KM_ALLOCATED)
14500Sstevel@tonic-gate (void) memset(valid, 1, chunks);
14510Sstevel@tonic-gate } else {
14520Sstevel@tonic-gate valid = NULL;
14530Sstevel@tonic-gate ubase = NULL;
14540Sstevel@tonic-gate }
14550Sstevel@tonic-gate
14560Sstevel@tonic-gate /*
14570Sstevel@tonic-gate * walk the slab's freelist
14580Sstevel@tonic-gate */
14590Sstevel@tonic-gate bcp = sp->slab_head;
14600Sstevel@tonic-gate
14610Sstevel@tonic-gate dprintf(("refcnt is %d; chunks is %d\n", sp->slab_refcnt, chunks));
14620Sstevel@tonic-gate
14630Sstevel@tonic-gate /*
14640Sstevel@tonic-gate * since we could be in the middle of allocating a buffer,
14650Sstevel@tonic-gate * our refcnt could be one higher than it aught. So we
14660Sstevel@tonic-gate * check one further on the freelist than the count allows.
14670Sstevel@tonic-gate */
14680Sstevel@tonic-gate for (i = sp->slab_refcnt; i <= chunks; i++) {
14690Sstevel@tonic-gate uint_t ndx;
14700Sstevel@tonic-gate
14710Sstevel@tonic-gate dprintf(("bcp is %p\n", bcp));
14720Sstevel@tonic-gate
14730Sstevel@tonic-gate if (bcp == NULL) {
14740Sstevel@tonic-gate if (i == chunks)
14750Sstevel@tonic-gate break;
14760Sstevel@tonic-gate mdb_warn(
14770Sstevel@tonic-gate "slab %p in cache %p freelist too short by %d\n",
14780Sstevel@tonic-gate sp, addr, chunks - i);
14790Sstevel@tonic-gate break;
14800Sstevel@tonic-gate }
14810Sstevel@tonic-gate
14820Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) {
14830Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), (uintptr_t)bcp) == -1) {
14840Sstevel@tonic-gate mdb_warn("failed to read bufctl ptr at %p",
14850Sstevel@tonic-gate bcp);
14860Sstevel@tonic-gate break;
14870Sstevel@tonic-gate }
14880Sstevel@tonic-gate buf = bc.bc_addr;
14890Sstevel@tonic-gate } else {
14900Sstevel@tonic-gate /*
14910Sstevel@tonic-gate * Otherwise the buffer is in the slab which
14920Sstevel@tonic-gate * we've read in; we just need to determine
14930Sstevel@tonic-gate * its offset in the slab to find the
14940Sstevel@tonic-gate * kmem_bufctl_t.
14950Sstevel@tonic-gate */
14960Sstevel@tonic-gate bc = *((kmem_bufctl_t *)
14970Sstevel@tonic-gate ((uintptr_t)bcp - (uintptr_t)kbase +
14980Sstevel@tonic-gate (uintptr_t)ubase));
14990Sstevel@tonic-gate
15000Sstevel@tonic-gate buf = KMEM_BUF(cp, bcp);
15010Sstevel@tonic-gate }
15020Sstevel@tonic-gate
15030Sstevel@tonic-gate ndx = ((uintptr_t)buf - (uintptr_t)kbase) / chunksize;
15040Sstevel@tonic-gate
15050Sstevel@tonic-gate if (ndx > slabsize / cp->cache_bufsize) {
15060Sstevel@tonic-gate /*
15070Sstevel@tonic-gate * This is very wrong; we have managed to find
15080Sstevel@tonic-gate * a buffer in the slab which shouldn't
15090Sstevel@tonic-gate * actually be here. Emit a warning, and
15100Sstevel@tonic-gate * try to continue.
15110Sstevel@tonic-gate */
15120Sstevel@tonic-gate mdb_warn("buf %p is out of range for "
15130Sstevel@tonic-gate "slab %p, cache %p\n", buf, sp, addr);
15140Sstevel@tonic-gate } else if (type & KM_ALLOCATED) {
15150Sstevel@tonic-gate /*
15160Sstevel@tonic-gate * we have found a buffer on the slab's freelist;
15170Sstevel@tonic-gate * clear its entry
15180Sstevel@tonic-gate */
15190Sstevel@tonic-gate valid[ndx] = 0;
15200Sstevel@tonic-gate } else {
15210Sstevel@tonic-gate /*
15220Sstevel@tonic-gate * Report this freed buffer
15230Sstevel@tonic-gate */
15240Sstevel@tonic-gate if (type & KM_BUFCTL) {
15250Sstevel@tonic-gate ret = bufctl_walk_callback(cp, wsp,
15260Sstevel@tonic-gate (uintptr_t)bcp);
15270Sstevel@tonic-gate } else {
15280Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf);
15290Sstevel@tonic-gate }
15300Sstevel@tonic-gate if (ret != WALK_NEXT)
15310Sstevel@tonic-gate return (ret);
15320Sstevel@tonic-gate }
15330Sstevel@tonic-gate
15340Sstevel@tonic-gate bcp = bc.bc_next;
15350Sstevel@tonic-gate }
15360Sstevel@tonic-gate
15370Sstevel@tonic-gate if (bcp != NULL) {
15380Sstevel@tonic-gate dprintf(("slab %p in cache %p freelist too long (%p)\n",
15390Sstevel@tonic-gate sp, addr, bcp));
15400Sstevel@tonic-gate }
15410Sstevel@tonic-gate
15420Sstevel@tonic-gate /*
15430Sstevel@tonic-gate * If we are walking freed buffers, the loop above handled reporting
15440Sstevel@tonic-gate * them.
15450Sstevel@tonic-gate */
15460Sstevel@tonic-gate if (type & KM_FREE)
15470Sstevel@tonic-gate return (WALK_NEXT);
15480Sstevel@tonic-gate
15490Sstevel@tonic-gate if (type & KM_BUFCTL) {
15500Sstevel@tonic-gate mdb_warn("impossible situation: small-slab KM_BUFCTL walk for "
15510Sstevel@tonic-gate "cache %p\n", addr);
15520Sstevel@tonic-gate return (WALK_ERR);
15530Sstevel@tonic-gate }
15540Sstevel@tonic-gate
15550Sstevel@tonic-gate /*
15560Sstevel@tonic-gate * Report allocated buffers, skipping buffers in the magazine layer.
15570Sstevel@tonic-gate * We only get this far for small-slab caches.
15580Sstevel@tonic-gate */
15590Sstevel@tonic-gate for (i = 0; ret == WALK_NEXT && i < chunks; i++) {
15600Sstevel@tonic-gate buf = (char *)kbase + i * chunksize;
15610Sstevel@tonic-gate
15620Sstevel@tonic-gate if (!valid[i])
15630Sstevel@tonic-gate continue; /* on slab freelist */
15640Sstevel@tonic-gate
15650Sstevel@tonic-gate if (magcnt > 0 &&
15660Sstevel@tonic-gate bsearch(&buf, maglist, magcnt, sizeof (void *),
15670Sstevel@tonic-gate addrcmp) != NULL)
15680Sstevel@tonic-gate continue; /* in magazine layer */
15690Sstevel@tonic-gate
15700Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf);
15710Sstevel@tonic-gate }
15720Sstevel@tonic-gate return (ret);
15730Sstevel@tonic-gate }
15740Sstevel@tonic-gate
15750Sstevel@tonic-gate void
kmem_walk_fini(mdb_walk_state_t * wsp)15760Sstevel@tonic-gate kmem_walk_fini(mdb_walk_state_t *wsp)
15770Sstevel@tonic-gate {
15780Sstevel@tonic-gate kmem_walk_t *kmw = wsp->walk_data;
15790Sstevel@tonic-gate uintptr_t chunksize;
15800Sstevel@tonic-gate uintptr_t slabsize;
15810Sstevel@tonic-gate
15820Sstevel@tonic-gate if (kmw == NULL)
15830Sstevel@tonic-gate return;
15840Sstevel@tonic-gate
15850Sstevel@tonic-gate if (kmw->kmw_maglist != NULL)
15860Sstevel@tonic-gate mdb_free(kmw->kmw_maglist, kmw->kmw_max * sizeof (void *));
15870Sstevel@tonic-gate
15880Sstevel@tonic-gate chunksize = kmw->kmw_cp->cache_chunksize;
15890Sstevel@tonic-gate slabsize = kmw->kmw_cp->cache_slabsize;
15900Sstevel@tonic-gate
15910Sstevel@tonic-gate if (kmw->kmw_valid != NULL)
15920Sstevel@tonic-gate mdb_free(kmw->kmw_valid, slabsize / chunksize);
15930Sstevel@tonic-gate if (kmw->kmw_ubase != NULL)
15940Sstevel@tonic-gate mdb_free(kmw->kmw_ubase, slabsize + sizeof (kmem_bufctl_t));
15950Sstevel@tonic-gate
15960Sstevel@tonic-gate mdb_free(kmw->kmw_cp, kmw->kmw_csize);
15970Sstevel@tonic-gate mdb_free(kmw, sizeof (kmem_walk_t));
15980Sstevel@tonic-gate }
15990Sstevel@tonic-gate
16000Sstevel@tonic-gate /*ARGSUSED*/
16010Sstevel@tonic-gate static int
kmem_walk_all(uintptr_t addr,const kmem_cache_t * c,mdb_walk_state_t * wsp)16020Sstevel@tonic-gate kmem_walk_all(uintptr_t addr, const kmem_cache_t *c, mdb_walk_state_t *wsp)
16030Sstevel@tonic-gate {
16040Sstevel@tonic-gate /*
16050Sstevel@tonic-gate * Buffers allocated from NOTOUCH caches can also show up as freed
16060Sstevel@tonic-gate * memory in other caches. This can be a little confusing, so we
16070Sstevel@tonic-gate * don't walk NOTOUCH caches when walking all caches (thereby assuring
16080Sstevel@tonic-gate * that "::walk kmem" and "::walk freemem" yield disjoint output).
16090Sstevel@tonic-gate */
16100Sstevel@tonic-gate if (c->cache_cflags & KMC_NOTOUCH)
16110Sstevel@tonic-gate return (WALK_NEXT);
16120Sstevel@tonic-gate
16130Sstevel@tonic-gate if (mdb_pwalk(wsp->walk_data, wsp->walk_callback,
16140Sstevel@tonic-gate wsp->walk_cbdata, addr) == -1)
16150Sstevel@tonic-gate return (WALK_DONE);
16160Sstevel@tonic-gate
16170Sstevel@tonic-gate return (WALK_NEXT);
16180Sstevel@tonic-gate }
16190Sstevel@tonic-gate
/*
 * Apply the named walker to every kmem cache (via the "kmem_cache" walker
 * and kmem_walk_all above).  NOTE: this macro returns out of the walk-init
 * function that invokes it -- it never falls through.
 */
#define KMEM_WALK_ALL(name, wsp) { \
	wsp->walk_data = (name); \
	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_walk_all, wsp) == -1) \
		return (WALK_ERR); \
	return (WALK_DONE); \
}
16260Sstevel@tonic-gate
16270Sstevel@tonic-gate int
kmem_walk_init(mdb_walk_state_t * wsp)16280Sstevel@tonic-gate kmem_walk_init(mdb_walk_state_t *wsp)
16290Sstevel@tonic-gate {
16300Sstevel@tonic-gate if (wsp->walk_arg != NULL)
16310Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)wsp->walk_arg;
16320Sstevel@tonic-gate
16330Sstevel@tonic-gate if (wsp->walk_addr == NULL)
16340Sstevel@tonic-gate KMEM_WALK_ALL("kmem", wsp);
16350Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_ALLOCATED));
16360Sstevel@tonic-gate }
16370Sstevel@tonic-gate
16380Sstevel@tonic-gate int
bufctl_walk_init(mdb_walk_state_t * wsp)16390Sstevel@tonic-gate bufctl_walk_init(mdb_walk_state_t *wsp)
16400Sstevel@tonic-gate {
16410Sstevel@tonic-gate if (wsp->walk_addr == NULL)
16420Sstevel@tonic-gate KMEM_WALK_ALL("bufctl", wsp);
16430Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_ALLOCATED | KM_BUFCTL));
16440Sstevel@tonic-gate }
16450Sstevel@tonic-gate
16460Sstevel@tonic-gate int
freemem_walk_init(mdb_walk_state_t * wsp)16470Sstevel@tonic-gate freemem_walk_init(mdb_walk_state_t *wsp)
16480Sstevel@tonic-gate {
16490Sstevel@tonic-gate if (wsp->walk_addr == NULL)
16500Sstevel@tonic-gate KMEM_WALK_ALL("freemem", wsp);
16510Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_FREE));
16520Sstevel@tonic-gate }
16530Sstevel@tonic-gate
16540Sstevel@tonic-gate int
freemem_constructed_walk_init(mdb_walk_state_t * wsp)16550Sstevel@tonic-gate freemem_constructed_walk_init(mdb_walk_state_t *wsp)
16560Sstevel@tonic-gate {
16570Sstevel@tonic-gate if (wsp->walk_addr == NULL)
16580Sstevel@tonic-gate KMEM_WALK_ALL("freemem_constructed", wsp);
16590Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_FREE | KM_CONSTRUCTED));
16600Sstevel@tonic-gate }
16610Sstevel@tonic-gate
16620Sstevel@tonic-gate int
freectl_walk_init(mdb_walk_state_t * wsp)16630Sstevel@tonic-gate freectl_walk_init(mdb_walk_state_t *wsp)
16640Sstevel@tonic-gate {
16650Sstevel@tonic-gate if (wsp->walk_addr == NULL)
16660Sstevel@tonic-gate KMEM_WALK_ALL("freectl", wsp);
16670Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_FREE | KM_BUFCTL));
16680Sstevel@tonic-gate }
16690Sstevel@tonic-gate
16700Sstevel@tonic-gate int
freectl_constructed_walk_init(mdb_walk_state_t * wsp)16710Sstevel@tonic-gate freectl_constructed_walk_init(mdb_walk_state_t *wsp)
16720Sstevel@tonic-gate {
16730Sstevel@tonic-gate if (wsp->walk_addr == NULL)
16740Sstevel@tonic-gate KMEM_WALK_ALL("freectl_constructed", wsp);
16750Sstevel@tonic-gate return (kmem_walk_init_common(wsp,
16760Sstevel@tonic-gate KM_FREE | KM_BUFCTL | KM_CONSTRUCTED));
16770Sstevel@tonic-gate }
16780Sstevel@tonic-gate
/*
 * State for walking a single buffer's audit history backwards through the
 * transaction log (see bufctl_history_walk_init/step below).
 */
typedef struct bufctl_history_walk {
	void		*bhw_next;	/* next log record to visit */
	kmem_cache_t	*bhw_cache;	/* cache of the base bufctl */
	kmem_slab_t	*bhw_slab;	/* slab of the base bufctl */
	hrtime_t	bhw_timestamp;	/* last-seen record's timestamp; */
					/* 0 until the first step */
} bufctl_history_walk_t;
16850Sstevel@tonic-gate
16860Sstevel@tonic-gate int
bufctl_history_walk_init(mdb_walk_state_t * wsp)16870Sstevel@tonic-gate bufctl_history_walk_init(mdb_walk_state_t *wsp)
16880Sstevel@tonic-gate {
16890Sstevel@tonic-gate bufctl_history_walk_t *bhw;
16900Sstevel@tonic-gate kmem_bufctl_audit_t bc;
16910Sstevel@tonic-gate kmem_bufctl_audit_t bcn;
16920Sstevel@tonic-gate
16930Sstevel@tonic-gate if (wsp->walk_addr == NULL) {
16940Sstevel@tonic-gate mdb_warn("bufctl_history walk doesn't support global walks\n");
16950Sstevel@tonic-gate return (WALK_ERR);
16960Sstevel@tonic-gate }
16970Sstevel@tonic-gate
16980Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), wsp->walk_addr) == -1) {
16990Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", wsp->walk_addr);
17000Sstevel@tonic-gate return (WALK_ERR);
17010Sstevel@tonic-gate }
17020Sstevel@tonic-gate
17030Sstevel@tonic-gate bhw = mdb_zalloc(sizeof (*bhw), UM_SLEEP);
17040Sstevel@tonic-gate bhw->bhw_timestamp = 0;
17050Sstevel@tonic-gate bhw->bhw_cache = bc.bc_cache;
17060Sstevel@tonic-gate bhw->bhw_slab = bc.bc_slab;
17070Sstevel@tonic-gate
17080Sstevel@tonic-gate /*
17090Sstevel@tonic-gate * sometimes the first log entry matches the base bufctl; in that
17100Sstevel@tonic-gate * case, skip the base bufctl.
17110Sstevel@tonic-gate */
17120Sstevel@tonic-gate if (bc.bc_lastlog != NULL &&
17130Sstevel@tonic-gate mdb_vread(&bcn, sizeof (bcn), (uintptr_t)bc.bc_lastlog) != -1 &&
17140Sstevel@tonic-gate bc.bc_addr == bcn.bc_addr &&
17150Sstevel@tonic-gate bc.bc_cache == bcn.bc_cache &&
17160Sstevel@tonic-gate bc.bc_slab == bcn.bc_slab &&
17170Sstevel@tonic-gate bc.bc_timestamp == bcn.bc_timestamp &&
17180Sstevel@tonic-gate bc.bc_thread == bcn.bc_thread)
17190Sstevel@tonic-gate bhw->bhw_next = bc.bc_lastlog;
17200Sstevel@tonic-gate else
17210Sstevel@tonic-gate bhw->bhw_next = (void *)wsp->walk_addr;
17220Sstevel@tonic-gate
17230Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)bc.bc_addr;
17240Sstevel@tonic-gate wsp->walk_data = bhw;
17250Sstevel@tonic-gate
17260Sstevel@tonic-gate return (WALK_NEXT);
17270Sstevel@tonic-gate }
17280Sstevel@tonic-gate
17290Sstevel@tonic-gate int
bufctl_history_walk_step(mdb_walk_state_t * wsp)17300Sstevel@tonic-gate bufctl_history_walk_step(mdb_walk_state_t *wsp)
17310Sstevel@tonic-gate {
17320Sstevel@tonic-gate bufctl_history_walk_t *bhw = wsp->walk_data;
17330Sstevel@tonic-gate uintptr_t addr = (uintptr_t)bhw->bhw_next;
17340Sstevel@tonic-gate uintptr_t baseaddr = wsp->walk_addr;
17350Sstevel@tonic-gate kmem_bufctl_audit_t bc;
17360Sstevel@tonic-gate
17370Sstevel@tonic-gate if (addr == NULL)
17380Sstevel@tonic-gate return (WALK_DONE);
17390Sstevel@tonic-gate
17400Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
17410Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", bhw->bhw_next);
17420Sstevel@tonic-gate return (WALK_ERR);
17430Sstevel@tonic-gate }
17440Sstevel@tonic-gate
17450Sstevel@tonic-gate /*
17460Sstevel@tonic-gate * The bufctl is only valid if the address, cache, and slab are
17470Sstevel@tonic-gate * correct. We also check that the timestamp is decreasing, to
17480Sstevel@tonic-gate * prevent infinite loops.
17490Sstevel@tonic-gate */
17500Sstevel@tonic-gate if ((uintptr_t)bc.bc_addr != baseaddr ||
17510Sstevel@tonic-gate bc.bc_cache != bhw->bhw_cache ||
17520Sstevel@tonic-gate bc.bc_slab != bhw->bhw_slab ||
17530Sstevel@tonic-gate (bhw->bhw_timestamp != 0 && bc.bc_timestamp >= bhw->bhw_timestamp))
17540Sstevel@tonic-gate return (WALK_DONE);
17550Sstevel@tonic-gate
17560Sstevel@tonic-gate bhw->bhw_next = bc.bc_lastlog;
17570Sstevel@tonic-gate bhw->bhw_timestamp = bc.bc_timestamp;
17580Sstevel@tonic-gate
17590Sstevel@tonic-gate return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata));
17600Sstevel@tonic-gate }
17610Sstevel@tonic-gate
17620Sstevel@tonic-gate void
bufctl_history_walk_fini(mdb_walk_state_t * wsp)17630Sstevel@tonic-gate bufctl_history_walk_fini(mdb_walk_state_t *wsp)
17640Sstevel@tonic-gate {
17650Sstevel@tonic-gate bufctl_history_walk_t *bhw = wsp->walk_data;
17660Sstevel@tonic-gate
17670Sstevel@tonic-gate mdb_free(bhw, sizeof (*bhw));
17680Sstevel@tonic-gate }
17690Sstevel@tonic-gate
/*
 * State for walking every audit record in a kmem transaction log.
 */
typedef struct kmem_log_walk {
	kmem_bufctl_audit_t *klw_base;	/* local copy of the entire log */
	kmem_bufctl_audit_t **klw_sorted; /* pointers into klw_base, sorted */
	kmem_log_header_t klw_lh;	/* copy of the target's log header */
	size_t klw_size;		/* size of klw_base, in bytes */
	size_t klw_maxndx;		/* number of entries in klw_sorted */
	size_t klw_ndx;			/* next klw_sorted entry to return */
} kmem_log_walk_t;
17780Sstevel@tonic-gate
/*
 * Begin a kmem_log walk: snapshot the entire transaction log into local
 * memory, build an array of pointers to every audit record, and sort it
 * with bufctlcmp (defined elsewhere in this module) for the step function
 * to consume in order.
 */
int
kmem_log_walk_init(mdb_walk_state_t *wsp)
{
	uintptr_t lp = wsp->walk_addr;
	kmem_log_walk_t *klw;
	kmem_log_header_t *lhp;
	int maxndx, i, j, k;

	/*
	 * By default (global walk), walk the kmem_transaction_log.  Otherwise
	 * read the log whose kmem_log_header_t is stored at walk_addr.
	 */
	if (lp == NULL && mdb_readvar(&lp, "kmem_transaction_log") == -1) {
		mdb_warn("failed to read 'kmem_transaction_log'");
		return (WALK_ERR);
	}

	if (lp == NULL) {
		mdb_warn("log is disabled\n");
		return (WALK_ERR);
	}

	klw = mdb_zalloc(sizeof (kmem_log_walk_t), UM_SLEEP);
	lhp = &klw->klw_lh;

	if (mdb_vread(lhp, sizeof (kmem_log_header_t), lp) == -1) {
		mdb_warn("failed to read log header at %p", lp);
		mdb_free(klw, sizeof (kmem_log_walk_t));
		return (WALK_ERR);
	}

	/* copy the whole log (lh_nchunks chunks of lh_chunksize bytes) */
	klw->klw_size = lhp->lh_chunksize * lhp->lh_nchunks;
	klw->klw_base = mdb_alloc(klw->klw_size, UM_SLEEP);
	/* records per chunk; the final slot of each chunk is not walked */
	maxndx = lhp->lh_chunksize / sizeof (kmem_bufctl_audit_t) - 1;

	if (mdb_vread(klw->klw_base, klw->klw_size,
	    (uintptr_t)lhp->lh_base) == -1) {
		mdb_warn("failed to read log at base %p", lhp->lh_base);
		mdb_free(klw->klw_base, klw->klw_size);
		mdb_free(klw, sizeof (kmem_log_walk_t));
		return (WALK_ERR);
	}

	klw->klw_sorted = mdb_alloc(maxndx * lhp->lh_nchunks *
	    sizeof (kmem_bufctl_audit_t *), UM_SLEEP);

	/* collect a pointer to every record in every chunk */
	for (i = 0, k = 0; i < lhp->lh_nchunks; i++) {
		kmem_bufctl_audit_t *chunk = (kmem_bufctl_audit_t *)
		    ((uintptr_t)klw->klw_base + i * lhp->lh_chunksize);

		for (j = 0; j < maxndx; j++)
			klw->klw_sorted[k++] = &chunk[j];
	}

	/* order determined by bufctlcmp -- see its definition */
	qsort(klw->klw_sorted, k, sizeof (kmem_bufctl_audit_t *),
	    (int(*)(const void *, const void *))bufctlcmp);

	klw->klw_maxndx = k;
	wsp->walk_data = klw;

	return (WALK_NEXT);
}
18410Sstevel@tonic-gate
18420Sstevel@tonic-gate int
kmem_log_walk_step(mdb_walk_state_t * wsp)18430Sstevel@tonic-gate kmem_log_walk_step(mdb_walk_state_t *wsp)
18440Sstevel@tonic-gate {
18450Sstevel@tonic-gate kmem_log_walk_t *klw = wsp->walk_data;
18460Sstevel@tonic-gate kmem_bufctl_audit_t *bcp;
18470Sstevel@tonic-gate
18480Sstevel@tonic-gate if (klw->klw_ndx == klw->klw_maxndx)
18490Sstevel@tonic-gate return (WALK_DONE);
18500Sstevel@tonic-gate
18510Sstevel@tonic-gate bcp = klw->klw_sorted[klw->klw_ndx++];
18520Sstevel@tonic-gate
18530Sstevel@tonic-gate return (wsp->walk_callback((uintptr_t)bcp - (uintptr_t)klw->klw_base +
18540Sstevel@tonic-gate (uintptr_t)klw->klw_lh.lh_base, bcp, wsp->walk_cbdata));
18550Sstevel@tonic-gate }
18560Sstevel@tonic-gate
18570Sstevel@tonic-gate void
kmem_log_walk_fini(mdb_walk_state_t * wsp)18580Sstevel@tonic-gate kmem_log_walk_fini(mdb_walk_state_t *wsp)
18590Sstevel@tonic-gate {
18600Sstevel@tonic-gate kmem_log_walk_t *klw = wsp->walk_data;
18610Sstevel@tonic-gate
18620Sstevel@tonic-gate mdb_free(klw->klw_base, klw->klw_size);
18630Sstevel@tonic-gate mdb_free(klw->klw_sorted, klw->klw_maxndx *
18640Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t *));
18650Sstevel@tonic-gate mdb_free(klw, sizeof (kmem_log_walk_t));
18660Sstevel@tonic-gate }
18670Sstevel@tonic-gate
/*
 * One transaction gathered by an allocdby/freedby walk: the bufctl's
 * address plus the timestamp used for sorting.
 */
typedef struct allocdby_bufctl {
	uintptr_t abb_addr;	/* address of the kmem_bufctl_audit_t */
	hrtime_t abb_ts;	/* its bc_timestamp */
} allocdby_bufctl_t;

/*
 * State for an allocdby/freedby walk over all caches.
 */
typedef struct allocdby_walk {
	const char *abw_walk;	/* per-cache walk: "bufctl" or "freectl" */
	uintptr_t abw_thread;	/* thread of interest (original walk_addr) */
	size_t abw_nbufs;	/* number of entries collected so far */
	size_t abw_size;	/* allocated capacity of abw_buf */
	allocdby_bufctl_t *abw_buf;	/* collected transactions */
	size_t abw_ndx;		/* current position while stepping */
} allocdby_walk_t;
18810Sstevel@tonic-gate
18820Sstevel@tonic-gate int
allocdby_walk_bufctl(uintptr_t addr,const kmem_bufctl_audit_t * bcp,allocdby_walk_t * abw)18830Sstevel@tonic-gate allocdby_walk_bufctl(uintptr_t addr, const kmem_bufctl_audit_t *bcp,
18840Sstevel@tonic-gate allocdby_walk_t *abw)
18850Sstevel@tonic-gate {
18860Sstevel@tonic-gate if ((uintptr_t)bcp->bc_thread != abw->abw_thread)
18870Sstevel@tonic-gate return (WALK_NEXT);
18880Sstevel@tonic-gate
18890Sstevel@tonic-gate if (abw->abw_nbufs == abw->abw_size) {
18900Sstevel@tonic-gate allocdby_bufctl_t *buf;
18910Sstevel@tonic-gate size_t oldsize = sizeof (allocdby_bufctl_t) * abw->abw_size;
18920Sstevel@tonic-gate
18930Sstevel@tonic-gate buf = mdb_zalloc(oldsize << 1, UM_SLEEP);
18940Sstevel@tonic-gate
18950Sstevel@tonic-gate bcopy(abw->abw_buf, buf, oldsize);
18960Sstevel@tonic-gate mdb_free(abw->abw_buf, oldsize);
18970Sstevel@tonic-gate
18980Sstevel@tonic-gate abw->abw_size <<= 1;
18990Sstevel@tonic-gate abw->abw_buf = buf;
19000Sstevel@tonic-gate }
19010Sstevel@tonic-gate
19020Sstevel@tonic-gate abw->abw_buf[abw->abw_nbufs].abb_addr = addr;
19030Sstevel@tonic-gate abw->abw_buf[abw->abw_nbufs].abb_ts = bcp->bc_timestamp;
19040Sstevel@tonic-gate abw->abw_nbufs++;
19050Sstevel@tonic-gate
19060Sstevel@tonic-gate return (WALK_NEXT);
19070Sstevel@tonic-gate }
19080Sstevel@tonic-gate
19090Sstevel@tonic-gate /*ARGSUSED*/
19100Sstevel@tonic-gate int
allocdby_walk_cache(uintptr_t addr,const kmem_cache_t * c,allocdby_walk_t * abw)19110Sstevel@tonic-gate allocdby_walk_cache(uintptr_t addr, const kmem_cache_t *c, allocdby_walk_t *abw)
19120Sstevel@tonic-gate {
19130Sstevel@tonic-gate if (mdb_pwalk(abw->abw_walk, (mdb_walk_cb_t)allocdby_walk_bufctl,
19140Sstevel@tonic-gate abw, addr) == -1) {
19150Sstevel@tonic-gate mdb_warn("couldn't walk bufctl for cache %p", addr);
19160Sstevel@tonic-gate return (WALK_DONE);
19170Sstevel@tonic-gate }
19180Sstevel@tonic-gate
19190Sstevel@tonic-gate return (WALK_NEXT);
19200Sstevel@tonic-gate }
19210Sstevel@tonic-gate
19220Sstevel@tonic-gate static int
allocdby_cmp(const allocdby_bufctl_t * lhs,const allocdby_bufctl_t * rhs)19230Sstevel@tonic-gate allocdby_cmp(const allocdby_bufctl_t *lhs, const allocdby_bufctl_t *rhs)
19240Sstevel@tonic-gate {
19250Sstevel@tonic-gate if (lhs->abb_ts < rhs->abb_ts)
19260Sstevel@tonic-gate return (1);
19270Sstevel@tonic-gate if (lhs->abb_ts > rhs->abb_ts)
19280Sstevel@tonic-gate return (-1);
19290Sstevel@tonic-gate return (0);
19300Sstevel@tonic-gate }
19310Sstevel@tonic-gate
19320Sstevel@tonic-gate static int
allocdby_walk_init_common(mdb_walk_state_t * wsp,const char * walk)19330Sstevel@tonic-gate allocdby_walk_init_common(mdb_walk_state_t *wsp, const char *walk)
19340Sstevel@tonic-gate {
19350Sstevel@tonic-gate allocdby_walk_t *abw;
19360Sstevel@tonic-gate
19370Sstevel@tonic-gate if (wsp->walk_addr == NULL) {
19380Sstevel@tonic-gate mdb_warn("allocdby walk doesn't support global walks\n");
19390Sstevel@tonic-gate return (WALK_ERR);
19400Sstevel@tonic-gate }
19410Sstevel@tonic-gate
19420Sstevel@tonic-gate abw = mdb_zalloc(sizeof (allocdby_walk_t), UM_SLEEP);
19430Sstevel@tonic-gate
19440Sstevel@tonic-gate abw->abw_thread = wsp->walk_addr;
19450Sstevel@tonic-gate abw->abw_walk = walk;
19460Sstevel@tonic-gate abw->abw_size = 128; /* something reasonable */
19470Sstevel@tonic-gate abw->abw_buf =
19480Sstevel@tonic-gate mdb_zalloc(abw->abw_size * sizeof (allocdby_bufctl_t), UM_SLEEP);
19490Sstevel@tonic-gate
19500Sstevel@tonic-gate wsp->walk_data = abw;
19510Sstevel@tonic-gate
19520Sstevel@tonic-gate if (mdb_walk("kmem_cache",
19530Sstevel@tonic-gate (mdb_walk_cb_t)allocdby_walk_cache, abw) == -1) {
19540Sstevel@tonic-gate mdb_warn("couldn't walk kmem_cache");
19550Sstevel@tonic-gate allocdby_walk_fini(wsp);
19560Sstevel@tonic-gate return (WALK_ERR);
19570Sstevel@tonic-gate }
19580Sstevel@tonic-gate
19590Sstevel@tonic-gate qsort(abw->abw_buf, abw->abw_nbufs, sizeof (allocdby_bufctl_t),
19600Sstevel@tonic-gate (int(*)(const void *, const void *))allocdby_cmp);
19610Sstevel@tonic-gate
19620Sstevel@tonic-gate return (WALK_NEXT);
19630Sstevel@tonic-gate }
19640Sstevel@tonic-gate
19650Sstevel@tonic-gate int
allocdby_walk_init(mdb_walk_state_t * wsp)19660Sstevel@tonic-gate allocdby_walk_init(mdb_walk_state_t *wsp)
19670Sstevel@tonic-gate {
19680Sstevel@tonic-gate return (allocdby_walk_init_common(wsp, "bufctl"));
19690Sstevel@tonic-gate }
19700Sstevel@tonic-gate
19710Sstevel@tonic-gate int
freedby_walk_init(mdb_walk_state_t * wsp)19720Sstevel@tonic-gate freedby_walk_init(mdb_walk_state_t *wsp)
19730Sstevel@tonic-gate {
19740Sstevel@tonic-gate return (allocdby_walk_init_common(wsp, "freectl"));
19750Sstevel@tonic-gate }
19760Sstevel@tonic-gate
19770Sstevel@tonic-gate int
allocdby_walk_step(mdb_walk_state_t * wsp)19780Sstevel@tonic-gate allocdby_walk_step(mdb_walk_state_t *wsp)
19790Sstevel@tonic-gate {
19800Sstevel@tonic-gate allocdby_walk_t *abw = wsp->walk_data;
19810Sstevel@tonic-gate kmem_bufctl_audit_t bc;
19820Sstevel@tonic-gate uintptr_t addr;
19830Sstevel@tonic-gate
19840Sstevel@tonic-gate if (abw->abw_ndx == abw->abw_nbufs)
19850Sstevel@tonic-gate return (WALK_DONE);
19860Sstevel@tonic-gate
19870Sstevel@tonic-gate addr = abw->abw_buf[abw->abw_ndx++].abb_addr;
19880Sstevel@tonic-gate
19890Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
19900Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr);
19910Sstevel@tonic-gate return (WALK_DONE);
19920Sstevel@tonic-gate }
19930Sstevel@tonic-gate
19940Sstevel@tonic-gate return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata));
19950Sstevel@tonic-gate }
19960Sstevel@tonic-gate
19970Sstevel@tonic-gate void
allocdby_walk_fini(mdb_walk_state_t * wsp)19980Sstevel@tonic-gate allocdby_walk_fini(mdb_walk_state_t *wsp)
19990Sstevel@tonic-gate {
20000Sstevel@tonic-gate allocdby_walk_t *abw = wsp->walk_data;
20010Sstevel@tonic-gate
20020Sstevel@tonic-gate mdb_free(abw->abw_buf, sizeof (allocdby_bufctl_t) * abw->abw_size);
20030Sstevel@tonic-gate mdb_free(abw, sizeof (allocdby_walk_t));
20040Sstevel@tonic-gate }
20050Sstevel@tonic-gate
20060Sstevel@tonic-gate /*ARGSUSED*/
20070Sstevel@tonic-gate int
allocdby_walk(uintptr_t addr,const kmem_bufctl_audit_t * bcp,void * ignored)20080Sstevel@tonic-gate allocdby_walk(uintptr_t addr, const kmem_bufctl_audit_t *bcp, void *ignored)
20090Sstevel@tonic-gate {
20100Sstevel@tonic-gate char c[MDB_SYM_NAMLEN];
20110Sstevel@tonic-gate GElf_Sym sym;
20120Sstevel@tonic-gate int i;
20130Sstevel@tonic-gate
20140Sstevel@tonic-gate mdb_printf("%0?p %12llx ", addr, bcp->bc_timestamp);
20150Sstevel@tonic-gate for (i = 0; i < bcp->bc_depth; i++) {
20160Sstevel@tonic-gate if (mdb_lookup_by_addr(bcp->bc_stack[i],
20170Sstevel@tonic-gate MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1)
20180Sstevel@tonic-gate continue;
20190Sstevel@tonic-gate if (strncmp(c, "kmem_", 5) == 0)
20200Sstevel@tonic-gate continue;
20210Sstevel@tonic-gate mdb_printf("%s+0x%lx",
20220Sstevel@tonic-gate c, bcp->bc_stack[i] - (uintptr_t)sym.st_value);
20230Sstevel@tonic-gate break;
20240Sstevel@tonic-gate }
20250Sstevel@tonic-gate mdb_printf("\n");
20260Sstevel@tonic-gate
20270Sstevel@tonic-gate return (WALK_NEXT);
20280Sstevel@tonic-gate }
20290Sstevel@tonic-gate
20300Sstevel@tonic-gate static int
allocdby_common(uintptr_t addr,uint_t flags,const char * w)20310Sstevel@tonic-gate allocdby_common(uintptr_t addr, uint_t flags, const char *w)
20320Sstevel@tonic-gate {
20330Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC))
20340Sstevel@tonic-gate return (DCMD_USAGE);
20350Sstevel@tonic-gate
20360Sstevel@tonic-gate mdb_printf("%-?s %12s %s\n", "BUFCTL", "TIMESTAMP", "CALLER");
20370Sstevel@tonic-gate
20380Sstevel@tonic-gate if (mdb_pwalk(w, (mdb_walk_cb_t)allocdby_walk, NULL, addr) == -1) {
20390Sstevel@tonic-gate mdb_warn("can't walk '%s' for %p", w, addr);
20400Sstevel@tonic-gate return (DCMD_ERR);
20410Sstevel@tonic-gate }
20420Sstevel@tonic-gate
20430Sstevel@tonic-gate return (DCMD_OK);
20440Sstevel@tonic-gate }
20450Sstevel@tonic-gate
20460Sstevel@tonic-gate /*ARGSUSED*/
20470Sstevel@tonic-gate int
allocdby(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)20480Sstevel@tonic-gate allocdby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
20490Sstevel@tonic-gate {
20500Sstevel@tonic-gate return (allocdby_common(addr, flags, "allocdby"));
20510Sstevel@tonic-gate }
20520Sstevel@tonic-gate
20530Sstevel@tonic-gate /*ARGSUSED*/
20540Sstevel@tonic-gate int
freedby(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)20550Sstevel@tonic-gate freedby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
20560Sstevel@tonic-gate {
20570Sstevel@tonic-gate return (allocdby_common(addr, flags, "freedby"));
20580Sstevel@tonic-gate }
20590Sstevel@tonic-gate
20600Sstevel@tonic-gate /*
20610Sstevel@tonic-gate * Return a string describing the address in relation to the given thread's
20620Sstevel@tonic-gate * stack.
20630Sstevel@tonic-gate *
20640Sstevel@tonic-gate * - If the thread state is TS_FREE, return " (inactive interrupt thread)".
20650Sstevel@tonic-gate *
20660Sstevel@tonic-gate * - If the address is above the stack pointer, return an empty string
20670Sstevel@tonic-gate * signifying that the address is active.
20680Sstevel@tonic-gate *
20690Sstevel@tonic-gate * - If the address is below the stack pointer, and the thread is not on proc,
20700Sstevel@tonic-gate * return " (below sp)".
20710Sstevel@tonic-gate *
20720Sstevel@tonic-gate * - If the address is below the stack pointer, and the thread is on proc,
20730Sstevel@tonic-gate * return " (possibly below sp)". Depending on context, we may or may not
20740Sstevel@tonic-gate * have an accurate t_sp.
20750Sstevel@tonic-gate */
20760Sstevel@tonic-gate static const char *
stack_active(const kthread_t * t,uintptr_t addr)20770Sstevel@tonic-gate stack_active(const kthread_t *t, uintptr_t addr)
20780Sstevel@tonic-gate {
20790Sstevel@tonic-gate uintptr_t panicstk;
20800Sstevel@tonic-gate GElf_Sym sym;
20810Sstevel@tonic-gate
20820Sstevel@tonic-gate if (t->t_state == TS_FREE)
20830Sstevel@tonic-gate return (" (inactive interrupt thread)");
20840Sstevel@tonic-gate
20850Sstevel@tonic-gate /*
20860Sstevel@tonic-gate * Check to see if we're on the panic stack. If so, ignore t_sp, as it
20870Sstevel@tonic-gate * no longer relates to the thread's real stack.
20880Sstevel@tonic-gate */
20890Sstevel@tonic-gate if (mdb_lookup_by_name("panic_stack", &sym) == 0) {
20900Sstevel@tonic-gate panicstk = (uintptr_t)sym.st_value;
20910Sstevel@tonic-gate
20920Sstevel@tonic-gate if (t->t_sp >= panicstk && t->t_sp < panicstk + PANICSTKSIZE)
20930Sstevel@tonic-gate return ("");
20940Sstevel@tonic-gate }
20950Sstevel@tonic-gate
20960Sstevel@tonic-gate if (addr >= t->t_sp + STACK_BIAS)
20970Sstevel@tonic-gate return ("");
20980Sstevel@tonic-gate
20990Sstevel@tonic-gate if (t->t_state == TS_ONPROC)
21000Sstevel@tonic-gate return (" (possibly below sp)");
21010Sstevel@tonic-gate
21020Sstevel@tonic-gate return (" (below sp)");
21030Sstevel@tonic-gate }
21040Sstevel@tonic-gate
/*
 * Additional state for the kmem and vmem ::whatis handlers
 */
typedef struct whatis_info {
	mdb_whatis_t *wi_w;		/* the ::whatis session being served */
	const kmem_cache_t *wi_cache;	/* cache currently being walked */
	const vmem_t *wi_vmem;		/* vmem arena currently being walked */
	vmem_t *wi_msb_arena;		/* kmem_msb_arena (metadata arena) */
	size_t wi_slab_size;		/* usable bytes per slab of wi_cache */
	uint_t wi_slab_found;		/* count of slabs overlapping a target */
	uint_t wi_kmem_lite_count;	/* kmem_lite_count (0 if unreadable) */
	uint_t wi_freemem;		/* TRUE while walking freed buffers */
} whatis_info_t;
211810388SJonathan.Adams@Sun.COM
211910388SJonathan.Adams@Sun.COM /* call one of our dcmd functions with "-v" and the provided address */
212010388SJonathan.Adams@Sun.COM static void
whatis_call_printer(mdb_dcmd_f * dcmd,uintptr_t addr)212110388SJonathan.Adams@Sun.COM whatis_call_printer(mdb_dcmd_f *dcmd, uintptr_t addr)
212210388SJonathan.Adams@Sun.COM {
212310388SJonathan.Adams@Sun.COM mdb_arg_t a;
212410388SJonathan.Adams@Sun.COM a.a_type = MDB_TYPE_STRING;
212510388SJonathan.Adams@Sun.COM a.a_un.a_str = "-v";
212610388SJonathan.Adams@Sun.COM
212710610SJonathan.Adams@Sun.COM mdb_printf(":\n");
212810388SJonathan.Adams@Sun.COM (void) (*dcmd)(addr, DCMD_ADDRSPEC, 1, &a);
212910388SJonathan.Adams@Sun.COM }
213010388SJonathan.Adams@Sun.COM
21310Sstevel@tonic-gate static void
whatis_print_kmf_lite(uintptr_t btaddr,size_t count)213210610SJonathan.Adams@Sun.COM whatis_print_kmf_lite(uintptr_t btaddr, size_t count)
21330Sstevel@tonic-gate {
213410610SJonathan.Adams@Sun.COM #define KMEM_LITE_MAX 16
213510610SJonathan.Adams@Sun.COM pc_t callers[KMEM_LITE_MAX];
213610610SJonathan.Adams@Sun.COM pc_t uninit = (pc_t)KMEM_UNINITIALIZED_PATTERN;
213710610SJonathan.Adams@Sun.COM
213810610SJonathan.Adams@Sun.COM kmem_buftag_t bt;
213910610SJonathan.Adams@Sun.COM intptr_t stat;
214010610SJonathan.Adams@Sun.COM const char *plural = "";
214110610SJonathan.Adams@Sun.COM int i;
214210610SJonathan.Adams@Sun.COM
214310610SJonathan.Adams@Sun.COM /* validate our arguments and read in the buftag */
214410610SJonathan.Adams@Sun.COM if (count == 0 || count > KMEM_LITE_MAX ||
214510610SJonathan.Adams@Sun.COM mdb_vread(&bt, sizeof (bt), btaddr) == -1)
214610610SJonathan.Adams@Sun.COM return;
214710610SJonathan.Adams@Sun.COM
214810610SJonathan.Adams@Sun.COM /* validate the buffer state and read in the callers */
214910610SJonathan.Adams@Sun.COM stat = (intptr_t)bt.bt_bufctl ^ bt.bt_bxstat;
215010610SJonathan.Adams@Sun.COM
215110610SJonathan.Adams@Sun.COM if (stat != KMEM_BUFTAG_ALLOC || stat != KMEM_BUFTAG_FREE ||
215210610SJonathan.Adams@Sun.COM mdb_vread(callers, count * sizeof (pc_t),
215310610SJonathan.Adams@Sun.COM btaddr + offsetof(kmem_buftag_lite_t, bt_history)) == -1)
215410610SJonathan.Adams@Sun.COM return;
215510610SJonathan.Adams@Sun.COM
215610610SJonathan.Adams@Sun.COM /* If there aren't any filled in callers, bail */
215710610SJonathan.Adams@Sun.COM if (callers[0] == uninit)
215810610SJonathan.Adams@Sun.COM return;
215910610SJonathan.Adams@Sun.COM
216010610SJonathan.Adams@Sun.COM plural = (callers[1] == uninit) ? "" : "s";
216110610SJonathan.Adams@Sun.COM
216210610SJonathan.Adams@Sun.COM /* Everything's done and checked; print them out */
216310610SJonathan.Adams@Sun.COM mdb_printf(":\n");
216410610SJonathan.Adams@Sun.COM
216510610SJonathan.Adams@Sun.COM mdb_inc_indent(8);
216610610SJonathan.Adams@Sun.COM mdb_printf("recent caller%s: %a", plural, callers[0]);
216710610SJonathan.Adams@Sun.COM for (i = 1; i < count; i++) {
216810610SJonathan.Adams@Sun.COM if (callers[i] == uninit)
216910610SJonathan.Adams@Sun.COM break;
217010610SJonathan.Adams@Sun.COM mdb_printf(", %a", callers[i]);
217110610SJonathan.Adams@Sun.COM }
217210610SJonathan.Adams@Sun.COM mdb_dec_indent(8);
217310610SJonathan.Adams@Sun.COM }
217410610SJonathan.Adams@Sun.COM
217510610SJonathan.Adams@Sun.COM static void
whatis_print_kmem(whatis_info_t * wi,uintptr_t maddr,uintptr_t addr,uintptr_t baddr)217610610SJonathan.Adams@Sun.COM whatis_print_kmem(whatis_info_t *wi, uintptr_t maddr, uintptr_t addr,
217710610SJonathan.Adams@Sun.COM uintptr_t baddr)
217810610SJonathan.Adams@Sun.COM {
217910610SJonathan.Adams@Sun.COM mdb_whatis_t *w = wi->wi_w;
218010610SJonathan.Adams@Sun.COM
218110610SJonathan.Adams@Sun.COM const kmem_cache_t *cp = wi->wi_cache;
21820Sstevel@tonic-gate /* LINTED pointer cast may result in improper alignment */
218310388SJonathan.Adams@Sun.COM uintptr_t btaddr = (uintptr_t)KMEM_BUFTAG(cp, addr);
218410610SJonathan.Adams@Sun.COM int quiet = (mdb_whatis_flags(w) & WHATIS_QUIET);
218510610SJonathan.Adams@Sun.COM int call_printer = (!quiet && (cp->cache_flags & KMF_AUDIT));
218610610SJonathan.Adams@Sun.COM
218710610SJonathan.Adams@Sun.COM mdb_whatis_report_object(w, maddr, addr, "");
218810388SJonathan.Adams@Sun.COM
218910388SJonathan.Adams@Sun.COM if (baddr != 0 && !call_printer)
219010388SJonathan.Adams@Sun.COM mdb_printf("bufctl %p ", baddr);
219110388SJonathan.Adams@Sun.COM
219210610SJonathan.Adams@Sun.COM mdb_printf("%s from %s",
219310610SJonathan.Adams@Sun.COM (wi->wi_freemem == FALSE) ? "allocated" : "freed", cp->cache_name);
219410610SJonathan.Adams@Sun.COM
219510610SJonathan.Adams@Sun.COM if (baddr != 0 && call_printer) {
219610388SJonathan.Adams@Sun.COM whatis_call_printer(bufctl, baddr);
219710610SJonathan.Adams@Sun.COM return;
21980Sstevel@tonic-gate }
219910610SJonathan.Adams@Sun.COM
220010610SJonathan.Adams@Sun.COM /* for KMF_LITE caches, try to print out the previous callers */
220110610SJonathan.Adams@Sun.COM if (!quiet && (cp->cache_flags & KMF_LITE))
220210610SJonathan.Adams@Sun.COM whatis_print_kmf_lite(btaddr, wi->wi_kmem_lite_count);
220310610SJonathan.Adams@Sun.COM
220410610SJonathan.Adams@Sun.COM mdb_printf("\n");
220510610SJonathan.Adams@Sun.COM }
220610610SJonathan.Adams@Sun.COM
220710610SJonathan.Adams@Sun.COM /*ARGSUSED*/
220810610SJonathan.Adams@Sun.COM static int
whatis_walk_kmem(uintptr_t addr,void * ignored,whatis_info_t * wi)220910610SJonathan.Adams@Sun.COM whatis_walk_kmem(uintptr_t addr, void *ignored, whatis_info_t *wi)
221010610SJonathan.Adams@Sun.COM {
221110610SJonathan.Adams@Sun.COM mdb_whatis_t *w = wi->wi_w;
221210610SJonathan.Adams@Sun.COM
221310610SJonathan.Adams@Sun.COM uintptr_t cur;
221410610SJonathan.Adams@Sun.COM size_t size = wi->wi_cache->cache_bufsize;
221510610SJonathan.Adams@Sun.COM
221610610SJonathan.Adams@Sun.COM while (mdb_whatis_match(w, addr, size, &cur))
221710610SJonathan.Adams@Sun.COM whatis_print_kmem(wi, cur, addr, NULL);
221810610SJonathan.Adams@Sun.COM
221910610SJonathan.Adams@Sun.COM return (WHATIS_WALKRET(w));
22200Sstevel@tonic-gate }
22210Sstevel@tonic-gate
22220Sstevel@tonic-gate /*ARGSUSED*/
22230Sstevel@tonic-gate static int
whatis_walk_bufctl(uintptr_t baddr,const kmem_bufctl_t * bcp,whatis_info_t * wi)222410610SJonathan.Adams@Sun.COM whatis_walk_bufctl(uintptr_t baddr, const kmem_bufctl_t *bcp, whatis_info_t *wi)
22250Sstevel@tonic-gate {
222610610SJonathan.Adams@Sun.COM mdb_whatis_t *w = wi->wi_w;
222710610SJonathan.Adams@Sun.COM
222810610SJonathan.Adams@Sun.COM uintptr_t cur;
222910610SJonathan.Adams@Sun.COM uintptr_t addr = (uintptr_t)bcp->bc_addr;
223010610SJonathan.Adams@Sun.COM size_t size = wi->wi_cache->cache_bufsize;
223110610SJonathan.Adams@Sun.COM
223210610SJonathan.Adams@Sun.COM while (mdb_whatis_match(w, addr, size, &cur))
223310610SJonathan.Adams@Sun.COM whatis_print_kmem(wi, cur, addr, baddr);
223410610SJonathan.Adams@Sun.COM
223510610SJonathan.Adams@Sun.COM return (WHATIS_WALKRET(w));
22360Sstevel@tonic-gate }
22370Sstevel@tonic-gate
22380Sstevel@tonic-gate static int
whatis_walk_seg(uintptr_t addr,const vmem_seg_t * vs,whatis_info_t * wi)223910610SJonathan.Adams@Sun.COM whatis_walk_seg(uintptr_t addr, const vmem_seg_t *vs, whatis_info_t *wi)
22400Sstevel@tonic-gate {
224110610SJonathan.Adams@Sun.COM mdb_whatis_t *w = wi->wi_w;
224210610SJonathan.Adams@Sun.COM
224310610SJonathan.Adams@Sun.COM size_t size = vs->vs_end - vs->vs_start;
224410610SJonathan.Adams@Sun.COM uintptr_t cur;
224510610SJonathan.Adams@Sun.COM
224610610SJonathan.Adams@Sun.COM /* We're not interested in anything but alloc and free segments */
224710610SJonathan.Adams@Sun.COM if (vs->vs_type != VMEM_ALLOC && vs->vs_type != VMEM_FREE)
22480Sstevel@tonic-gate return (WALK_NEXT);
22490Sstevel@tonic-gate
225010610SJonathan.Adams@Sun.COM while (mdb_whatis_match(w, vs->vs_start, size, &cur)) {
225110610SJonathan.Adams@Sun.COM mdb_whatis_report_object(w, cur, vs->vs_start, "");
225210610SJonathan.Adams@Sun.COM
225310610SJonathan.Adams@Sun.COM /*
225410610SJonathan.Adams@Sun.COM * If we're not printing it seperately, provide the vmem_seg
225510610SJonathan.Adams@Sun.COM * pointer if it has a stack trace.
225610610SJonathan.Adams@Sun.COM */
225710610SJonathan.Adams@Sun.COM if ((mdb_whatis_flags(w) & WHATIS_QUIET) &&
225810610SJonathan.Adams@Sun.COM (!(mdb_whatis_flags(w) & WHATIS_BUFCTL) ||
225910610SJonathan.Adams@Sun.COM (vs->vs_type == VMEM_ALLOC && vs->vs_depth != 0))) {
226010610SJonathan.Adams@Sun.COM mdb_printf("vmem_seg %p ", addr);
226110610SJonathan.Adams@Sun.COM }
226210610SJonathan.Adams@Sun.COM
226310610SJonathan.Adams@Sun.COM mdb_printf("%s from the %s vmem arena",
226410610SJonathan.Adams@Sun.COM (vs->vs_type == VMEM_ALLOC) ? "allocated" : "freed",
226510610SJonathan.Adams@Sun.COM wi->wi_vmem->vm_name);
226610610SJonathan.Adams@Sun.COM
226710610SJonathan.Adams@Sun.COM if (!(mdb_whatis_flags(w) & WHATIS_QUIET))
226810610SJonathan.Adams@Sun.COM whatis_call_printer(vmem_seg, addr);
226910610SJonathan.Adams@Sun.COM else
227010610SJonathan.Adams@Sun.COM mdb_printf("\n");
22710Sstevel@tonic-gate }
22720Sstevel@tonic-gate
227310610SJonathan.Adams@Sun.COM return (WHATIS_WALKRET(w));
22740Sstevel@tonic-gate }
22750Sstevel@tonic-gate
22760Sstevel@tonic-gate static int
whatis_walk_vmem(uintptr_t addr,const vmem_t * vmem,whatis_info_t * wi)227710610SJonathan.Adams@Sun.COM whatis_walk_vmem(uintptr_t addr, const vmem_t *vmem, whatis_info_t *wi)
22780Sstevel@tonic-gate {
227910610SJonathan.Adams@Sun.COM mdb_whatis_t *w = wi->wi_w;
22800Sstevel@tonic-gate const char *nm = vmem->vm_name;
228110610SJonathan.Adams@Sun.COM
228210610SJonathan.Adams@Sun.COM int identifier = ((vmem->vm_cflags & VMC_IDENTIFIER) != 0);
228310610SJonathan.Adams@Sun.COM int idspace = ((mdb_whatis_flags(w) & WHATIS_IDSPACE) != 0);
228410610SJonathan.Adams@Sun.COM
228510610SJonathan.Adams@Sun.COM if (identifier != idspace)
22860Sstevel@tonic-gate return (WALK_NEXT);
22870Sstevel@tonic-gate
228810610SJonathan.Adams@Sun.COM wi->wi_vmem = vmem;
228910610SJonathan.Adams@Sun.COM
229010610SJonathan.Adams@Sun.COM if (mdb_whatis_flags(w) & WHATIS_VERBOSE)
22910Sstevel@tonic-gate mdb_printf("Searching vmem arena %s...\n", nm);
22920Sstevel@tonic-gate
229310610SJonathan.Adams@Sun.COM if (mdb_pwalk("vmem_seg",
229410610SJonathan.Adams@Sun.COM (mdb_walk_cb_t)whatis_walk_seg, wi, addr) == -1) {
229510610SJonathan.Adams@Sun.COM mdb_warn("can't walk vmem_seg for %p", addr);
22960Sstevel@tonic-gate return (WALK_NEXT);
22970Sstevel@tonic-gate }
22980Sstevel@tonic-gate
229910610SJonathan.Adams@Sun.COM return (WHATIS_WALKRET(w));
23000Sstevel@tonic-gate }
23010Sstevel@tonic-gate
23020Sstevel@tonic-gate /*ARGSUSED*/
23030Sstevel@tonic-gate static int
whatis_walk_slab(uintptr_t saddr,const kmem_slab_t * sp,whatis_info_t * wi)230410610SJonathan.Adams@Sun.COM whatis_walk_slab(uintptr_t saddr, const kmem_slab_t *sp, whatis_info_t *wi)
23050Sstevel@tonic-gate {
230610610SJonathan.Adams@Sun.COM mdb_whatis_t *w = wi->wi_w;
230710610SJonathan.Adams@Sun.COM
230810610SJonathan.Adams@Sun.COM /* It must overlap with the slab data, or it's not interesting */
230910610SJonathan.Adams@Sun.COM if (mdb_whatis_overlaps(w,
231010610SJonathan.Adams@Sun.COM (uintptr_t)sp->slab_base, wi->wi_slab_size)) {
231110610SJonathan.Adams@Sun.COM wi->wi_slab_found++;
231210610SJonathan.Adams@Sun.COM return (WALK_DONE);
231310610SJonathan.Adams@Sun.COM }
231410610SJonathan.Adams@Sun.COM return (WALK_NEXT);
23150Sstevel@tonic-gate }
23160Sstevel@tonic-gate
/*
 * kmem_cache walker callback: search one cache's allocated and freed
 * buffers for the ::whatis targets.  Walks bufctls (for debugging info)
 * when available and wanted, plain buffers otherwise; a cheap slab-level
 * pre-screen skips caches that cannot contain any target.
 */
static int
whatis_walk_cache(uintptr_t addr, const kmem_cache_t *c, whatis_info_t *wi)
{
	mdb_whatis_t *w = wi->wi_w;

	char *walk, *freewalk;
	mdb_walk_cb_t func;
	int do_bufctl;

	int identifier = ((c->cache_flags & KMC_IDENTIFIER) != 0);
	int idspace = ((mdb_whatis_flags(w) & WHATIS_IDSPACE) != 0);

	/* Only visit identifier caches when identifier space was requested */
	if (identifier != idspace)
		return (WALK_NEXT);

	/* Override the '-b' flag as necessary */
	if (!(c->cache_flags & KMF_HASH))
		do_bufctl = FALSE;	/* no bufctls to walk */
	else if (c->cache_flags & KMF_AUDIT)
		do_bufctl = TRUE;	/* we always want debugging info */
	else
		do_bufctl = ((mdb_whatis_flags(w) & WHATIS_BUFCTL) != 0);

	if (do_bufctl) {
		walk = "bufctl";
		freewalk = "freectl";
		func = (mdb_walk_cb_t)whatis_walk_bufctl;
	} else {
		walk = "kmem";
		freewalk = "freemem";
		func = (mdb_walk_cb_t)whatis_walk_kmem;
	}

	wi->wi_cache = c;

	if (mdb_whatis_flags(w) & WHATIS_VERBOSE)
		mdb_printf("Searching %s...\n", c->cache_name);

	/*
	 * If more than two buffers live on each slab, figure out if we're
	 * interested in anything in any slab before doing the more expensive
	 * kmem/freemem (bufctl/freectl) walkers.
	 */
	wi->wi_slab_size = c->cache_slabsize - c->cache_maxcolor;
	if (!(c->cache_flags & KMF_HASH))
		wi->wi_slab_size -= sizeof (kmem_slab_t);

	if ((wi->wi_slab_size / c->cache_chunksize) > 2) {
		wi->wi_slab_found = 0;
		if (mdb_pwalk("kmem_slab", (mdb_walk_cb_t)whatis_walk_slab, wi,
		    addr) == -1) {
			mdb_warn("can't find kmem_slab walker");
			return (WALK_DONE);
		}
		if (wi->wi_slab_found == 0)
			return (WALK_NEXT);
	}

	/* First pass: allocated buffers. */
	wi->wi_freemem = FALSE;
	if (mdb_pwalk(walk, func, wi, addr) == -1) {
		mdb_warn("can't find %s walker", walk);
		return (WALK_DONE);
	}

	if (mdb_whatis_done(w))
		return (WALK_DONE);

	/*
	 * We have searched for allocated memory; now search for freed memory.
	 */
	if (mdb_whatis_flags(w) & WHATIS_VERBOSE)
		mdb_printf("Searching %s for free memory...\n", c->cache_name);

	wi->wi_freemem = TRUE;
	if (mdb_pwalk(freewalk, func, wi, addr) == -1) {
		mdb_warn("can't find %s walker", freewalk);
		return (WALK_DONE);
	}

	return (WHATIS_WALKRET(w));
}
239810610SJonathan.Adams@Sun.COM
239910610SJonathan.Adams@Sun.COM static int
whatis_walk_touch(uintptr_t addr,const kmem_cache_t * c,whatis_info_t * wi)240010610SJonathan.Adams@Sun.COM whatis_walk_touch(uintptr_t addr, const kmem_cache_t *c, whatis_info_t *wi)
240110610SJonathan.Adams@Sun.COM {
240210610SJonathan.Adams@Sun.COM if (c->cache_arena == wi->wi_msb_arena ||
240310610SJonathan.Adams@Sun.COM (c->cache_cflags & KMC_NOTOUCH))
240410610SJonathan.Adams@Sun.COM return (WALK_NEXT);
240510610SJonathan.Adams@Sun.COM
240610610SJonathan.Adams@Sun.COM return (whatis_walk_cache(addr, c, wi));
24070Sstevel@tonic-gate }
24080Sstevel@tonic-gate
24090Sstevel@tonic-gate static int
whatis_walk_metadata(uintptr_t addr,const kmem_cache_t * c,whatis_info_t * wi)241010610SJonathan.Adams@Sun.COM whatis_walk_metadata(uintptr_t addr, const kmem_cache_t *c, whatis_info_t *wi)
24110Sstevel@tonic-gate {
241210610SJonathan.Adams@Sun.COM if (c->cache_arena != wi->wi_msb_arena)
24130Sstevel@tonic-gate return (WALK_NEXT);
24140Sstevel@tonic-gate
241510610SJonathan.Adams@Sun.COM return (whatis_walk_cache(addr, c, wi));
24160Sstevel@tonic-gate }
24170Sstevel@tonic-gate
24180Sstevel@tonic-gate static int
whatis_walk_notouch(uintptr_t addr,const kmem_cache_t * c,whatis_info_t * wi)241910610SJonathan.Adams@Sun.COM whatis_walk_notouch(uintptr_t addr, const kmem_cache_t *c, whatis_info_t *wi)
24200Sstevel@tonic-gate {
242110610SJonathan.Adams@Sun.COM if (c->cache_arena == wi->wi_msb_arena ||
242210610SJonathan.Adams@Sun.COM !(c->cache_cflags & KMC_NOTOUCH))
24230Sstevel@tonic-gate return (WALK_NEXT);
24240Sstevel@tonic-gate
242510610SJonathan.Adams@Sun.COM return (whatis_walk_cache(addr, c, wi));
24260Sstevel@tonic-gate }
24270Sstevel@tonic-gate
24280Sstevel@tonic-gate static int
whatis_walk_thread(uintptr_t addr,const kthread_t * t,mdb_whatis_t * w)242910610SJonathan.Adams@Sun.COM whatis_walk_thread(uintptr_t addr, const kthread_t *t, mdb_whatis_t *w)
24300Sstevel@tonic-gate {
243110610SJonathan.Adams@Sun.COM uintptr_t cur;
243210610SJonathan.Adams@Sun.COM uintptr_t saddr;
243310610SJonathan.Adams@Sun.COM size_t size;
243410610SJonathan.Adams@Sun.COM
24350Sstevel@tonic-gate /*
24360Sstevel@tonic-gate * Often, one calls ::whatis on an address from a thread structure.
24370Sstevel@tonic-gate * We use this opportunity to short circuit this case...
24380Sstevel@tonic-gate */
243910610SJonathan.Adams@Sun.COM while (mdb_whatis_match(w, addr, sizeof (kthread_t), &cur))
244010610SJonathan.Adams@Sun.COM mdb_whatis_report_object(w, cur, addr,
244110388SJonathan.Adams@Sun.COM "allocated as a thread structure\n");
244210610SJonathan.Adams@Sun.COM
244310610SJonathan.Adams@Sun.COM /*
244410610SJonathan.Adams@Sun.COM * Now check the stack
244510610SJonathan.Adams@Sun.COM */
24460Sstevel@tonic-gate if (t->t_stkbase == NULL)
24470Sstevel@tonic-gate return (WALK_NEXT);
24480Sstevel@tonic-gate
244910610SJonathan.Adams@Sun.COM /*
245010610SJonathan.Adams@Sun.COM * This assumes that t_stk is the end of the stack, but it's really
245110610SJonathan.Adams@Sun.COM * only the initial stack pointer for the thread. Arguments to the
245210610SJonathan.Adams@Sun.COM * initial procedure, SA(MINFRAME), etc. are all after t_stk. So
245310610SJonathan.Adams@Sun.COM * that 't->t_stk::whatis' reports "part of t's stack", we include
245410610SJonathan.Adams@Sun.COM * t_stk in the range (the "+ 1", below), but the kernel should
245510610SJonathan.Adams@Sun.COM * really include the full stack bounds where we can find it.
245610610SJonathan.Adams@Sun.COM */
245710610SJonathan.Adams@Sun.COM saddr = (uintptr_t)t->t_stkbase;
245810610SJonathan.Adams@Sun.COM size = (uintptr_t)t->t_stk - saddr + 1;
245910610SJonathan.Adams@Sun.COM while (mdb_whatis_match(w, saddr, size, &cur))
246010610SJonathan.Adams@Sun.COM mdb_whatis_report_object(w, cur, cur,
246110610SJonathan.Adams@Sun.COM "in thread %p's stack%s\n", addr, stack_active(t, cur));
246210610SJonathan.Adams@Sun.COM
246310610SJonathan.Adams@Sun.COM return (WHATIS_WALKRET(w));
246410610SJonathan.Adams@Sun.COM }
246510610SJonathan.Adams@Sun.COM
246610610SJonathan.Adams@Sun.COM static void
whatis_modctl_match(mdb_whatis_t * w,const char * name,uintptr_t base,size_t size,const char * where)246710610SJonathan.Adams@Sun.COM whatis_modctl_match(mdb_whatis_t *w, const char *name,
246810610SJonathan.Adams@Sun.COM uintptr_t base, size_t size, const char *where)
246910610SJonathan.Adams@Sun.COM {
247010610SJonathan.Adams@Sun.COM uintptr_t cur;
247110610SJonathan.Adams@Sun.COM
247210610SJonathan.Adams@Sun.COM /*
247310610SJonathan.Adams@Sun.COM * Since we're searching for addresses inside a module, we report
247410610SJonathan.Adams@Sun.COM * them as symbols.
247510610SJonathan.Adams@Sun.COM */
247610610SJonathan.Adams@Sun.COM while (mdb_whatis_match(w, base, size, &cur))
247710610SJonathan.Adams@Sun.COM mdb_whatis_report_address(w, cur, "in %s's %s\n", name, where);
24780Sstevel@tonic-gate }
24790Sstevel@tonic-gate
/*
 * modctl walker callback: match ::whatis targets against a loaded module's
 * text, data, and bss segments and its symbol table space.
 */
static int
whatis_walk_modctl(uintptr_t addr, const struct modctl *m, mdb_whatis_t *w)
{
	char name[MODMAXNAMELEN];
	struct module mod;
	Shdr shdr;

	/* A modctl with no loaded module has nothing to search. */
	if (m->mod_mp == NULL)
		return (WALK_NEXT);

	if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
		mdb_warn("couldn't read modctl %p's module", addr);
		return (WALK_NEXT);
	}

	/* Fall back to the modctl address if the name is unreadable. */
	if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1)
		(void) mdb_snprintf(name, sizeof (name), "0x%p", addr);

	whatis_modctl_match(w, name,
	    (uintptr_t)mod.text, mod.text_size, "text segment");
	whatis_modctl_match(w, name,
	    (uintptr_t)mod.data, mod.data_size, "data segment");
	whatis_modctl_match(w, name,
	    (uintptr_t)mod.bss, mod.bss_size, "bss segment");

	/* the symbol header gives us the symbol table's entry size */
	if (mdb_vread(&shdr, sizeof (shdr), (uintptr_t)mod.symhdr) == -1) {
		mdb_warn("couldn't read symbol header for %p's module", addr);
		return (WALK_NEXT);
	}

	whatis_modctl_match(w, name,
	    (uintptr_t)mod.symtbl, mod.nsyms * shdr.sh_entsize, "symtab");
	whatis_modctl_match(w, name,
	    (uintptr_t)mod.symspace, mod.symsize, "symtab");

	return (WHATIS_WALKRET(w));
}
251710610SJonathan.Adams@Sun.COM
251810610SJonathan.Adams@Sun.COM /*ARGSUSED*/
251910610SJonathan.Adams@Sun.COM static int
whatis_walk_memseg(uintptr_t addr,const struct memseg * seg,mdb_whatis_t * w)252010610SJonathan.Adams@Sun.COM whatis_walk_memseg(uintptr_t addr, const struct memseg *seg, mdb_whatis_t *w)
252110610SJonathan.Adams@Sun.COM {
252210610SJonathan.Adams@Sun.COM uintptr_t cur;
252310610SJonathan.Adams@Sun.COM
252410610SJonathan.Adams@Sun.COM uintptr_t base = (uintptr_t)seg->pages;
252510610SJonathan.Adams@Sun.COM size_t size = (uintptr_t)seg->epages - base;
252610610SJonathan.Adams@Sun.COM
252710610SJonathan.Adams@Sun.COM while (mdb_whatis_match(w, base, size, &cur)) {
252810610SJonathan.Adams@Sun.COM /* round our found pointer down to the page_t base. */
252910610SJonathan.Adams@Sun.COM size_t offset = (cur - base) % sizeof (page_t);
253010610SJonathan.Adams@Sun.COM
253110610SJonathan.Adams@Sun.COM mdb_whatis_report_object(w, cur, cur - offset,
253210610SJonathan.Adams@Sun.COM "allocated as a page structure\n");
25330Sstevel@tonic-gate }
25340Sstevel@tonic-gate
253510610SJonathan.Adams@Sun.COM return (WHATIS_WALKRET(w));
253610610SJonathan.Adams@Sun.COM }
253710610SJonathan.Adams@Sun.COM
253810610SJonathan.Adams@Sun.COM /*ARGSUSED*/
253910610SJonathan.Adams@Sun.COM static int
whatis_run_modules(mdb_whatis_t * w,void * arg)254010610SJonathan.Adams@Sun.COM whatis_run_modules(mdb_whatis_t *w, void *arg)
254110610SJonathan.Adams@Sun.COM {
254210610SJonathan.Adams@Sun.COM if (mdb_walk("modctl", (mdb_walk_cb_t)whatis_walk_modctl, w) == -1) {
254310610SJonathan.Adams@Sun.COM mdb_warn("couldn't find modctl walker");
254410610SJonathan.Adams@Sun.COM return (1);
254510610SJonathan.Adams@Sun.COM }
254610610SJonathan.Adams@Sun.COM return (0);
254710610SJonathan.Adams@Sun.COM }
254810610SJonathan.Adams@Sun.COM
254910610SJonathan.Adams@Sun.COM /*ARGSUSED*/
255010610SJonathan.Adams@Sun.COM static int
whatis_run_threads(mdb_whatis_t * w,void * ignored)255110610SJonathan.Adams@Sun.COM whatis_run_threads(mdb_whatis_t *w, void *ignored)
255210610SJonathan.Adams@Sun.COM {
25530Sstevel@tonic-gate /*
255410610SJonathan.Adams@Sun.COM * Now search all thread stacks. Yes, this is a little weak; we
255510610SJonathan.Adams@Sun.COM * can save a lot of work by first checking to see if the
255610610SJonathan.Adams@Sun.COM * address is in segkp vs. segkmem. But hey, computers are
255710610SJonathan.Adams@Sun.COM * fast.
25580Sstevel@tonic-gate */
255910610SJonathan.Adams@Sun.COM if (mdb_walk("thread", (mdb_walk_cb_t)whatis_walk_thread, w) == -1) {
256010610SJonathan.Adams@Sun.COM mdb_warn("couldn't find thread walker");
256110610SJonathan.Adams@Sun.COM return (1);
25620Sstevel@tonic-gate }
256310610SJonathan.Adams@Sun.COM return (0);
25640Sstevel@tonic-gate }
25650Sstevel@tonic-gate
25660Sstevel@tonic-gate /*ARGSUSED*/
25670Sstevel@tonic-gate static int
whatis_run_pages(mdb_whatis_t * w,void * ignored)256810610SJonathan.Adams@Sun.COM whatis_run_pages(mdb_whatis_t *w, void *ignored)
25690Sstevel@tonic-gate {
257010610SJonathan.Adams@Sun.COM if (mdb_walk("memseg", (mdb_walk_cb_t)whatis_walk_memseg, w) == -1) {
257110610SJonathan.Adams@Sun.COM mdb_warn("couldn't find memseg walker");
257210610SJonathan.Adams@Sun.COM return (1);
25730Sstevel@tonic-gate }
257410610SJonathan.Adams@Sun.COM return (0);
25750Sstevel@tonic-gate }
25760Sstevel@tonic-gate
/*
 * ::whatis source: search all kmem caches.  Returns 0 on success, 1 if the
 * kmem_cache walker is unavailable.
 */
/*ARGSUSED*/
static int
whatis_run_kmem(mdb_whatis_t *w, void *ignored)
{
	whatis_info_t wi;

	bzero(&wi, sizeof (wi));
	wi.wi_w = w;

	/* needed below to tell metadata caches apart from the rest */
	if (mdb_readvar(&wi.wi_msb_arena, "kmem_msb_arena") == -1)
		mdb_warn("unable to readvar \"kmem_msb_arena\"");

	/*
	 * Clamp to 0 (no caller history) if kmem_lite_count is unreadable
	 * or implausibly large (16 matches KMEM_LITE_MAX, the most history
	 * whatis_print_kmf_lite() can print).
	 */
	if (mdb_readvar(&wi.wi_kmem_lite_count,
	    "kmem_lite_count") == -1 || wi.wi_kmem_lite_count > 16)
		wi.wi_kmem_lite_count = 0;

	/*
	 * We process kmem caches in the following order:
	 *
	 *	non-KMC_NOTOUCH, non-metadata	(typically the most interesting)
	 *	metadata			(can be huge with KMF_AUDIT)
	 *	KMC_NOTOUCH, non-metadata	(see kmem_walk_all())
	 */
	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)whatis_walk_touch,
	    &wi) == -1 ||
	    mdb_walk("kmem_cache", (mdb_walk_cb_t)whatis_walk_metadata,
	    &wi) == -1 ||
	    mdb_walk("kmem_cache", (mdb_walk_cb_t)whatis_walk_notouch,
	    &wi) == -1) {
		mdb_warn("couldn't find kmem_cache walker");
		return (1);
	}
	return (0);
}
261110610SJonathan.Adams@Sun.COM
261210610SJonathan.Adams@Sun.COM /*ARGSUSED*/
261310610SJonathan.Adams@Sun.COM static int
whatis_run_vmem(mdb_whatis_t * w,void * ignored)261410610SJonathan.Adams@Sun.COM whatis_run_vmem(mdb_whatis_t *w, void *ignored)
261510610SJonathan.Adams@Sun.COM {
261610610SJonathan.Adams@Sun.COM whatis_info_t wi;
261710610SJonathan.Adams@Sun.COM
261810610SJonathan.Adams@Sun.COM bzero(&wi, sizeof (wi));
261910610SJonathan.Adams@Sun.COM wi.wi_w = w;
26200Sstevel@tonic-gate
26210Sstevel@tonic-gate if (mdb_walk("vmem_postfix",
262210610SJonathan.Adams@Sun.COM (mdb_walk_cb_t)whatis_walk_vmem, &wi) == -1) {
26230Sstevel@tonic-gate mdb_warn("couldn't find vmem_postfix walker");
262410610SJonathan.Adams@Sun.COM return (1);
26250Sstevel@tonic-gate }
262610610SJonathan.Adams@Sun.COM return (0);
26270Sstevel@tonic-gate }
26280Sstevel@tonic-gate
/* Address bounds of one CPU's slice of the kmem transaction log */
typedef struct kmem_log_cpu {
	uintptr_t kmc_low;	/* inclusive lower bound of this CPU's slice */
	uintptr_t kmc_high;	/* exclusive upper bound of this CPU's slice */
} kmem_log_cpu_t;
26330Sstevel@tonic-gate
/* State passed through the kmem log walk (see kmem_log_walk()) */
typedef struct kmem_log_data {
	uintptr_t kmd_addr;	/* if non-zero, show only this buffer's records */
	kmem_log_cpu_t *kmd_cpu;	/* NCPU-element array of per-CPU bounds */
} kmem_log_data_t;
26380Sstevel@tonic-gate
/*
 * Walk callback for ::kmem_log: print one transaction log record
 * (a kmem_bufctl_audit_t).  The record's address is matched against
 * each CPU's log range in kmd->kmd_cpu to attribute it to a CPU; a
 * record outside every range gets a blank CPU column.  If
 * kmd->kmd_addr is set, records whose buffer does not contain that
 * address are skipped.
 */
int
kmem_log_walk(uintptr_t addr, const kmem_bufctl_audit_t *b,
    kmem_log_data_t *kmd)
{
	int i;
	kmem_log_cpu_t *kmc = kmd->kmd_cpu;
	size_t bufsize;

	/* find the CPU whose log chunk contains this record */
	for (i = 0; i < NCPU; i++) {
		if (addr >= kmc[i].kmc_low && addr < kmc[i].kmc_high)
			break;
	}

	if (kmd->kmd_addr) {
		/* filtering by buffer address: need the buffer's size */
		if (b->bc_cache == NULL)
			return (WALK_NEXT);

		if (mdb_vread(&bufsize, sizeof (bufsize),
		    (uintptr_t)&b->bc_cache->cache_bufsize) == -1) {
			mdb_warn(
			    "failed to read cache_bufsize for cache at %p",
			    b->bc_cache);
			return (WALK_ERR);
		}

		/* skip records whose buffer doesn't cover kmd_addr */
		if (kmd->kmd_addr < (uintptr_t)b->bc_addr ||
		    kmd->kmd_addr >= (uintptr_t)b->bc_addr + bufsize)
			return (WALK_NEXT);
	}

	if (i == NCPU)
		mdb_printf(" ");
	else
		mdb_printf("%3d", i);

	mdb_printf(" %0?p %0?p %16llx %0?p\n", addr, b->bc_addr,
	    b->bc_timestamp, b->bc_thread);

	return (WALK_NEXT);
}
26790Sstevel@tonic-gate
/*
 * ::kmem_log [-b] dcmd: display the kmem transaction log.
 *
 * Without an address, every record in the log is printed.  With an
 * address and no -b, the address is read as a kmem_bufctl_audit_t and
 * that single record is printed.  With -b, the address is treated as a
 * buffer address and used to filter the entire log (see
 * kmem_log_walk()).
 */
/*ARGSUSED*/
int
kmem_log(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kmem_log_header_t lh;
	kmem_cpu_log_header_t clh;
	uintptr_t lhp, clhp;
	int ncpus;		/* NOTE(review): read below but never used */
	uintptr_t *cpu;
	GElf_Sym sym;
	kmem_log_cpu_t *kmc;
	int i;
	kmem_log_data_t kmd;
	uint_t opt_b = FALSE;

	if (mdb_getopts(argc, argv,
	    'b', MDB_OPT_SETBITS, TRUE, &opt_b, NULL) != argc)
		return (DCMD_USAGE);

	if (mdb_readvar(&lhp, "kmem_transaction_log") == -1) {
		mdb_warn("failed to read 'kmem_transaction_log'");
		return (DCMD_ERR);
	}

	if (lhp == NULL) {
		mdb_warn("no kmem transaction log\n");
		return (DCMD_ERR);
	}

	mdb_readvar(&ncpus, "ncpus");

	if (mdb_vread(&lh, sizeof (kmem_log_header_t), lhp) == -1) {
		mdb_warn("failed to read log header at %p", lhp);
		return (DCMD_ERR);
	}

	/* target address of the first per-CPU log header in the header */
	clhp = lhp + ((uintptr_t)&lh.lh_cpu[0] - (uintptr_t)&lh);

	cpu = mdb_alloc(sizeof (uintptr_t) * NCPU, UM_SLEEP | UM_GC);

	if (mdb_lookup_by_name("cpu", &sym) == -1) {
		mdb_warn("couldn't find 'cpu' array");
		return (DCMD_ERR);
	}

	if (sym.st_size != NCPU * sizeof (uintptr_t)) {
		mdb_warn("expected 'cpu' to be of size %d; found %d\n",
		    NCPU * sizeof (uintptr_t), sym.st_size);
		return (DCMD_ERR);
	}

	if (mdb_vread(cpu, sym.st_size, (uintptr_t)sym.st_value) == -1) {
		mdb_warn("failed to read cpu array at %p", sym.st_value);
		return (DCMD_ERR);
	}

	kmc = mdb_zalloc(sizeof (kmem_log_cpu_t) * NCPU, UM_SLEEP | UM_GC);
	kmd.kmd_addr = NULL;
	kmd.kmd_cpu = kmc;

	/*
	 * Compute each present CPU's active log range: from the base of
	 * its current chunk up to (but not including) clh_current.
	 * NOTE(review): clhp advances only for CPUs present in the 'cpu'
	 * array -- this assumes lh_cpu[] is packed the same way; confirm
	 * against the kmem log allocation code.
	 */
	for (i = 0; i < NCPU; i++) {

		if (cpu[i] == NULL)
			continue;

		if (mdb_vread(&clh, sizeof (clh), clhp) == -1) {
			mdb_warn("cannot read cpu %d's log header at %p",
			    i, clhp);
			return (DCMD_ERR);
		}

		kmc[i].kmc_low = clh.clh_chunk * lh.lh_chunksize +
		    (uintptr_t)lh.lh_base;
		kmc[i].kmc_high = (uintptr_t)clh.clh_current;

		clhp += sizeof (kmem_cpu_log_header_t);
	}

	mdb_printf("%3s %-?s %-?s %16s %-?s\n", "CPU", "ADDR", "BUFADDR",
	    "TIMESTAMP", "THREAD");

	/*
	 * If we have been passed an address, print out only log entries
	 * corresponding to that address.  With -b, the address is a
	 * buffer address used to filter all records; without it, the
	 * address is a bufctl, which is read and printed directly.
	 */
	if (flags & DCMD_ADDRSPEC) {
		kmem_bufctl_audit_t b;

		if (opt_b) {
			kmd.kmd_addr = addr;
		} else {
			if (mdb_vread(&b,
			    sizeof (kmem_bufctl_audit_t), addr) == -1) {
				mdb_warn("failed to read bufctl at %p", addr);
				return (DCMD_ERR);
			}

			(void) kmem_log_walk(addr, &b, &kmd);

			return (DCMD_OK);
		}
	}

	if (mdb_walk("kmem_log", (mdb_walk_cb_t)kmem_log_walk, &kmd) == -1) {
		mdb_warn("can't find kmem log walker");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}
27910Sstevel@tonic-gate
/*
 * State passed to bufctl_history_callback() when ::bufctl -h replays
 * the bufctl dcmd over each entry of a buffer's transaction history.
 */
typedef struct bufctl_history_cb {
	int bhc_flags;			/* dcmd flags for replayed calls */
	int bhc_argc;			/* argument count for bufctl() */
	const mdb_arg_t *bhc_argv;	/* argument vector (leads with -H) */
	int bhc_ret;			/* last return value from bufctl() */
} bufctl_history_cb_t;
27980Sstevel@tonic-gate
27990Sstevel@tonic-gate /*ARGSUSED*/
28000Sstevel@tonic-gate static int
bufctl_history_callback(uintptr_t addr,const void * ign,void * arg)28010Sstevel@tonic-gate bufctl_history_callback(uintptr_t addr, const void *ign, void *arg)
28020Sstevel@tonic-gate {
28030Sstevel@tonic-gate bufctl_history_cb_t *bhc = arg;
28040Sstevel@tonic-gate
28050Sstevel@tonic-gate bhc->bhc_ret =
28060Sstevel@tonic-gate bufctl(addr, bhc->bhc_flags, bhc->bhc_argc, bhc->bhc_argv);
28070Sstevel@tonic-gate
28080Sstevel@tonic-gate bhc->bhc_flags &= ~DCMD_LOOPFIRST;
28090Sstevel@tonic-gate
28100Sstevel@tonic-gate return ((bhc->bhc_ret == DCMD_OK)? WALK_NEXT : WALK_DONE);
28110Sstevel@tonic-gate }
28120Sstevel@tonic-gate
/*
 * Print the ::bufctl help text (option summary for bufctl() below).
 */
void
bufctl_help(void)
{
	mdb_printf("%s",
"Display the contents of kmem_bufctl_audit_ts, with optional filtering.\n\n");
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s",
" -v Display the full content of the bufctl, including its stack trace\n"
" -h retrieve the bufctl's transaction history, if available\n"
" -a addr\n"
" filter out bufctls not involving the buffer at addr\n"
" -c caller\n"
" filter out bufctls without the function/PC in their stack trace\n"
" -e earliest\n"
" filter out bufctls timestamped before earliest\n"
" -l latest\n"
" filter out bufctls timestamped after latest\n"
" -t thread\n"
" filter out bufctls not involving thread\n");
}
28350Sstevel@tonic-gate
/*
 * ::bufctl dcmd: display a kmem_bufctl_audit_t, with optional
 * filtering by caller (-c), thread (-t), timestamp window (-e/-l), or
 * buffer address (-a); see bufctl_help().  With -h, the buffer's
 * transaction history is walked and this dcmd is re-invoked on each
 * historical bufctl (with -H prepended to prevent recursion).
 */
int
bufctl(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kmem_bufctl_audit_t bc;
	uint_t verbose = FALSE;
	uint_t history = FALSE;
	uint_t in_history = FALSE;
	uintptr_t caller = NULL, thread = NULL;
	uintptr_t laddr, haddr, baddr = NULL;
	hrtime_t earliest = 0, latest = 0;
	int i, depth;
	char c[MDB_SYM_NAMLEN];
	GElf_Sym sym;

	if (mdb_getopts(argc, argv,
	    'v', MDB_OPT_SETBITS, TRUE, &verbose,
	    'h', MDB_OPT_SETBITS, TRUE, &history,
	    'H', MDB_OPT_SETBITS, TRUE, &in_history,	/* internal */
	    'c', MDB_OPT_UINTPTR, &caller,
	    't', MDB_OPT_UINTPTR, &thread,
	    'e', MDB_OPT_UINT64, &earliest,
	    'l', MDB_OPT_UINT64, &latest,
	    'a', MDB_OPT_UINTPTR, &baddr, NULL) != argc)
		return (DCMD_USAGE);

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	/* -H is only valid as part of a -h replay */
	if (in_history && !history)
		return (DCMD_USAGE);

	if (history && !in_history) {
		/* replay ourselves over the buffer's transaction history */
		mdb_arg_t *nargv = mdb_zalloc(sizeof (*nargv) * (argc + 1),
		    UM_SLEEP | UM_GC);
		bufctl_history_cb_t bhc;

		nargv[0].a_type = MDB_TYPE_STRING;
		nargv[0].a_un.a_str = "-H";		/* prevent recursion */

		for (i = 0; i < argc; i++)
			nargv[i + 1] = argv[i];

		/*
		 * When in history mode, we treat each element as if it
		 * were in a separate loop, so that the headers group
		 * bufctls with similar histories.
		 */
		bhc.bhc_flags = flags | DCMD_LOOP | DCMD_LOOPFIRST;
		bhc.bhc_argc = argc + 1;
		bhc.bhc_argv = nargv;
		bhc.bhc_ret = DCMD_OK;

		if (mdb_pwalk("bufctl_history", bufctl_history_callback, &bhc,
		    addr) == -1) {
			mdb_warn("unable to walk bufctl_history");
			return (DCMD_ERR);
		}

		if (bhc.bhc_ret == DCMD_OK && !(flags & DCMD_PIPE_OUT))
			mdb_printf("\n");

		return (bhc.bhc_ret);
	}

	if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
		if (verbose) {
			mdb_printf("%16s %16s %16s %16s\n"
			    "%<u>%16s %16s %16s %16s%</u>\n",
			    "ADDR", "BUFADDR", "TIMESTAMP", "THREAD",
			    "", "CACHE", "LASTLOG", "CONTENTS");
		} else {
			mdb_printf("%<u>%-?s %-?s %-12s %-?s %s%</u>\n",
			    "ADDR", "BUFADDR", "TIMESTAMP", "THREAD", "CALLER");
		}
	}

	if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
		mdb_warn("couldn't read bufctl at %p", addr);
		return (DCMD_ERR);
	}

	/*
	 * Guard against bogus bc_depth in case the bufctl is corrupt or
	 * the address does not really refer to a bufctl.
	 */
	depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH);

	if (caller != NULL) {
		laddr = caller;
		haddr = caller + sizeof (caller);

		if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, sizeof (c),
		    &sym) != -1 && caller == (uintptr_t)sym.st_value) {
			/*
			 * We were provided an exact symbol value; any
			 * address in the function is valid.
			 */
			laddr = (uintptr_t)sym.st_value;
			haddr = (uintptr_t)sym.st_value + sym.st_size;
		}

		/* drop this bufctl unless its stack hits [laddr, haddr) */
		for (i = 0; i < depth; i++)
			if (bc.bc_stack[i] >= laddr && bc.bc_stack[i] < haddr)
				break;

		if (i == depth)
			return (DCMD_OK);
	}

	if (thread != NULL && (uintptr_t)bc.bc_thread != thread)
		return (DCMD_OK);

	if (earliest != 0 && bc.bc_timestamp < earliest)
		return (DCMD_OK);

	if (latest != 0 && bc.bc_timestamp > latest)
		return (DCMD_OK);

	if (baddr != 0 && (uintptr_t)bc.bc_addr != baddr)
		return (DCMD_OK);

	/* in a pipeline, emit only the bufctl's address */
	if (flags & DCMD_PIPE_OUT) {
		mdb_printf("%#lr\n", addr);
		return (DCMD_OK);
	}

	if (verbose) {
		mdb_printf(
		    "%<b>%16p%</b> %16p %16llx %16p\n"
		    "%16s %16p %16p %16p\n",
		    addr, bc.bc_addr, bc.bc_timestamp, bc.bc_thread,
		    "", bc.bc_cache, bc.bc_lastlog, bc.bc_contents);

		mdb_inc_indent(17);
		for (i = 0; i < depth; i++)
			mdb_printf("%a\n", bc.bc_stack[i]);
		mdb_dec_indent(17);
		mdb_printf("\n");
	} else {
		mdb_printf("%0?p %0?p %12llx %0?p", addr, bc.bc_addr,
		    bc.bc_timestamp, bc.bc_thread);

		/* show the first stack frame that isn't inside kmem itself */
		for (i = 0; i < depth; i++) {
			if (mdb_lookup_by_addr(bc.bc_stack[i],
			    MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1)
				continue;
			if (strncmp(c, "kmem_", 5) == 0)
				continue;
			mdb_printf(" %a\n", bc.bc_stack[i]);
			break;
		}

		if (i >= depth)
			mdb_printf("\n");
	}

	return (DCMD_OK);
}
29940Sstevel@tonic-gate
/*
 * Shared state for ::kmem_verify's buffer-checking walk callbacks
 * (verify_free() and verify_alloc()).
 */
typedef struct kmem_verify {
	uint64_t *kmv_buf;		/* buffer to read cache contents into */
	size_t kmv_size;		/* number of bytes in kmv_buf */
	int kmv_corruption;		/* > 0 if corruption found. */
	int kmv_besilent;		/* if set, suppress per-buffer reports */
	struct kmem_cache kmv_cache;	/* the cache we're operating on */
} kmem_verify_t;
30020Sstevel@tonic-gate
/*
 * verify_pattern()
 *	Scan the first 'size' bytes of buf_arg, viewed as uint64_t words,
 *	for the fill pattern pat.  Returns the byte offset of the first
 *	word that does not match, or -1 if the whole region matches.
 */
static int64_t
verify_pattern(uint64_t *buf_arg, size_t size, uint64_t pat)
{
	/*LINTED*/
	uint64_t *end = (uint64_t *)((char *)buf_arg + size);
	uint64_t *cur = buf_arg;

	while (cur < end) {
		if (*cur != pat)
			return ((uintptr_t)cur - (uintptr_t)buf_arg);
		cur++;
	}

	return (-1);
}
30190Sstevel@tonic-gate
30200Sstevel@tonic-gate /*
30210Sstevel@tonic-gate * verify_buftag()
30220Sstevel@tonic-gate * verify that btp->bt_bxstat == (bcp ^ pat)
30230Sstevel@tonic-gate */
30240Sstevel@tonic-gate static int
verify_buftag(kmem_buftag_t * btp,uintptr_t pat)30250Sstevel@tonic-gate verify_buftag(kmem_buftag_t *btp, uintptr_t pat)
30260Sstevel@tonic-gate {
30270Sstevel@tonic-gate return (btp->bt_bxstat == ((intptr_t)btp->bt_bufctl ^ pat) ? 0 : -1);
30280Sstevel@tonic-gate }
30290Sstevel@tonic-gate
30300Sstevel@tonic-gate /*
30310Sstevel@tonic-gate * verify_free()
30320Sstevel@tonic-gate * verify the integrity of a free block of memory by checking
30330Sstevel@tonic-gate * that it is filled with 0xdeadbeef and that its buftag is sane.
30340Sstevel@tonic-gate */
30350Sstevel@tonic-gate /*ARGSUSED1*/
30360Sstevel@tonic-gate static int
verify_free(uintptr_t addr,const void * data,void * private)30370Sstevel@tonic-gate verify_free(uintptr_t addr, const void *data, void *private)
30380Sstevel@tonic-gate {
30390Sstevel@tonic-gate kmem_verify_t *kmv = (kmem_verify_t *)private;
30400Sstevel@tonic-gate uint64_t *buf = kmv->kmv_buf; /* buf to validate */
30410Sstevel@tonic-gate int64_t corrupt; /* corruption offset */
30420Sstevel@tonic-gate kmem_buftag_t *buftagp; /* ptr to buftag */
30430Sstevel@tonic-gate kmem_cache_t *cp = &kmv->kmv_cache;
30440Sstevel@tonic-gate int besilent = kmv->kmv_besilent;
30450Sstevel@tonic-gate
30460Sstevel@tonic-gate /*LINTED*/
30470Sstevel@tonic-gate buftagp = KMEM_BUFTAG(cp, buf);
30480Sstevel@tonic-gate
30490Sstevel@tonic-gate /*
30500Sstevel@tonic-gate * Read the buffer to check.
30510Sstevel@tonic-gate */
30520Sstevel@tonic-gate if (mdb_vread(buf, kmv->kmv_size, addr) == -1) {
30530Sstevel@tonic-gate if (!besilent)
30540Sstevel@tonic-gate mdb_warn("couldn't read %p", addr);
30550Sstevel@tonic-gate return (WALK_NEXT);
30560Sstevel@tonic-gate }
30570Sstevel@tonic-gate
30580Sstevel@tonic-gate if ((corrupt = verify_pattern(buf, cp->cache_verify,
30590Sstevel@tonic-gate KMEM_FREE_PATTERN)) >= 0) {
30600Sstevel@tonic-gate if (!besilent)
30610Sstevel@tonic-gate mdb_printf("buffer %p (free) seems corrupted, at %p\n",
30620Sstevel@tonic-gate addr, (uintptr_t)addr + corrupt);
30630Sstevel@tonic-gate goto corrupt;
30640Sstevel@tonic-gate }
30650Sstevel@tonic-gate /*
30660Sstevel@tonic-gate * When KMF_LITE is set, buftagp->bt_redzone is used to hold
30670Sstevel@tonic-gate * the first bytes of the buffer, hence we cannot check for red
30680Sstevel@tonic-gate * zone corruption.
30690Sstevel@tonic-gate */
30700Sstevel@tonic-gate if ((cp->cache_flags & (KMF_HASH | KMF_LITE)) == KMF_HASH &&
30710Sstevel@tonic-gate buftagp->bt_redzone != KMEM_REDZONE_PATTERN) {
30720Sstevel@tonic-gate if (!besilent)
30730Sstevel@tonic-gate mdb_printf("buffer %p (free) seems to "
30740Sstevel@tonic-gate "have a corrupt redzone pattern\n", addr);
30750Sstevel@tonic-gate goto corrupt;
30760Sstevel@tonic-gate }
30770Sstevel@tonic-gate
30780Sstevel@tonic-gate /*
30790Sstevel@tonic-gate * confirm bufctl pointer integrity.
30800Sstevel@tonic-gate */
30810Sstevel@tonic-gate if (verify_buftag(buftagp, KMEM_BUFTAG_FREE) == -1) {
30820Sstevel@tonic-gate if (!besilent)
30830Sstevel@tonic-gate mdb_printf("buffer %p (free) has a corrupt "
30840Sstevel@tonic-gate "buftag\n", addr);
30850Sstevel@tonic-gate goto corrupt;
30860Sstevel@tonic-gate }
30870Sstevel@tonic-gate
30880Sstevel@tonic-gate return (WALK_NEXT);
30890Sstevel@tonic-gate corrupt:
30900Sstevel@tonic-gate kmv->kmv_corruption++;
30910Sstevel@tonic-gate return (WALK_NEXT);
30920Sstevel@tonic-gate }
30930Sstevel@tonic-gate
30940Sstevel@tonic-gate /*
30950Sstevel@tonic-gate * verify_alloc()
30960Sstevel@tonic-gate * Verify that the buftag of an allocated buffer makes sense with respect
30970Sstevel@tonic-gate * to the buffer.
30980Sstevel@tonic-gate */
30990Sstevel@tonic-gate /*ARGSUSED1*/
31000Sstevel@tonic-gate static int
verify_alloc(uintptr_t addr,const void * data,void * private)31010Sstevel@tonic-gate verify_alloc(uintptr_t addr, const void *data, void *private)
31020Sstevel@tonic-gate {
31030Sstevel@tonic-gate kmem_verify_t *kmv = (kmem_verify_t *)private;
31040Sstevel@tonic-gate kmem_cache_t *cp = &kmv->kmv_cache;
31050Sstevel@tonic-gate uint64_t *buf = kmv->kmv_buf; /* buf to validate */
31060Sstevel@tonic-gate /*LINTED*/
31070Sstevel@tonic-gate kmem_buftag_t *buftagp = KMEM_BUFTAG(cp, buf);
31080Sstevel@tonic-gate uint32_t *ip = (uint32_t *)buftagp;
31090Sstevel@tonic-gate uint8_t *bp = (uint8_t *)buf;
31100Sstevel@tonic-gate int looks_ok = 0, size_ok = 1; /* flags for finding corruption */
31110Sstevel@tonic-gate int besilent = kmv->kmv_besilent;
31120Sstevel@tonic-gate
31130Sstevel@tonic-gate /*
31140Sstevel@tonic-gate * Read the buffer to check.
31150Sstevel@tonic-gate */
31160Sstevel@tonic-gate if (mdb_vread(buf, kmv->kmv_size, addr) == -1) {
31170Sstevel@tonic-gate if (!besilent)
31180Sstevel@tonic-gate mdb_warn("couldn't read %p", addr);
31190Sstevel@tonic-gate return (WALK_NEXT);
31200Sstevel@tonic-gate }
31210Sstevel@tonic-gate
31220Sstevel@tonic-gate /*
31230Sstevel@tonic-gate * There are two cases to handle:
31240Sstevel@tonic-gate * 1. If the buf was alloc'd using kmem_cache_alloc, it will have
31250Sstevel@tonic-gate * 0xfeedfacefeedface at the end of it
31260Sstevel@tonic-gate * 2. If the buf was alloc'd using kmem_alloc, it will have
31270Sstevel@tonic-gate * 0xbb just past the end of the region in use. At the buftag,
31280Sstevel@tonic-gate * it will have 0xfeedface (or, if the whole buffer is in use,
31290Sstevel@tonic-gate * 0xfeedface & bb000000 or 0xfeedfacf & 000000bb depending on
31300Sstevel@tonic-gate * endianness), followed by 32 bits containing the offset of the
31310Sstevel@tonic-gate * 0xbb byte in the buffer.
31320Sstevel@tonic-gate *
31330Sstevel@tonic-gate * Finally, the two 32-bit words that comprise the second half of the
31340Sstevel@tonic-gate * buftag should xor to KMEM_BUFTAG_ALLOC
31350Sstevel@tonic-gate */
31360Sstevel@tonic-gate
31370Sstevel@tonic-gate if (buftagp->bt_redzone == KMEM_REDZONE_PATTERN)
31380Sstevel@tonic-gate looks_ok = 1;
31390Sstevel@tonic-gate else if (!KMEM_SIZE_VALID(ip[1]))
31400Sstevel@tonic-gate size_ok = 0;
31410Sstevel@tonic-gate else if (bp[KMEM_SIZE_DECODE(ip[1])] == KMEM_REDZONE_BYTE)
31420Sstevel@tonic-gate looks_ok = 1;
31430Sstevel@tonic-gate else
31440Sstevel@tonic-gate size_ok = 0;
31450Sstevel@tonic-gate
31460Sstevel@tonic-gate if (!size_ok) {
31470Sstevel@tonic-gate if (!besilent)
31480Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a corrupt "
31490Sstevel@tonic-gate "redzone size encoding\n", addr);
31500Sstevel@tonic-gate goto corrupt;
31510Sstevel@tonic-gate }
31520Sstevel@tonic-gate
31530Sstevel@tonic-gate if (!looks_ok) {
31540Sstevel@tonic-gate if (!besilent)
31550Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a corrupt "
31560Sstevel@tonic-gate "redzone signature\n", addr);
31570Sstevel@tonic-gate goto corrupt;
31580Sstevel@tonic-gate }
31590Sstevel@tonic-gate
31600Sstevel@tonic-gate if (verify_buftag(buftagp, KMEM_BUFTAG_ALLOC) == -1) {
31610Sstevel@tonic-gate if (!besilent)
31620Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a "
31630Sstevel@tonic-gate "corrupt buftag\n", addr);
31640Sstevel@tonic-gate goto corrupt;
31650Sstevel@tonic-gate }
31660Sstevel@tonic-gate
31670Sstevel@tonic-gate return (WALK_NEXT);
31680Sstevel@tonic-gate corrupt:
31690Sstevel@tonic-gate kmv->kmv_corruption++;
31700Sstevel@tonic-gate return (WALK_NEXT);
31710Sstevel@tonic-gate }
31720Sstevel@tonic-gate
31730Sstevel@tonic-gate /*ARGSUSED2*/
31740Sstevel@tonic-gate int
kmem_verify(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)31750Sstevel@tonic-gate kmem_verify(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
31760Sstevel@tonic-gate {
31770Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) {
31780Sstevel@tonic-gate int check_alloc = 0, check_free = 0;
31790Sstevel@tonic-gate kmem_verify_t kmv;
31800Sstevel@tonic-gate
31810Sstevel@tonic-gate if (mdb_vread(&kmv.kmv_cache, sizeof (kmv.kmv_cache),
31820Sstevel@tonic-gate addr) == -1) {
31830Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache %p", addr);
31840Sstevel@tonic-gate return (DCMD_ERR);
31850Sstevel@tonic-gate }
31860Sstevel@tonic-gate
31870Sstevel@tonic-gate kmv.kmv_size = kmv.kmv_cache.cache_buftag +
31880Sstevel@tonic-gate sizeof (kmem_buftag_t);
31890Sstevel@tonic-gate kmv.kmv_buf = mdb_alloc(kmv.kmv_size, UM_SLEEP | UM_GC);
31900Sstevel@tonic-gate kmv.kmv_corruption = 0;
31910Sstevel@tonic-gate
31920Sstevel@tonic-gate if ((kmv.kmv_cache.cache_flags & KMF_REDZONE)) {
31930Sstevel@tonic-gate check_alloc = 1;
31940Sstevel@tonic-gate if (kmv.kmv_cache.cache_flags & KMF_DEADBEEF)
31950Sstevel@tonic-gate check_free = 1;
31960Sstevel@tonic-gate } else {
31970Sstevel@tonic-gate if (!(flags & DCMD_LOOP)) {
31980Sstevel@tonic-gate mdb_warn("cache %p (%s) does not have "
31990Sstevel@tonic-gate "redzone checking enabled\n", addr,
32000Sstevel@tonic-gate kmv.kmv_cache.cache_name);
32010Sstevel@tonic-gate }
32020Sstevel@tonic-gate return (DCMD_ERR);
32030Sstevel@tonic-gate }
32040Sstevel@tonic-gate
32050Sstevel@tonic-gate if (flags & DCMD_LOOP) {
32060Sstevel@tonic-gate /*
32070Sstevel@tonic-gate * table mode, don't print out every corrupt buffer
32080Sstevel@tonic-gate */
32090Sstevel@tonic-gate kmv.kmv_besilent = 1;
32100Sstevel@tonic-gate } else {
32110Sstevel@tonic-gate mdb_printf("Summary for cache '%s'\n",
32120Sstevel@tonic-gate kmv.kmv_cache.cache_name);
32130Sstevel@tonic-gate mdb_inc_indent(2);
32140Sstevel@tonic-gate kmv.kmv_besilent = 0;
32150Sstevel@tonic-gate }
32160Sstevel@tonic-gate
32170Sstevel@tonic-gate if (check_alloc)
32180Sstevel@tonic-gate (void) mdb_pwalk("kmem", verify_alloc, &kmv, addr);
32190Sstevel@tonic-gate if (check_free)
32200Sstevel@tonic-gate (void) mdb_pwalk("freemem", verify_free, &kmv, addr);
32210Sstevel@tonic-gate
32220Sstevel@tonic-gate if (flags & DCMD_LOOP) {
32230Sstevel@tonic-gate if (kmv.kmv_corruption == 0) {
32240Sstevel@tonic-gate mdb_printf("%-*s %?p clean\n",
32250Sstevel@tonic-gate KMEM_CACHE_NAMELEN,
32260Sstevel@tonic-gate kmv.kmv_cache.cache_name, addr);
32270Sstevel@tonic-gate } else {
32280Sstevel@tonic-gate char *s = ""; /* optional s in "buffer[s]" */
32290Sstevel@tonic-gate if (kmv.kmv_corruption > 1)
32300Sstevel@tonic-gate s = "s";
32310Sstevel@tonic-gate
32320Sstevel@tonic-gate mdb_printf("%-*s %?p %d corrupt buffer%s\n",
32330Sstevel@tonic-gate KMEM_CACHE_NAMELEN,
32340Sstevel@tonic-gate kmv.kmv_cache.cache_name, addr,
32350Sstevel@tonic-gate kmv.kmv_corruption, s);
32360Sstevel@tonic-gate }
32370Sstevel@tonic-gate } else {
32380Sstevel@tonic-gate /*
32390Sstevel@tonic-gate * This is the more verbose mode, when the user has
32400Sstevel@tonic-gate * type addr::kmem_verify. If the cache was clean,
32410Sstevel@tonic-gate * nothing will have yet been printed. So say something.
32420Sstevel@tonic-gate */
32430Sstevel@tonic-gate if (kmv.kmv_corruption == 0)
32440Sstevel@tonic-gate mdb_printf("clean\n");
32450Sstevel@tonic-gate
32460Sstevel@tonic-gate mdb_dec_indent(2);
32470Sstevel@tonic-gate }
32480Sstevel@tonic-gate } else {
32490Sstevel@tonic-gate /*
32500Sstevel@tonic-gate * If the user didn't specify a cache to verify, we'll walk all
32510Sstevel@tonic-gate * kmem_cache's, specifying ourself as a callback for each...
32520Sstevel@tonic-gate * this is the equivalent of '::walk kmem_cache .::kmem_verify'
32530Sstevel@tonic-gate */
32540Sstevel@tonic-gate mdb_printf("%<u>%-*s %-?s %-20s%</b>\n", KMEM_CACHE_NAMELEN,
32550Sstevel@tonic-gate "Cache Name", "Addr", "Cache Integrity");
32560Sstevel@tonic-gate (void) (mdb_walk_dcmd("kmem_cache", "kmem_verify", 0, NULL));
32570Sstevel@tonic-gate }
32580Sstevel@tonic-gate
32590Sstevel@tonic-gate return (DCMD_OK);
32600Sstevel@tonic-gate }
32610Sstevel@tonic-gate
/*
 * A node in the locally-built copy of the target's vmem arena forest
 * (see vmem_walk_init()).  Nodes are linked both in flat vmem_list
 * order (vn_next) and into a parent/child tree derived from each
 * arena's vm_source pointer.
 */
typedef struct vmem_node {
	struct vmem_node *vn_next;	/* next in flat vmem_list order */
	struct vmem_node *vn_parent;	/* arena we import from, if any */
	struct vmem_node *vn_sibling;	/* next child of our parent */
	struct vmem_node *vn_children;	/* head of our child list */
	uintptr_t vn_addr;		/* target address of the vmem_t */
	int vn_marked;			/* traversal mark -- set by walk code */
					/* outside this view; TODO confirm */
	vmem_t vn_vmem;			/* local copy of the vmem_t */
} vmem_node_t;

/*
 * Walker state for the vmem tree walk.
 */
typedef struct vmem_walk {
	vmem_node_t *vw_root;		/* first source-less (root) arena */
	vmem_node_t *vw_current;	/* node the walk starts/resumes at */
} vmem_walk_t;
32760Sstevel@tonic-gate
/*
 * Build an in-memory copy of the target's vmem arena forest.
 *
 * Pass 1 reads every vmem_t on the target's vmem_list into a local
 * vmem_node_t list.  Pass 2 links each node under its source (parent)
 * arena, collecting source-less arenas onto the root list.  The walk
 * starts at wsp->walk_addr's node if one was given, else at the roots.
 * On error, all nodes allocated so far are freed.
 */
int
vmem_walk_init(mdb_walk_state_t *wsp)
{
	uintptr_t vaddr, paddr;
	vmem_node_t *head = NULL, *root = NULL, *current = NULL, *parent, *vp;
	vmem_walk_t *vw;

	if (mdb_readvar(&vaddr, "vmem_list") == -1) {
		mdb_warn("couldn't read 'vmem_list'");
		return (WALK_ERR);
	}

	/* pass 1: snapshot every arena on the global list */
	while (vaddr != NULL) {
		vp = mdb_zalloc(sizeof (vmem_node_t), UM_SLEEP);
		vp->vn_addr = vaddr;
		vp->vn_next = head;
		head = vp;

		if (vaddr == wsp->walk_addr)
			current = vp;

		if (mdb_vread(&vp->vn_vmem, sizeof (vmem_t), vaddr) == -1) {
			mdb_warn("couldn't read vmem_t at %p", vaddr);
			goto err;
		}

		vaddr = (uintptr_t)vp->vn_vmem.vm_next;
	}

	/* pass 2: build the parent/child forest from vm_source links */
	for (vp = head; vp != NULL; vp = vp->vn_next) {

		if ((paddr = (uintptr_t)vp->vn_vmem.vm_source) == NULL) {
			vp->vn_sibling = root;	/* no source: a root arena */
			root = vp;
			continue;
		}

		for (parent = head; parent != NULL; parent = parent->vn_next) {
			if (parent->vn_addr != paddr)
				continue;
			vp->vn_sibling = parent->vn_children;
			parent->vn_children = vp;
			vp->vn_parent = parent;
			break;
		}

		if (parent == NULL) {
			mdb_warn("couldn't find %p's parent (%p)\n",
			    vp->vn_addr, paddr);
			goto err;
		}
	}

	vw = mdb_zalloc(sizeof (vmem_walk_t), UM_SLEEP);
	vw->vw_root = root;

	if (current != NULL)
		vw->vw_current = current;
	else
		vw->vw_current = root;

	wsp->walk_data = vw;
	return (WALK_NEXT);
err:
	/* unwind: free the partially-constructed node list */
	for (vp = head; head != NULL; vp = head) {
		head = vp->vn_next;
		mdb_free(vp, sizeof (vmem_node_t));
	}

	return (WALK_ERR);
}
33480Sstevel@tonic-gate
33490Sstevel@tonic-gate int
vmem_walk_step(mdb_walk_state_t * wsp)33500Sstevel@tonic-gate vmem_walk_step(mdb_walk_state_t *wsp)
33510Sstevel@tonic-gate {
33520Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data;
33530Sstevel@tonic-gate vmem_node_t *vp;
33540Sstevel@tonic-gate int rval;
33550Sstevel@tonic-gate
33560Sstevel@tonic-gate if ((vp = vw->vw_current) == NULL)
33570Sstevel@tonic-gate return (WALK_DONE);
33580Sstevel@tonic-gate
33590Sstevel@tonic-gate rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata);
33600Sstevel@tonic-gate
33610Sstevel@tonic-gate if (vp->vn_children != NULL) {
33620Sstevel@tonic-gate vw->vw_current = vp->vn_children;
33630Sstevel@tonic-gate return (rval);
33640Sstevel@tonic-gate }
33650Sstevel@tonic-gate
33660Sstevel@tonic-gate do {
33670Sstevel@tonic-gate vw->vw_current = vp->vn_sibling;
33680Sstevel@tonic-gate vp = vp->vn_parent;
33690Sstevel@tonic-gate } while (vw->vw_current == NULL && vp != NULL);
33700Sstevel@tonic-gate
33710Sstevel@tonic-gate return (rval);
33720Sstevel@tonic-gate }
33730Sstevel@tonic-gate
33740Sstevel@tonic-gate /*
33750Sstevel@tonic-gate * The "vmem_postfix" walk walks the vmem arenas in post-fix order; all
33760Sstevel@tonic-gate * children are visited before their parent. We perform the postfix walk
33770Sstevel@tonic-gate * iteratively (rather than recursively) to allow mdb to regain control
33780Sstevel@tonic-gate * after each callback.
33790Sstevel@tonic-gate */
33800Sstevel@tonic-gate int
vmem_postfix_walk_step(mdb_walk_state_t * wsp)33810Sstevel@tonic-gate vmem_postfix_walk_step(mdb_walk_state_t *wsp)
33820Sstevel@tonic-gate {
33830Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data;
33840Sstevel@tonic-gate vmem_node_t *vp = vw->vw_current;
33850Sstevel@tonic-gate int rval;
33860Sstevel@tonic-gate
33870Sstevel@tonic-gate /*
33880Sstevel@tonic-gate * If this node is marked, then we know that we have already visited
33890Sstevel@tonic-gate * all of its children. If the node has any siblings, they need to
33900Sstevel@tonic-gate * be visited next; otherwise, we need to visit the parent. Note
33910Sstevel@tonic-gate * that vp->vn_marked will only be zero on the first invocation of
33920Sstevel@tonic-gate * the step function.
33930Sstevel@tonic-gate */
33940Sstevel@tonic-gate if (vp->vn_marked) {
33950Sstevel@tonic-gate if (vp->vn_sibling != NULL)
33960Sstevel@tonic-gate vp = vp->vn_sibling;
33970Sstevel@tonic-gate else if (vp->vn_parent != NULL)
33980Sstevel@tonic-gate vp = vp->vn_parent;
33990Sstevel@tonic-gate else {
34000Sstevel@tonic-gate /*
34010Sstevel@tonic-gate * We have neither a parent, nor a sibling, and we
34020Sstevel@tonic-gate * have already been visited; we're done.
34030Sstevel@tonic-gate */
34040Sstevel@tonic-gate return (WALK_DONE);
34050Sstevel@tonic-gate }
34060Sstevel@tonic-gate }
34070Sstevel@tonic-gate
34080Sstevel@tonic-gate /*
34090Sstevel@tonic-gate * Before we visit this node, visit its children.
34100Sstevel@tonic-gate */
34110Sstevel@tonic-gate while (vp->vn_children != NULL && !vp->vn_children->vn_marked)
34120Sstevel@tonic-gate vp = vp->vn_children;
34130Sstevel@tonic-gate
34140Sstevel@tonic-gate vp->vn_marked = 1;
34150Sstevel@tonic-gate vw->vw_current = vp;
34160Sstevel@tonic-gate rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata);
34170Sstevel@tonic-gate
34180Sstevel@tonic-gate return (rval);
34190Sstevel@tonic-gate }
34200Sstevel@tonic-gate
/*
 * Clean up after a vmem walk: recursively free every vmem_node_t in the
 * tree.  A node's subtree is freed first, then the node itself; the walk
 * state (vw) is released only when the last root -- a node with neither
 * sibling nor parent -- has been freed.
 */
void
vmem_walk_fini(mdb_walk_state_t *wsp)
{
	vmem_walk_t *vw = wsp->walk_data;
	vmem_node_t *root = vw->vw_root;
	int done;

	if (root == NULL)
		return;

	/* free this node's children before the node itself */
	if ((vw->vw_root = root->vn_children) != NULL)
		vmem_walk_fini(wsp);

	vw->vw_root = root->vn_sibling;
	done = (root->vn_sibling == NULL && root->vn_parent == NULL);
	mdb_free(root, sizeof (vmem_node_t));

	if (done) {
		mdb_free(vw, sizeof (vmem_walk_t));
	} else {
		/* continue with the siblings of the node just freed */
		vmem_walk_fini(wsp);
	}
}
34440Sstevel@tonic-gate
/*
 * State for the vmem_seg family of walkers: the segment type to report
 * (VMEM_NONE means all types), the list position we started from, and
 * the segment to read next.
 */
typedef struct vmem_seg_walk {
	uint8_t vsw_type;	/* segment type filter (VMEM_NONE == all) */
	uintptr_t vsw_start;	/* address of the arena's vm_seg0 */
	uintptr_t vsw_current;	/* next segment to read */
} vmem_seg_walk_t;
34500Sstevel@tonic-gate
34510Sstevel@tonic-gate /*ARGSUSED*/
34520Sstevel@tonic-gate int
vmem_seg_walk_common_init(mdb_walk_state_t * wsp,uint8_t type,char * name)34530Sstevel@tonic-gate vmem_seg_walk_common_init(mdb_walk_state_t *wsp, uint8_t type, char *name)
34540Sstevel@tonic-gate {
34550Sstevel@tonic-gate vmem_seg_walk_t *vsw;
34560Sstevel@tonic-gate
34570Sstevel@tonic-gate if (wsp->walk_addr == NULL) {
34580Sstevel@tonic-gate mdb_warn("vmem_%s does not support global walks\n", name);
34590Sstevel@tonic-gate return (WALK_ERR);
34600Sstevel@tonic-gate }
34610Sstevel@tonic-gate
34620Sstevel@tonic-gate wsp->walk_data = vsw = mdb_alloc(sizeof (vmem_seg_walk_t), UM_SLEEP);
34630Sstevel@tonic-gate
34640Sstevel@tonic-gate vsw->vsw_type = type;
34650Sstevel@tonic-gate vsw->vsw_start = wsp->walk_addr + offsetof(vmem_t, vm_seg0);
34660Sstevel@tonic-gate vsw->vsw_current = vsw->vsw_start;
34670Sstevel@tonic-gate
34680Sstevel@tonic-gate return (WALK_NEXT);
34690Sstevel@tonic-gate }
34700Sstevel@tonic-gate
/*
 * vmem segments can't have type 0 (this should be added to vmem_impl.h),
 * so we use it as a sentinel meaning "walk all segment types".
 */
#define	VMEM_NONE	0
34750Sstevel@tonic-gate
/* Walk only the VMEM_ALLOC (allocated) segments of the given arena. */
int
vmem_alloc_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_ALLOC, "alloc"));
}
34810Sstevel@tonic-gate
/* Walk only the VMEM_FREE segments of the given arena. */
int
vmem_free_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_FREE, "free"));
}
34870Sstevel@tonic-gate
/* Walk only the VMEM_SPAN segments of the given arena. */
int
vmem_span_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_SPAN, "span"));
}
34930Sstevel@tonic-gate
/* Walk every segment of the given arena, regardless of type. */
int
vmem_seg_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_NONE, "seg"));
}
34990Sstevel@tonic-gate
35000Sstevel@tonic-gate int
vmem_seg_walk_step(mdb_walk_state_t * wsp)35010Sstevel@tonic-gate vmem_seg_walk_step(mdb_walk_state_t *wsp)
35020Sstevel@tonic-gate {
35030Sstevel@tonic-gate vmem_seg_t seg;
35040Sstevel@tonic-gate vmem_seg_walk_t *vsw = wsp->walk_data;
35050Sstevel@tonic-gate uintptr_t addr = vsw->vsw_current;
35060Sstevel@tonic-gate static size_t seg_size = 0;
35070Sstevel@tonic-gate int rval;
35080Sstevel@tonic-gate
35090Sstevel@tonic-gate if (!seg_size) {
35100Sstevel@tonic-gate if (mdb_readvar(&seg_size, "vmem_seg_size") == -1) {
35110Sstevel@tonic-gate mdb_warn("failed to read 'vmem_seg_size'");
35120Sstevel@tonic-gate seg_size = sizeof (vmem_seg_t);
35130Sstevel@tonic-gate }
35140Sstevel@tonic-gate }
35150Sstevel@tonic-gate
35160Sstevel@tonic-gate if (seg_size < sizeof (seg))
35170Sstevel@tonic-gate bzero((caddr_t)&seg + seg_size, sizeof (seg) - seg_size);
35180Sstevel@tonic-gate
35190Sstevel@tonic-gate if (mdb_vread(&seg, seg_size, addr) == -1) {
35200Sstevel@tonic-gate mdb_warn("couldn't read vmem_seg at %p", addr);
35210Sstevel@tonic-gate return (WALK_ERR);
35220Sstevel@tonic-gate }
35230Sstevel@tonic-gate
35240Sstevel@tonic-gate vsw->vsw_current = (uintptr_t)seg.vs_anext;
35250Sstevel@tonic-gate if (vsw->vsw_type != VMEM_NONE && seg.vs_type != vsw->vsw_type) {
35260Sstevel@tonic-gate rval = WALK_NEXT;
35270Sstevel@tonic-gate } else {
35280Sstevel@tonic-gate rval = wsp->walk_callback(addr, &seg, wsp->walk_cbdata);
35290Sstevel@tonic-gate }
35300Sstevel@tonic-gate
35310Sstevel@tonic-gate if (vsw->vsw_current == vsw->vsw_start)
35320Sstevel@tonic-gate return (WALK_DONE);
35330Sstevel@tonic-gate
35340Sstevel@tonic-gate return (rval);
35350Sstevel@tonic-gate }
35360Sstevel@tonic-gate
/* Release the per-walk state allocated by vmem_seg_walk_common_init(). */
void
vmem_seg_walk_fini(mdb_walk_state_t *wsp)
{
	vmem_seg_walk_t *vsw = wsp->walk_data;

	mdb_free(vsw, sizeof (vmem_seg_walk_t));
}
35440Sstevel@tonic-gate
/* Width of the (indented) arena-name column printed by ::vmem. */
#define	VMEM_NAMEWIDTH	22
35460Sstevel@tonic-gate
35470Sstevel@tonic-gate int
vmem(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)35480Sstevel@tonic-gate vmem(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
35490Sstevel@tonic-gate {
35500Sstevel@tonic-gate vmem_t v, parent;
35510Sstevel@tonic-gate vmem_kstat_t *vkp = &v.vm_kstat;
35520Sstevel@tonic-gate uintptr_t paddr;
35530Sstevel@tonic-gate int ident = 0;
35540Sstevel@tonic-gate char c[VMEM_NAMEWIDTH];
35550Sstevel@tonic-gate
35560Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) {
35570Sstevel@tonic-gate if (mdb_walk_dcmd("vmem", "vmem", argc, argv) == -1) {
35580Sstevel@tonic-gate mdb_warn("can't walk vmem");
35590Sstevel@tonic-gate return (DCMD_ERR);
35600Sstevel@tonic-gate }
35610Sstevel@tonic-gate return (DCMD_OK);
35620Sstevel@tonic-gate }
35630Sstevel@tonic-gate
35640Sstevel@tonic-gate if (DCMD_HDRSPEC(flags))
35650Sstevel@tonic-gate mdb_printf("%-?s %-*s %10s %12s %9s %5s\n",
35660Sstevel@tonic-gate "ADDR", VMEM_NAMEWIDTH, "NAME", "INUSE",
35670Sstevel@tonic-gate "TOTAL", "SUCCEED", "FAIL");
35680Sstevel@tonic-gate
35690Sstevel@tonic-gate if (mdb_vread(&v, sizeof (v), addr) == -1) {
35700Sstevel@tonic-gate mdb_warn("couldn't read vmem at %p", addr);
35710Sstevel@tonic-gate return (DCMD_ERR);
35720Sstevel@tonic-gate }
35730Sstevel@tonic-gate
35740Sstevel@tonic-gate for (paddr = (uintptr_t)v.vm_source; paddr != NULL; ident += 2) {
35750Sstevel@tonic-gate if (mdb_vread(&parent, sizeof (parent), paddr) == -1) {
35760Sstevel@tonic-gate mdb_warn("couldn't trace %p's ancestry", addr);
35770Sstevel@tonic-gate ident = 0;
35780Sstevel@tonic-gate break;
35790Sstevel@tonic-gate }
35800Sstevel@tonic-gate paddr = (uintptr_t)parent.vm_source;
35810Sstevel@tonic-gate }
35820Sstevel@tonic-gate
35830Sstevel@tonic-gate (void) mdb_snprintf(c, VMEM_NAMEWIDTH, "%*s%s", ident, "", v.vm_name);
35840Sstevel@tonic-gate
35850Sstevel@tonic-gate mdb_printf("%0?p %-*s %10llu %12llu %9llu %5llu\n",
35860Sstevel@tonic-gate addr, VMEM_NAMEWIDTH, c,
35870Sstevel@tonic-gate vkp->vk_mem_inuse.value.ui64, vkp->vk_mem_total.value.ui64,
35880Sstevel@tonic-gate vkp->vk_alloc.value.ui64, vkp->vk_fail.value.ui64);
35890Sstevel@tonic-gate
35900Sstevel@tonic-gate return (DCMD_OK);
35910Sstevel@tonic-gate }
35920Sstevel@tonic-gate
/*
 * Help text for the ::vmem_seg dcmd.  Fixes two user-visible typos:
 * "filer out" -> "filter out" for the -m and -M options.
 */
void
vmem_seg_help(void)
{
	mdb_printf("%s",
"Display the contents of vmem_seg_ts, with optional filtering.\n\n"
"\n"
"A vmem_seg_t represents a range of addresses (or arbitrary numbers),\n"
"representing a single chunk of data. Only ALLOC segments have debugging\n"
"information.\n");
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s",
"  -v    Display the full content of the vmem_seg, including its stack trace\n"
"  -s    report the size of the segment, instead of the end address\n"
"  -c caller\n"
"        filter out segments without the function/PC in their stack trace\n"
"  -e earliest\n"
"        filter out segments timestamped before earliest\n"
"  -l latest\n"
"        filter out segments timestamped after latest\n"
"  -m minsize\n"
"        filter out segments smaller than minsize\n"
"  -M maxsize\n"
"        filter out segments larger than maxsize\n"
"  -t thread\n"
"        filter out segments not involving thread\n"
"  -T type\n"
"        filter out segments not of type 'type'\n"
"        type is one of: ALLOC/FREE/SPAN/ROTOR/WALKER\n");
}
36240Sstevel@tonic-gate
/*
 * ::vmem_seg dcmd - display a vmem_seg_t, subject to the filters given on
 * the command line (type, size range, thread, timestamp range, caller;
 * see vmem_seg_help()).  Segments that fail any filter are silently
 * skipped (DCMD_OK with no output).
 */
/*ARGSUSED*/
int
vmem_seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	vmem_seg_t vs;
	pc_t *stk = vs.vs_stack;
	uintptr_t sz;
	uint8_t t;
	const char *type = NULL;
	GElf_Sym sym;
	char c[MDB_SYM_NAMLEN];
	int no_debug;
	int i;
	int depth;
	uintptr_t laddr, haddr;

	uintptr_t caller = NULL, thread = NULL;
	uintptr_t minsize = 0, maxsize = 0;

	hrtime_t earliest = 0, latest = 0;

	uint_t size = 0;
	uint_t verbose = 0;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'c', MDB_OPT_UINTPTR, &caller,
	    'e', MDB_OPT_UINT64, &earliest,
	    'l', MDB_OPT_UINT64, &latest,
	    's', MDB_OPT_SETBITS, TRUE, &size,
	    'm', MDB_OPT_UINTPTR, &minsize,
	    'M', MDB_OPT_UINTPTR, &maxsize,
	    't', MDB_OPT_UINTPTR, &thread,
	    'T', MDB_OPT_STR, &type,
	    'v', MDB_OPT_SETBITS, TRUE, &verbose,
	    NULL) != argc)
		return (DCMD_USAGE);

	/* print the header once, unless we're feeding a pipeline */
	if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
		if (verbose) {
			mdb_printf("%16s %4s %16s %16s %16s\n"
			    "%<u>%16s %4s %16s %16s %16s%</u>\n",
			    "ADDR", "TYPE", "START", "END", "SIZE",
			    "", "", "THREAD", "TIMESTAMP", "");
		} else {
			mdb_printf("%?s %4s %?s %?s %s\n", "ADDR", "TYPE",
			    "START", size? "SIZE" : "END", "WHO");
		}
	}

	if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
		mdb_warn("couldn't read vmem_seg at %p", addr);
		return (DCMD_ERR);
	}

	/* -T: map the type name onto a VMEM_* constant and filter on it */
	if (type != NULL) {
		if (strcmp(type, "ALLC") == 0 || strcmp(type, "ALLOC") == 0)
			t = VMEM_ALLOC;
		else if (strcmp(type, "FREE") == 0)
			t = VMEM_FREE;
		else if (strcmp(type, "SPAN") == 0)
			t = VMEM_SPAN;
		else if (strcmp(type, "ROTR") == 0 ||
		    strcmp(type, "ROTOR") == 0)
			t = VMEM_ROTOR;
		else if (strcmp(type, "WLKR") == 0 ||
		    strcmp(type, "WALKER") == 0)
			t = VMEM_WALKER;
		else {
			mdb_warn("\"%s\" is not a recognized vmem_seg type\n",
			    type);
			return (DCMD_ERR);
		}

		if (vs.vs_type != t)
			return (DCMD_OK);
	}

	sz = vs.vs_end - vs.vs_start;

	/* -m / -M: size-range filters */
	if (minsize != 0 && sz < minsize)
		return (DCMD_OK);

	if (maxsize != 0 && sz > maxsize)
		return (DCMD_OK);

	t = vs.vs_type;
	depth = vs.vs_depth;

	/*
	 * debug info, when present, is only accurate for VMEM_ALLOC segments
	 */
	no_debug = (t != VMEM_ALLOC) ||
	    (depth == 0 || depth > VMEM_STACK_DEPTH);

	if (no_debug) {
		/* the remaining filters all depend on the debug info */
		if (caller != NULL || thread != NULL || earliest != 0 ||
		    latest != 0)
			return (DCMD_OK); /* not enough info */
	} else {
		/* -c: require some stack frame in [laddr, haddr) */
		if (caller != NULL) {
			laddr = caller;
			haddr = caller + sizeof (caller);

			if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c,
			    sizeof (c), &sym) != -1 &&
			    caller == (uintptr_t)sym.st_value) {
				/*
				 * We were provided an exact symbol value; any
				 * address in the function is valid.
				 */
				laddr = (uintptr_t)sym.st_value;
				haddr = (uintptr_t)sym.st_value + sym.st_size;
			}

			for (i = 0; i < depth; i++)
				if (vs.vs_stack[i] >= laddr &&
				    vs.vs_stack[i] < haddr)
					break;

			if (i == depth)
				return (DCMD_OK);
		}

		/* -t / -e / -l: thread and timestamp filters */
		if (thread != NULL && (uintptr_t)vs.vs_thread != thread)
			return (DCMD_OK);

		if (earliest != 0 && vs.vs_timestamp < earliest)
			return (DCMD_OK);

		if (latest != 0 && vs.vs_timestamp > latest)
			return (DCMD_OK);
	}

	type = (t == VMEM_ALLOC ? "ALLC" :
	    t == VMEM_FREE ? "FREE" :
	    t == VMEM_SPAN ? "SPAN" :
	    t == VMEM_ROTOR ? "ROTR" :
	    t == VMEM_WALKER ? "WLKR" :
	    "????");

	/* in a pipeline, emit only the segment address */
	if (flags & DCMD_PIPE_OUT) {
		mdb_printf("%#lr\n", addr);
		return (DCMD_OK);
	}

	if (verbose) {
		mdb_printf("%<b>%16p%</b> %4s %16p %16p %16d\n",
		    addr, type, vs.vs_start, vs.vs_end, sz);

		if (no_debug)
			return (DCMD_OK);

		mdb_printf("%16s %4s %16p %16llx\n",
		    "", "", vs.vs_thread, vs.vs_timestamp);

		mdb_inc_indent(17);
		for (i = 0; i < depth; i++) {
			mdb_printf("%a\n", stk[i]);
		}
		mdb_dec_indent(17);
		mdb_printf("\n");
	} else {
		mdb_printf("%0?p %4s %0?p %0?p", addr, type,
		    vs.vs_start, size? sz : vs.vs_end);

		if (no_debug) {
			mdb_printf("\n");
			return (DCMD_OK);
		}

		/* report the first frame that isn't inside vmem itself */
		for (i = 0; i < depth; i++) {
			if (mdb_lookup_by_addr(stk[i], MDB_SYM_FUZZY,
			    c, sizeof (c), &sym) == -1)
				continue;
			if (strncmp(c, "vmem_", 5) == 0)
				continue;
			break;
		}
		/*
		 * NOTE(review): if every frame above matched "vmem_",
		 * i == depth here and stk[i] reads one entry past the
		 * captured frames -- confirm this cannot index past
		 * vs_stack[VMEM_STACK_DEPTH - 1].
		 */
		mdb_printf(" %a\n", stk[i]);
	}
	return (DCMD_OK);
}
38100Sstevel@tonic-gate
/*
 * Data passed to showbc() while walking a kmem log.
 */
typedef struct kmalog_data {
	uintptr_t kma_addr;	/* only show bufctls whose buffer holds this
				   address (0 == show everything) */
	hrtime_t kma_newest;	/* timestamp of the first record seen
				   (presumed newest); ages are printed
				   relative to it */
} kmalog_data_t;
38150Sstevel@tonic-gate
38160Sstevel@tonic-gate /*ARGSUSED*/
38170Sstevel@tonic-gate static int
showbc(uintptr_t addr,const kmem_bufctl_audit_t * bcp,kmalog_data_t * kma)38180Sstevel@tonic-gate showbc(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmalog_data_t *kma)
38190Sstevel@tonic-gate {
38200Sstevel@tonic-gate char name[KMEM_CACHE_NAMELEN + 1];
38210Sstevel@tonic-gate hrtime_t delta;
38220Sstevel@tonic-gate int i, depth;
38230Sstevel@tonic-gate size_t bufsize;
38240Sstevel@tonic-gate
38250Sstevel@tonic-gate if (bcp->bc_timestamp == 0)
38260Sstevel@tonic-gate return (WALK_DONE);
38270Sstevel@tonic-gate
38280Sstevel@tonic-gate if (kma->kma_newest == 0)
38290Sstevel@tonic-gate kma->kma_newest = bcp->bc_timestamp;
38300Sstevel@tonic-gate
38310Sstevel@tonic-gate if (kma->kma_addr) {
38320Sstevel@tonic-gate if (mdb_vread(&bufsize, sizeof (bufsize),
38330Sstevel@tonic-gate (uintptr_t)&bcp->bc_cache->cache_bufsize) == -1) {
38340Sstevel@tonic-gate mdb_warn(
38350Sstevel@tonic-gate "failed to read cache_bufsize for cache at %p",
38360Sstevel@tonic-gate bcp->bc_cache);
38370Sstevel@tonic-gate return (WALK_ERR);
38380Sstevel@tonic-gate }
38390Sstevel@tonic-gate
38400Sstevel@tonic-gate if (kma->kma_addr < (uintptr_t)bcp->bc_addr ||
38410Sstevel@tonic-gate kma->kma_addr >= (uintptr_t)bcp->bc_addr + bufsize)
38420Sstevel@tonic-gate return (WALK_NEXT);
38430Sstevel@tonic-gate }
38440Sstevel@tonic-gate
38450Sstevel@tonic-gate delta = kma->kma_newest - bcp->bc_timestamp;
38460Sstevel@tonic-gate depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH);
38470Sstevel@tonic-gate
38480Sstevel@tonic-gate if (mdb_readstr(name, sizeof (name), (uintptr_t)
38490Sstevel@tonic-gate &bcp->bc_cache->cache_name) <= 0)
38500Sstevel@tonic-gate (void) mdb_snprintf(name, sizeof (name), "%a", bcp->bc_cache);
38510Sstevel@tonic-gate
38520Sstevel@tonic-gate mdb_printf("\nT-%lld.%09lld addr=%p %s\n",
38530Sstevel@tonic-gate delta / NANOSEC, delta % NANOSEC, bcp->bc_addr, name);
38540Sstevel@tonic-gate
38550Sstevel@tonic-gate for (i = 0; i < depth; i++)
38560Sstevel@tonic-gate mdb_printf("\t %a\n", bcp->bc_stack[i]);
38570Sstevel@tonic-gate
38580Sstevel@tonic-gate return (WALK_NEXT);
38590Sstevel@tonic-gate }
38600Sstevel@tonic-gate
38610Sstevel@tonic-gate int
kmalog(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)38620Sstevel@tonic-gate kmalog(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
38630Sstevel@tonic-gate {
38640Sstevel@tonic-gate const char *logname = "kmem_transaction_log";
38650Sstevel@tonic-gate kmalog_data_t kma;
38660Sstevel@tonic-gate
38670Sstevel@tonic-gate if (argc > 1)
38680Sstevel@tonic-gate return (DCMD_USAGE);
38690Sstevel@tonic-gate
38700Sstevel@tonic-gate kma.kma_newest = 0;
38710Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC)
38720Sstevel@tonic-gate kma.kma_addr = addr;
38730Sstevel@tonic-gate else
38740Sstevel@tonic-gate kma.kma_addr = NULL;
38750Sstevel@tonic-gate
38760Sstevel@tonic-gate if (argc > 0) {
38770Sstevel@tonic-gate if (argv->a_type != MDB_TYPE_STRING)
38780Sstevel@tonic-gate return (DCMD_USAGE);
38790Sstevel@tonic-gate if (strcmp(argv->a_un.a_str, "fail") == 0)
38800Sstevel@tonic-gate logname = "kmem_failure_log";
38810Sstevel@tonic-gate else if (strcmp(argv->a_un.a_str, "slab") == 0)
38820Sstevel@tonic-gate logname = "kmem_slab_log";
38830Sstevel@tonic-gate else
38840Sstevel@tonic-gate return (DCMD_USAGE);
38850Sstevel@tonic-gate }
38860Sstevel@tonic-gate
38870Sstevel@tonic-gate if (mdb_readvar(&addr, logname) == -1) {
38880Sstevel@tonic-gate mdb_warn("failed to read %s log header pointer");
38890Sstevel@tonic-gate return (DCMD_ERR);
38900Sstevel@tonic-gate }
38910Sstevel@tonic-gate
38920Sstevel@tonic-gate if (mdb_pwalk("kmem_log", (mdb_walk_cb_t)showbc, &kma, addr) == -1) {
38930Sstevel@tonic-gate mdb_warn("failed to walk kmem log");
38940Sstevel@tonic-gate return (DCMD_ERR);
38950Sstevel@tonic-gate }
38960Sstevel@tonic-gate
38970Sstevel@tonic-gate return (DCMD_OK);
38980Sstevel@tonic-gate }
38990Sstevel@tonic-gate
39000Sstevel@tonic-gate /*
39010Sstevel@tonic-gate * As the final lure for die-hard crash(1M) users, we provide ::kmausers here.
39020Sstevel@tonic-gate * The first piece is a structure which we use to accumulate kmem_cache_t
39030Sstevel@tonic-gate * addresses of interest. The kmc_add is used as a callback for the kmem_cache
39040Sstevel@tonic-gate * walker; we either add all caches, or ones named explicitly as arguments.
39050Sstevel@tonic-gate */
39060Sstevel@tonic-gate
typedef struct kmclist {
	const char *kmc_name;	/* Cache name to match (NULL matches all) */
	uintptr_t *kmc_caches;	/* List of kmem_cache_t addrs */
	int kmc_nelems;		/* Num entries in kmc_caches */
	int kmc_size;		/* Size of kmc_caches array */
} kmclist_t;
39130Sstevel@tonic-gate
39140Sstevel@tonic-gate static int
kmc_add(uintptr_t addr,const kmem_cache_t * cp,kmclist_t * kmc)39150Sstevel@tonic-gate kmc_add(uintptr_t addr, const kmem_cache_t *cp, kmclist_t *kmc)
39160Sstevel@tonic-gate {
39170Sstevel@tonic-gate void *p;
39180Sstevel@tonic-gate int s;
39190Sstevel@tonic-gate
39200Sstevel@tonic-gate if (kmc->kmc_name == NULL ||
39210Sstevel@tonic-gate strcmp(cp->cache_name, kmc->kmc_name) == 0) {
39220Sstevel@tonic-gate /*
39230Sstevel@tonic-gate * If we have a match, grow our array (if necessary), and then
39240Sstevel@tonic-gate * add the virtual address of the matching cache to our list.
39250Sstevel@tonic-gate */
39260Sstevel@tonic-gate if (kmc->kmc_nelems >= kmc->kmc_size) {
39270Sstevel@tonic-gate s = kmc->kmc_size ? kmc->kmc_size * 2 : 256;
39280Sstevel@tonic-gate p = mdb_alloc(sizeof (uintptr_t) * s, UM_SLEEP | UM_GC);
39290Sstevel@tonic-gate
39300Sstevel@tonic-gate bcopy(kmc->kmc_caches, p,
39310Sstevel@tonic-gate sizeof (uintptr_t) * kmc->kmc_size);
39320Sstevel@tonic-gate
39330Sstevel@tonic-gate kmc->kmc_caches = p;
39340Sstevel@tonic-gate kmc->kmc_size = s;
39350Sstevel@tonic-gate }
39360Sstevel@tonic-gate
39370Sstevel@tonic-gate kmc->kmc_caches[kmc->kmc_nelems++] = addr;
39380Sstevel@tonic-gate return (kmc->kmc_name ? WALK_DONE : WALK_NEXT);
39390Sstevel@tonic-gate }
39400Sstevel@tonic-gate
39410Sstevel@tonic-gate return (WALK_NEXT);
39420Sstevel@tonic-gate }
39430Sstevel@tonic-gate
39440Sstevel@tonic-gate /*
39450Sstevel@tonic-gate * The second piece of ::kmausers is a hash table of allocations. Each
39460Sstevel@tonic-gate * allocation owner is identified by its stack trace and data_size. We then
39470Sstevel@tonic-gate * track the total bytes of all such allocations, and the number of allocations
39480Sstevel@tonic-gate * to report at the end. Once we have a list of caches, we walk through the
39490Sstevel@tonic-gate * allocated bufctls of each, and update our hash table accordingly.
39500Sstevel@tonic-gate */
39510Sstevel@tonic-gate
/*
 * One entry in the ::kmausers hash table: allocations are grouped by the
 * combination of stack trace and data_size, which together form the
 * entry's signature.
 */
typedef struct kmowner {
	struct kmowner *kmo_head;		/* First hash elt in bucket */
	struct kmowner *kmo_next;		/* Next hash elt in chain */
	size_t kmo_signature;			/* Hash table signature */
	uint_t kmo_num;				/* Number of allocations */
	size_t kmo_data_size;			/* Size of each allocation */
	size_t kmo_total_size;			/* Total bytes of allocation */
	int kmo_depth;				/* Depth of stack trace */
	uintptr_t kmo_stack[KMEM_STACK_DEPTH];	/* Stack trace */
} kmowner_t;
39620Sstevel@tonic-gate
/*
 * Accumulated ::kmausers state: the address filter, the cache currently
 * being walked, and the open hash table of kmowner_t entries.
 */
typedef struct kmusers {
	uintptr_t kmu_addr;		/* address of interest */
	const kmem_cache_t *kmu_cache;	/* Current kmem cache */
	kmowner_t *kmu_hash;		/* Hash table of owners */
	int kmu_nelems;			/* Number of entries in use */
	int kmu_size;			/* Total number of entries */
} kmusers_t;
39700Sstevel@tonic-gate
/*
 * Fold one audited allocation into the ::kmausers owner hash.  Owners
 * are keyed by a signature computed from data_size plus the sum of the
 * stack-trace PCs; an exact (size, depth, stack) match updates the
 * existing entry's stats, otherwise a fresh kmowner_t is appended.
 * All table memory is UM_GC, so nothing is ever freed here.
 */
static void
kmu_add(kmusers_t *kmu, const kmem_bufctl_audit_t *bcp,
    size_t size, size_t data_size)
{
	int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH);
	size_t bucket, signature = data_size;
	kmowner_t *kmo, *kmoend;

	/*
	 * If the hash table is full, double its size and rehash everything.
	 */
	if (kmu->kmu_nelems >= kmu->kmu_size) {
		/* initial table is 1024 entries; stays a power of two */
		int s = kmu->kmu_size ? kmu->kmu_size * 2 : 1024;

		kmo = mdb_alloc(sizeof (kmowner_t) * s, UM_SLEEP | UM_GC);
		bcopy(kmu->kmu_hash, kmo, sizeof (kmowner_t) * kmu->kmu_size);
		kmu->kmu_hash = kmo;
		kmu->kmu_size = s;

		/* clear every bucket head in the (larger) new table ... */
		kmoend = kmu->kmu_hash + kmu->kmu_size;
		for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++)
			kmo->kmo_head = NULL;

		/* ... then re-chain each live entry into its new bucket */
		kmoend = kmu->kmu_hash + kmu->kmu_nelems;
		for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++) {
			bucket = kmo->kmo_signature & (kmu->kmu_size - 1);
			kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head;
			kmu->kmu_hash[bucket].kmo_head = kmo;
		}
	}

	/*
	 * Finish computing the hash signature from the stack trace, and then
	 * see if the owner is in the hash table.  If so, update our stats.
	 */
	for (i = 0; i < depth; i++)
		signature += bcp->bc_stack[i];

	bucket = signature & (kmu->kmu_size - 1);

	for (kmo = kmu->kmu_hash[bucket].kmo_head; kmo; kmo = kmo->kmo_next) {
		if (kmo->kmo_signature == signature) {
			size_t difference = 0;

			/* OR all deltas together: zero iff an exact match */
			difference |= kmo->kmo_data_size - data_size;
			difference |= kmo->kmo_depth - depth;

			for (i = 0; i < depth; i++) {
				difference |= kmo->kmo_stack[i] -
				    bcp->bc_stack[i];
			}

			if (difference == 0) {
				kmo->kmo_total_size += size;
				kmo->kmo_num++;
				return;
			}
		}
	}

	/*
	 * If the owner is not yet hashed, grab the next element and fill it
	 * in based on the allocation information.
	 */
	kmo = &kmu->kmu_hash[kmu->kmu_nelems++];
	kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head;
	kmu->kmu_hash[bucket].kmo_head = kmo;

	kmo->kmo_signature = signature;
	kmo->kmo_num = 1;
	kmo->kmo_data_size = data_size;
	kmo->kmo_total_size = size;
	kmo->kmo_depth = depth;

	for (i = 0; i < depth; i++)
		kmo->kmo_stack[i] = bcp->bc_stack[i];
}
40480Sstevel@tonic-gate
40490Sstevel@tonic-gate /*
40500Sstevel@tonic-gate * When ::kmausers is invoked without the -f flag, we simply update our hash
40510Sstevel@tonic-gate * table with the information from each allocated bufctl.
40520Sstevel@tonic-gate */
40530Sstevel@tonic-gate /*ARGSUSED*/
40540Sstevel@tonic-gate static int
kmause1(uintptr_t addr,const kmem_bufctl_audit_t * bcp,kmusers_t * kmu)40550Sstevel@tonic-gate kmause1(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu)
40560Sstevel@tonic-gate {
40570Sstevel@tonic-gate const kmem_cache_t *cp = kmu->kmu_cache;
40580Sstevel@tonic-gate
40590Sstevel@tonic-gate kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize);
40600Sstevel@tonic-gate return (WALK_NEXT);
40610Sstevel@tonic-gate }
40620Sstevel@tonic-gate
/*
 * When ::kmausers is invoked with the -f flag, we print out the information
 * for each bufctl as well as updating the hash table.
 */
static int
kmause2(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu)
{
	int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH);
	const kmem_cache_t *cp = kmu->kmu_cache;
	kmem_bufctl_t bufctl;

	/*
	 * If an address of interest was given, skip any buffer that does
	 * not contain it.  An unreadable bufctl gets a warning but is
	 * still reported below (best effort).
	 */
	if (kmu->kmu_addr) {
		if (mdb_vread(&bufctl, sizeof (bufctl), addr) == -1)
			mdb_warn("couldn't read bufctl at %p", addr);
		else if (kmu->kmu_addr < (uintptr_t)bufctl.bc_addr ||
		    kmu->kmu_addr >= (uintptr_t)bufctl.bc_addr +
		    cp->cache_bufsize)
			return (WALK_NEXT);
	}

	mdb_printf("size %d, addr %p, thread %p, cache %s\n",
	    cp->cache_bufsize, addr, bcp->bc_thread, cp->cache_name);

	for (i = 0; i < depth; i++)
		mdb_printf("\t %a\n", bcp->bc_stack[i]);

	kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize);
	return (WALK_NEXT);
}
40920Sstevel@tonic-gate
40930Sstevel@tonic-gate /*
40940Sstevel@tonic-gate * We sort our results by allocation size before printing them.
40950Sstevel@tonic-gate */
40960Sstevel@tonic-gate static int
kmownercmp(const void * lp,const void * rp)40970Sstevel@tonic-gate kmownercmp(const void *lp, const void *rp)
40980Sstevel@tonic-gate {
40990Sstevel@tonic-gate const kmowner_t *lhs = lp;
41000Sstevel@tonic-gate const kmowner_t *rhs = rp;
41010Sstevel@tonic-gate
41020Sstevel@tonic-gate return (rhs->kmo_total_size - lhs->kmo_total_size);
41030Sstevel@tonic-gate }
41040Sstevel@tonic-gate
/*
 * The main engine of ::kmausers is relatively straightforward: First we
 * accumulate our list of kmem_cache_t addresses into the kmclist_t.  Next we
 * iterate over the allocated bufctls of each cache in the list.  Finally,
 * we sort and print our results.
 */
/*ARGSUSED*/
int
kmausers(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	int mem_threshold = 8192;	/* Minimum # bytes for printing */
	int cnt_threshold = 100;	/* Minimum # blocks for printing */
	int audited_caches = 0;		/* Number of KMF_AUDIT caches found */
	int do_all_caches = 1;		/* Do all caches (no arguments) */
	int opt_e = FALSE;		/* Include "small" users */
	int opt_f = FALSE;		/* Print stack traces */

	mdb_walk_cb_t callback = (mdb_walk_cb_t)kmause1;
	kmowner_t *kmo, *kmoend;
	int i, oelems;

	kmclist_t kmc;
	kmusers_t kmu;

	bzero(&kmc, sizeof (kmc));
	bzero(&kmu, sizeof (kmu));

	/*
	 * Parse the options; each remaining non-option argument is a cache
	 * name, resolved (possibly to several caches) via kmc_add.
	 */
	while ((i = mdb_getopts(argc, argv,
	    'e', MDB_OPT_SETBITS, TRUE, &opt_e,
	    'f', MDB_OPT_SETBITS, TRUE, &opt_f, NULL)) != argc) {

		argv += i;	/* skip past options we just processed */
		argc -= i;	/* adjust argc */

		if (argv->a_type != MDB_TYPE_STRING || *argv->a_un.a_str == '-')
			return (DCMD_USAGE);

		oelems = kmc.kmc_nelems;
		kmc.kmc_name = argv->a_un.a_str;
		(void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc);

		/* no new entries means the name matched no cache */
		if (kmc.kmc_nelems == oelems) {
			mdb_warn("unknown kmem cache: %s\n", kmc.kmc_name);
			return (DCMD_ERR);
		}

		do_all_caches = 0;
		argv++;
		argc--;
	}

	/* an explicit address implies -f (report matching allocations) */
	if (flags & DCMD_ADDRSPEC) {
		opt_f = TRUE;
		kmu.kmu_addr = addr;
	} else {
		kmu.kmu_addr = NULL;
	}

	if (opt_e)
		mem_threshold = cnt_threshold = 0;

	if (opt_f)
		callback = (mdb_walk_cb_t)kmause2;

	if (do_all_caches) {
		kmc.kmc_name = NULL; /* match all cache names */
		(void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc);
	}

	for (i = 0; i < kmc.kmc_nelems; i++) {
		uintptr_t cp = kmc.kmc_caches[i];
		kmem_cache_t c;

		if (mdb_vread(&c, sizeof (c), cp) == -1) {
			mdb_warn("failed to read cache at %p", cp);
			continue;
		}

		/* bufctl audit records exist only for KMF_AUDIT caches */
		if (!(c.cache_flags & KMF_AUDIT)) {
			if (!do_all_caches) {
				mdb_warn("KMF_AUDIT is not enabled for %s\n",
				    c.cache_name);
			}
			continue;
		}

		kmu.kmu_cache = &c;
		(void) mdb_pwalk("bufctl", callback, &kmu, cp);
		audited_caches++;
	}

	if (audited_caches == 0 && do_all_caches) {
		mdb_warn("KMF_AUDIT is not enabled for any caches\n");
		return (DCMD_ERR);
	}

	/* sort owners by total bytes, then print those over threshold */
	qsort(kmu.kmu_hash, kmu.kmu_nelems, sizeof (kmowner_t), kmownercmp);
	kmoend = kmu.kmu_hash + kmu.kmu_nelems;

	for (kmo = kmu.kmu_hash; kmo < kmoend; kmo++) {
		if (kmo->kmo_total_size < mem_threshold &&
		    kmo->kmo_num < cnt_threshold)
			continue;
		mdb_printf("%lu bytes for %u allocations with data size %lu:\n",
		    kmo->kmo_total_size, kmo->kmo_num, kmo->kmo_data_size);
		for (i = 0; i < kmo->kmo_depth; i++)
			mdb_printf("\t %a\n", kmo->kmo_stack[i]);
	}

	return (DCMD_OK);
}
42160Sstevel@tonic-gate
/*
 * Print the ::help text for the ::kmausers dcmd.
 */
void
kmausers_help(void)
{
	mdb_printf(
	    "Displays the largest users of the kmem allocator, sorted by \n"
	    "trace. If one or more caches is specified, only those caches\n"
	    "will be searched. By default, all caches are searched. If an\n"
	    "address is specified, then only those allocations which include\n"
	    "the given address are displayed. Specifying an address implies\n"
	    "-f.\n"
	    "\n"
	    "\t-e\tInclude all users, not just the largest\n"
	    "\t-f\tDisplay individual allocations. By default, users are\n"
	    "\t\tgrouped by stack\n");
}
42320Sstevel@tonic-gate
/*
 * Fetch the kernel's kmem_ready flag from the target.  Returns the flag
 * value, or -1 (with errno set by mdb_readvar) if it cannot be read.
 */
static int
kmem_ready_check(void)
{
	int kmem_ready;

	if (mdb_readvar(&kmem_ready, "kmem_ready") < 0)
		return (-1); /* errno is set for us */

	return (kmem_ready);
}
42430Sstevel@tonic-gate
42448721SJonathan.Adams@Sun.COM void
kmem_statechange(void)42458721SJonathan.Adams@Sun.COM kmem_statechange(void)
42460Sstevel@tonic-gate {
42471528Sjwadams static int been_ready = 0;
42481528Sjwadams
42491528Sjwadams if (been_ready)
42501528Sjwadams return;
42511528Sjwadams
42520Sstevel@tonic-gate if (kmem_ready_check() <= 0)
42530Sstevel@tonic-gate return;
42540Sstevel@tonic-gate
42551528Sjwadams been_ready = 1;
42560Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_init_walkers, NULL);
42570Sstevel@tonic-gate }
42580Sstevel@tonic-gate
/*
 * Module initialization: register the kmem_cache walker by hand, set up
 * the per-cache walkers if kmem is already ready, and register this
 * module's ::whatis handlers.
 */
void
kmem_init(void)
{
	mdb_walker_t w = {
		"kmem_cache", "walk list of kmem caches", kmem_cache_walk_init,
		list_walk_step, list_walk_fini
	};

	/*
	 * If kmem is ready, we'll need to invoke the kmem_cache walker
	 * immediately.  Walkers in the linkage structure won't be ready until
	 * _mdb_init returns, so we'll need to add this one manually.  If kmem
	 * is ready, we'll use the walker to initialize the caches.  If kmem
	 * isn't ready, we'll register a callback that will allow us to defer
	 * cache walking until it is.
	 */
	if (mdb_add_walker(&w) != 0) {
		mdb_warn("failed to add kmem_cache walker");
		return;
	}

	kmem_statechange();

	/* register our ::whatis handlers */
	mdb_whatis_register("modules", whatis_run_modules, NULL,
	    WHATIS_PRIO_EARLY, WHATIS_REG_NO_ID);
	mdb_whatis_register("threads", whatis_run_threads, NULL,
	    WHATIS_PRIO_EARLY, WHATIS_REG_NO_ID);
	mdb_whatis_register("pages", whatis_run_pages, NULL,
	    WHATIS_PRIO_EARLY, WHATIS_REG_NO_ID);
	mdb_whatis_register("kmem", whatis_run_kmem, NULL,
	    WHATIS_PRIO_ALLOCATOR, 0);
	mdb_whatis_register("vmem", whatis_run_vmem, NULL,
	    WHATIS_PRIO_ALLOCATOR, 0);
}
42940Sstevel@tonic-gate
/*
 * State passed to each ::whatthread walker callback.
 */
typedef struct whatthread {
	uintptr_t wt_target;	/* pointer value to search stacks for */
	int wt_verbose;		/* print stack location, not just thread */
} whatthread_t;
42990Sstevel@tonic-gate
43000Sstevel@tonic-gate static int
whatthread_walk_thread(uintptr_t addr,const kthread_t * t,whatthread_t * w)43010Sstevel@tonic-gate whatthread_walk_thread(uintptr_t addr, const kthread_t *t, whatthread_t *w)
43020Sstevel@tonic-gate {
43030Sstevel@tonic-gate uintptr_t current, data;
43040Sstevel@tonic-gate
43050Sstevel@tonic-gate if (t->t_stkbase == NULL)
43060Sstevel@tonic-gate return (WALK_NEXT);
43070Sstevel@tonic-gate
43080Sstevel@tonic-gate /*
43090Sstevel@tonic-gate * Warn about swapped out threads, but drive on anyway
43100Sstevel@tonic-gate */
43110Sstevel@tonic-gate if (!(t->t_schedflag & TS_LOAD)) {
43120Sstevel@tonic-gate mdb_warn("thread %p's stack swapped out\n", addr);
43130Sstevel@tonic-gate return (WALK_NEXT);
43140Sstevel@tonic-gate }
43150Sstevel@tonic-gate
43160Sstevel@tonic-gate /*
43170Sstevel@tonic-gate * Search the thread's stack for the given pointer. Note that it would
43180Sstevel@tonic-gate * be more efficient to follow ::kgrep's lead and read in page-sized
43190Sstevel@tonic-gate * chunks, but this routine is already fast and simple.
43200Sstevel@tonic-gate */
43210Sstevel@tonic-gate for (current = (uintptr_t)t->t_stkbase; current < (uintptr_t)t->t_stk;
43220Sstevel@tonic-gate current += sizeof (uintptr_t)) {
43230Sstevel@tonic-gate if (mdb_vread(&data, sizeof (data), current) == -1) {
43240Sstevel@tonic-gate mdb_warn("couldn't read thread %p's stack at %p",
43250Sstevel@tonic-gate addr, current);
43260Sstevel@tonic-gate return (WALK_ERR);
43270Sstevel@tonic-gate }
43280Sstevel@tonic-gate
43290Sstevel@tonic-gate if (data == w->wt_target) {
43300Sstevel@tonic-gate if (w->wt_verbose) {
43310Sstevel@tonic-gate mdb_printf("%p in thread %p's stack%s\n",
43320Sstevel@tonic-gate current, addr, stack_active(t, current));
43330Sstevel@tonic-gate } else {
43340Sstevel@tonic-gate mdb_printf("%#lr\n", addr);
43350Sstevel@tonic-gate return (WALK_NEXT);
43360Sstevel@tonic-gate }
43370Sstevel@tonic-gate }
43380Sstevel@tonic-gate }
43390Sstevel@tonic-gate
43400Sstevel@tonic-gate return (WALK_NEXT);
43410Sstevel@tonic-gate }
43420Sstevel@tonic-gate
43430Sstevel@tonic-gate int
whatthread(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)43440Sstevel@tonic-gate whatthread(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
43450Sstevel@tonic-gate {
43460Sstevel@tonic-gate whatthread_t w;
43470Sstevel@tonic-gate
43480Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC))
43490Sstevel@tonic-gate return (DCMD_USAGE);
43500Sstevel@tonic-gate
43510Sstevel@tonic-gate w.wt_verbose = FALSE;
43520Sstevel@tonic-gate w.wt_target = addr;
43530Sstevel@tonic-gate
43540Sstevel@tonic-gate if (mdb_getopts(argc, argv,
43550Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &w.wt_verbose, NULL) != argc)
43560Sstevel@tonic-gate return (DCMD_USAGE);
43570Sstevel@tonic-gate
43580Sstevel@tonic-gate if (mdb_walk("thread", (mdb_walk_cb_t)whatthread_walk_thread, &w)
43590Sstevel@tonic-gate == -1) {
43600Sstevel@tonic-gate mdb_warn("couldn't walk threads");
43610Sstevel@tonic-gate return (DCMD_ERR);
43620Sstevel@tonic-gate }
43630Sstevel@tonic-gate
43640Sstevel@tonic-gate return (DCMD_OK);
43650Sstevel@tonic-gate }
4366