/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <mdb/mdb_param.h>
#include <mdb/mdb_modapi.h>
#include <mdb/mdb_ks.h>
#include <sys/types.h>
#include <sys/memlist.h>
#include <sys/swap.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <vm/anon.h>
#include <vm/as.h>
#include <vm/page.h>
#include <sys/thread.h>
#include <sys/swap.h>
#include <sys/memlist.h>
#include <sys/vnode.h>
#include <vm/seg_map.h>
#include <vm/seg_vn.h>
#if defined(__i386) || defined(__amd64)
#include <sys/balloon_impl.h>
#endif

#include "avl.h"
#include "memory.h"

/*
 * Page walker.
 * By default, this will walk all pages in the system.  If given an
 * address, it will walk all pages belonging to the vnode at that
 * address.
 */
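
/*
 * For reference, a hypothetical mdb session using this walker (addresses
 * are illustrative, not taken from a real dump):
 *
 *	> ::walk page			(visit every page_t in the system)
 *	> <vnode addr>::walk page	(visit the pages of a single vnode)
 */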

/*
 * page_walk_data
 *
 * pw_hashleft is set to -1 when walking a vnode's pages, and holds the
 * number of hash locations remaining in the page hash table when
 * walking all pages.
 *
 * The astute reader will notice that pw_hashloc is only used when
 * reading all pages (to hold a pointer to our location in the page
 * hash table), and that pw_first is only used when reading the pages
 * belonging to a particular vnode (to hold a pointer to the first
 * page).  While these could be combined to be a single pointer, they
 * are left separate for clarity.
 */
typedef struct page_walk_data {
	long		pw_hashleft;
	void		**pw_hashloc;
	uintptr_t	pw_first;
} page_walk_data_t;

int
page_walk_init(mdb_walk_state_t *wsp)
{
	page_walk_data_t *pwd;
	void	**ptr;
	size_t	hashsz;
	vnode_t	vn;

	if (wsp->walk_addr == NULL) {

		/*
		 * Walk all pages
		 */

		if ((mdb_readvar(&ptr, "page_hash") == -1) ||
		    (mdb_readvar(&hashsz, "page_hashsz") == -1) ||
		    (ptr == NULL) || (hashsz == 0)) {
			mdb_warn("page_hash, page_hashsz not found or invalid");
			return (WALK_ERR);
		}

		/*
		 * Since we are walking all pages, initialize hashleft
		 * to be the remaining number of entries in the page
		 * hash.  hashloc is set to the start of the page hash
		 * table.  Setting the walk address to 0 indicates that
		 * we aren't currently following a hash chain, and that
		 * we need to scan the page hash table for a page.
		 */
		pwd = mdb_alloc(sizeof (page_walk_data_t), UM_SLEEP);
		pwd->pw_hashleft = hashsz;
		pwd->pw_hashloc = ptr;
		wsp->walk_addr = 0;
	} else {

		/*
		 * Walk just this vnode
		 */

		if (mdb_vread(&vn, sizeof (vnode_t), wsp->walk_addr) == -1) {
			mdb_warn("unable to read vnode_t at %#lx",
			    wsp->walk_addr);
			return (WALK_ERR);
		}

		/*
		 * We set hashleft to -1 to indicate that we are
		 * walking a vnode, and initialize first to 0 (it is
		 * used to terminate the walk, so it must not be set
		 * until after we have walked the first page).  The
		 * walk address is set to the first page.
		 */
		pwd = mdb_alloc(sizeof (page_walk_data_t), UM_SLEEP);
		pwd->pw_hashleft = -1;
		pwd->pw_first = 0;

		wsp->walk_addr = (uintptr_t)vn.v_pages;
	}

	wsp->walk_data = pwd;

	return (WALK_NEXT);
}

int
page_walk_step(mdb_walk_state_t *wsp)
{
	page_walk_data_t *pwd = wsp->walk_data;
	page_t	page;
	uintptr_t pp;

	pp = wsp->walk_addr;

	if (pwd->pw_hashleft < 0) {

		/* We're walking a vnode's pages */

		/*
		 * We are done if there are no pages to walk or if we
		 * have come back around to the first page; if we can't
		 * read the page we're looking at, bail out.
		 */
		if (pp == NULL || pp == pwd->pw_first)
			return (WALK_DONE);
		if (mdb_vread(&page, sizeof (page_t), pp) == -1) {
			mdb_warn("unable to read page_t at %#lx", pp);
			return (WALK_ERR);
		}

		/*
		 * Set the walk address to the next page, and if the
		 * first page hasn't been set yet (i.e. we are on the
		 * first page), set it.
		 */
		wsp->walk_addr = (uintptr_t)page.p_vpnext;
		if (pwd->pw_first == NULL)
			pwd->pw_first = pp;

	} else if (pwd->pw_hashleft > 0) {

		/* We're walking all pages */

		/*
		 * If pp (the walk address) is NULL, we scan through
		 * the page hash table until we find a page.
		 */
		if (pp == NULL) {

			/*
			 * Iterate through the page hash table until we
			 * find a page or reach the end.
			 */
			do {
				if (mdb_vread(&pp, sizeof (uintptr_t),
				    (uintptr_t)pwd->pw_hashloc) == -1) {
					mdb_warn("unable to read from %#p",
					    pwd->pw_hashloc);
					return (WALK_ERR);
				}
				pwd->pw_hashleft--;
				pwd->pw_hashloc++;
			} while (pwd->pw_hashleft && (pp == NULL));

			/*
			 * We've reached the end; exit.
			 */
			if (pp == NULL)
				return (WALK_DONE);
		}

		if (mdb_vread(&page, sizeof (page_t), pp) == -1) {
			mdb_warn("unable to read page_t at %#lx", pp);
			return (WALK_ERR);
		}

		/*
		 * Set the walk address to the next page.
		 */
		wsp->walk_addr = (uintptr_t)page.p_hash;

	} else {
		/* We've finished walking all pages. */
		return (WALK_DONE);
	}

	return (wsp->walk_callback(pp, &page, wsp->walk_cbdata));
}

void
page_walk_fini(mdb_walk_state_t *wsp)
{
	mdb_free(wsp->walk_data, sizeof (page_walk_data_t));
}

/*
 * allpages walks all pages in the system in the order they appear in
 * the memseg structures.
 */
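
/*
 * A hypothetical session (the walker takes no starting address):
 *
 *	> ::walk allpages | ::page
 */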

#define	PAGE_BUFFER	128

int
allpages_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr != 0) {
		mdb_warn("allpages only supports global walks.\n");
		return (WALK_ERR);
	}

	if (mdb_layered_walk("memseg", wsp) == -1) {
		mdb_warn("couldn't walk 'memseg'");
		return (WALK_ERR);
	}

	wsp->walk_data = mdb_alloc(sizeof (page_t) * PAGE_BUFFER, UM_SLEEP);
	return (WALK_NEXT);
}

int
allpages_walk_step(mdb_walk_state_t *wsp)
{
	const struct memseg *msp = wsp->walk_layer;
	page_t *buf = wsp->walk_data;
	size_t pg_read, i;
	size_t pg_num = msp->pages_end - msp->pages_base;
	const page_t *pg_addr = msp->pages;

	while (pg_num > 0) {
		pg_read = MIN(pg_num, PAGE_BUFFER);

		if (mdb_vread(buf, pg_read * sizeof (page_t),
		    (uintptr_t)pg_addr) == -1) {
			mdb_warn("can't read page_t's at %#lx", pg_addr);
			return (WALK_ERR);
		}
		for (i = 0; i < pg_read; i++) {
			int ret = wsp->walk_callback((uintptr_t)&pg_addr[i],
			    &buf[i], wsp->walk_cbdata);

			if (ret != WALK_NEXT)
				return (ret);
		}
		pg_num -= pg_read;
		pg_addr += pg_read;
	}

	return (WALK_NEXT);
}

void
allpages_walk_fini(mdb_walk_state_t *wsp)
{
	mdb_free(wsp->walk_data, sizeof (page_t) * PAGE_BUFFER);
}

/*
 * Hash table + LRU queue.
 * This table is used to cache recently read vnodes for the memstat
 * command, to reduce the number of mdb_vread calls.  This greatly
 * speeds up the memstat command on live, large-CPU-count systems.
 */

#define	VN_SMALL	401
#define	VN_LARGE	10007
#define	VN_HTABLE_KEY(p, hp)	((p) % ((hp)->vn_htable_buckets))
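
/*
 * For example (purely illustrative), with the default VN_LARGE table a
 * vnode pointer p lands in bucket (p % 10007); collisions are chained
 * through vn_h_next below.
 */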

struct vn_htable_list {
	uint_t vn_flag;				/* v_flag from vnode	*/
	uintptr_t vn_ptr;			/* pointer to vnode	*/
	struct vn_htable_list *vn_q_next;	/* queue next pointer	*/
	struct vn_htable_list *vn_q_prev;	/* queue prev pointer	*/
	struct vn_htable_list *vn_h_next;	/* hash table pointer	*/
};

/*
 * vn_q_first	-> points to the head of the queue: the vnode that was most
 *		   recently used
 * vn_q_last	-> points to the oldest (least recently used) vnode, which is
 *		   reused once a new vnode is read
 * vn_htable	-> hash table
 * vn_htable_buf -> contains htable objects
 * vn_htable_size -> total number of items in the hash table
 * vn_htable_buckets -> number of buckets in the hash table
 */
typedef struct vn_htable {
	struct vn_htable_list	*vn_q_first;
	struct vn_htable_list	*vn_q_last;
	struct vn_htable_list	**vn_htable;
	struct vn_htable_list	*vn_htable_buf;
	int			vn_htable_size;
	int			vn_htable_buckets;
} vn_htable_t;


/* allocate memory, initialize hash table and LRU queue */
static void
vn_htable_init(vn_htable_t *hp, size_t vn_size)
{
	int i;
	int htable_size = MAX(vn_size, VN_LARGE);

	if ((hp->vn_htable_buf = mdb_zalloc(sizeof (struct vn_htable_list)
	    * htable_size, UM_NOSLEEP|UM_GC)) == NULL) {
		htable_size = VN_SMALL;
		hp->vn_htable_buf = mdb_zalloc(sizeof (struct vn_htable_list)
		    * htable_size, UM_SLEEP|UM_GC);
	}

	hp->vn_htable = mdb_zalloc(sizeof (struct vn_htable_list *)
	    * htable_size, UM_SLEEP|UM_GC);

	hp->vn_q_first = &hp->vn_htable_buf[0];
	hp->vn_q_last = &hp->vn_htable_buf[htable_size - 1];
	hp->vn_q_first->vn_q_next = &hp->vn_htable_buf[1];
	hp->vn_q_last->vn_q_prev = &hp->vn_htable_buf[htable_size - 2];

	for (i = 1; i < (htable_size-1); i++) {
		hp->vn_htable_buf[i].vn_q_next = &hp->vn_htable_buf[i + 1];
		hp->vn_htable_buf[i].vn_q_prev = &hp->vn_htable_buf[i - 1];
	}

	hp->vn_htable_size = htable_size;
	hp->vn_htable_buckets = htable_size;
}


/*
 * Find the vnode whose address is ptr, and return its v_flag in vp->v_flag.
 * The function looks for the needed information in the following order:
 *
 * 1. check if ptr is the first entry in the queue
 * 2. check if ptr is in the hash table (if so, move it to the top of the
 *    queue)
 * 3. do mdb_vread, remove the last queue item from the queue and the hash
 *    table, insert the new information into the freed object, and put the
 *    object at the top of the queue.
 */
static int
vn_get(vn_htable_t *hp, struct vnode *vp, uintptr_t ptr)
{
	int hkey;
	struct vn_htable_list *hent, **htmp, *q_next, *q_prev;
	struct vn_htable_list *q_first = hp->vn_q_first;

	/* 1. vnode ptr is the first in queue, just get v_flag and return */
	if (q_first->vn_ptr == ptr) {
		vp->v_flag = q_first->vn_flag;

		return (0);
	}

	/* 2. search the hash table for this ptr */
	hkey = VN_HTABLE_KEY(ptr, hp);
	hent = hp->vn_htable[hkey];
	while (hent && (hent->vn_ptr != ptr))
		hent = hent->vn_h_next;

	/* 3. if hent is NULL, we did not find in hash table, do mdb_vread */
	if (hent == NULL) {
		struct vnode vn;

		if (mdb_vread(&vn, sizeof (vnode_t), ptr) == -1) {
			mdb_warn("unable to read vnode_t at %#lx", ptr);
			return (-1);
		}

		/* we will insert read data into the last element in queue */
		hent = hp->vn_q_last;

		/* remove last hp->vn_q_last object from hash table */
		if (hent->vn_ptr) {
			htmp = &hp->vn_htable[VN_HTABLE_KEY(hent->vn_ptr, hp)];
			while (*htmp != hent)
				htmp = &(*htmp)->vn_h_next;
			*htmp = hent->vn_h_next;
		}

		/* insert data into new free object */
		hent->vn_ptr = ptr;
		hent->vn_flag = vn.v_flag;

		/* insert new object into hash table */
		hent->vn_h_next = hp->vn_htable[hkey];
		hp->vn_htable[hkey] = hent;
	}

	/* Remove from queue. hent is not first, vn_q_prev is not NULL */
	q_next = hent->vn_q_next;
	q_prev = hent->vn_q_prev;
	if (q_next == NULL)
		hp->vn_q_last = q_prev;
	else
		q_next->vn_q_prev = q_prev;
	q_prev->vn_q_next = q_next;

	/* Add to the front of queue */
	hent->vn_q_prev = NULL;
	hent->vn_q_next = q_first;
	q_first->vn_q_prev = hent;
	hp->vn_q_first = hent;

	/* Set v_flag in vnode pointer from hent */
	vp->v_flag = hent->vn_flag;

	return (0);
}

/* Summary statistics of pages */
typedef struct memstat {
	struct vnode	*ms_kvp;	/* Cached address of kernel vnode */
	struct vnode	*ms_unused_vp;	/* Unused pages vnode pointer	  */
	struct vnode	*ms_zvp;	/* Cached address of zio vnode    */
	uint64_t	ms_kmem;	/* Pages of kernel memory	  */
	uint64_t	ms_zfs_data;	/* Pages of zfs data		  */
	uint64_t	ms_anon;	/* Pages of anonymous memory	  */
	uint64_t	ms_vnode;	/* Pages of named (vnode) memory  */
	uint64_t	ms_exec;	/* Pages of exec/library memory	  */
	uint64_t	ms_cachelist;	/* Pages on the cachelist (free)  */
	uint64_t	ms_total;	/* Pages on page hash		  */
	vn_htable_t	*ms_vn_htable;	/* Pointer to hash table	  */
	struct vnode	ms_vn;		/* vnode buffer			  */
} memstat_t;

#define	MS_PP_ISKAS(pp, stats)				\
	((pp)->p_vnode == (stats)->ms_kvp)

#define	MS_PP_ISZFS_DATA(pp, stats)			\
	(((stats)->ms_zvp != NULL) && ((pp)->p_vnode == (stats)->ms_zvp))

/*
 * Summarize pages by type and update stat information
 */

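/*
 * Roughly, each page is classified in the order tested below: pages with no
 * vnode (or the unused-pages vnode) are skipped; kernel and zio (ZFS data)
 * pages are counted by vnode identity; free pages go to the cachelist count;
 * swapfs-backed pages are anon; VVMEXEC vnodes are exec/libraries; and
 * everything else is ordinary (page cache) vnode memory.
 */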
/* ARGSUSED */
static int
memstat_callback(page_t *page, page_t *pp, memstat_t *stats)
{
	struct vnode *vp = &stats->ms_vn;

	if (pp->p_vnode == NULL || pp->p_vnode == stats->ms_unused_vp)
		return (WALK_NEXT);
	else if (MS_PP_ISKAS(pp, stats))
		stats->ms_kmem++;
	else if (MS_PP_ISZFS_DATA(pp, stats))
		stats->ms_zfs_data++;
	else if (PP_ISFREE(pp))
		stats->ms_cachelist++;
	else if (vn_get(stats->ms_vn_htable, vp, (uintptr_t)pp->p_vnode))
		return (WALK_ERR);
	else if (IS_SWAPFSVP(vp))
		stats->ms_anon++;
	else if ((vp->v_flag & VVMEXEC) != 0)
		stats->ms_exec++;
	else
		stats->ms_vnode++;

	stats->ms_total++;

	return (WALK_NEXT);
}

/* ARGSUSED */
int
memstat(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pgcnt_t total_pages, physmem;
	ulong_t freemem;
	memstat_t stats;
	GElf_Sym sym;
	vn_htable_t ht;
	struct vnode *kvps;
	uintptr_t vn_size = 0;
#if defined(__i386) || defined(__amd64)
	bln_stats_t bln_stats;
	ssize_t bln_size;
#endif

	bzero(&stats, sizeof (memstat_t));

	/*
	 * -s size is an internal option.  It specifies the size of vn_htable.
	 * The hash table size is chosen as follows: if the user specified a
	 * size larger than VN_LARGE, try that, falling back to VN_SMALL if
	 * the allocation fails.  Otherwise try VN_LARGE, again falling back
	 * to VN_SMALL if that allocation fails.
	 * For better hash table efficiency it is highly recommended to use
	 * a prime number for the size.
	 */
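	/*
	 * A hypothetical invocation, for reference:
	 *   > ::memstat
	 *   > ::memstat -s 0t10007	(0t marks a decimal value in mdb)
	 */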
	if ((flags & DCMD_ADDRSPEC) || mdb_getopts(argc, argv,
	    's', MDB_OPT_UINTPTR, &vn_size, NULL) != argc)
		return (DCMD_USAGE);

	/* Initialize vnode hash list and queue */
	vn_htable_init(&ht, vn_size);
	stats.ms_vn_htable = &ht;

	/* Total physical memory */
	if (mdb_readvar(&total_pages, "total_pages") == -1) {
		mdb_warn("unable to read total_pages");
		return (DCMD_ERR);
	}

	/* Artificially limited memory */
	if (mdb_readvar(&physmem, "physmem") == -1) {
		mdb_warn("unable to read physmem");
		return (DCMD_ERR);
	}

	/* read kernel vnode array pointer */
	if (mdb_lookup_by_obj(MDB_OBJ_EXEC, "kvps",
	    (GElf_Sym *)&sym) == -1) {
		mdb_warn("unable to read kvps");
		return (DCMD_ERR);
	}
	kvps = (struct vnode *)(uintptr_t)sym.st_value;
	stats.ms_kvp = &kvps[KV_KVP];

	/*
	 * Read the zio vnode pointer.
	 */
	stats.ms_zvp = &kvps[KV_ZVP];

	/*
	 * If physmem != total_pages, then the administrator has limited the
	 * number of pages available in the system.  Excluded pages are
	 * associated with the unused pages vnode.  Read this vnode so the
	 * pages can be excluded in the page accounting.
	 */
	if (mdb_lookup_by_obj(MDB_OBJ_EXEC, "unused_pages_vp",
	    (GElf_Sym *)&sym) == -1) {
		mdb_warn("unable to read unused_pages_vp");
		return (DCMD_ERR);
	}
	stats.ms_unused_vp = (struct vnode *)(uintptr_t)sym.st_value;

	/* walk all pages, collect statistics */
	if (mdb_walk("allpages", (mdb_walk_cb_t)memstat_callback,
	    &stats) == -1) {
		mdb_warn("can't walk memseg");
		return (DCMD_ERR);
	}

#define	MS_PCT_TOTAL(x)	((ulong_t)((((5 * total_pages) + ((x) * 1000ull))) / \
		((physmem) * 10)))
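
/*
 * MS_PCT_TOTAL(x) reports x as a percentage of physmem; the 5 * total_pages
 * term rounds to the nearest percent (it contributes roughly 0.5% when
 * total_pages and physmem are equal, which is the common case).
 */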

	mdb_printf("Page Summary                Pages                MB"
	    "  %%Tot\n");
	mdb_printf("------------     ----------------  ----------------"
	    "  ----\n");
	mdb_printf("Kernel           %16llu  %16llu  %3lu%%\n",
	    stats.ms_kmem,
	    (uint64_t)stats.ms_kmem * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_kmem));

	if (stats.ms_zfs_data != 0)
		mdb_printf("ZFS File Data    %16llu  %16llu  %3lu%%\n",
		    stats.ms_zfs_data,
		    (uint64_t)stats.ms_zfs_data * PAGESIZE / (1024 * 1024),
		    MS_PCT_TOTAL(stats.ms_zfs_data));

	mdb_printf("Anon             %16llu  %16llu  %3lu%%\n",
	    stats.ms_anon,
	    (uint64_t)stats.ms_anon * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_anon));
	mdb_printf("Exec and libs    %16llu  %16llu  %3lu%%\n",
	    stats.ms_exec,
	    (uint64_t)stats.ms_exec * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_exec));
	mdb_printf("Page cache       %16llu  %16llu  %3lu%%\n",
	    stats.ms_vnode,
	    (uint64_t)stats.ms_vnode * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_vnode));
	mdb_printf("Free (cachelist) %16llu  %16llu  %3lu%%\n",
	    stats.ms_cachelist,
	    (uint64_t)stats.ms_cachelist * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_cachelist));

	/*
	 * occasionally, we double count pages above.  To avoid printing
	 * absurdly large values for freemem, we clamp it at zero.
	 */
	if (physmem > stats.ms_total)
		freemem = physmem - stats.ms_total;
	else
		freemem = 0;

#if defined(__i386) || defined(__amd64)
	/* Are we running under Xen?  If so, get balloon memory usage. */
	if ((bln_size = mdb_readvar(&bln_stats, "bln_stats")) != -1) {
		if (freemem > bln_stats.bln_hv_pages)
			freemem -= bln_stats.bln_hv_pages;
		else
			freemem = 0;
	}
#endif

	mdb_printf("Free (freelist)  %16lu  %16llu  %3lu%%\n", freemem,
	    (uint64_t)freemem * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(freemem));

#if defined(__i386) || defined(__amd64)
	if (bln_size != -1) {
		mdb_printf("Balloon          %16lu  %16llu  %3lu%%\n",
		    bln_stats.bln_hv_pages,
		    (uint64_t)bln_stats.bln_hv_pages * PAGESIZE / (1024 * 1024),
		    MS_PCT_TOTAL(bln_stats.bln_hv_pages));
	}
#endif

	mdb_printf("\nTotal            %16lu  %16lu\n",
	    physmem,
	    (uint64_t)physmem * PAGESIZE / (1024 * 1024));

	if (physmem != total_pages) {
		mdb_printf("Physical         %16lu  %16lu\n",
		    total_pages,
		    (uint64_t)total_pages * PAGESIZE / (1024 * 1024));
	}

#undef MS_PCT_TOTAL

	return (DCMD_OK);
}

void
pagelookup_help(void)
{
	mdb_printf(
	    "Finds the page with name { %<b>vp%</b>, %<b>offset%</b> }.\n"
	    "\n"
	    "Can be invoked three different ways:\n\n"
	    "    ::pagelookup -v %<b>vp%</b> -o %<b>offset%</b>\n"
	    "    %<b>vp%</b>::pagelookup -o %<b>offset%</b>\n"
	    "    %<b>offset%</b>::pagelookup -v %<b>vp%</b>\n"
	    "\n"
	    "The latter two forms are useful in pipelines.\n");
}

int
pagelookup(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t vp = -(uintptr_t)1;
	uint64_t offset = -(uint64_t)1;

	uintptr_t pageaddr;
	int hasaddr = (flags & DCMD_ADDRSPEC);
	int usedaddr = 0;

	if (mdb_getopts(argc, argv,
	    'v', MDB_OPT_UINTPTR, &vp,
	    'o', MDB_OPT_UINT64, &offset,
	    0) != argc) {
		return (DCMD_USAGE);
	}

	if (vp == -(uintptr_t)1) {
		if (offset == -(uint64_t)1) {
			mdb_warn(
			    "pagelookup: at least one of -v vp or -o offset "
			    "required.\n");
			return (DCMD_USAGE);
		}
		vp = addr;
		usedaddr = 1;
	} else if (offset == -(uint64_t)1) {
		offset = mdb_get_dot();
		usedaddr = 1;
	}
	if (usedaddr && !hasaddr) {
		mdb_warn("pagelookup: address required\n");
		return (DCMD_USAGE);
	}
	if (!usedaddr && hasaddr) {
		mdb_warn(
		    "pagelookup: address specified when both -v and -o were "
		    "passed");
		return (DCMD_USAGE);
	}

	pageaddr = mdb_page_lookup(vp, offset);
	if (pageaddr == 0) {
		mdb_warn("pagelookup: no page for {vp = %p, offset = %llp}\n",
		    vp, offset);
		return (DCMD_OK);
	}
	mdb_printf("%#lr\n", pageaddr);		/* this is PIPE_OUT friendly */
	return (DCMD_OK);
}

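/*
 * ::page_num2pp translates a page frame number (pfn) into the address of
 * its page_t.  A hypothetical use:
 *   > 1000::page_num2pp
 */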
/*ARGSUSED*/
int
page_num2pp(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t pp;

	if (argc != 0 || !(flags & DCMD_ADDRSPEC)) {
		return (DCMD_USAGE);
	}

	pp = mdb_pfn2page((pfn_t)addr);
	if (pp == 0) {
		return (DCMD_ERR);
	}

	if (flags & DCMD_PIPE_OUT) {
		mdb_printf("%#lr\n", pp);
	} else {
		mdb_printf("%lx has page_t at %#lx\n", (pfn_t)addr, pp);
	}

	return (DCMD_OK);
}

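/*
 * ::page pretty-prints a page_t.  With no address it walks and prints every
 * page in the system.  Hypothetical examples:
 *   > ::page
 *   > <page addr>::page
 */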
int
page(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	page_t	p;

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_walk_dcmd("page", "page", argc, argv) == -1) {
			mdb_warn("can't walk pages");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags)) {
		mdb_printf("%<u>%?s %?s %16s %8s %3s %3s %2s %2s %2s%</u>\n",
		    "PAGE", "VNODE", "OFFSET", "SELOCK",
		    "LCT", "COW", "IO", "FS", "ST");
	}

	if (mdb_vread(&p, sizeof (page_t), addr) == -1) {
		mdb_warn("can't read page_t at %#lx", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%0?lx %?p %16llx %8x %3d %3d %2x %2x %2x\n",
	    addr, p.p_vnode, p.p_offset, p.p_selock, p.p_lckcnt, p.p_cowcnt,
	    p.p_iolock_state, p.p_fsdata, p.p_state);

	return (DCMD_OK);
}

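/*
 * The swapinfo walker iterates the kernel's list of swap devices/files,
 * starting at the global "swapinfo" pointer; ::swapinfo formats each entry.
 * A hypothetical session:
 *   > ::swapinfo
 */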
int
swap_walk_init(mdb_walk_state_t *wsp)
{
	void	*ptr;

	if ((mdb_readvar(&ptr, "swapinfo") == -1) || ptr == NULL) {
		mdb_warn("swapinfo not found or invalid");
		return (WALK_ERR);
	}

	wsp->walk_addr = (uintptr_t)ptr;

	return (WALK_NEXT);
}

int
swap_walk_step(mdb_walk_state_t *wsp)
{
	uintptr_t	sip;
	struct swapinfo	si;

	sip = wsp->walk_addr;

	if (sip == NULL)
		return (WALK_DONE);

	if (mdb_vread(&si, sizeof (struct swapinfo), sip) == -1) {
		mdb_warn("unable to read swapinfo at %#lx", sip);
		return (WALK_ERR);
	}

	wsp->walk_addr = (uintptr_t)si.si_next;

	return (wsp->walk_callback(sip, &si, wsp->walk_cbdata));
}

int
swapinfof(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct swapinfo	si;
	char		*name;

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_walk_dcmd("swapinfo", "swapinfo", argc, argv) == -1) {
			mdb_warn("can't walk swapinfo");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags)) {
		mdb_printf("%<u>%?s %?s %9s %9s %s%</u>\n",
		    "ADDR", "VNODE", "PAGES", "FREE", "NAME");
	}

	if (mdb_vread(&si, sizeof (struct swapinfo), addr) == -1) {
		mdb_warn("can't read swapinfo at %#lx", addr);
		return (DCMD_ERR);
	}

	name = mdb_alloc(si.si_pnamelen, UM_SLEEP | UM_GC);
	if (mdb_vread(name, si.si_pnamelen, (uintptr_t)si.si_pname) == -1)
		name = "*error*";

	mdb_printf("%0?lx %?p %9d %9d %s\n",
	    addr, si.si_vp, si.si_npgs, si.si_nfpgs, name);

	return (DCMD_OK);
}

int
memlist_walk_step(mdb_walk_state_t *wsp)
{
	uintptr_t	mlp;
	struct memlist	ml;

	mlp = wsp->walk_addr;

	if (mlp == NULL)
		return (WALK_DONE);

	if (mdb_vread(&ml, sizeof (struct memlist), mlp) == -1) {
		mdb_warn("unable to read memlist at %#lx", mlp);
		return (WALK_ERR);
	}

	wsp->walk_addr = (uintptr_t)ml.ml_next;

	return (wsp->walk_callback(mlp, &ml, wsp->walk_cbdata));
}

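/*
 * ::memlist displays one memlist given its address or, with no address,
 * dumps the well-known lists selected by -i (phys_install), -a (phys_avail),
 * and -v (virt_avail); phys_install is the default.  A hypothetical session:
 *   > ::memlist -a
 */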
int
memlist(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct memlist	ml;

	if (!(flags & DCMD_ADDRSPEC)) {
		uintptr_t ptr;
		uint_t list = 0;
		int i;
		static const char *lists[] = {
			"phys_install",
			"phys_avail",
			"virt_avail"
		};

		if (mdb_getopts(argc, argv,
		    'i', MDB_OPT_SETBITS, (1 << 0), &list,
		    'a', MDB_OPT_SETBITS, (1 << 1), &list,
		    'v', MDB_OPT_SETBITS, (1 << 2), &list, NULL) != argc)
			return (DCMD_USAGE);

		if (!list)
			list = 1;

		for (i = 0; list; i++, list >>= 1) {
			if (!(list & 1))
				continue;
			if ((mdb_readvar(&ptr, lists[i]) == -1) ||
			    (ptr == NULL)) {
				mdb_warn("%s not found or invalid", lists[i]);
				return (DCMD_ERR);
			}

			mdb_printf("%s:\n", lists[i]);
			if (mdb_pwalk_dcmd("memlist", "memlist", 0, NULL,
			    ptr) == -1) {
				mdb_warn("can't walk memlist");
				return (DCMD_ERR);
			}
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags))
		mdb_printf("%<u>%?s %16s %16s%</u>\n", "ADDR", "BASE", "SIZE");

	if (mdb_vread(&ml, sizeof (struct memlist), addr) == -1) {
		mdb_warn("can't read memlist at %#lx", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%0?lx %16llx %16llx\n", addr, ml.ml_address, ml.ml_size);

	return (DCMD_OK);
}

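/*
 * The seg walker visits the segments of an address space; it must be started
 * from a struct as.  ::seg prints one segment.  A hypothetical pipeline:
 *   > <as addr>::walk seg | ::seg
 */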
int
seg_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL) {
		mdb_warn("seg walk must begin at struct as *\n");
		return (WALK_ERR);
	}

	/*
	 * this is really just a wrapper to AVL tree walk
	 */
	wsp->walk_addr = (uintptr_t)&((struct as *)wsp->walk_addr)->a_segtree;
	return (avl_walk_init(wsp));
}

/*ARGSUSED*/
int
seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct seg s;

	if (argc != 0)
		return (DCMD_USAGE);

	if ((flags & DCMD_LOOPFIRST) || !(flags & DCMD_LOOP)) {
		mdb_printf("%<u>%?s %?s %?s %?s %s%</u>\n",
		    "SEG", "BASE", "SIZE", "DATA", "OPS");
	}

	if (mdb_vread(&s, sizeof (s), addr) == -1) {
		mdb_warn("failed to read seg at %p", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%?p %?p %?lx %?p %a\n",
	    addr, s.s_base, s.s_size, s.s_data, s.s_ops);

	return (DCMD_OK);
}

/*ARGSUSED*/
static int
pmap_walk_count_pages(uintptr_t addr, const void *data, void *out)
{
	pgcnt_t *nres = out;

	(*nres)++;

	return (WALK_NEXT);
}

static int
pmap_walk_seg(uintptr_t addr, const struct seg *seg, uintptr_t segvn)
{

	mdb_printf("%0?p %0?p %7dk", addr, seg->s_base, seg->s_size / 1024);

	if (segvn == (uintptr_t)seg->s_ops && seg->s_data != NULL) {
		struct segvn_data svn;
		pgcnt_t nres = 0;

		svn.vp = NULL;
		(void) mdb_vread(&svn, sizeof (svn), (uintptr_t)seg->s_data);

		/*
		 * Use the segvn_pages walker to find all of the in-core pages
		 * for this mapping.
		 */
		if (mdb_pwalk("segvn_pages", pmap_walk_count_pages, &nres,
		    (uintptr_t)seg->s_data) == -1) {
			mdb_warn("failed to walk segvn_pages (s_data=%p)",
			    seg->s_data);
		}
		mdb_printf(" %7ldk", (nres * PAGESIZE) / 1024);

		if (svn.vp != NULL) {
			char buf[29];

			mdb_vnode2path((uintptr_t)svn.vp, buf, sizeof (buf));
			mdb_printf(" %s", buf);
		} else {
			mdb_printf(" [ anon ]");
		}
	} else {
		mdb_printf(" %8s [ &%a ]", "?", seg->s_ops);
	}

	mdb_printf("\n");
	return (WALK_NEXT);
}

static int
pmap_walk_seg_quick(uintptr_t addr, const struct seg *seg, uintptr_t segvn)
{
	mdb_printf("%0?p %0?p %7dk", addr, seg->s_base, seg->s_size / 1024);

	if (segvn == (uintptr_t)seg->s_ops && seg->s_data != NULL) {
		struct segvn_data svn;

		svn.vp = NULL;
		(void) mdb_vread(&svn, sizeof (svn), (uintptr_t)seg->s_data);

		if (svn.vp != NULL) {
			mdb_printf(" %0?p", svn.vp);
		} else {
			mdb_printf(" [ anon ]");
		}
	} else {
		mdb_printf(" [ &%a ]", seg->s_ops);
	}

	mdb_printf("\n");
	return (WALK_NEXT);
}

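/*
 * ::pmap prints the segment mappings of a process, given a proc_t address.
 * -q skips the resident-size and path lookups for a faster summary.
 * A hypothetical use:
 *   > <proc addr>::pmap -q
 */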
/*ARGSUSED*/
int
pmap(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t segvn;
	proc_t proc;
	uint_t quick = FALSE;
	mdb_walk_cb_t cb = (mdb_walk_cb_t)pmap_walk_seg;

	GElf_Sym sym;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'q', MDB_OPT_SETBITS, TRUE, &quick, NULL) != argc)
		return (DCMD_USAGE);

	if (mdb_vread(&proc, sizeof (proc), addr) == -1) {
		mdb_warn("failed to read proc at %p", addr);
		return (DCMD_ERR);
	}

	if (mdb_lookup_by_name("segvn_ops", &sym) == 0)
		segvn = (uintptr_t)sym.st_value;
	else
		segvn = NULL;

	mdb_printf("%?s %?s %8s ", "SEG", "BASE", "SIZE");

	if (quick) {
		mdb_printf("VNODE\n");
		cb = (mdb_walk_cb_t)pmap_walk_seg_quick;
	} else {
		mdb_printf("%8s %s\n", "RES", "PATH");
	}

	if (mdb_pwalk("seg", cb, (void *)segvn, (uintptr_t)proc.p_as) == -1) {
		mdb_warn("failed to walk segments of as %p", proc.p_as);
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

typedef struct anon_walk_data {
	uintptr_t *aw_levone;
	uintptr_t *aw_levtwo;
	size_t aw_minslot;
	size_t aw_maxslot;
	pgcnt_t aw_nlevone;
	pgcnt_t aw_levone_ndx;
	size_t aw_levtwo_ndx;
	struct anon_map	*aw_ampp;
	struct anon_map	aw_amp;
	struct anon_hdr	aw_ahp;
	int		aw_all;	/* report all anon pointers, even NULLs */
} anon_walk_data_t;

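/*
 * The anon array backing an anon_map is either a flat array of anon pointers
 * or, for large maps, a two-level structure of ANON_CHUNK_SIZE chunks.  The
 * walker below handles both layouts: it reads the whole level-one array up
 * front and, for the chunked layout, allocates room to buffer one level-two
 * chunk at a time.
 */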
110011459SJonathan.Adams@Sun.COM int
anon_walk_init_common(mdb_walk_state_t * wsp,ulong_t minslot,ulong_t maxslot)1101*13078SJonathan.Adams@Sun.COM anon_walk_init_common(mdb_walk_state_t *wsp, ulong_t minslot, ulong_t maxslot)
110211459SJonathan.Adams@Sun.COM {
110311459SJonathan.Adams@Sun.COM anon_walk_data_t *aw;
110411459SJonathan.Adams@Sun.COM
110511459SJonathan.Adams@Sun.COM if (wsp->walk_addr == NULL) {
110611459SJonathan.Adams@Sun.COM mdb_warn("anon walk doesn't support global walks\n");
110711459SJonathan.Adams@Sun.COM return (WALK_ERR);
110811459SJonathan.Adams@Sun.COM }
110911459SJonathan.Adams@Sun.COM
111011459SJonathan.Adams@Sun.COM aw = mdb_alloc(sizeof (anon_walk_data_t), UM_SLEEP);
1111*13078SJonathan.Adams@Sun.COM aw->aw_ampp = (struct anon_map *)wsp->walk_addr;
111211459SJonathan.Adams@Sun.COM
111311459SJonathan.Adams@Sun.COM if (mdb_vread(&aw->aw_amp, sizeof (aw->aw_amp), wsp->walk_addr) == -1) {
111411459SJonathan.Adams@Sun.COM mdb_warn("failed to read anon map at %p", wsp->walk_addr);
111511459SJonathan.Adams@Sun.COM mdb_free(aw, sizeof (anon_walk_data_t));
111611459SJonathan.Adams@Sun.COM return (WALK_ERR);
111711459SJonathan.Adams@Sun.COM }
111811459SJonathan.Adams@Sun.COM
111911459SJonathan.Adams@Sun.COM if (mdb_vread(&aw->aw_ahp, sizeof (aw->aw_ahp),
112011459SJonathan.Adams@Sun.COM (uintptr_t)(aw->aw_amp.ahp)) == -1) {
112111459SJonathan.Adams@Sun.COM mdb_warn("failed to read anon hdr ptr at %p", aw->aw_amp.ahp);
112211459SJonathan.Adams@Sun.COM mdb_free(aw, sizeof (anon_walk_data_t));
112311459SJonathan.Adams@Sun.COM return (WALK_ERR);
112411459SJonathan.Adams@Sun.COM }
112511459SJonathan.Adams@Sun.COM
1126*13078SJonathan.Adams@Sun.COM /* clamp minslot and maxslot to the actual size of the anon array */
1127*13078SJonathan.Adams@Sun.COM maxslot = MIN(maxslot, aw->aw_ahp.size);
1128*13078SJonathan.Adams@Sun.COM minslot = MIN(minslot, maxslot);
1129*13078SJonathan.Adams@Sun.COM
113011459SJonathan.Adams@Sun.COM if (aw->aw_ahp.size <= ANON_CHUNK_SIZE ||
113111459SJonathan.Adams@Sun.COM (aw->aw_ahp.flags & ANON_ALLOC_FORCE)) {
1132*13078SJonathan.Adams@Sun.COM aw->aw_nlevone = maxslot;
1133*13078SJonathan.Adams@Sun.COM aw->aw_levone_ndx = minslot;
113411459SJonathan.Adams@Sun.COM aw->aw_levtwo = NULL;
113511459SJonathan.Adams@Sun.COM } else {
113611459SJonathan.Adams@Sun.COM aw->aw_nlevone =
1137*13078SJonathan.Adams@Sun.COM (maxslot + ANON_CHUNK_OFF) >> ANON_CHUNK_SHIFT;
1138*13078SJonathan.Adams@Sun.COM aw->aw_levone_ndx = 0;
113911459SJonathan.Adams@Sun.COM aw->aw_levtwo =
114011459SJonathan.Adams@Sun.COM mdb_zalloc(ANON_CHUNK_SIZE * sizeof (uintptr_t), UM_SLEEP);
114111459SJonathan.Adams@Sun.COM }
114211459SJonathan.Adams@Sun.COM
114311459SJonathan.Adams@Sun.COM aw->aw_levone =
114411459SJonathan.Adams@Sun.COM mdb_alloc(aw->aw_nlevone * sizeof (uintptr_t), UM_SLEEP);
1145*13078SJonathan.Adams@Sun.COM aw->aw_all = (wsp->walk_arg == ANON_WALK_ALL);
114611459SJonathan.Adams@Sun.COM
114711459SJonathan.Adams@Sun.COM mdb_vread(aw->aw_levone, aw->aw_nlevone * sizeof (uintptr_t),
114811459SJonathan.Adams@Sun.COM (uintptr_t)aw->aw_ahp.array_chunk);
114911459SJonathan.Adams@Sun.COM
1150*13078SJonathan.Adams@Sun.COM aw->aw_levtwo_ndx = 0;
1151*13078SJonathan.Adams@Sun.COM aw->aw_minslot = minslot;
1152*13078SJonathan.Adams@Sun.COM aw->aw_maxslot = maxslot;
115311459SJonathan.Adams@Sun.COM
115411459SJonathan.Adams@Sun.COM out:
115511459SJonathan.Adams@Sun.COM wsp->walk_data = aw;
115611459SJonathan.Adams@Sun.COM return (0);
115711459SJonathan.Adams@Sun.COM }
115811459SJonathan.Adams@Sun.COM
115911459SJonathan.Adams@Sun.COM int
116011459SJonathan.Adams@Sun.COM anon_walk_step(mdb_walk_state_t *wsp)
116111459SJonathan.Adams@Sun.COM {
116211459SJonathan.Adams@Sun.COM anon_walk_data_t *aw = (anon_walk_data_t *)wsp->walk_data;
116311459SJonathan.Adams@Sun.COM struct anon anon;
116411459SJonathan.Adams@Sun.COM uintptr_t anonptr;
1165*13078SJonathan.Adams@Sun.COM ulong_t slot;
116611459SJonathan.Adams@Sun.COM
116711459SJonathan.Adams@Sun.COM /*
116811459SJonathan.Adams@Sun.COM * Once we've walked through level one, we're done.
116911459SJonathan.Adams@Sun.COM */
1170*13078SJonathan.Adams@Sun.COM if (aw->aw_levone_ndx >= aw->aw_nlevone) {
117111459SJonathan.Adams@Sun.COM return (WALK_DONE);
1172*13078SJonathan.Adams@Sun.COM }
117311459SJonathan.Adams@Sun.COM
117411459SJonathan.Adams@Sun.COM if (aw->aw_levtwo == NULL) {
117511459SJonathan.Adams@Sun.COM anonptr = aw->aw_levone[aw->aw_levone_ndx];
117611459SJonathan.Adams@Sun.COM aw->aw_levone_ndx++;
117711459SJonathan.Adams@Sun.COM } else {
1178*13078SJonathan.Adams@Sun.COM if (aw->aw_levtwo_ndx == 0) {
1179*13078SJonathan.Adams@Sun.COM uintptr_t levtwoptr;
1180*13078SJonathan.Adams@Sun.COM
1181*13078SJonathan.Adams@Sun.COM /* The first time through, skip to our first index. */
1182*13078SJonathan.Adams@Sun.COM if (aw->aw_levone_ndx == 0) {
1183*13078SJonathan.Adams@Sun.COM aw->aw_levone_ndx =
1184*13078SJonathan.Adams@Sun.COM aw->aw_minslot / ANON_CHUNK_SIZE;
1185*13078SJonathan.Adams@Sun.COM aw->aw_levtwo_ndx =
1186*13078SJonathan.Adams@Sun.COM aw->aw_minslot % ANON_CHUNK_SIZE;
1187*13078SJonathan.Adams@Sun.COM }
1188*13078SJonathan.Adams@Sun.COM
1189*13078SJonathan.Adams@Sun.COM levtwoptr = (uintptr_t)aw->aw_levone[aw->aw_levone_ndx];
1190*13078SJonathan.Adams@Sun.COM
1191*13078SJonathan.Adams@Sun.COM if (levtwoptr == NULL) {
1192*13078SJonathan.Adams@Sun.COM if (!aw->aw_all) {
1193*13078SJonathan.Adams@Sun.COM aw->aw_levtwo_ndx = 0;
1194*13078SJonathan.Adams@Sun.COM aw->aw_levone_ndx++;
1195*13078SJonathan.Adams@Sun.COM return (WALK_NEXT);
1196*13078SJonathan.Adams@Sun.COM }
1197*13078SJonathan.Adams@Sun.COM bzero(aw->aw_levtwo,
1198*13078SJonathan.Adams@Sun.COM ANON_CHUNK_SIZE * sizeof (uintptr_t));
1199*13078SJonathan.Adams@Sun.COM
1200*13078SJonathan.Adams@Sun.COM } else if (mdb_vread(aw->aw_levtwo,
1201*13078SJonathan.Adams@Sun.COM ANON_CHUNK_SIZE * sizeof (uintptr_t), levtwoptr) ==
1202*13078SJonathan.Adams@Sun.COM -1) {
1203*13078SJonathan.Adams@Sun.COM mdb_warn("unable to read anon_map %p's "
1204*13078SJonathan.Adams@Sun.COM "second-level map %d at %p",
1205*13078SJonathan.Adams@Sun.COM aw->aw_ampp, aw->aw_levone_ndx,
1206*13078SJonathan.Adams@Sun.COM levtwoptr);
1207*13078SJonathan.Adams@Sun.COM return (WALK_ERR);
1208*13078SJonathan.Adams@Sun.COM }
1209*13078SJonathan.Adams@Sun.COM }
1210*13078SJonathan.Adams@Sun.COM slot = aw->aw_levone_ndx * ANON_CHUNK_SIZE + aw->aw_levtwo_ndx;
121111459SJonathan.Adams@Sun.COM anonptr = aw->aw_levtwo[aw->aw_levtwo_ndx];
1212*13078SJonathan.Adams@Sun.COM
1213*13078SJonathan.Adams@Sun.COM /* update the indices for next time */
121411459SJonathan.Adams@Sun.COM aw->aw_levtwo_ndx++;
121511459SJonathan.Adams@Sun.COM if (aw->aw_levtwo_ndx == ANON_CHUNK_SIZE) {
121611459SJonathan.Adams@Sun.COM aw->aw_levtwo_ndx = 0;
1217*13078SJonathan.Adams@Sun.COM aw->aw_levone_ndx++;
1218*13078SJonathan.Adams@Sun.COM }
121911459SJonathan.Adams@Sun.COM
1220*13078SJonathan.Adams@Sun.COM /* make sure the slot # is in the requested range */
1221*13078SJonathan.Adams@Sun.COM if (slot >= aw->aw_maxslot) {
1222*13078SJonathan.Adams@Sun.COM return (WALK_DONE);
122311459SJonathan.Adams@Sun.COM }
122411459SJonathan.Adams@Sun.COM }
122511459SJonathan.Adams@Sun.COM
122611459SJonathan.Adams@Sun.COM if (anonptr != NULL) {
122711459SJonathan.Adams@Sun.COM mdb_vread(&anon, sizeof (anon), anonptr);
1228*13078SJonathan.Adams@Sun.COM return (wsp->walk_callback(anonptr, &anon, wsp->walk_cbdata));
1229*13078SJonathan.Adams@Sun.COM }
1230*13078SJonathan.Adams@Sun.COM if (aw->aw_all) {
1231*13078SJonathan.Adams@Sun.COM return (wsp->walk_callback(NULL, NULL, wsp->walk_cbdata));
1232*13078SJonathan.Adams@Sun.COM }
1233*13078SJonathan.Adams@Sun.COM return (WALK_NEXT);
123411459SJonathan.Adams@Sun.COM }
123511459SJonathan.Adams@Sun.COM
123611459SJonathan.Adams@Sun.COM void
123711459SJonathan.Adams@Sun.COM anon_walk_fini(mdb_walk_state_t *wsp)
123811459SJonathan.Adams@Sun.COM {
123911459SJonathan.Adams@Sun.COM anon_walk_data_t *aw = (anon_walk_data_t *)wsp->walk_data;
124011459SJonathan.Adams@Sun.COM
124111459SJonathan.Adams@Sun.COM if (aw->aw_levtwo != NULL)
124211459SJonathan.Adams@Sun.COM mdb_free(aw->aw_levtwo, ANON_CHUNK_SIZE * sizeof (uintptr_t));
124311459SJonathan.Adams@Sun.COM
124411459SJonathan.Adams@Sun.COM mdb_free(aw->aw_levone, aw->aw_nlevone * sizeof (uintptr_t));
124511459SJonathan.Adams@Sun.COM mdb_free(aw, sizeof (anon_walk_data_t));
124611459SJonathan.Adams@Sun.COM }
124711459SJonathan.Adams@Sun.COM
1248*13078SJonathan.Adams@Sun.COM int
1249*13078SJonathan.Adams@Sun.COM anon_walk_init(mdb_walk_state_t *wsp)
1250*13078SJonathan.Adams@Sun.COM {
1251*13078SJonathan.Adams@Sun.COM return (anon_walk_init_common(wsp, 0, ULONG_MAX));
1252*13078SJonathan.Adams@Sun.COM }
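/*
 * Illustrative usage (hypothetical address): walk the non-NULL anon slots
 * of an anon_map:
 *
 *	> fffffeb0a7c5e580::walk anon
 */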
1253*13078SJonathan.Adams@Sun.COM
1254*13078SJonathan.Adams@Sun.COM int
1255*13078SJonathan.Adams@Sun.COM segvn_anon_walk_init(mdb_walk_state_t *wsp)
1256*13078SJonathan.Adams@Sun.COM {
1257*13078SJonathan.Adams@Sun.COM const uintptr_t svd_addr = wsp->walk_addr;
1258*13078SJonathan.Adams@Sun.COM uintptr_t amp_addr;
1259*13078SJonathan.Adams@Sun.COM uintptr_t seg_addr;
1260*13078SJonathan.Adams@Sun.COM struct segvn_data svd;
1261*13078SJonathan.Adams@Sun.COM struct anon_map amp;
1262*13078SJonathan.Adams@Sun.COM struct seg seg;
1263*13078SJonathan.Adams@Sun.COM
1264*13078SJonathan.Adams@Sun.COM if (svd_addr == NULL) {
1265*13078SJonathan.Adams@Sun.COM mdb_warn("segvn_anon walk doesn't support global walks\n");
1266*13078SJonathan.Adams@Sun.COM return (WALK_ERR);
1267*13078SJonathan.Adams@Sun.COM }
1268*13078SJonathan.Adams@Sun.COM if (mdb_vread(&svd, sizeof (svd), svd_addr) == -1) {
1269*13078SJonathan.Adams@Sun.COM mdb_warn("segvn_anon walk: unable to read segvn_data at %p",
1270*13078SJonathan.Adams@Sun.COM svd_addr);
1271*13078SJonathan.Adams@Sun.COM return (WALK_ERR);
1272*13078SJonathan.Adams@Sun.COM }
1273*13078SJonathan.Adams@Sun.COM if (svd.amp == NULL) {
1274*13078SJonathan.Adams@Sun.COM mdb_warn("segvn_anon walk: segvn_data at %p has no anon map\n",
1275*13078SJonathan.Adams@Sun.COM svd_addr);
1276*13078SJonathan.Adams@Sun.COM return (WALK_ERR);
1277*13078SJonathan.Adams@Sun.COM }
1278*13078SJonathan.Adams@Sun.COM amp_addr = (uintptr_t)svd.amp;
1279*13078SJonathan.Adams@Sun.COM if (mdb_vread(&amp, sizeof (amp), amp_addr) == -1) {
1280*13078SJonathan.Adams@Sun.COM mdb_warn("segvn_anon walk: unable to read amp %p for "
1281*13078SJonathan.Adams@Sun.COM "segvn_data %p", amp_addr, svd_addr);
1282*13078SJonathan.Adams@Sun.COM return (WALK_ERR);
1283*13078SJonathan.Adams@Sun.COM }
1284*13078SJonathan.Adams@Sun.COM seg_addr = (uintptr_t)svd.seg;
1285*13078SJonathan.Adams@Sun.COM if (mdb_vread(&seg, sizeof (seg), seg_addr) == -1) {
1286*13078SJonathan.Adams@Sun.COM mdb_warn("segvn_anon walk: unable to read seg %p for "
1287*13078SJonathan.Adams@Sun.COM "segvn_data %p", seg_addr, svd_addr);
1288*13078SJonathan.Adams@Sun.COM return (WALK_ERR);
1289*13078SJonathan.Adams@Sun.COM }
1290*13078SJonathan.Adams@Sun.COM if ((seg.s_size + (svd.anon_index << PAGESHIFT)) > amp.size) {
1291*13078SJonathan.Adams@Sun.COM mdb_warn("anon map %p is too small for segment %p\n",
1292*13078SJonathan.Adams@Sun.COM amp_addr, seg_addr);
1293*13078SJonathan.Adams@Sun.COM return (WALK_ERR);
1294*13078SJonathan.Adams@Sun.COM }
1295*13078SJonathan.Adams@Sun.COM
1296*13078SJonathan.Adams@Sun.COM wsp->walk_addr = amp_addr;
1297*13078SJonathan.Adams@Sun.COM return (anon_walk_init_common(wsp,
1298*13078SJonathan.Adams@Sun.COM svd.anon_index, svd.anon_index + (seg.s_size >> PAGESHIFT)));
1299*13078SJonathan.Adams@Sun.COM }
1300*13078SJonathan.Adams@Sun.COM
1301*13078SJonathan.Adams@Sun.COM
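/*
 * Cached <offset, page> pair used by the segvn_pages walker's fast path
 * below; at most SEGVN_MAX_SPARSE entries (128KB worth of segvn_sparse_t)
 * are gathered before falling back to per-offset page lookups.
 */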
1302*13078SJonathan.Adams@Sun.COM typedef struct {
1303*13078SJonathan.Adams@Sun.COM u_offset_t svs_offset;
1304*13078SJonathan.Adams@Sun.COM uintptr_t svs_page;
1305*13078SJonathan.Adams@Sun.COM } segvn_sparse_t;
1306*13078SJonathan.Adams@Sun.COM #define SEGVN_MAX_SPARSE ((128 * 1024) / sizeof (segvn_sparse_t))
1307*13078SJonathan.Adams@Sun.COM
1308*13078SJonathan.Adams@Sun.COM typedef struct {
1309*13078SJonathan.Adams@Sun.COM uintptr_t svw_svdp;
1310*13078SJonathan.Adams@Sun.COM struct segvn_data svw_svd;
1311*13078SJonathan.Adams@Sun.COM struct seg svw_seg;
1312*13078SJonathan.Adams@Sun.COM size_t svw_walkoff;
1313*13078SJonathan.Adams@Sun.COM ulong_t svw_anonskip;
1314*13078SJonathan.Adams@Sun.COM segvn_sparse_t *svw_sparse;
1315*13078SJonathan.Adams@Sun.COM size_t svw_sparse_idx;
1316*13078SJonathan.Adams@Sun.COM size_t svw_sparse_count;
1317*13078SJonathan.Adams@Sun.COM size_t svw_sparse_size;
1318*13078SJonathan.Adams@Sun.COM uint8_t svw_sparse_overflow;
1319*13078SJonathan.Adams@Sun.COM uint8_t svw_all;
1320*13078SJonathan.Adams@Sun.COM } segvn_walk_data_t;
1321*13078SJonathan.Adams@Sun.COM
1322*13078SJonathan.Adams@Sun.COM static int
1323*13078SJonathan.Adams@Sun.COM segvn_sparse_fill(uintptr_t addr, const void *pp_arg, void *arg)
1324*13078SJonathan.Adams@Sun.COM {
1325*13078SJonathan.Adams@Sun.COM segvn_walk_data_t *const svw = arg;
1326*13078SJonathan.Adams@Sun.COM const page_t *const pp = pp_arg;
1327*13078SJonathan.Adams@Sun.COM const u_offset_t offset = pp->p_offset;
1328*13078SJonathan.Adams@Sun.COM segvn_sparse_t *const cur =
1329*13078SJonathan.Adams@Sun.COM &svw->svw_sparse[svw->svw_sparse_count];
1330*13078SJonathan.Adams@Sun.COM
1331*13078SJonathan.Adams@Sun.COM /* See if the page is of interest */
1332*13078SJonathan.Adams@Sun.COM if ((u_offset_t)(offset - svw->svw_svd.offset) >= svw->svw_seg.s_size) {
1333*13078SJonathan.Adams@Sun.COM return (WALK_NEXT);
1334*13078SJonathan.Adams@Sun.COM }
1335*13078SJonathan.Adams@Sun.COM /* See if we have space for the new entry, then add it. */
1336*13078SJonathan.Adams@Sun.COM if (svw->svw_sparse_count >= svw->svw_sparse_size) {
1337*13078SJonathan.Adams@Sun.COM svw->svw_sparse_overflow = 1;
1338*13078SJonathan.Adams@Sun.COM return (WALK_DONE);
1339*13078SJonathan.Adams@Sun.COM }
1340*13078SJonathan.Adams@Sun.COM svw->svw_sparse_count++;
1341*13078SJonathan.Adams@Sun.COM cur->svs_offset = offset;
1342*13078SJonathan.Adams@Sun.COM cur->svs_page = addr;
1343*13078SJonathan.Adams@Sun.COM return (WALK_NEXT);
1344*13078SJonathan.Adams@Sun.COM }
1345*13078SJonathan.Adams@Sun.COM
1346*13078SJonathan.Adams@Sun.COM static int
1347*13078SJonathan.Adams@Sun.COM segvn_sparse_cmp(const void *lp, const void *rp)
1348*13078SJonathan.Adams@Sun.COM {
1349*13078SJonathan.Adams@Sun.COM const segvn_sparse_t *const l = lp;
1350*13078SJonathan.Adams@Sun.COM const segvn_sparse_t *const r = rp;
1351*13078SJonathan.Adams@Sun.COM
1352*13078SJonathan.Adams@Sun.COM if (l->svs_offset < r->svs_offset) {
1353*13078SJonathan.Adams@Sun.COM return (-1);
1354*13078SJonathan.Adams@Sun.COM }
1355*13078SJonathan.Adams@Sun.COM if (l->svs_offset > r->svs_offset) {
1356*13078SJonathan.Adams@Sun.COM return (1);
1357*13078SJonathan.Adams@Sun.COM }
1358*13078SJonathan.Adams@Sun.COM return (0);
1359*13078SJonathan.Adams@Sun.COM }
1360*13078SJonathan.Adams@Sun.COM
1361*13078SJonathan.Adams@Sun.COM /*
1362*13078SJonathan.Adams@Sun.COM * Builds on the "anon_all" walker to walk all resident pages in a segvn_data
1363*13078SJonathan.Adams@Sun.COM * structure. For segvn_datas without an anon structure, it just looks up
1364*13078SJonathan.Adams@Sun.COM * pages in the vnode. For segvn_datas with an anon structure, NULL slots
1365*13078SJonathan.Adams@Sun.COM * pass through to the vnode, and non-null slots are checked for residency.
1366*13078SJonathan.Adams@Sun.COM */
1367*13078SJonathan.Adams@Sun.COM int
1368*13078SJonathan.Adams@Sun.COM segvn_pages_walk_init(mdb_walk_state_t *wsp)
1369*13078SJonathan.Adams@Sun.COM {
1370*13078SJonathan.Adams@Sun.COM segvn_walk_data_t *svw;
1371*13078SJonathan.Adams@Sun.COM struct segvn_data *svd;
1372*13078SJonathan.Adams@Sun.COM
1373*13078SJonathan.Adams@Sun.COM if (wsp->walk_addr == NULL) {
1374*13078SJonathan.Adams@Sun.COM mdb_warn("segvn walk doesn't support global walks\n");
1375*13078SJonathan.Adams@Sun.COM return (WALK_ERR);
1376*13078SJonathan.Adams@Sun.COM }
1377*13078SJonathan.Adams@Sun.COM
1378*13078SJonathan.Adams@Sun.COM svw = mdb_zalloc(sizeof (*svw), UM_SLEEP);
1379*13078SJonathan.Adams@Sun.COM svw->svw_svdp = wsp->walk_addr;
1380*13078SJonathan.Adams@Sun.COM svw->svw_anonskip = 0;
1381*13078SJonathan.Adams@Sun.COM svw->svw_sparse_idx = 0;
1382*13078SJonathan.Adams@Sun.COM svw->svw_walkoff = 0;
1383*13078SJonathan.Adams@Sun.COM svw->svw_all = (wsp->walk_arg == SEGVN_PAGES_ALL);
1384*13078SJonathan.Adams@Sun.COM
1385*13078SJonathan.Adams@Sun.COM if (mdb_vread(&svw->svw_svd, sizeof (svw->svw_svd), wsp->walk_addr) ==
1386*13078SJonathan.Adams@Sun.COM -1) {
1387*13078SJonathan.Adams@Sun.COM mdb_warn("failed to read segvn_data at %p", wsp->walk_addr);
1388*13078SJonathan.Adams@Sun.COM mdb_free(svw, sizeof (*svw));
1389*13078SJonathan.Adams@Sun.COM return (WALK_ERR);
1390*13078SJonathan.Adams@Sun.COM }
1391*13078SJonathan.Adams@Sun.COM
1392*13078SJonathan.Adams@Sun.COM svd = &svw->svw_svd;
1393*13078SJonathan.Adams@Sun.COM if (mdb_vread(&svw->svw_seg, sizeof (svw->svw_seg),
1394*13078SJonathan.Adams@Sun.COM (uintptr_t)svd->seg) == -1) {
1395*13078SJonathan.Adams@Sun.COM mdb_warn("failed to read seg at %p (from %p)",
1396*13078SJonathan.Adams@Sun.COM svd->seg, &((struct segvn_data *)(wsp->walk_addr))->seg);
1397*13078SJonathan.Adams@Sun.COM mdb_free(svw, sizeof (*svw));
1398*13078SJonathan.Adams@Sun.COM return (WALK_ERR);
1399*13078SJonathan.Adams@Sun.COM }
1400*13078SJonathan.Adams@Sun.COM
1401*13078SJonathan.Adams@Sun.COM if (svd->amp == NULL && svd->vp == NULL) {
1402*13078SJonathan.Adams@Sun.COM /* make the walk terminate immediately; no pages */
1403*13078SJonathan.Adams@Sun.COM svw->svw_walkoff = svw->svw_seg.s_size;
1404*13078SJonathan.Adams@Sun.COM
1405*13078SJonathan.Adams@Sun.COM } else if (svd->amp == NULL &&
1406*13078SJonathan.Adams@Sun.COM (svw->svw_seg.s_size >> PAGESHIFT) >= SEGVN_MAX_SPARSE) {
1407*13078SJonathan.Adams@Sun.COM /*
1408*13078SJonathan.Adams@Sun.COM * If we don't have an anon pointer, and the segment is large,
1409*13078SJonathan.Adams@Sun.COM * we try to load the in-memory pages into a fixed-size array,
1410*13078SJonathan.Adams@Sun.COM * which is then sorted and reported directly. This is much
1411*13078SJonathan.Adams@Sun.COM * faster than doing a mdb_page_lookup() for each possible
1412*13078SJonathan.Adams@Sun.COM * offset.
1413*13078SJonathan.Adams@Sun.COM *
1414*13078SJonathan.Adams@Sun.COM * If the allocation fails, or there are too many pages
1415*13078SJonathan.Adams@Sun.COM * in-core, we fall back to looking up the pages individually.
1416*13078SJonathan.Adams@Sun.COM */
1417*13078SJonathan.Adams@Sun.COM svw->svw_sparse = mdb_alloc(
1418*13078SJonathan.Adams@Sun.COM SEGVN_MAX_SPARSE * sizeof (*svw->svw_sparse), UM_NOSLEEP);
1419*13078SJonathan.Adams@Sun.COM if (svw->svw_sparse != NULL) {
1420*13078SJonathan.Adams@Sun.COM svw->svw_sparse_size = SEGVN_MAX_SPARSE;
1421*13078SJonathan.Adams@Sun.COM
1422*13078SJonathan.Adams@Sun.COM if (mdb_pwalk("page", segvn_sparse_fill, svw,
1423*13078SJonathan.Adams@Sun.COM (uintptr_t)svd->vp) == -1 ||
1424*13078SJonathan.Adams@Sun.COM svw->svw_sparse_overflow) {
1425*13078SJonathan.Adams@Sun.COM mdb_free(svw->svw_sparse, SEGVN_MAX_SPARSE *
1426*13078SJonathan.Adams@Sun.COM sizeof (*svw->svw_sparse));
1427*13078SJonathan.Adams@Sun.COM svw->svw_sparse = NULL;
1428*13078SJonathan.Adams@Sun.COM } else {
1429*13078SJonathan.Adams@Sun.COM qsort(svw->svw_sparse, svw->svw_sparse_count,
1430*13078SJonathan.Adams@Sun.COM sizeof (*svw->svw_sparse),
1431*13078SJonathan.Adams@Sun.COM segvn_sparse_cmp);
1432*13078SJonathan.Adams@Sun.COM }
1433*13078SJonathan.Adams@Sun.COM }
1434*13078SJonathan.Adams@Sun.COM
1435*13078SJonathan.Adams@Sun.COM } else if (svd->amp != NULL) {
1436*13078SJonathan.Adams@Sun.COM const char *const layer = (!svw->svw_all && svd->vp == NULL) ?
1437*13078SJonathan.Adams@Sun.COM "segvn_anon" : "segvn_anon_all";
1438*13078SJonathan.Adams@Sun.COM /*
1439*13078SJonathan.Adams@Sun.COM * If we're not printing all offsets, and the segvn_data has
1440*13078SJonathan.Adams@Sun.COM * no backing VP, we can use the "segvn_anon" walker, which
1441*13078SJonathan.Adams@Sun.COM * efficiently skips NULL slots.
1442*13078SJonathan.Adams@Sun.COM *
1443*13078SJonathan.Adams@Sun.COM * Otherwise, we layer over the "segvn_anon_all" walker
1444*13078SJonathan.Adams@Sun.COM * (which reports all anon slots, even NULL ones), so that
1445*13078SJonathan.Adams@Sun.COM * segvn_pages_walk_step() knows the precise offset for each
1446*13078SJonathan.Adams@Sun.COM * element. It uses that offset information to look up the
1447*13078SJonathan.Adams@Sun.COM * backing pages for NULL anon slots.
1448*13078SJonathan.Adams@Sun.COM */
1449*13078SJonathan.Adams@Sun.COM if (mdb_layered_walk(layer, wsp) == -1) {
1450*13078SJonathan.Adams@Sun.COM mdb_warn("segvn_pages: failed to layer \"%s\" "
1451*13078SJonathan.Adams@Sun.COM "for segvn_data %p", layer, svw->svw_svdp);
1452*13078SJonathan.Adams@Sun.COM mdb_free(svw, sizeof (*svw));
1453*13078SJonathan.Adams@Sun.COM return (WALK_ERR);
1454*13078SJonathan.Adams@Sun.COM }
1455*13078SJonathan.Adams@Sun.COM }
1456*13078SJonathan.Adams@Sun.COM
1457*13078SJonathan.Adams@Sun.COM wsp->walk_data = svw;
1458*13078SJonathan.Adams@Sun.COM return (WALK_NEXT);
1459*13078SJonathan.Adams@Sun.COM }
1460*13078SJonathan.Adams@Sun.COM
1461*13078SJonathan.Adams@Sun.COM int
1462*13078SJonathan.Adams@Sun.COM segvn_pages_walk_step(mdb_walk_state_t *wsp)
1463*13078SJonathan.Adams@Sun.COM {
1464*13078SJonathan.Adams@Sun.COM segvn_walk_data_t *const svw = wsp->walk_data;
1465*13078SJonathan.Adams@Sun.COM struct seg *const seg = &svw->svw_seg;
1466*13078SJonathan.Adams@Sun.COM struct segvn_data *const svd = &svw->svw_svd;
1467*13078SJonathan.Adams@Sun.COM uintptr_t pp;
1468*13078SJonathan.Adams@Sun.COM page_t page;
1469*13078SJonathan.Adams@Sun.COM
1470*13078SJonathan.Adams@Sun.COM /* If we've walked off the end of the segment, we're done. */
1471*13078SJonathan.Adams@Sun.COM if (svw->svw_walkoff >= seg->s_size) {
1472*13078SJonathan.Adams@Sun.COM return (WALK_DONE);
1473*13078SJonathan.Adams@Sun.COM }
1474*13078SJonathan.Adams@Sun.COM
1475*13078SJonathan.Adams@Sun.COM /*
1476*13078SJonathan.Adams@Sun.COM * If we've got a sparse page array, just send it directly.
1477*13078SJonathan.Adams@Sun.COM */
1478*13078SJonathan.Adams@Sun.COM if (svw->svw_sparse != NULL) {
1479*13078SJonathan.Adams@Sun.COM u_offset_t off;
1480*13078SJonathan.Adams@Sun.COM
1481*13078SJonathan.Adams@Sun.COM if (svw->svw_sparse_idx >= svw->svw_sparse_count) {
1482*13078SJonathan.Adams@Sun.COM pp = NULL;
1483*13078SJonathan.Adams@Sun.COM if (!svw->svw_all) {
1484*13078SJonathan.Adams@Sun.COM return (WALK_DONE);
1485*13078SJonathan.Adams@Sun.COM }
1486*13078SJonathan.Adams@Sun.COM } else {
1487*13078SJonathan.Adams@Sun.COM segvn_sparse_t *const svs =
1488*13078SJonathan.Adams@Sun.COM &svw->svw_sparse[svw->svw_sparse_idx];
1489*13078SJonathan.Adams@Sun.COM off = svs->svs_offset - svd->offset;
1490*13078SJonathan.Adams@Sun.COM if (svw->svw_all && svw->svw_walkoff != off) {
1491*13078SJonathan.Adams@Sun.COM pp = NULL;
1492*13078SJonathan.Adams@Sun.COM } else {
1493*13078SJonathan.Adams@Sun.COM pp = svs->svs_page;
1494*13078SJonathan.Adams@Sun.COM svw->svw_sparse_idx++;
1495*13078SJonathan.Adams@Sun.COM }
1496*13078SJonathan.Adams@Sun.COM }
1497*13078SJonathan.Adams@Sun.COM
1498*13078SJonathan.Adams@Sun.COM } else if (svd->amp == NULL || wsp->walk_addr == NULL) {
1499*13078SJonathan.Adams@Sun.COM /*
1500*13078SJonathan.Adams@Sun.COM * If there's no anon, or the anon slot is NULL, look up
1501*13078SJonathan.Adams@Sun.COM * <vp, offset>.
1502*13078SJonathan.Adams@Sun.COM */
1503*13078SJonathan.Adams@Sun.COM if (svd->vp != NULL) {
1504*13078SJonathan.Adams@Sun.COM pp = mdb_page_lookup((uintptr_t)svd->vp,
1505*13078SJonathan.Adams@Sun.COM svd->offset + svw->svw_walkoff);
1506*13078SJonathan.Adams@Sun.COM } else {
1507*13078SJonathan.Adams@Sun.COM pp = NULL;
1508*13078SJonathan.Adams@Sun.COM }
1509*13078SJonathan.Adams@Sun.COM
1510*13078SJonathan.Adams@Sun.COM } else {
1511*13078SJonathan.Adams@Sun.COM const struct anon *const anon = wsp->walk_layer;
1512*13078SJonathan.Adams@Sun.COM
1513*13078SJonathan.Adams@Sun.COM /*
1514*13078SJonathan.Adams@Sun.COM * We have a "struct anon"; if it's not swapped out,
1515*13078SJonathan.Adams@Sun.COM * look up the page.
1516*13078SJonathan.Adams@Sun.COM */
1517*13078SJonathan.Adams@Sun.COM if (anon->an_vp != NULL || anon->an_off != 0) {
1518*13078SJonathan.Adams@Sun.COM pp = mdb_page_lookup((uintptr_t)anon->an_vp,
1519*13078SJonathan.Adams@Sun.COM anon->an_off);
1520*13078SJonathan.Adams@Sun.COM if (pp == 0 && mdb_get_state() != MDB_STATE_RUNNING) {
1521*13078SJonathan.Adams@Sun.COM mdb_warn("walk segvn_pages: segvn_data %p "
1522*13078SJonathan.Adams@Sun.COM "offset %ld, anon page <%p, %llx> not "
1523*13078SJonathan.Adams@Sun.COM "found.\n", svw->svw_svdp, svw->svw_walkoff,
1524*13078SJonathan.Adams@Sun.COM anon->an_vp, anon->an_off);
1525*13078SJonathan.Adams@Sun.COM }
1526*13078SJonathan.Adams@Sun.COM } else {
1527*13078SJonathan.Adams@Sun.COM if (anon->an_pvp == NULL) {
1528*13078SJonathan.Adams@Sun.COM mdb_warn("walk segvn_pages: useless struct "
1529*13078SJonathan.Adams@Sun.COM "anon at %p\n", wsp->walk_addr);
1530*13078SJonathan.Adams@Sun.COM }
1531*13078SJonathan.Adams@Sun.COM pp = NULL; /* nothing at this offset */
1532*13078SJonathan.Adams@Sun.COM }
1533*13078SJonathan.Adams@Sun.COM }
1534*13078SJonathan.Adams@Sun.COM
1535*13078SJonathan.Adams@Sun.COM svw->svw_walkoff += PAGESIZE; /* Update for the next call */
1536*13078SJonathan.Adams@Sun.COM if (pp != NULL) {
1537*13078SJonathan.Adams@Sun.COM if (mdb_vread(&page, sizeof (page_t), pp) == -1) {
1538*13078SJonathan.Adams@Sun.COM mdb_warn("unable to read page_t at %#lx", pp);
1539*13078SJonathan.Adams@Sun.COM return (WALK_ERR);
1540*13078SJonathan.Adams@Sun.COM }
1541*13078SJonathan.Adams@Sun.COM return (wsp->walk_callback(pp, &page, wsp->walk_cbdata));
1542*13078SJonathan.Adams@Sun.COM }
1543*13078SJonathan.Adams@Sun.COM if (svw->svw_all) {
1544*13078SJonathan.Adams@Sun.COM return (wsp->walk_callback(NULL, NULL, wsp->walk_cbdata));
1545*13078SJonathan.Adams@Sun.COM }
1546*13078SJonathan.Adams@Sun.COM return (WALK_NEXT);
1547*13078SJonathan.Adams@Sun.COM }
1548*13078SJonathan.Adams@Sun.COM
1549*13078SJonathan.Adams@Sun.COM void
1550*13078SJonathan.Adams@Sun.COM segvn_pages_walk_fini(mdb_walk_state_t *wsp)
1551*13078SJonathan.Adams@Sun.COM {
1552*13078SJonathan.Adams@Sun.COM segvn_walk_data_t *const svw = wsp->walk_data;
1553*13078SJonathan.Adams@Sun.COM
1554*13078SJonathan.Adams@Sun.COM if (svw->svw_sparse != NULL) {
1555*13078SJonathan.Adams@Sun.COM mdb_free(svw->svw_sparse, SEGVN_MAX_SPARSE *
1556*13078SJonathan.Adams@Sun.COM sizeof (*svw->svw_sparse));
1557*13078SJonathan.Adams@Sun.COM }
1558*13078SJonathan.Adams@Sun.COM mdb_free(svw, sizeof (*svw));
1559*13078SJonathan.Adams@Sun.COM }
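/*
 * Illustrative usage (hypothetical address): report the resident pages of
 * a segvn segment, given its segvn_data pointer:
 *
 *	> fffffeb0a9d21cc0::walk segvn_pages | ::page
 */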
1560*13078SJonathan.Adams@Sun.COM
156111459SJonathan.Adams@Sun.COM /*
156211459SJonathan.Adams@Sun.COM * Grumble, grumble.
156311459SJonathan.Adams@Sun.COM */
156411459SJonathan.Adams@Sun.COM #define SMAP_HASHFUNC(vp, off) \
156511459SJonathan.Adams@Sun.COM ((((uintptr_t)(vp) >> 6) + ((uintptr_t)(vp) >> 3) + \
156611459SJonathan.Adams@Sun.COM ((off) >> MAXBSHIFT)) & smd_hashmsk)
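/*
 * Note: this is assumed to mirror the hash that seg_map uses internally
 * (hence the grumbling above); if the kernel's segmap hashing ever changes,
 * this dcmd must be updated to match.
 */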
156711459SJonathan.Adams@Sun.COM
156811459SJonathan.Adams@Sun.COM int
156911459SJonathan.Adams@Sun.COM vnode2smap(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
157011459SJonathan.Adams@Sun.COM {
157111459SJonathan.Adams@Sun.COM long smd_hashmsk;
157211459SJonathan.Adams@Sun.COM int hash;
157311459SJonathan.Adams@Sun.COM uintptr_t offset = 0;
157411459SJonathan.Adams@Sun.COM struct smap smp;
157511459SJonathan.Adams@Sun.COM uintptr_t saddr, kaddr;
157611459SJonathan.Adams@Sun.COM uintptr_t smd_hash, smd_smap;
157711459SJonathan.Adams@Sun.COM struct seg seg;
157811459SJonathan.Adams@Sun.COM
157911459SJonathan.Adams@Sun.COM if (!(flags & DCMD_ADDRSPEC))
158011459SJonathan.Adams@Sun.COM return (DCMD_USAGE);
158111459SJonathan.Adams@Sun.COM
158211459SJonathan.Adams@Sun.COM if (mdb_readvar(&smd_hashmsk, "smd_hashmsk") == -1) {
158311459SJonathan.Adams@Sun.COM mdb_warn("failed to read smd_hashmsk");
158411459SJonathan.Adams@Sun.COM return (DCMD_ERR);
158511459SJonathan.Adams@Sun.COM }
158611459SJonathan.Adams@Sun.COM
158711459SJonathan.Adams@Sun.COM if (mdb_readvar(&smd_hash, "smd_hash") == -1) {
158811459SJonathan.Adams@Sun.COM mdb_warn("failed to read smd_hash");
158911459SJonathan.Adams@Sun.COM return (DCMD_ERR);
159011459SJonathan.Adams@Sun.COM }
159111459SJonathan.Adams@Sun.COM
159211459SJonathan.Adams@Sun.COM if (mdb_readvar(&smd_smap, "smd_smap") == -1) {
159311459SJonathan.Adams@Sun.COM mdb_warn("failed to read smd_smap");
159411459SJonathan.Adams@Sun.COM return (DCMD_ERR);
159511459SJonathan.Adams@Sun.COM }
159611459SJonathan.Adams@Sun.COM
159711459SJonathan.Adams@Sun.COM if (mdb_readvar(&kaddr, "segkmap") == -1) {
159811459SJonathan.Adams@Sun.COM mdb_warn("failed to read segkmap");
159911459SJonathan.Adams@Sun.COM return (DCMD_ERR);
160011459SJonathan.Adams@Sun.COM }
160111459SJonathan.Adams@Sun.COM
160211459SJonathan.Adams@Sun.COM if (mdb_vread(&seg, sizeof (seg), kaddr) == -1) {
160311459SJonathan.Adams@Sun.COM mdb_warn("failed to read segkmap at %p", kaddr);
160411459SJonathan.Adams@Sun.COM return (DCMD_ERR);
160511459SJonathan.Adams@Sun.COM }
160611459SJonathan.Adams@Sun.COM
160711459SJonathan.Adams@Sun.COM if (argc != 0) {
160811459SJonathan.Adams@Sun.COM const mdb_arg_t *arg = &argv[0];
160911459SJonathan.Adams@Sun.COM
161011459SJonathan.Adams@Sun.COM if (arg->a_type == MDB_TYPE_IMMEDIATE)
161111459SJonathan.Adams@Sun.COM offset = arg->a_un.a_val;
161211459SJonathan.Adams@Sun.COM else
161311459SJonathan.Adams@Sun.COM offset = (uintptr_t)mdb_strtoull(arg->a_un.a_str);
161411459SJonathan.Adams@Sun.COM }
161511459SJonathan.Adams@Sun.COM
161611459SJonathan.Adams@Sun.COM hash = SMAP_HASHFUNC(addr, offset);
161711459SJonathan.Adams@Sun.COM
161811459SJonathan.Adams@Sun.COM if (mdb_vread(&saddr, sizeof (saddr),
161911459SJonathan.Adams@Sun.COM smd_hash + hash * sizeof (uintptr_t)) == -1) {
162011459SJonathan.Adams@Sun.COM mdb_warn("couldn't read smap at %p",
162111459SJonathan.Adams@Sun.COM smd_hash + hash * sizeof (uintptr_t));
162211459SJonathan.Adams@Sun.COM return (DCMD_ERR);
162311459SJonathan.Adams@Sun.COM }
162411459SJonathan.Adams@Sun.COM
162511459SJonathan.Adams@Sun.COM do {
162611459SJonathan.Adams@Sun.COM if (mdb_vread(&smp, sizeof (smp), saddr) == -1) {
162711459SJonathan.Adams@Sun.COM mdb_warn("couldn't read smap at %p", saddr);
162811459SJonathan.Adams@Sun.COM return (DCMD_ERR);
162911459SJonathan.Adams@Sun.COM }
163011459SJonathan.Adams@Sun.COM
163111459SJonathan.Adams@Sun.COM if ((uintptr_t)smp.sm_vp == addr && smp.sm_off == offset) {
163211459SJonathan.Adams@Sun.COM mdb_printf("vnode %p, offs %p is smap %p, vaddr %p\n",
163311459SJonathan.Adams@Sun.COM addr, offset, saddr, ((saddr - smd_smap) /
163411459SJonathan.Adams@Sun.COM sizeof (smp)) * MAXBSIZE + seg.s_base);
163511459SJonathan.Adams@Sun.COM return (DCMD_OK);
163611459SJonathan.Adams@Sun.COM }
163711459SJonathan.Adams@Sun.COM
163811459SJonathan.Adams@Sun.COM saddr = (uintptr_t)smp.sm_hash;
163911459SJonathan.Adams@Sun.COM } while (saddr != NULL);
164011459SJonathan.Adams@Sun.COM
164111459SJonathan.Adams@Sun.COM mdb_printf("no smap for vnode %p, offs %p\n", addr, offset);
164211459SJonathan.Adams@Sun.COM return (DCMD_OK);
164311459SJonathan.Adams@Sun.COM }
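/*
 * Illustrative usage (hypothetical values): find the smap, and its mapped
 * segkmap virtual address, for a given <vnode, offset> pair:
 *
 *	> fffffeb0a6e0d780::vnode2smap 0x2000
 */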
164411459SJonathan.Adams@Sun.COM
164511459SJonathan.Adams@Sun.COM /*ARGSUSED*/
164611459SJonathan.Adams@Sun.COM int
164711459SJonathan.Adams@Sun.COM addr2smap(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
164811459SJonathan.Adams@Sun.COM {
164911459SJonathan.Adams@Sun.COM uintptr_t kaddr;
165011459SJonathan.Adams@Sun.COM struct seg seg;
165111459SJonathan.Adams@Sun.COM struct segmap_data sd;
165211459SJonathan.Adams@Sun.COM
165311459SJonathan.Adams@Sun.COM if (!(flags & DCMD_ADDRSPEC))
165411459SJonathan.Adams@Sun.COM return (DCMD_USAGE);
165511459SJonathan.Adams@Sun.COM
165611459SJonathan.Adams@Sun.COM if (mdb_readvar(&kaddr, "segkmap") == -1) {
165711459SJonathan.Adams@Sun.COM mdb_warn("failed to read segkmap");
165811459SJonathan.Adams@Sun.COM return (DCMD_ERR);
165911459SJonathan.Adams@Sun.COM }
166011459SJonathan.Adams@Sun.COM
166111459SJonathan.Adams@Sun.COM if (mdb_vread(&seg, sizeof (seg), kaddr) == -1) {
166211459SJonathan.Adams@Sun.COM mdb_warn("failed to read segkmap at %p", kaddr);
166311459SJonathan.Adams@Sun.COM return (DCMD_ERR);
166411459SJonathan.Adams@Sun.COM }
166511459SJonathan.Adams@Sun.COM
166611459SJonathan.Adams@Sun.COM if (mdb_vread(&sd, sizeof (sd), (uintptr_t)seg.s_data) == -1) {
166711459SJonathan.Adams@Sun.COM mdb_warn("failed to read segmap_data at %p", seg.s_data);
166811459SJonathan.Adams@Sun.COM return (DCMD_ERR);
166911459SJonathan.Adams@Sun.COM }
167011459SJonathan.Adams@Sun.COM
167111459SJonathan.Adams@Sun.COM mdb_printf("%p is smap %p\n", addr,
167211459SJonathan.Adams@Sun.COM ((addr - (uintptr_t)seg.s_base) >> MAXBSHIFT) *
167311459SJonathan.Adams@Sun.COM sizeof (struct smap) + (uintptr_t)sd.smd_sm);
167411459SJonathan.Adams@Sun.COM
167511459SJonathan.Adams@Sun.COM return (DCMD_OK);
167611459SJonathan.Adams@Sun.COM }
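/*
 * Illustrative usage (hypothetical address): map a segkmap virtual address
 * back to its smap structure:
 *
 *	> fffffe8000a12000::addr2smap
 */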
1677