/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * The following routines implement the hat layer's
 * recording of the referenced and modified bits.
 */
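
/*
 * Rough usage sketch (illustrative only; the real consumers of this
 * interface live elsewhere in the kernel):
 *
 *	int id = hat_startstat(as);
 *	if (id != -1) {
 *		...
 *		hat_getstat(as, addr, len, id, data, 1);
 *		...
 *		hat_freestat(as, id);
 *	}
 *
 * hat_startstat() claims an unused bit in as->a_vbits and enables stats
 * collection; hat_getstat() fills a byte array with the ref/mod bits
 * gathered for the range (clearing them here when clearflag is set);
 * hat_freestat() returns the vbit and the statistics blocks.
 * hat_setstat() is the entry point through which ref/mod information
 * for individual pages gets recorded into the blocks.
 */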

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/debug.h>
#include <sys/kmem.h>

/*
 * Note: cmn_err() must not be called while any hat layer locks are held.
 */
#include <sys/cmn_err.h>

#include <vm/as.h>
#include <vm/hat.h>

kmutex_t hat_statlock;		/* protects all hat statistics data */
struct hrmstat *hrm_memlist;	/* tracks memory alloced for hrm_blist blocks */
struct hrmstat **hrm_hashtab;	/* hash table for finding blocks quickly */
struct hrmstat *hrm_blist;
int hrm_blist_incr = HRM_BLIST_INCR;
int hrm_blist_lowater = HRM_BLIST_INCR/2;
int hrm_blist_num = 0;
int hrm_blist_total = 0;
int hrm_mlockinited = 0;
int hrm_allocfailmsg = 0;	/* print a message when allocations fail */
int hrm_allocfail = 0;

static struct hrmstat	*hrm_balloc(void);
static void	hrm_link(struct hrmstat *);
static void	hrm_setbits(struct hrmstat *, caddr_t, uint_t);
static void	hrm_hashout(struct hrmstat *);
static void	hrm_getblk(int);

#define	hrm_hash(as, addr) \
	(HRM_HASHMASK & \
	(((uintptr_t)(addr) >> HRM_BASESHIFT) ^ ((uintptr_t)(as) >> 2)))

#define	hrm_match(hrm, as, addr) \
	(((hrm)->hrm_as == (as) && \
	((hrm)->hrm_base == ((uintptr_t)(addr) & HRM_BASEMASK))) ? 1 : 0)
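
/*
 * Each hrmstat block records ref/mod bits for one HRM_BASEMASK-aligned,
 * HRM_PAGES-page range of one address space on behalf of one statistics
 * id (hrm_id).  A given (as, addr) can therefore match several blocks on
 * the same hash chain, one per active id; hrm_hash() simply mixes the
 * block base address with the as pointer to choose a chain.
 */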

/*
 * Called when an address space maps in more pages while stats are being
 * collected.
 */
/* ARGSUSED */
void
hat_resvstat(size_t chunk, struct as *as, caddr_t addr)
{
}

/*
 * Start the statistics gathering for an address space.
 * Return -1 if we can't do it, otherwise return an opaque
 * identifier to be used when querying for the gathered statistics.
 * The identifier is an unused bit in a_vbits.
 * Bit 0 is reserved for swsmon.
 */
int
hat_startstat(struct as *as)
{
	uint_t nbits;		/* number of bits */
	uint_t bn;		/* bit number */
	uint_t id;		/* new vbit, identifier */
	uint_t vbits;		/* used vbits of address space */
	size_t chunk;		/* mapped size for stats */

	/*
	 * If the allocator for refmod statistics blocks has been running
	 * out, print a warning message about how to fix it; see the
	 * comment at the beginning of hat_setstat.
	 */
	if (hrm_allocfailmsg) {
		cmn_err(CE_WARN,
		    "hrm_balloc failures occurred, increase hrm_blist_incr");
		hrm_allocfailmsg = 0;
	}

	/*
	 * Verify that a buffer of statistics blocks exists
	 * and allocate more, if needed.
	 */

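	/*
	 * chunk is the number of statistics blocks needed to cover the
	 * pages currently mapped by this address space (one block per
	 * HRM_PAGES pages); small address spaces just rely on the default
	 * hrm_blist_incr sizing done in hrm_getblk().
	 */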
	chunk = hat_get_mapped_size(as->a_hat);
	chunk = (btop(chunk)/HRM_PAGES);
	if (chunk < HRM_BLIST_INCR)
		chunk = 0;

	hrm_getblk((int)chunk);

	/*
	 * Find an unused id in the given address space.
	 */
	hat_enter(as->a_hat);
	vbits = as->a_vbits;
	nbits = sizeof (as->a_vbits) * NBBY;
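	/*
	 * bn starts at 1 and id at 2 (1 << 1) because bit 0 of a_vbits is
	 * reserved for swsmon (see the block comment above).
	 */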
	for (bn = 1, id = 2; bn < (nbits - 1); bn++, id <<= 1)
		if ((id & vbits) == 0)
			break;
	if (bn >= (nbits - 1)) {
		hat_exit(as->a_hat);
		return (-1);
	}
	as->a_vbits |= id;
	hat_exit(as->a_hat);
	(void) hat_stats_enable(as->a_hat);
	return (id);
}

/*
 * Record referenced and modified information for an address space.
 * Rmbits is a word containing the referenced bit in bit position 1
 * and the modified bit in bit position 0.
 *
 * For current informational uses, a program using this facility can
 * simply be rerun after increasing hrm_blist_incr so that a larger
 * buffer of blocks is maintained.
 */
void
hat_setstat(struct as *as, caddr_t addr, size_t len, uint_t rmbits)
{
	struct hrmstat	*hrm;
	uint_t		vbits, newbits, nb;
	int		h;

	ASSERT(len == PAGESIZE);
	ASSERT((rmbits & ~(P_MOD|P_REF)) == 0);

	if (rmbits == 0)
		return;

	mutex_enter(&hat_statlock);

	/*
	 * Search the hash list for the as and addr we are looking for
	 * and set the ref and mod bits in every block that matches.
	 */
	vbits = 0;
	h = hrm_hash(as, addr);
	for (hrm = hrm_hashtab[h]; hrm; hrm = hrm->hrm_hnext) {
		if (hrm_match(hrm, as, addr)) {
			hrm_setbits(hrm, addr, rmbits);
			vbits |= hrm->hrm_id;
		}
	}

	/*
	 * If we didn't find a block for every enabled vbit, then
	 * allocate and initialize a block for each bit that was
	 * not found.
	 */
	if (vbits != as->a_vbits) {
		newbits = (vbits ^ as->a_vbits) & as->a_vbits;
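		/*
		 * newbits now has a bit set for each enabled id that had no
		 * matching block; peel them off lowest bit first with ffs()
		 * and allocate a block for each.
		 */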
		while (newbits) {
			if (ffs(newbits))
				nb = 1 << (ffs(newbits)-1);
			hrm = (struct hrmstat *)hrm_balloc();
			if (hrm == NULL) {
				hrm_allocfailmsg = 1;
				hrm_allocfail++;
				mutex_exit(&hat_statlock);
				return;
			}
			hrm->hrm_as = as;
			hrm->hrm_base = (uintptr_t)addr & HRM_BASEMASK;
			hrm->hrm_id = nb;
			hrm_link(hrm);
			hrm_setbits(hrm, addr, rmbits);
			newbits &= ~nb;
		}
	}
	mutex_exit(&hat_statlock);
}

/*
 * Free the resources used to maintain the referenced and modified
 * statistics for the virtual page view of an address space
 * identified by id.
 */
void
hat_freestat(struct as *as, int id)
{
	struct hrmstat *hrm;
	struct hrmstat *prev_ahrm;
	struct hrmstat *hrm_tmplist;
	struct hrmstat *hrm_next;

	hat_stats_disable(as->a_hat);	/* tell the hat layer to stop */
	hat_enter(as->a_hat);
	if (id == 0)
		as->a_vbits = 0;
	else
		as->a_vbits &= ~id;

	if ((hrm = as->a_hrm) == NULL) {
		hat_exit(as->a_hat);
		return;
	}
	hat_exit(as->a_hat);

	mutex_enter(&hat_statlock);

	for (prev_ahrm = NULL; hrm; hrm = hrm->hrm_anext) {
		if ((id == hrm->hrm_id) || (id == NULL)) {

			hrm_hashout(hrm);
			hrm->hrm_hnext = hrm_blist;
			hrm_blist = hrm;
			hrm_blist_num++;

			if (prev_ahrm == NULL)
				as->a_hrm = hrm->hrm_anext;
			else
				prev_ahrm->hrm_anext = hrm->hrm_anext;

		} else
			prev_ahrm = hrm;
	}

	/*
	 * If all statistics blocks are free,
	 * return the memory to the system.
	 */
	if (hrm_blist_num == hrm_blist_total) {
		/* zero the block list since we are giving back its memory */
		hrm_blist = NULL;
		hrm_blist_num = 0;
		hrm_blist_total = 0;
		hrm_tmplist = hrm_memlist;
		hrm_memlist = NULL;
	} else {
		hrm_tmplist = NULL;
	}

	mutex_exit(&hat_statlock);

	/*
	 * If there are any hrmstat structures to be freed, this must only
	 * be done after we've released hat_statlock.
	 */
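	/*
	 * hrm_tmplist is the chain of head blocks from the kmem_zalloc'd
	 * chunks; each head block's hrm_base records the size of its chunk
	 * (set in hrm_getblk()), which is what kmem_free() needs below.
	 */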
	while (hrm_tmplist != NULL) {
		hrm_next = hrm_tmplist->hrm_hnext;
		kmem_free(hrm_tmplist, hrm_tmplist->hrm_base);
		hrm_tmplist = hrm_next;
	}
}

/*
 * Grab memory for statistics gathering of the hat layer.
 */
static void
hrm_getblk(int chunk)
{
	struct hrmstat *hrm, *l;
	int i;
	int hrm_incr;

	mutex_enter(&hat_statlock);
	/*
	 * XXX The whole private freelist management here really should be
	 * overhauled.
	 *
	 * The freelist should have some knowledge of how much memory is
	 * needed by a process and thus when hat_resvstat gets called, we can
	 * increment the freelist needs for that process within this subsystem.
	 * Thus there will be reservations for all processes which are being
	 * watched, which should be accurate, and consume less memory overall.
	 *
	 * For now, just make sure there are enough entries on the freelist to
	 * handle the current chunk.
	 */
	if ((hrm_blist == NULL) ||
	    (hrm_blist_num <= hrm_blist_lowater) ||
	    (chunk && (hrm_blist_num < chunk + hrm_blist_incr))) {
		mutex_exit(&hat_statlock);

		hrm_incr = chunk + hrm_blist_incr;
		hrm = kmem_zalloc(sizeof (struct hrmstat) * hrm_incr, KM_SLEEP);
		hrm->hrm_base = sizeof (struct hrmstat) * hrm_incr;

		/*
		 * thread the allocated blocks onto a freelist
		 * using the first block to hold information for
		 * freeing them all later
		 */
		mutex_enter(&hat_statlock);
		hrm->hrm_hnext = hrm_memlist;
		hrm_memlist = hrm;

		hrm_blist_total += (hrm_incr - 1);
		for (i = 1; i < hrm_incr; i++) {
			l = &hrm[i];
			l->hrm_hnext = hrm_blist;
			hrm_blist = l;
			hrm_blist_num++;
		}
	}
	mutex_exit(&hat_statlock);
}

static void
hrm_hashin(struct hrmstat *hrm)
{
	int		h;

	ASSERT(MUTEX_HELD(&hat_statlock));
	h = hrm_hash(hrm->hrm_as, hrm->hrm_base);

	hrm->hrm_hnext = hrm_hashtab[h];
	hrm_hashtab[h] = hrm;
}

static void
hrm_hashout(struct hrmstat *hrm)
{
	struct hrmstat	*list, **prev_hrm;
	int		h;

	ASSERT(MUTEX_HELD(&hat_statlock));
	h = hrm_hash(hrm->hrm_as, hrm->hrm_base);
	list = hrm_hashtab[h];
	prev_hrm = &hrm_hashtab[h];

	while (list) {
		if (list == hrm) {
			*prev_hrm = list->hrm_hnext;
			return;
		}
		prev_hrm = &list->hrm_hnext;
		list = list->hrm_hnext;
	}
}


/*
 * Link a statistic block into an address space and also put it
 * on the hash list for future references.
 */
static void
hrm_link(struct hrmstat *hrm)
{
	struct as *as = hrm->hrm_as;

	ASSERT(MUTEX_HELD(&hat_statlock));
	hrm->hrm_anext = as->a_hrm;
	as->a_hrm = hrm;
	hrm_hashin(hrm);
}

/*
 * Allocate a block for statistics keeping.
 * Returns NULL if blocks are unavailable.
 */
static struct hrmstat *
hrm_balloc(void)
{
	struct hrmstat *hrm;

	ASSERT(MUTEX_HELD(&hat_statlock));

	hrm = hrm_blist;
	if (hrm != NULL) {
		hrm_blist = hrm->hrm_hnext;
		hrm_blist_num--;
		hrm->hrm_hnext = NULL;
	}
	return (hrm);
}

/*
 * Set the ref and mod bits for addr within statistics block hrm.
 */
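/*
 * Layout: each byte of hrm_bits[] packs the (ref, mod) pairs for four
 * consecutive pages, most significant pair first.  For example, page
 * offset 5 within a block lands in hrm_bits[1] at shift 4.
 */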
static void
hrm_setbits(struct hrmstat *hrm, caddr_t addr, uint_t bits)
{
	uint_t po, bo, spb;
	uint_t nbits;

	po = ((uintptr_t)addr & HRM_BASEOFFSET) >> MMU_PAGESHIFT; /* pg off */
	bo = po / (NBBY / 2);			/* which byte in bit array */
	spb = (3 - (po & 3)) * 2;		/* shift position within byte */
	nbits = bits << spb;			/* bit mask */
	hrm->hrm_bits[bo] |= nbits;
}

/*
 * Return collected statistics about an address space.
 * If clearflag is set, atomically read and zero the bits.
 *
 * Fill in the data array supplied with the referenced and
 * modified bits collected for address range [addr ... addr + len]
 * in address space, as, uniquely identified by id.
 * The destination is a byte array.  We fill in three bits per byte:
 * referenced, modified, and hwmapped bits.
 * Kernel-only interface; it can't fault on the destination data array.
 */
void
hat_getstat(struct as *as, caddr_t addr, size_t len, uint_t id,
    caddr_t datap, int clearflag)
{
	size_t	np;		/* number of pages */
	caddr_t	a;
	char	*dp;

	np = btop(len);
	bzero(datap, np);

	/* allocate enough statistics blocks to cover the len passed in */
	hrm_getblk(np / HRM_PAGES);

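	/*
	 * hat_sync() is what causes the hat layer to push the accumulated
	 * ref/mod information for this range into the statistics blocks;
	 * blocks are allocated up front because hat_setstat() can only take
	 * blocks off the freelist and silently drops data when none are
	 * available.
	 */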
	hat_sync(as->a_hat, addr, len, clearflag);

	/* allocate more statistics blocks if needed */
	hrm_getblk(0);

	mutex_enter(&hat_statlock);
	if (hrm_hashtab == NULL) {
		/* can happen when victim process exits */
		mutex_exit(&hat_statlock);
		return;
	}
	dp = datap;
	a = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	while (a < addr + len) {
		struct hrmstat	*hrm;
		size_t	n;		/* number of pages, temp */
		int	h;		/* hash index */
		uint_t	po;

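		/*
		 * h indexes the hash chain for the block covering 'a'; n is
		 * the number of pages that block covers in this pass (clamped
		 * below to the pages remaining); po is the page offset of 'a'
		 * within the block.
		 */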
		h = hrm_hash(as, a);
		n = (HRM_PAGES -
		    (((uintptr_t)a & HRM_PAGEMASK) >> MMU_PAGESHIFT));
		if (n > np)
			n = np;
		po = ((uintptr_t)a & HRM_BASEOFFSET) >> MMU_PAGESHIFT;

		for (hrm = hrm_hashtab[h]; hrm; hrm = hrm->hrm_hnext) {
			if (hrm->hrm_as == as &&
			    hrm->hrm_base == ((uintptr_t)a & HRM_BASEMASK) &&
			    id == hrm->hrm_id) {
				int i, nr;
				uint_t bo, spb;

				/*
				 * Extract leading unaligned bits.
				 */
				i = 0;
				while (i < n && (po & 3)) {
					bo = po / (NBBY / 2);
					spb = (3 - (po & 3)) * 2;
					*dp++ |= (hrm->hrm_bits[bo] >> spb) & 3;
					if (clearflag)
						hrm->hrm_bits[bo] &= ~(3<<spb);
					po++;
					i++;
				}
				/*
				 * Extract aligned bits.
				 */
				nr = n/4*4;
				bo = po / (NBBY / 2);
				while (i < nr) {
					int bits = hrm->hrm_bits[bo];
					*dp++ |= (bits >> 6) & 3;
					*dp++ |= (bits >> 4) & 3;
					*dp++ |= (bits >> 2) & 3;
					*dp++ |= (bits >> 0) & 3;
					if (clearflag)
						hrm->hrm_bits[bo] = 0;
					bo++;
					po += 4;
					i += 4;
				}
				/*
				 * Extract trailing unaligned bits.
				 */
				while (i < n) {
					bo = po / (NBBY / 2);
					spb = (3 - (po & 3)) * 2;
					*dp++ |= (hrm->hrm_bits[bo] >> spb) & 3;
					if (clearflag)
						hrm->hrm_bits[bo] &= ~(3<<spb);
					po++;
					i++;
				}

				break;
			}
		}
		if (hrm == NULL)
			dp += n;
		np -= n;
		a += n * MMU_PAGESIZE;
	}
	mutex_exit(&hat_statlock);
}