/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/disp.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/bootconf.h>
#include <sys/dumphdr.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <vm/hat_i86.h>
#include <sys/cmn_err.h>

#include <sys/bootinfo.h>
#include <vm/kboot_mmu.h>

static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count);

kmem_cache_t *htable_cache;

/*
 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT,
 * is used in order to facilitate testing of the htable_steal() code.
 * By resetting htable_reserve_amount to a lower value, we can force
 * stealing to occur.  The reserve amount is a guess to get us through boot.
 */
#define	HTABLE_RESERVE_AMOUNT	(200)
uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT;
kmutex_t htable_reserve_mutex;
uint_t htable_reserve_cnt;
htable_t *htable_reserve_pool;
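
/*
 * For example, stealing can be forced during testing by shrinking the
 * reserve (a test sketch, not a production setting), e.g. via /etc/system:
 *
 *	set htable_reserve_amount = 10
 */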

/*
 * Used to hand test htable_steal().
 */
#ifdef DEBUG
ulong_t force_steal = 0;
ulong_t ptable_cnt = 0;
#endif

/*
 * This variable exists so that htable_steal_passes can be tuned via
 * /etc/system. Any value works, but a power of two <= mmu.ptes_per_table
 * is best.
 */
uint_t htable_steal_passes = 8;
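
/*
 * For example, in /etc/system (8 is the default):
 *
 *	set htable_steal_passes = 4
 */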

/*
 * mutex stuff for access to htable hash
 */
#define	NUM_HTABLE_MUTEX 128
kmutex_t htable_mutex[NUM_HTABLE_MUTEX];
#define	HTABLE_MUTEX_HASH(h) ((h) & (NUM_HTABLE_MUTEX - 1))

#define	HTABLE_ENTER(h)	mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
#define	HTABLE_EXIT(h)	mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
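
/*
 * Since NUM_HTABLE_MUTEX is a power of two, the mask in
 * HTABLE_MUTEX_HASH() is equivalent to (h) % NUM_HTABLE_MUTEX,
 * without the cost of a division.
 */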

/*
 * forward declarations
 */
static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr);
static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr);
static void htable_free(htable_t *ht);
static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index);
static void x86pte_release_pagetable(htable_t *ht);
static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old,
	x86pte_t new);

/*
 * A counter to track if we are stealing or reaping htables. When non-zero
 * htable_free() will directly free htables (either to the reserve or kmem)
 * instead of putting them in a hat's htable cache.
 */
uint32_t htable_dont_cache = 0;
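
/*
 * htable_dont_cache is used as a nesting counter; htable_steal() and
 * htable_purge_hat() bracket their work with atomic_add_32() of +1/-1,
 * so it reads as non-zero while any stealing or reaping is in progress.
 */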

/*
 * Track the number of active pagetables, so we can know how many to reap
 */
static uint32_t active_ptables = 0;

/*
 * Allocate a memory page for a hardware page table.
 *
 * A wrapper around page_get_physical(), with some extra checks.
 */
static pfn_t
ptable_alloc(uintptr_t seed)
{
	pfn_t pfn;
	page_t *pp;

	pfn = PFN_INVALID;

	/*
	 * The first check is to see if there is memory in the system. If we
	 * drop to throttlefree, then fail the ptable_alloc() and let the
	 * stealing code kick in. Note that we have to do this test here,
	 * since the test in page_create_throttle() would let the NOSLEEP
	 * allocation go through and deplete the page reserves.
	 *
	 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check.
	 */
	if (!NOMEMWAIT() && freemem <= throttlefree + 1)
		return (PFN_INVALID);

#ifdef DEBUG
	/*
	 * This code makes htable_steal() easier to test. By setting
	 * force_steal we force pagetable allocations to fall
	 * into the stealing code. Roughly 1 in every "force_steal"
	 * page table allocations will fail.
	 */
	if (proc_pageout != NULL && force_steal > 1 &&
	    ++ptable_cnt > force_steal) {
		ptable_cnt = 0;
		return (PFN_INVALID);
	}
#endif /* DEBUG */

	pp = page_get_physical(seed);
	if (pp == NULL)
		return (PFN_INVALID);
	pfn = pp->p_pagenum;
	page_downgrade(pp);
	ASSERT(PAGE_SHARED(pp));

	if (pfn == PFN_INVALID)
		panic("ptable_alloc(): Invalid PFN!!");

	/*
	 * Count the page as an active pagetable only on success. The
	 * failure returns above are never paired with a ptable_free(),
	 * so bumping the count before them would leak it.
	 */
	atomic_add_32(&active_ptables, 1);
	HATSTAT_INC(hs_ptable_allocs);
	return (pfn);
}

/*
 * Free an htable's associated page table page.  See the comments
 * for ptable_alloc().
 */
static void
ptable_free(pfn_t pfn)
{
	page_t *pp = page_numtopp_nolock(pfn);

	/*
	 * need to destroy the page used for the pagetable
	 */
	ASSERT(pfn != PFN_INVALID);
	HATSTAT_INC(hs_ptable_frees);
	atomic_add_32(&active_ptables, -1);
	if (pp == NULL)
		panic("ptable_free(): no page for pfn!");
	ASSERT(PAGE_SHARED(pp));
	ASSERT(pfn == pp->p_pagenum);

	/*
	 * Get an exclusive lock, might have to wait for a kmem reader.
	 */
	if (!page_tryupgrade(pp)) {
		page_unlock(pp);
		/*
		 * RFE: we could change this to not loop forever
		 * George Cameron had some idea on how to do that.
		 * For now looping works - it's just like sfmmu.
		 */
		while (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_RECLAIM))
			continue;
	}
	page_free(pp, 1);
	page_unresv(1);
}

/*
 * Put one htable on the reserve list.
 */
static void
htable_put_reserve(htable_t *ht)
{
	ht->ht_hat = NULL;		/* no longer tied to a hat */
	ASSERT(ht->ht_pfn == PFN_INVALID);
	HATSTAT_INC(hs_htable_rputs);
	mutex_enter(&htable_reserve_mutex);
	ht->ht_next = htable_reserve_pool;
	htable_reserve_pool = ht;
	++htable_reserve_cnt;
	mutex_exit(&htable_reserve_mutex);
}

/*
 * Take one htable from the reserve.
 */
static htable_t *
htable_get_reserve(void)
{
	htable_t *ht = NULL;

	mutex_enter(&htable_reserve_mutex);
	if (htable_reserve_cnt != 0) {
		ht = htable_reserve_pool;
		ASSERT(ht != NULL);
		ASSERT(ht->ht_pfn == PFN_INVALID);
		htable_reserve_pool = ht->ht_next;
		--htable_reserve_cnt;
		HATSTAT_INC(hs_htable_rgets);
	}
	mutex_exit(&htable_reserve_mutex);
	return (ht);
}

/*
 * Allocate initial htables and put them on the reserve list
 */
void
htable_initial_reserve(uint_t count)
{
	htable_t *ht;

	count += HTABLE_RESERVE_AMOUNT;
	while (count > 0) {
		ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP);
		ASSERT(ht != NULL);

		ASSERT(use_boot_reserve);
		ht->ht_pfn = PFN_INVALID;
		htable_put_reserve(ht);
		--count;
	}
}

/*
 * Readjust the reserves after a thread finishes using them.
 */
void
htable_adjust_reserve()
{
	htable_t *ht;

	ASSERT(curthread != hat_reserves_thread);

	/*
	 * Free any excess htables in the reserve list
	 */
	while (htable_reserve_cnt > htable_reserve_amount) {
		ht = htable_get_reserve();
		if (ht == NULL)
			return;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		kmem_cache_free(htable_cache, ht);
	}
}


/*
 * This routine steals htables from user processes for htable_alloc() or
 * for htable_reap().
 */
static htable_t *
htable_steal(uint_t cnt)
{
	hat_t		*hat = kas.a_hat;	/* list starts with khat */
	htable_t	*list = NULL;
	htable_t	*ht;
	htable_t	*higher;
	uint_t		h;
	uint_t		h_start;
	static uint_t	h_seed = 0;
	uint_t		e;
	uintptr_t	va;
	x86pte_t	pte;
	uint_t		stolen = 0;
	uint_t		pass;
	uint_t		threshold;

	/*
	 * Limit htable_steal_passes to something reasonable
	 */
	if (htable_steal_passes == 0)
		htable_steal_passes = 1;
	if (htable_steal_passes > mmu.ptes_per_table)
		htable_steal_passes = mmu.ptes_per_table;
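
	/*
	 * The per-pass threshold computed below ramps up linearly with
	 * each pass; e.g. with 512 PTEs per table and the default 8
	 * passes, passes 0, 1, 2, ... only take tables with at most
	 * 0, 64, 128, ... valid entries.
	 */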

	/*
	 * Loop through all user hats. The 1st pass takes cached htables that
	 * aren't in use. The later passes steal by removing mappings, too.
	 */
	atomic_add_32(&htable_dont_cache, 1);
	for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) {
		threshold = pass * mmu.ptes_per_table / htable_steal_passes;
		hat = kas.a_hat;
		for (;;) {

			/*
			 * Clear the victim flag and move to next hat
			 */
			mutex_enter(&hat_list_lock);
			if (hat != kas.a_hat) {
				hat->hat_flags &= ~HAT_VICTIM;
				cv_broadcast(&hat_list_cv);
			}
			hat = hat->hat_next;

			/*
			 * Skip any hat that is already being stolen from.
			 *
			 * We skip SHARED hats, as these are dummy
			 * hats that host ISM shared page tables.
			 *
			 * We also skip if HAT_FREEING because hat_pte_unmap()
			 * won't zero out the PTEs. That would lead to hitting
			 * stale PTEs either here or under hat_unload() when we
			 * steal and unload the same page table in competing
			 * threads.
			 */
			while (hat != NULL &&
			    (hat->hat_flags &
			    (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0)
				hat = hat->hat_next;

			if (hat == NULL) {
				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Are we finished?
			 */
			if (stolen == cnt) {
				/*
				 * Try to spread the pain of stealing,
				 * move victim HAT to the end of the HAT list.
				 */
				if (pass >= 1 && cnt == 1 &&
				    kas.a_hat->hat_prev != hat) {

					/* unlink victim hat */
					if (hat->hat_prev)
						hat->hat_prev->hat_next =
						    hat->hat_next;
					else
						kas.a_hat->hat_next =
						    hat->hat_next;
					if (hat->hat_next)
						hat->hat_next->hat_prev =
						    hat->hat_prev;
					else
						kas.a_hat->hat_prev =
						    hat->hat_prev;


					/* relink at end of hat list */
					hat->hat_next = NULL;
					hat->hat_prev = kas.a_hat->hat_prev;
					if (hat->hat_prev)
						hat->hat_prev->hat_next = hat;
					else
						kas.a_hat->hat_next = hat;
					kas.a_hat->hat_prev = hat;

				}

				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Mark the HAT as a stealing victim.
			 */
			hat->hat_flags |= HAT_VICTIM;
			mutex_exit(&hat_list_lock);

			/*
			 * Take any htables from the hat's cached "free" list.
			 */
			hat_enter(hat);
			while ((ht = hat->hat_ht_cached) != NULL &&
			    stolen < cnt) {
				hat->hat_ht_cached = ht->ht_next;
				ht->ht_next = list;
				list = ht;
				++stolen;
			}
			hat_exit(hat);

			/*
			 * Don't steal on first pass.
			 */
			if (pass == 0 || stolen == cnt)
				continue;

			/*
			 * Search the active htables for one to steal.
			 * Start at a different hash bucket every time to
			 * help spread the pain of stealing.
			 */
			h = h_start = h_seed++ % hat->hat_num_hash;
			do {
				higher = NULL;
				HTABLE_ENTER(h);
				for (ht = hat->hat_ht_hash[h]; ht;
				    ht = ht->ht_next) {

					/*
					 * Can we rule out reaping?
					 */
					if (ht->ht_busy != 0 ||
					    (ht->ht_flags & HTABLE_SHARED_PFN)||
					    ht->ht_level > 0 ||
					    ht->ht_valid_cnt > threshold ||
					    ht->ht_lock_cnt != 0)
						continue;

					/*
					 * Increment busy so the htable can't
					 * disappear. We drop the htable mutex
					 * to avoid deadlocks with
					 * hat_pageunload() and the hment mutex
					 * while we call hat_pte_unmap()
					 */
					++ht->ht_busy;
					HTABLE_EXIT(h);

					/*
					 * Try stealing.
					 * - unload and invalidate all PTEs
					 */
					for (e = 0, va = ht->ht_vaddr;
					    e < HTABLE_NUM_PTES(ht) &&
					    ht->ht_valid_cnt > 0 &&
					    ht->ht_busy == 1 &&
					    ht->ht_lock_cnt == 0;
					    ++e, va += MMU_PAGESIZE) {
						pte = x86pte_get(ht, e);
						if (!PTE_ISVALID(pte))
							continue;
						hat_pte_unmap(ht, e,
						    HAT_UNLOAD, pte, NULL);
					}

					/*
					 * Reacquire htable lock. If we didn't
					 * remove all mappings in the table,
					 * or another thread added a new mapping
					 * behind us, give up on this table.
					 */
					HTABLE_ENTER(h);
					if (ht->ht_busy != 1 ||
					    ht->ht_valid_cnt != 0 ||
					    ht->ht_lock_cnt != 0) {
						--ht->ht_busy;
						continue;
					}

					/*
					 * Steal it and unlink the page table.
					 */
					higher = ht->ht_parent;
					unlink_ptp(higher, ht, ht->ht_vaddr);

					/*
					 * remove from the hash list
					 */
					if (ht->ht_next)
						ht->ht_next->ht_prev =
						    ht->ht_prev;

					if (ht->ht_prev) {
						ht->ht_prev->ht_next =
						    ht->ht_next;
					} else {
						ASSERT(hat->hat_ht_hash[h] ==
						    ht);
						hat->hat_ht_hash[h] =
						    ht->ht_next;
					}

					/*
					 * Break to outer loop to release the
					 * higher (ht_parent) pagetable. This
					 * spreads out the pain caused by
					 * pagefaults.
					 */
					ht->ht_next = list;
					list = ht;
					++stolen;
					break;
				}
				HTABLE_EXIT(h);
				if (higher != NULL)
					htable_release(higher);
				if (++h == hat->hat_num_hash)
					h = 0;
			} while (stolen < cnt && h != h_start);
		}
	}
	atomic_add_32(&htable_dont_cache, -1);
	return (list);
}


/*
 * This is invoked from kmem when the system is low on memory.  We try
 * to free hments, htables, and ptables to improve the memory situation.
 */
/*ARGSUSED*/
static void
htable_reap(void *handle)
{
	uint_t		reap_cnt;
	htable_t	*list;
	htable_t	*ht;

	HATSTAT_INC(hs_reap_attempts);
	if (!can_steal_post_boot)
		return;

	/*
	 * Try to reap 5% of the page tables bounded by a maximum of
	 * 5% of physmem and a minimum of 10.
	 */
	reap_cnt = MAX(MIN(physmem / 20, active_ptables / 20), 10);
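
	/*
	 * For example, with physmem of 262144 pages (1GB of 4K pages) and
	 * 4000 active pagetables: MAX(MIN(13107, 200), 10) = 200.
	 */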

	/*
	 * Let htable_steal() do the work, we just call htable_free()
	 */
	list = htable_steal(reap_cnt);
	while ((ht = list) != NULL) {
		list = ht->ht_next;
		HATSTAT_INC(hs_reaped);
		htable_free(ht);
	}

	/*
	 * Free up excess reserves
	 */
	htable_adjust_reserve();
	hment_adjust_reserve();
}

/*
 * Allocate an htable, stealing one or using the reserve if necessary
 */
static htable_t *
htable_alloc(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	htable_t	*ht = NULL;
	uint_t		is_vlp;
	uint_t		is_bare = 0;
	uint_t		need_to_zero = 1;
	int		kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_alloc(): level %d out of range\n", level);

	is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
	if (is_vlp || shared != NULL)
		is_bare = 1;

	/*
	 * First reuse a cached htable from the hat_ht_cached field, this
	 * avoids unnecessary trips through kmem/page allocators.
	 */
	if (hat->hat_ht_cached != NULL && !is_bare) {
		hat_enter(hat);
		ht = hat->hat_ht_cached;
		if (ht != NULL) {
			hat->hat_ht_cached = ht->ht_next;
			need_to_zero = 0;
			/* XX64 ASSERT() they're all zero somehow */
			ASSERT(ht->ht_pfn != PFN_INVALID);
		}
		hat_exit(hat);
	}

	if (ht == NULL) {
		/*
		 * When allocating for hat_memload_arena, we use the reserve.
		 * Also use reserves if we are in a panic().
		 */
		if (use_boot_reserve || curthread == hat_reserves_thread ||
		    panicstr != NULL) {
			ht = htable_get_reserve();
		} else {
			/*
			 * Donate successful htable allocations to the reserve.
			 */
			for (;;) {
				ASSERT(curthread != hat_reserves_thread);
				ht = kmem_cache_alloc(htable_cache, kmflags);
				if (ht == NULL)
					break;
				ht->ht_pfn = PFN_INVALID;
				if (curthread == hat_reserves_thread ||
				    panicstr != NULL ||
				    htable_reserve_cnt >= htable_reserve_amount)
					break;
				htable_put_reserve(ht);
			}
		}

		/*
		 * allocate a page for the hardware page table if needed
		 */
		if (ht != NULL && !is_bare) {
			ht->ht_hat = hat;
			ht->ht_pfn = ptable_alloc((uintptr_t)ht);
			if (ht->ht_pfn == PFN_INVALID) {
				kmem_cache_free(htable_cache, ht);
				ht = NULL;
			}
		}
	}

	/*
	 * If allocations failed, kick off a kmem_reap() and resort to
	 * htable_steal(). We may spin here if the system is very low on
	 * memory. If the kernel itself has consumed all memory and kmem_reap()
	 * can't free up anything, then we'll really get stuck here.
	 * That should only happen in a system where the administrator has
	 * misconfigured VM parameters via /etc/system.
	 */
	while (ht == NULL && can_steal_post_boot) {
		kmem_reap();
		ht = htable_steal(1);
		HATSTAT_INC(hs_steals);

		/*
		 * If we stole for a bare htable, release the pagetable page.
		 */
		if (ht != NULL && is_bare) {
			ptable_free(ht->ht_pfn);
			ht->ht_pfn = PFN_INVALID;
		}
	}

	/*
	 * All attempts to allocate or steal failed. This should only happen
	 * if we run out of memory during boot, due perhaps to a huge
	 * boot_archive. At this point there's no way to continue.
	 */
	if (ht == NULL)
		panic("htable_alloc(): couldn't steal\n");

	/*
	 * Shared page tables have all entries locked and entries may not
	 * be added or deleted.
	 */
	ht->ht_flags = 0;
	if (shared != NULL) {
		ASSERT(level == 0);
		ASSERT(shared->ht_valid_cnt > 0);
		ht->ht_flags |= HTABLE_SHARED_PFN;
		ht->ht_pfn = shared->ht_pfn;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;		/* updated in hat_share() */
		ht->ht_shares = shared;
		need_to_zero = 0;
	} else {
		ht->ht_shares = NULL;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;
	}

	/*
	 * setup flags, etc. for VLP htables
	 */
	if (is_vlp) {
		ht->ht_flags |= HTABLE_VLP;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		need_to_zero = 0;
	}

	/*
	 * fill in the htable
	 */
	ht->ht_hat = hat;
	ht->ht_parent = NULL;
	ht->ht_vaddr = vaddr;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;

	/*
	 * Zero out any freshly allocated page table
	 */
	if (need_to_zero)
		x86pte_zero(ht, 0, mmu.ptes_per_table);

	return (ht);
}

/*
 * Free up an htable, either to a hat's cached list, the reserves or
 * back to kmem.
 */
static void
htable_free(htable_t *ht)
{
	hat_t *hat = ht->ht_hat;

	/*
	 * If the process isn't exiting, cache the free htable in the hat
	 * structure. We always do this for the boot reserve. We don't
	 * do this if the hat is exiting or we are stealing/reaping htables.
	 */
	if (hat != NULL &&
	    !(ht->ht_flags & HTABLE_SHARED_PFN) &&
	    (use_boot_reserve ||
	    (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
		ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
		ASSERT(ht->ht_pfn != PFN_INVALID);
		hat_enter(hat);
		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		hat_exit(hat);
		return;
	}

	/*
	 * If we have a hardware page table, free it.
	 * We don't free page tables that are accessed by sharing.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht->ht_pfn);
	}
	ht->ht_pfn = PFN_INVALID;

	/*
	 * If we are the thread using the reserves, put free htables
	 * into reserves.
	 */
	if (curthread == hat_reserves_thread ||
	    htable_reserve_cnt < htable_reserve_amount)
		htable_put_reserve(ht);
	else
		kmem_cache_free(htable_cache, ht);
}


/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all leftover
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t *ht;
	int h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_add_32(&htable_dont_cache, 1);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_add_32(&htable_dont_cache, -1);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. We are always holding the HTABLE_ENTER() mutex when
 * doing this.
 */
static void
unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	expect = MAKEPTP(old->ht_pfn, old->ht_level);
	x86pte_t	found;

	ASSERT(higher->ht_busy > 0);
	ASSERT(higher->ht_valid_cnt > 0);
	ASSERT(old->ht_valid_cnt == 0);
	found = x86pte_cas(higher, entry, expect, 0);
	if (found != expect)
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);
	HTABLE_DEC(higher->ht_valid_cnt);
}

/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. We are always holding the HTABLE_ENTER() mutex when
 * doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t	found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);
}

/*
 * Release of hold on an htable. If this is the last use and the pagetable
 * is empty we may want to free it, then recursively look at the pagetable
 * above it. The recursion is handled by the outer while() loop.
 */
void
htable_release(htable_t *ht)
{
	uint_t		hashval;
	htable_t	*shared;
	htable_t	*higher;
	hat_t		*hat;
	uintptr_t	va;
	level_t		level;

	while (ht != NULL) {
		shared = NULL;
		for (;;) {
			hat = ht->ht_hat;
			va = ht->ht_vaddr;
			level = ht->ht_level;
			hashval = HTABLE_HASH(hat, va, level);

			/*
			 * The common case is that this isn't the last use of
			 * an htable so we don't want to free the htable.
			 */
			HTABLE_ENTER(hashval);
			ASSERT(ht->ht_lock_cnt == 0 || ht->ht_valid_cnt > 0);
			ASSERT(ht->ht_valid_cnt >= 0);
			ASSERT(ht->ht_busy > 0);
			if (ht->ht_valid_cnt > 0)
				break;
			if (ht->ht_busy > 1)
				break;

			/*
			 * we always release empty shared htables
			 */
			if (!(ht->ht_flags & HTABLE_SHARED_PFN)) {

				/*
				 * don't release if in address space tear down
				 */
				if (hat->hat_flags & HAT_FREEING)
					break;

				/*
				 * At and above max_page_level, free if it's for
				 * a boot-time kernel mapping below kernelbase.
				 */
				if (level >= mmu.max_page_level &&
				    (hat != kas.a_hat || va >= kernelbase))
					break;
			}

			/*
			 * Remember if we destroy an htable that shares its PFN
			 * from elsewhere.
			 */
			if (ht->ht_flags & HTABLE_SHARED_PFN) {
				ASSERT(ht->ht_level == 0);
				ASSERT(shared == NULL);
				shared = ht->ht_shares;
				HATSTAT_INC(hs_htable_unshared);
			}

			/*
			 * Handle release of a table and freeing the htable_t.
			 * Unlink it from the table higher (ie. ht_parent).
			 */
			ASSERT(ht->ht_lock_cnt == 0);
			higher = ht->ht_parent;
			ASSERT(higher != NULL);

			/*
			 * Unlink the pagetable.
			 */
			unlink_ptp(higher, ht, va);

			/*
			 * When any top level VLP page table entry changes, we
			 * must issue a reload of cr3 on all processors.
			 */
			if ((hat->hat_flags & HAT_VLP) &&
			    level == VLP_LEVEL - 1)
				hat_tlb_inval(hat, DEMAP_ALL_ADDR);

			/*
			 * remove this htable from its hash list
			 */
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[hashval] == ht);
				hat->hat_ht_hash[hashval] = ht->ht_next;
			}
			HTABLE_EXIT(hashval);
			htable_free(ht);
			ht = higher;
		}

		ASSERT(ht->ht_busy >= 1);
		--ht->ht_busy;
		HTABLE_EXIT(hashval);

		/*
		 * If we released a shared htable, do a release on the htable
		 * from which it shared
		 */
		ht = shared;
	}
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found acquires a hold that eventually needs to be htable_release()d
 */
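/*
 * Typical caller pattern (a sketch):
 *
 *	ht = htable_lookup(hat, vaddr, level);
 *	if (ht != NULL) {
 *		... use x86pte_get(ht, entry), etc ...
 *		htable_release(ht);
 *	}
 */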
10130Sstevel@tonic-gate htable_t *
10140Sstevel@tonic-gate htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level)
10150Sstevel@tonic-gate {
10160Sstevel@tonic-gate 	uintptr_t	base;
10170Sstevel@tonic-gate 	uint_t		hashval;
10180Sstevel@tonic-gate 	htable_t	*ht = NULL;
10190Sstevel@tonic-gate 
10200Sstevel@tonic-gate 	ASSERT(level >= 0);
10210Sstevel@tonic-gate 	ASSERT(level <= TOP_LEVEL(hat));
10220Sstevel@tonic-gate 
10230Sstevel@tonic-gate 	if (level == TOP_LEVEL(hat))
10240Sstevel@tonic-gate 		base = 0;
10250Sstevel@tonic-gate 	else
10260Sstevel@tonic-gate 		base = vaddr & LEVEL_MASK(level + 1);
10270Sstevel@tonic-gate 
10280Sstevel@tonic-gate 	hashval = HTABLE_HASH(hat, base, level);
10290Sstevel@tonic-gate 	HTABLE_ENTER(hashval);
10300Sstevel@tonic-gate 	for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) {
10310Sstevel@tonic-gate 		if (ht->ht_hat == hat &&
10320Sstevel@tonic-gate 		    ht->ht_vaddr == base &&
10330Sstevel@tonic-gate 		    ht->ht_level == level)
10340Sstevel@tonic-gate 			break;
10350Sstevel@tonic-gate 	}
10360Sstevel@tonic-gate 	if (ht)
10370Sstevel@tonic-gate 		++ht->ht_busy;
10380Sstevel@tonic-gate 
10390Sstevel@tonic-gate 	HTABLE_EXIT(hashval);
10400Sstevel@tonic-gate 	return (ht);
10410Sstevel@tonic-gate }
10420Sstevel@tonic-gate 
10430Sstevel@tonic-gate /*
10440Sstevel@tonic-gate  * Acquires a hold on a known htable (from a locked hment entry).
10450Sstevel@tonic-gate  */
10460Sstevel@tonic-gate void
10470Sstevel@tonic-gate htable_acquire(htable_t *ht)
10480Sstevel@tonic-gate {
10490Sstevel@tonic-gate 	hat_t		*hat = ht->ht_hat;
10500Sstevel@tonic-gate 	level_t		level = ht->ht_level;
10510Sstevel@tonic-gate 	uintptr_t	base = ht->ht_vaddr;
10520Sstevel@tonic-gate 	uint_t		hashval = HTABLE_HASH(hat, base, level);
10530Sstevel@tonic-gate 
10540Sstevel@tonic-gate 	HTABLE_ENTER(hashval);
10550Sstevel@tonic-gate #ifdef DEBUG
10560Sstevel@tonic-gate 	/*
10570Sstevel@tonic-gate 	 * make sure the htable is there
10580Sstevel@tonic-gate 	 */
10590Sstevel@tonic-gate 	{
10600Sstevel@tonic-gate 		htable_t	*h;
10610Sstevel@tonic-gate 
10620Sstevel@tonic-gate 		for (h = hat->hat_ht_hash[hashval];
10630Sstevel@tonic-gate 		    h && h != ht;
10640Sstevel@tonic-gate 		    h = h->ht_next)
10650Sstevel@tonic-gate 			;
10660Sstevel@tonic-gate 		ASSERT(h == ht);
10670Sstevel@tonic-gate 	}
10680Sstevel@tonic-gate #endif /* DEBUG */
10690Sstevel@tonic-gate 	++ht->ht_busy;
10700Sstevel@tonic-gate 	HTABLE_EXIT(hashval);
10710Sstevel@tonic-gate }
10720Sstevel@tonic-gate 
10730Sstevel@tonic-gate /*
10740Sstevel@tonic-gate  * Find the htable for the pagetable at the given level for the given address.
10750Sstevel@tonic-gate  * If found acquires a hold that eventually needs to be htable_release()d
10760Sstevel@tonic-gate  * If not found the table is created.
10770Sstevel@tonic-gate  *
10780Sstevel@tonic-gate  * Since we can't hold a hash table mutex during allocation, we have to
10790Sstevel@tonic-gate  * drop it and redo the search on a create. Then we may have to free the newly
10800Sstevel@tonic-gate  * allocated htable if another thread raced in and created it ahead of us.
10810Sstevel@tonic-gate  */
10820Sstevel@tonic-gate htable_t *
10830Sstevel@tonic-gate htable_create(
10840Sstevel@tonic-gate 	hat_t		*hat,
10850Sstevel@tonic-gate 	uintptr_t	vaddr,
10860Sstevel@tonic-gate 	level_t		level,
10870Sstevel@tonic-gate 	htable_t	*shared)
10880Sstevel@tonic-gate {
10890Sstevel@tonic-gate 	uint_t		h;
10900Sstevel@tonic-gate 	level_t		l;
10910Sstevel@tonic-gate 	uintptr_t	base;
10920Sstevel@tonic-gate 	htable_t	*ht;
10930Sstevel@tonic-gate 	htable_t	*higher = NULL;
10940Sstevel@tonic-gate 	htable_t	*new = NULL;
10950Sstevel@tonic-gate 
10960Sstevel@tonic-gate 	if (level < 0 || level > TOP_LEVEL(hat))
10970Sstevel@tonic-gate 		panic("htable_create(): level %d out of range\n", level);
10980Sstevel@tonic-gate 
10990Sstevel@tonic-gate 	/*
11000Sstevel@tonic-gate 	 * Create the page tables in top down order.
11010Sstevel@tonic-gate 	 */
11020Sstevel@tonic-gate 	for (l = TOP_LEVEL(hat); l >= level; --l) {
11030Sstevel@tonic-gate 		new = NULL;
11040Sstevel@tonic-gate 		if (l == TOP_LEVEL(hat))
11050Sstevel@tonic-gate 			base = 0;
11060Sstevel@tonic-gate 		else
11070Sstevel@tonic-gate 			base = vaddr & LEVEL_MASK(l + 1);
11080Sstevel@tonic-gate 
11090Sstevel@tonic-gate 		h = HTABLE_HASH(hat, base, l);
11100Sstevel@tonic-gate try_again:
11110Sstevel@tonic-gate 		/*
11120Sstevel@tonic-gate 		 * look up the htable at this level
11130Sstevel@tonic-gate 		 */
11140Sstevel@tonic-gate 		HTABLE_ENTER(h);
11150Sstevel@tonic-gate 		if (l == TOP_LEVEL(hat)) {
11160Sstevel@tonic-gate 			ht = hat->hat_htable;
11170Sstevel@tonic-gate 		} else {
11180Sstevel@tonic-gate 			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
11190Sstevel@tonic-gate 				ASSERT(ht->ht_hat == hat);
11200Sstevel@tonic-gate 				if (ht->ht_vaddr == base &&
11210Sstevel@tonic-gate 				    ht->ht_level == l)
11220Sstevel@tonic-gate 					break;
11230Sstevel@tonic-gate 			}
11240Sstevel@tonic-gate 		}
11250Sstevel@tonic-gate 
11260Sstevel@tonic-gate 		/*
11270Sstevel@tonic-gate 		 * if we found the htable, increment its busy cnt
11280Sstevel@tonic-gate 		 * and if we had allocated a new htable, free it.
11290Sstevel@tonic-gate 		 */
11300Sstevel@tonic-gate 		if (ht != NULL) {
11310Sstevel@tonic-gate 			/*
11320Sstevel@tonic-gate 			 * If we find a pre-existing shared table, it must
11330Sstevel@tonic-gate 			 * share from the same place.
11340Sstevel@tonic-gate 			 */
11350Sstevel@tonic-gate 			if (l == level && shared && ht->ht_shares &&
11360Sstevel@tonic-gate 			    ht->ht_shares != shared) {
11370Sstevel@tonic-gate 				panic("htable shared from wrong place "
11380Sstevel@tonic-gate 				    "found htable=%p shared=%p", ht, shared);
11390Sstevel@tonic-gate 			}
11400Sstevel@tonic-gate 			++ht->ht_busy;
11410Sstevel@tonic-gate 			HTABLE_EXIT(h);
11420Sstevel@tonic-gate 			if (new)
11430Sstevel@tonic-gate 				htable_free(new);
11440Sstevel@tonic-gate 			if (higher != NULL)
11450Sstevel@tonic-gate 				htable_release(higher);
11460Sstevel@tonic-gate 			higher = ht;
11470Sstevel@tonic-gate 
11480Sstevel@tonic-gate 		/*
11490Sstevel@tonic-gate 		 * if we didn't find it on the first search
11500Sstevel@tonic-gate 		 * allocate a new one and search again
11510Sstevel@tonic-gate 		 */
11520Sstevel@tonic-gate 		} else if (new == NULL) {
11530Sstevel@tonic-gate 			HTABLE_EXIT(h);
11540Sstevel@tonic-gate 			new = htable_alloc(hat, base, l,
11550Sstevel@tonic-gate 			    l == level ? shared : NULL);
11560Sstevel@tonic-gate 			goto try_again;
11570Sstevel@tonic-gate 
11580Sstevel@tonic-gate 		/*
11590Sstevel@tonic-gate 		 * 2nd search and still not there, use "new" table
11600Sstevel@tonic-gate 		 * Link new table into higher, when not at top level.
11610Sstevel@tonic-gate 		 */
11620Sstevel@tonic-gate 		} else {
11630Sstevel@tonic-gate 			ht = new;
11640Sstevel@tonic-gate 			if (higher != NULL) {
11650Sstevel@tonic-gate 				link_ptp(higher, ht, base);
11660Sstevel@tonic-gate 				ht->ht_parent = higher;
11670Sstevel@tonic-gate 
11680Sstevel@tonic-gate 				/*
11690Sstevel@tonic-gate 				 * When any top level VLP page table changes,
11700Sstevel@tonic-gate 				 * we must reload cr3 on all processors.
11710Sstevel@tonic-gate 				 */
11720Sstevel@tonic-gate #ifdef __i386
11730Sstevel@tonic-gate 				if (mmu.pae_hat &&
11740Sstevel@tonic-gate #else /* !__i386 */
11750Sstevel@tonic-gate 				if ((hat->hat_flags & HAT_VLP) &&
11760Sstevel@tonic-gate #endif /* __i386 */
11770Sstevel@tonic-gate 				    l == VLP_LEVEL - 1)
1178*3446Smrj 					hat_tlb_inval(hat, DEMAP_ALL_ADDR);
11790Sstevel@tonic-gate 			}
11800Sstevel@tonic-gate 			ht->ht_next = hat->hat_ht_hash[h];
11810Sstevel@tonic-gate 			ASSERT(ht->ht_prev == NULL);
11820Sstevel@tonic-gate 			if (hat->hat_ht_hash[h])
11830Sstevel@tonic-gate 				hat->hat_ht_hash[h]->ht_prev = ht;
11840Sstevel@tonic-gate 			hat->hat_ht_hash[h] = ht;
11850Sstevel@tonic-gate 			HTABLE_EXIT(h);
11860Sstevel@tonic-gate 
11870Sstevel@tonic-gate 			/*
11880Sstevel@tonic-gate 			 * Note we don't do htable_release(higher).
11890Sstevel@tonic-gate 			 * That happens recursively when "new" is removed by
11900Sstevel@tonic-gate 			 * htable_release() or htable_steal().
11910Sstevel@tonic-gate 			 */
11920Sstevel@tonic-gate 			higher = ht;
11930Sstevel@tonic-gate 
11940Sstevel@tonic-gate 			/*
11950Sstevel@tonic-gate 			 * If we just created a new shared page table we
11960Sstevel@tonic-gate 			 * increment the shared htable's busy count, so that
11970Sstevel@tonic-gate 			 * it can't be the victim of a steal even if it's empty.
11980Sstevel@tonic-gate 			 */
11990Sstevel@tonic-gate 			if (l == level && shared) {
12000Sstevel@tonic-gate 				(void) htable_lookup(shared->ht_hat,
12010Sstevel@tonic-gate 				    shared->ht_vaddr, shared->ht_level);
12020Sstevel@tonic-gate 				HATSTAT_INC(hs_htable_shared);
12030Sstevel@tonic-gate 			}
12040Sstevel@tonic-gate 		}
12050Sstevel@tonic-gate 	}
12060Sstevel@tonic-gate 
12070Sstevel@tonic-gate 	return (ht);
12080Sstevel@tonic-gate }
12090Sstevel@tonic-gate 
12100Sstevel@tonic-gate /*
1211*3446Smrj  * Inherit initial pagetables from the boot program.
1212*3446Smrj  */
1213*3446Smrj void
1214*3446Smrj htable_attach(
1215*3446Smrj 	hat_t *hat,
1216*3446Smrj 	uintptr_t base,
1217*3446Smrj 	level_t level,
1218*3446Smrj 	htable_t *parent,
1219*3446Smrj 	pfn_t pfn)
1220*3446Smrj {
1221*3446Smrj 	htable_t	*ht;
1222*3446Smrj 	uint_t		h;
1223*3446Smrj 	uint_t		i;
1224*3446Smrj 	x86pte_t	pte;
1225*3446Smrj 	x86pte_t	*ptep;
1226*3446Smrj 	page_t		*pp;
1227*3446Smrj 	extern page_t	*boot_claim_page(pfn_t);
1228*3446Smrj 
1229*3446Smrj 	ht = htable_get_reserve();
1230*3446Smrj 	if (level == mmu.max_level)
1231*3446Smrj 		kas.a_hat->hat_htable = ht;
1232*3446Smrj 	ht->ht_hat = hat;
1233*3446Smrj 	ht->ht_parent = parent;
1234*3446Smrj 	ht->ht_vaddr = base;
1235*3446Smrj 	ht->ht_level = level;
1236*3446Smrj 	ht->ht_busy = 1;
1237*3446Smrj 	ht->ht_next = NULL;
1238*3446Smrj 	ht->ht_prev = NULL;
1239*3446Smrj 	ht->ht_flags = 0;
1240*3446Smrj 	ht->ht_pfn = pfn;
1241*3446Smrj 	ht->ht_lock_cnt = 0;
1242*3446Smrj 	ht->ht_valid_cnt = 0;
1243*3446Smrj 	if (parent != NULL)
1244*3446Smrj 		++parent->ht_busy;
1245*3446Smrj 
1246*3446Smrj 	h = HTABLE_HASH(hat, base, level);
1247*3446Smrj 	HTABLE_ENTER(h);
1248*3446Smrj 	ht->ht_next = hat->hat_ht_hash[h];
1249*3446Smrj 	ASSERT(ht->ht_prev == NULL);
1250*3446Smrj 	if (hat->hat_ht_hash[h])
1251*3446Smrj 		hat->hat_ht_hash[h]->ht_prev = ht;
1252*3446Smrj 	hat->hat_ht_hash[h] = ht;
1253*3446Smrj 	HTABLE_EXIT(h);
1254*3446Smrj 
1255*3446Smrj 	/*
1256*3446Smrj 	 * make sure the page table physical page is not FREE
1257*3446Smrj 	 */
1258*3446Smrj 	if (page_resv(1, KM_NOSLEEP) == 0)
1259*3446Smrj 		panic("page_resv() failed in ptable alloc");
1260*3446Smrj 
1261*3446Smrj 	pp = boot_claim_page(pfn);
1262*3446Smrj 	ASSERT(pp != NULL);
1263*3446Smrj 	page_downgrade(pp);
1264*3446Smrj 	/*
1265*3446Smrj 	 * Record in the page_t that it is a pagetable, for segkpm setup.
1266*3446Smrj 	 */
1267*3446Smrj 	if (kpm_vbase)
1268*3446Smrj 		pp->p_index = 1;
1269*3446Smrj 
1270*3446Smrj 	/*
1271*3446Smrj 	 * Count valid mappings and recursively attach lower level pagetables.
1272*3446Smrj 	 */
1273*3446Smrj 	ptep = kbm_remap_window(pfn_to_pa(pfn), 0);
1274*3446Smrj 	for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) {
1275*3446Smrj 		if (mmu.pae_hat)
1276*3446Smrj 			pte = ptep[i];
1277*3446Smrj 		else
1278*3446Smrj 			pte = ((x86pte32_t *)ptep)[i];
1279*3446Smrj 		if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) {
1280*3446Smrj 			++ht->ht_valid_cnt;
1281*3446Smrj 			if (!PTE_ISPAGE(pte, level)) {
1282*3446Smrj 				htable_attach(hat, base, level - 1,
1283*3446Smrj 				    ht, PTE2PFN(pte, level));
1284*3446Smrj 				ptep = kbm_remap_window(pfn_to_pa(pfn), 0);
1285*3446Smrj 			}
1286*3446Smrj 		}
1287*3446Smrj 		base += LEVEL_SIZE(level);
1288*3446Smrj 		if (base == mmu.hole_start)
1289*3446Smrj 			base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK;
1290*3446Smrj 	}
1291*3446Smrj 
1292*3446Smrj 	/*
1293*3446Smrj 	 * As long as all the mappings we had were below kernel base
1294*3446Smrj 	 * we can release the htable.
1295*3446Smrj 	 */
1296*3446Smrj 	if (base < kernelbase)
1297*3446Smrj 		htable_release(ht);
1298*3446Smrj }
1299*3446Smrj 
1300*3446Smrj /*
13010Sstevel@tonic-gate  * Walk through a given htable looking for the first valid entry.  This
13020Sstevel@tonic-gate  * routine takes both a starting and ending address.  The starting address
13030Sstevel@tonic-gate  * is required to be within the htable provided by the caller, but there is
13040Sstevel@tonic-gate  * no such restriction on the ending address.
13050Sstevel@tonic-gate  *
13060Sstevel@tonic-gate  * If the routine finds a valid entry in the htable (at or beyond the
13070Sstevel@tonic-gate  * starting address), the PTE (and its address) will be returned.
13080Sstevel@tonic-gate  * This PTE may correspond to either a page or a pagetable - it is the
13090Sstevel@tonic-gate  * caller's responsibility to determine which.  If no valid entry is
13100Sstevel@tonic-gate  * found, 0 (an invalid PTE) and the next unexamined address will be
13110Sstevel@tonic-gate  * returned.
13120Sstevel@tonic-gate  *
13130Sstevel@tonic-gate  * The loop has been carefully coded for optimization.
13140Sstevel@tonic-gate  */
13150Sstevel@tonic-gate static x86pte_t
13160Sstevel@tonic-gate htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr)
13170Sstevel@tonic-gate {
13180Sstevel@tonic-gate 	uint_t e;
13190Sstevel@tonic-gate 	x86pte_t found_pte = (x86pte_t)0;
1320*3446Smrj 	caddr_t pte_ptr;
1321*3446Smrj 	caddr_t end_pte_ptr;
13220Sstevel@tonic-gate 	int l = ht->ht_level;
13230Sstevel@tonic-gate 	uintptr_t va = *vap & LEVEL_MASK(l);
13240Sstevel@tonic-gate 	size_t pgsize = LEVEL_SIZE(l);
13250Sstevel@tonic-gate 
13260Sstevel@tonic-gate 	ASSERT(va >= ht->ht_vaddr);
13270Sstevel@tonic-gate 	ASSERT(va <= HTABLE_LAST_PAGE(ht));
13280Sstevel@tonic-gate 
13290Sstevel@tonic-gate 	/*
13300Sstevel@tonic-gate 	 * Compute the starting index and ending virtual address
13310Sstevel@tonic-gate 	 */
13320Sstevel@tonic-gate 	e = htable_va2entry(va, ht);
13330Sstevel@tonic-gate 
13340Sstevel@tonic-gate 	/*
13350Sstevel@tonic-gate 	 * The following page table scan code knows that the valid
13360Sstevel@tonic-gate 	 * bit of a PTE is in the lowest byte AND that x86 is little endian!!
13370Sstevel@tonic-gate 	 */
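	/*
	 * Illustration (not live code): a PAE PTE with the value
	 * 0x8000000012345001 sits in little endian memory as the bytes
	 * 01 50 34 12 00 00 00 80, so examining only the first byte of
	 * each entry is enough to test PT_VALID for either PTE size.
	 */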
1338*3446Smrj 	pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0);
1339*3446Smrj 	end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht));
1340*3446Smrj 	pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e);
13412687Skchow 	while (!PTE_ISVALID(*pte_ptr)) {
13420Sstevel@tonic-gate 		va += pgsize;
13430Sstevel@tonic-gate 		if (va >= eaddr)
13440Sstevel@tonic-gate 			break;
13450Sstevel@tonic-gate 		pte_ptr += mmu.pte_size;
13460Sstevel@tonic-gate 		ASSERT(pte_ptr <= end_pte_ptr);
13470Sstevel@tonic-gate 		if (pte_ptr == end_pte_ptr)
13480Sstevel@tonic-gate 			break;
13490Sstevel@tonic-gate 	}
13500Sstevel@tonic-gate 
13510Sstevel@tonic-gate 	/*
13520Sstevel@tonic-gate 	 * if we found a valid PTE, load the entire PTE
13530Sstevel@tonic-gate 	 */
1354*3446Smrj 	if (va < eaddr && pte_ptr != end_pte_ptr)
1355*3446Smrj 		found_pte = GET_PTE((x86pte_t *)pte_ptr);
13560Sstevel@tonic-gate 	x86pte_release_pagetable(ht);
13570Sstevel@tonic-gate 
13580Sstevel@tonic-gate #if defined(__amd64)
13590Sstevel@tonic-gate 	/*
13600Sstevel@tonic-gate 	 * deal with VA hole on amd64
13610Sstevel@tonic-gate 	 */
13620Sstevel@tonic-gate 	if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end)
13630Sstevel@tonic-gate 		va = mmu.hole_end + va - mmu.hole_start;
13640Sstevel@tonic-gate #endif /* __amd64 */
13650Sstevel@tonic-gate 
13660Sstevel@tonic-gate 	*vap = va;
13670Sstevel@tonic-gate 	return (found_pte);
13680Sstevel@tonic-gate }
13690Sstevel@tonic-gate 
13700Sstevel@tonic-gate /*
13710Sstevel@tonic-gate  * Find the address and htable for the first populated translation at or
13720Sstevel@tonic-gate  * above the given virtual address.  The caller may also specify an upper
13730Sstevel@tonic-gate  * limit to the address range to search.  Uses level information to quickly
13740Sstevel@tonic-gate  * skip unpopulated sections of virtual address spaces.
13750Sstevel@tonic-gate  *
13760Sstevel@tonic-gate  * If not found returns NULL. When found, returns the htable and virt addr
13770Sstevel@tonic-gate  * and has a hold on the htable.
13780Sstevel@tonic-gate  */
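/*
 * A minimal usage sketch (illustrative only; "low" and "high" are
 * hypothetical bounds supplied by the caller):
 *
 *	htable_t *ht = NULL;
 *	uintptr_t va = low;
 *	x86pte_t pte;
 *
 *	while ((pte = htable_walk(hat, &ht, &va, high)) != 0) {
 *		... process the LEVEL_SIZE(ht->ht_level) bytes at va ...
 *		va += LEVEL_SIZE(ht->ht_level);
 *	}
 *	if (ht != NULL)
 *		htable_release(ht);
 */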
13790Sstevel@tonic-gate x86pte_t
13800Sstevel@tonic-gate htable_walk(
13810Sstevel@tonic-gate 	struct hat *hat,
13820Sstevel@tonic-gate 	htable_t **htp,
13830Sstevel@tonic-gate 	uintptr_t *vaddr,
13840Sstevel@tonic-gate 	uintptr_t eaddr)
13850Sstevel@tonic-gate {
13860Sstevel@tonic-gate 	uintptr_t va = *vaddr;
13870Sstevel@tonic-gate 	htable_t *ht;
13880Sstevel@tonic-gate 	htable_t *prev = *htp;
13890Sstevel@tonic-gate 	level_t l;
13900Sstevel@tonic-gate 	level_t max_mapped_level;
13910Sstevel@tonic-gate 	x86pte_t pte;
13920Sstevel@tonic-gate 
13930Sstevel@tonic-gate 	ASSERT(eaddr > va);
13940Sstevel@tonic-gate 
13950Sstevel@tonic-gate 	/*
13960Sstevel@tonic-gate 	 * If this is a user address, then we know we need not look beyond
13970Sstevel@tonic-gate 	 * kernelbase.
13980Sstevel@tonic-gate 	 */
13990Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || eaddr <= kernelbase ||
14000Sstevel@tonic-gate 	    eaddr == HTABLE_WALK_TO_END);
14010Sstevel@tonic-gate 	if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END)
14020Sstevel@tonic-gate 		eaddr = kernelbase;
14030Sstevel@tonic-gate 
14040Sstevel@tonic-gate 	/*
14050Sstevel@tonic-gate 	 * If we're coming in with a previous page table, search it first
14060Sstevel@tonic-gate 	 * without doing an htable_lookup(); this should be the frequent case.
14070Sstevel@tonic-gate 	 */
14080Sstevel@tonic-gate 	if (prev) {
14090Sstevel@tonic-gate 		ASSERT(prev->ht_busy > 0);
14100Sstevel@tonic-gate 		ASSERT(prev->ht_vaddr <= va);
14110Sstevel@tonic-gate 		l = prev->ht_level;
14120Sstevel@tonic-gate 		if (va <= HTABLE_LAST_PAGE(prev)) {
14130Sstevel@tonic-gate 			pte = htable_scan(prev, &va, eaddr);
14140Sstevel@tonic-gate 
14150Sstevel@tonic-gate 			if (PTE_ISPAGE(pte, l)) {
14160Sstevel@tonic-gate 				*vaddr = va;
14170Sstevel@tonic-gate 				*htp = prev;
14180Sstevel@tonic-gate 				return (pte);
14190Sstevel@tonic-gate 			}
14200Sstevel@tonic-gate 		}
14210Sstevel@tonic-gate 
14220Sstevel@tonic-gate 		/*
14230Sstevel@tonic-gate 		 * We found nothing in the htable provided by the caller,
14240Sstevel@tonic-gate 		 * so fall through and do the full search
14250Sstevel@tonic-gate 		 */
14260Sstevel@tonic-gate 		htable_release(prev);
14270Sstevel@tonic-gate 	}
14280Sstevel@tonic-gate 
14290Sstevel@tonic-gate 	/*
14300Sstevel@tonic-gate 	 * Find the level of the largest pagesize used by this HAT.
14310Sstevel@tonic-gate 	 */
14320Sstevel@tonic-gate 	max_mapped_level = 0;
14330Sstevel@tonic-gate 	for (l = 1; l <= mmu.max_page_level; ++l)
14340Sstevel@tonic-gate 		if (hat->hat_pages_mapped[l] != 0)
14350Sstevel@tonic-gate 			max_mapped_level = l;
14360Sstevel@tonic-gate 
14370Sstevel@tonic-gate 	while (va < eaddr && va >= *vaddr) {
14380Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(va));
14390Sstevel@tonic-gate 
14400Sstevel@tonic-gate 		/*
14410Sstevel@tonic-gate 		 * Find the lowest table with any entry for the given address.
14420Sstevel@tonic-gate 		 */
14430Sstevel@tonic-gate 		for (l = 0; l <= TOP_LEVEL(hat); ++l) {
14440Sstevel@tonic-gate 			ht = htable_lookup(hat, va, l);
14450Sstevel@tonic-gate 			if (ht != NULL) {
14460Sstevel@tonic-gate 				pte = htable_scan(ht, &va, eaddr);
14470Sstevel@tonic-gate 				if (PTE_ISPAGE(pte, l)) {
14480Sstevel@tonic-gate 					*vaddr = va;
14490Sstevel@tonic-gate 					*htp = ht;
14500Sstevel@tonic-gate 					return (pte);
14510Sstevel@tonic-gate 				}
14520Sstevel@tonic-gate 				htable_release(ht);
14530Sstevel@tonic-gate 				break;
14540Sstevel@tonic-gate 			}
14550Sstevel@tonic-gate 
14560Sstevel@tonic-gate 			/*
14570Sstevel@tonic-gate 			 * The ht is never NULL at the top level since
14580Sstevel@tonic-gate 			 * the top level htable is created in hat_alloc().
14590Sstevel@tonic-gate 			 */
14600Sstevel@tonic-gate 			ASSERT(l < TOP_LEVEL(hat));
14610Sstevel@tonic-gate 
14620Sstevel@tonic-gate 			/*
14630Sstevel@tonic-gate 			 * No htable covers the address. If there is no
14640Sstevel@tonic-gate 			 * larger page size that could cover it, we
14650Sstevel@tonic-gate 			 * skip to the start of the next page table.
14660Sstevel@tonic-gate 			 */
14670Sstevel@tonic-gate 			if (l >= max_mapped_level) {
14680Sstevel@tonic-gate 				va = NEXT_ENTRY_VA(va, l + 1);
14690Sstevel@tonic-gate 				break;
14700Sstevel@tonic-gate 			}
14710Sstevel@tonic-gate 		}
14720Sstevel@tonic-gate 	}
14730Sstevel@tonic-gate 
14740Sstevel@tonic-gate 	*vaddr = 0;
14750Sstevel@tonic-gate 	*htp = NULL;
14760Sstevel@tonic-gate 	return (0);
14770Sstevel@tonic-gate }
14780Sstevel@tonic-gate 
14790Sstevel@tonic-gate /*
14800Sstevel@tonic-gate  * Find the htable and page table entry index of the given virtual address
14810Sstevel@tonic-gate  * with pagesize at or below the given level.
14820Sstevel@tonic-gate  * If not found returns NULL. When found, returns the htable, sets
14830Sstevel@tonic-gate  * entry, and has a hold on the htable.
14840Sstevel@tonic-gate  */
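/*
 * A hedged example of a probe for a mapping at vaddr (error handling
 * elided; all names besides the htable interfaces are hypothetical):
 *
 *	uint_t entry;
 *	x86pte_t pte;
 *	htable_t *ht;
 *
 *	ht = htable_getpte(hat, vaddr, &entry, &pte, mmu.max_page_level);
 *	if (ht != NULL) {
 *		... examine pte and entry ...
 *		htable_release(ht);
 *	}
 */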
14850Sstevel@tonic-gate htable_t *
14860Sstevel@tonic-gate htable_getpte(
14870Sstevel@tonic-gate 	struct hat *hat,
14880Sstevel@tonic-gate 	uintptr_t vaddr,
14890Sstevel@tonic-gate 	uint_t *entry,
14900Sstevel@tonic-gate 	x86pte_t *pte,
14910Sstevel@tonic-gate 	level_t level)
14920Sstevel@tonic-gate {
14930Sstevel@tonic-gate 	htable_t	*ht;
14940Sstevel@tonic-gate 	level_t		l;
14950Sstevel@tonic-gate 	uint_t		e;
14960Sstevel@tonic-gate 
14970Sstevel@tonic-gate 	ASSERT(level <= mmu.max_page_level);
14980Sstevel@tonic-gate 
14990Sstevel@tonic-gate 	for (l = 0; l <= level; ++l) {
15000Sstevel@tonic-gate 		ht = htable_lookup(hat, vaddr, l);
15010Sstevel@tonic-gate 		if (ht == NULL)
15020Sstevel@tonic-gate 			continue;
15030Sstevel@tonic-gate 		e = htable_va2entry(vaddr, ht);
15040Sstevel@tonic-gate 		if (entry != NULL)
15050Sstevel@tonic-gate 			*entry = e;
15060Sstevel@tonic-gate 		if (pte != NULL)
15070Sstevel@tonic-gate 			*pte = x86pte_get(ht, e);
15080Sstevel@tonic-gate 		return (ht);
15090Sstevel@tonic-gate 	}
15100Sstevel@tonic-gate 	return (NULL);
15110Sstevel@tonic-gate }
15120Sstevel@tonic-gate 
15130Sstevel@tonic-gate /*
15140Sstevel@tonic-gate  * Find the htable and page table entry index of the given virtual address.
15150Sstevel@tonic-gate  * There must be a valid page mapped at the given address.
15160Sstevel@tonic-gate  * If not found returns NULL. When found, returns the htable, sets
15170Sstevel@tonic-gate  * entry, and has a hold on the htable.
15180Sstevel@tonic-gate  */
15190Sstevel@tonic-gate htable_t *
15200Sstevel@tonic-gate htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry)
15210Sstevel@tonic-gate {
15220Sstevel@tonic-gate 	htable_t	*ht;
15230Sstevel@tonic-gate 	uint_t		e;
15240Sstevel@tonic-gate 	x86pte_t	pte;
15250Sstevel@tonic-gate 
15260Sstevel@tonic-gate 	ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level);
15270Sstevel@tonic-gate 	if (ht == NULL)
15280Sstevel@tonic-gate 		return (NULL);
15290Sstevel@tonic-gate 
15300Sstevel@tonic-gate 	if (entry)
15310Sstevel@tonic-gate 		*entry = e;
15320Sstevel@tonic-gate 
15330Sstevel@tonic-gate 	if (PTE_ISPAGE(pte, ht->ht_level))
15340Sstevel@tonic-gate 		return (ht);
15350Sstevel@tonic-gate 	htable_release(ht);
15360Sstevel@tonic-gate 	return (NULL);
15370Sstevel@tonic-gate }
15380Sstevel@tonic-gate 
15390Sstevel@tonic-gate 
15400Sstevel@tonic-gate void
15410Sstevel@tonic-gate htable_init()
15420Sstevel@tonic-gate {
15430Sstevel@tonic-gate 	/*
15440Sstevel@tonic-gate 	 * To save on kernel VA usage, we avoid debug information in 32 bit
15450Sstevel@tonic-gate 	 * kernels.
15460Sstevel@tonic-gate 	 */
15470Sstevel@tonic-gate #if defined(__amd64)
15480Sstevel@tonic-gate 	int	kmem_flags = KMC_NOHASH;
15490Sstevel@tonic-gate #elif defined(__i386)
15500Sstevel@tonic-gate 	int	kmem_flags = KMC_NOHASH | KMC_NODEBUG;
15510Sstevel@tonic-gate #endif
15520Sstevel@tonic-gate 
15530Sstevel@tonic-gate 	/*
15540Sstevel@tonic-gate 	 * initialize kmem caches
15550Sstevel@tonic-gate 	 */
15560Sstevel@tonic-gate 	htable_cache = kmem_cache_create("htable_t",
15570Sstevel@tonic-gate 	    sizeof (htable_t), 0, NULL, NULL,
15580Sstevel@tonic-gate 	    htable_reap, NULL, hat_memload_arena, kmem_flags);
15590Sstevel@tonic-gate }
15600Sstevel@tonic-gate 
15610Sstevel@tonic-gate /*
15620Sstevel@tonic-gate  * get the pte index for the virtual address in the given htable's pagetable
15630Sstevel@tonic-gate  */
15640Sstevel@tonic-gate uint_t
15650Sstevel@tonic-gate htable_va2entry(uintptr_t va, htable_t *ht)
15660Sstevel@tonic-gate {
15670Sstevel@tonic-gate 	level_t	l = ht->ht_level;
15680Sstevel@tonic-gate 
15690Sstevel@tonic-gate 	ASSERT(va >= ht->ht_vaddr);
15700Sstevel@tonic-gate 	ASSERT(va <= HTABLE_LAST_PAGE(ht));
1571*3446Smrj 	return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1));
15720Sstevel@tonic-gate }
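/*
 * A worked example (assumes a level 0 htable with 512 PTEs, so
 * LEVEL_SHIFT(0) == 12 and HTABLE_NUM_PTES(ht) == 512):
 *
 *	htable_va2entry(0x12345000, ht) == (0x12345000 >> 12) & 511 == 0x145
 */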
15730Sstevel@tonic-gate 
15740Sstevel@tonic-gate /*
15750Sstevel@tonic-gate  * Given an htable and the index of a pte in it, return the virtual address
15760Sstevel@tonic-gate  * of the page.
15770Sstevel@tonic-gate  */
15780Sstevel@tonic-gate uintptr_t
15790Sstevel@tonic-gate htable_e2va(htable_t *ht, uint_t entry)
15800Sstevel@tonic-gate {
15810Sstevel@tonic-gate 	level_t	l = ht->ht_level;
15820Sstevel@tonic-gate 	uintptr_t va;
15830Sstevel@tonic-gate 
1584*3446Smrj 	ASSERT(entry < HTABLE_NUM_PTES(ht));
15850Sstevel@tonic-gate 	va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l));
15860Sstevel@tonic-gate 
15870Sstevel@tonic-gate 	/*
15880Sstevel@tonic-gate 	 * Need to skip over any VA hole in top level table
15890Sstevel@tonic-gate 	 */
15900Sstevel@tonic-gate #if defined(__amd64)
15910Sstevel@tonic-gate 	if (ht->ht_level == mmu.max_level && va >= mmu.hole_start)
15920Sstevel@tonic-gate 		va += ((mmu.hole_end - mmu.hole_start) + 1);
15930Sstevel@tonic-gate #endif
15940Sstevel@tonic-gate 
15950Sstevel@tonic-gate 	return (va);
15960Sstevel@tonic-gate }
15970Sstevel@tonic-gate 
15980Sstevel@tonic-gate /*
15990Sstevel@tonic-gate  * The code uses compare and swap instructions to read/write PTE's to
16000Sstevel@tonic-gate  * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems,
16010Sstevel@tonic-gate  * where a plain load or store will not naturally be atomic.
16020Sstevel@tonic-gate  *
16030Sstevel@tonic-gate  * The combination of kpreempt_disable()/_enable() and the hci_mutex
16040Sstevel@tonic-gate  * is used to ensure that an interrupt won't overwrite a temporary mapping
16050Sstevel@tonic-gate  * while it's in use. If an interrupt thread tries to access a PTE, it will
16060Sstevel@tonic-gate  * yield briefly back to the pinned thread which holds the cpu's hci_mutex.
16070Sstevel@tonic-gate  */
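/*
 * In outline, the access protocol looks like the following sketch
 * (callers normally go through x86pte_access_pagetable() and
 * x86pte_release_pagetable() below rather than doing this directly):
 *
 *	ptep = x86pte_mapin(ht->ht_pfn, entry, ht);
 *	pte = GET_PTE(ptep);	(atomic even for 8 byte PAE PTEs)
 *	x86pte_mapout();	(drops hci_mutex, restores preemption)
 */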
16080Sstevel@tonic-gate void
1609*3446Smrj x86pte_cpu_init(cpu_t *cpu)
16100Sstevel@tonic-gate {
16110Sstevel@tonic-gate 	struct hat_cpu_info *hci;
16120Sstevel@tonic-gate 
1613*3446Smrj 	hci = kmem_zalloc(sizeof (*hci), KM_SLEEP);
16140Sstevel@tonic-gate 	mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL);
16150Sstevel@tonic-gate 	cpu->cpu_hat_info = hci;
16160Sstevel@tonic-gate }
16170Sstevel@tonic-gate 
1618*3446Smrj void
1619*3446Smrj x86pte_cpu_fini(cpu_t *cpu)
1620*3446Smrj {
1621*3446Smrj 	struct hat_cpu_info *hci = cpu->cpu_hat_info;
1622*3446Smrj 
1623*3446Smrj 	kmem_free(hci, sizeof (*hci));
1624*3446Smrj 	cpu->cpu_hat_info = NULL;
1625*3446Smrj }
1626*3446Smrj 
1627*3446Smrj #ifdef __i386
16280Sstevel@tonic-gate /*
1629*3446Smrj  * On 32 bit kernels, loading a 64 bit PTE is a little tricky
16300Sstevel@tonic-gate  */
1631*3446Smrj x86pte_t
1632*3446Smrj get_pte64(x86pte_t *ptr)
1633*3446Smrj {
1634*3446Smrj 	volatile uint32_t *p = (uint32_t *)ptr;
1635*3446Smrj 	x86pte_t t;
1636*3446Smrj 
1637*3446Smrj 	ASSERT(mmu.pae_hat != 0);
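	/*
	 * Read the low 32 bits, then the high 32 bits, then re-read the
	 * low 32 bits; if the low half is unchanged, the two halves are
	 * a consistent snapshot of the 64 bit PTE.
	 */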
1638*3446Smrj 	for (;;) {
1639*3446Smrj 		t = p[0];
1640*3446Smrj 		t |= (uint64_t)p[1] << 32;
1641*3446Smrj 		if ((t & 0xffffffff) == p[0])
1642*3446Smrj 			return (t);
1643*3446Smrj 	}
16440Sstevel@tonic-gate }
1645*3446Smrj #endif /* __i386 */
16460Sstevel@tonic-gate 
16470Sstevel@tonic-gate /*
16480Sstevel@tonic-gate  * Disable preemption and establish a mapping to the pagetable with the
16490Sstevel@tonic-gate  * given pfn. This is optimized for the case where it's the same
16500Sstevel@tonic-gate  * pfn as the one we last referenced from this CPU.
16510Sstevel@tonic-gate  */
16520Sstevel@tonic-gate static x86pte_t *
1653*3446Smrj x86pte_access_pagetable(htable_t *ht, uint_t index)
16540Sstevel@tonic-gate {
16550Sstevel@tonic-gate 	/*
16560Sstevel@tonic-gate 	 * VLP pagetables are contained in the hat_t
16570Sstevel@tonic-gate 	 */
16580Sstevel@tonic-gate 	if (ht->ht_flags & HTABLE_VLP)
1659*3446Smrj 		return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index));
1660*3446Smrj 	return (x86pte_mapin(ht->ht_pfn, index, ht));
1661*3446Smrj }
16620Sstevel@tonic-gate 
1663*3446Smrj /*
1664*3446Smrj  * map the given pfn into the page table window.
1665*3446Smrj  */
1666*3446Smrj /*ARGSUSED*/
1667*3446Smrj x86pte_t *
1668*3446Smrj x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht)
1669*3446Smrj {
1670*3446Smrj 	x86pte_t *pteptr;
1671*3446Smrj 	x86pte_t pte;
1672*3446Smrj 	x86pte_t newpte;
1673*3446Smrj 	int x;
1674*3446Smrj 
16750Sstevel@tonic-gate 	ASSERT(pfn != PFN_INVALID);
16760Sstevel@tonic-gate 
16770Sstevel@tonic-gate 	if (!khat_running) {
1678*3446Smrj 		caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1);
1679*3446Smrj 		return (PT_INDEX_PTR(va, index));
16800Sstevel@tonic-gate 	}
16810Sstevel@tonic-gate 
16820Sstevel@tonic-gate 	/*
1683*3446Smrj 	 * If kpm is available, use it.
1684*3446Smrj 	 */
1685*3446Smrj 	if (kpm_vbase)
1686*3446Smrj 		return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index));
1687*3446Smrj 
1688*3446Smrj 	/*
1689*3446Smrj 	 * Disable preemption and grab the CPU's hci_mutex
16900Sstevel@tonic-gate 	 */
16910Sstevel@tonic-gate 	kpreempt_disable();
1692*3446Smrj 	ASSERT(CPU->cpu_hat_info != NULL);
1693*3446Smrj 	mutex_enter(&CPU->cpu_hat_info->hci_mutex);
1694*3446Smrj 	x = PWIN_TABLE(CPU->cpu_id);
1695*3446Smrj 	pteptr = (x86pte_t *)PWIN_PTE_VA(x);
1696*3446Smrj 	if (mmu.pae_hat)
1697*3446Smrj 		pte = *pteptr;
1698*3446Smrj 	else
1699*3446Smrj 		pte = *(x86pte32_t *)pteptr;
1700*3446Smrj 
1701*3446Smrj 	newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx;
1702*3446Smrj 	newpte |= PT_WRITABLE;
1703*3446Smrj 
1704*3446Smrj 	if (!PTE_EQUIV(newpte, pte)) {
1705*3446Smrj 		if (mmu.pae_hat)
1706*3446Smrj 			*pteptr = newpte;
1707*3446Smrj 		else
1708*3446Smrj 			*(x86pte32_t *)pteptr = newpte;
1709*3446Smrj 		mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
17100Sstevel@tonic-gate 	}
1711*3446Smrj 	return (PT_INDEX_PTR(PWIN_VA(x), index));
17120Sstevel@tonic-gate }
17130Sstevel@tonic-gate 
17140Sstevel@tonic-gate /*
17150Sstevel@tonic-gate  * Release access to a page table.
17160Sstevel@tonic-gate  */
17170Sstevel@tonic-gate static void
17180Sstevel@tonic-gate x86pte_release_pagetable(htable_t *ht)
17190Sstevel@tonic-gate {
17200Sstevel@tonic-gate 	/*
17210Sstevel@tonic-gate 	 * nothing to do for VLP htables
17220Sstevel@tonic-gate 	 */
17230Sstevel@tonic-gate 	if (ht->ht_flags & HTABLE_VLP)
17240Sstevel@tonic-gate 		return;
17250Sstevel@tonic-gate 
1726*3446Smrj 	x86pte_mapout();
1727*3446Smrj }
1728*3446Smrj 
1729*3446Smrj void
1730*3446Smrj x86pte_mapout(void)
1731*3446Smrj {
1732*3446Smrj 	if (mmu.pwin_base == NULL || !khat_running)
17330Sstevel@tonic-gate 		return;
17340Sstevel@tonic-gate 
17350Sstevel@tonic-gate 	/*
1736*3446Smrj 	 * Drop the CPU's hci_mutex and restore preemption.
17370Sstevel@tonic-gate 	 */
1738*3446Smrj 	mutex_exit(&CPU->cpu_hat_info->hci_mutex);
17390Sstevel@tonic-gate 	kpreempt_enable();
17400Sstevel@tonic-gate }
17410Sstevel@tonic-gate 
17420Sstevel@tonic-gate /*
17430Sstevel@tonic-gate  * Atomic retrieval of a pagetable entry
17440Sstevel@tonic-gate  */
17450Sstevel@tonic-gate x86pte_t
17460Sstevel@tonic-gate x86pte_get(htable_t *ht, uint_t entry)
17470Sstevel@tonic-gate {
17480Sstevel@tonic-gate 	x86pte_t	pte;
174947Sjosephb 	x86pte_t	*ptep;
17500Sstevel@tonic-gate 
17510Sstevel@tonic-gate 	/*
175247Sjosephb 	 * Be careful that loading PAE entries in a 32 bit kernel is atomic.
17530Sstevel@tonic-gate 	 */
1754*3446Smrj 	ASSERT(entry < mmu.ptes_per_table);
1755*3446Smrj 	ptep = x86pte_access_pagetable(ht, entry);
1756*3446Smrj 	pte = GET_PTE(ptep);
17570Sstevel@tonic-gate 	x86pte_release_pagetable(ht);
17580Sstevel@tonic-gate 	return (pte);
17590Sstevel@tonic-gate }
17600Sstevel@tonic-gate 
17610Sstevel@tonic-gate /*
17620Sstevel@tonic-gate  * Atomic unconditional set of a page table entry; it returns the previous
1763*3446Smrj  * value. For pre-existing mappings, if the PFN changes, we don't care
1764*3446Smrj  * about the old pte's REF / MOD bits. If the PFN remains the same, we leave
1765*3446Smrj  * the MOD/REF bits unchanged.
1766*3446Smrj  *
1767*3446Smrj  * If asked to overwrite a link to a lower page table with a large page
1768*3446Smrj  * mapping, this routine returns the special value of LPAGE_ERROR. This
1769*3446Smrj  * allows the upper HAT layers to retry with a smaller mapping size.
17700Sstevel@tonic-gate  */
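/*
 * The large page collision appears to a (hypothetical) caller as:
 *
 *	old = x86pte_set(ht, entry, new_lgpg_pte, NULL);
 *	if (old == LPAGE_ERROR) {
 *		... a lower pagetable is already linked here; retry
 *		... the operation with a smaller page size
 *	}
 */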
17710Sstevel@tonic-gate x86pte_t
17720Sstevel@tonic-gate x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr)
17730Sstevel@tonic-gate {
17740Sstevel@tonic-gate 	x86pte_t	old;
1775*3446Smrj 	x86pte_t	prev;
17760Sstevel@tonic-gate 	x86pte_t	*ptep;
1777*3446Smrj 	level_t		l = ht->ht_level;
1778*3446Smrj 	x86pte_t	pfn_mask = (l != 0) ? PT_PADDR_LGPG : PT_PADDR;
1779*3446Smrj 	x86pte_t	n;
1780*3446Smrj 	uintptr_t	addr = htable_e2va(ht, entry);
1781*3446Smrj 	hat_t		*hat = ht->ht_hat;
17820Sstevel@tonic-gate 
1783*3446Smrj 	ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */
17840Sstevel@tonic-gate 	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
1785*3446Smrj 	if (ptr == NULL)
1786*3446Smrj 		ptep = x86pte_access_pagetable(ht, entry);
1787*3446Smrj 	else
17880Sstevel@tonic-gate 		ptep = ptr;
1789*3446Smrj 
1790*3446Smrj 	/*
1791*3446Smrj 	 * Install the new PTE. If remapping the same PFN, then
1792*3446Smrj 	 * copy existing REF/MOD bits to new mapping.
1793*3446Smrj 	 */
1794*3446Smrj 	do {
1795*3446Smrj 		prev = GET_PTE(ptep);
1796*3446Smrj 		n = new;
1797*3446Smrj 		if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask))
1798*3446Smrj 			n |= prev & (PT_REF | PT_MOD);
17990Sstevel@tonic-gate 
1800*3446Smrj 		/*
1801*3446Smrj 		 * Another thread may have installed this mapping already,
1802*3446Smrj 		 * flush the local TLB and be done.
1803*3446Smrj 		 */
1804*3446Smrj 		if (prev == n) {
1805*3446Smrj 			old = new;
1806*3446Smrj 			mmu_tlbflush_entry((caddr_t)addr);
1807*3446Smrj 			goto done;
18080Sstevel@tonic-gate 		}
1809*3446Smrj 
1810*3446Smrj 		/*
1811*3446Smrj 		 * Detect if we have a collision of installing a large
1812*3446Smrj 		 * page mapping where there already is a lower page table.
1813*3446Smrj 		 */
1814*3446Smrj 		if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE))
1815*3446Smrj 			return (LPAGE_ERROR);
1816*3446Smrj 
1817*3446Smrj 		old = CAS_PTE(ptep, prev, n);
1818*3446Smrj 	} while (old != prev);
1819*3446Smrj 
1820*3446Smrj 	/*
1821*3446Smrj 	 * Do a TLB demap if needed, ie. the old pte was valid.
1822*3446Smrj 	 *
1823*3446Smrj 	 * Note that a stale TLB writeback to the PTE here either can't happen
1824*3446Smrj 	 * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST
1825*3446Smrj 	 * mappings, but they were created with REF and MOD already set, so
1826*3446Smrj 	 * no stale writeback will happen.
1827*3446Smrj 	 *
1828*3446Smrj 	 * Segmap is the only place where remaps happen on the same pfn and for
1829*3446Smrj 	 * that we want to preserve the stale REF/MOD bits.
1830*3446Smrj 	 */
1831*3446Smrj 	if (old & PT_REF)
1832*3446Smrj 		hat_tlb_inval(hat, addr);
1833*3446Smrj 
1834*3446Smrj done:
18350Sstevel@tonic-gate 	if (ptr == NULL)
18360Sstevel@tonic-gate 		x86pte_release_pagetable(ht);
18370Sstevel@tonic-gate 	return (old);
18380Sstevel@tonic-gate }
18390Sstevel@tonic-gate 
18400Sstevel@tonic-gate /*
1841*3446Smrj  * Atomic compare and swap of a page table entry. No TLB invalidates are done.
1842*3446Smrj  * This is used for links between pagetables of different levels.
1843*3446Smrj  * Note we always create these links with dirty/access set, so they should
1844*3446Smrj  * never change.
18450Sstevel@tonic-gate  */
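/*
 * A sketch of the intended use, linking a freshly created pagetable
 * into its parent (variable names are illustrative):
 *
 *	found = x86pte_cas(higher, entry, 0, MAKEPTP(new->ht_pfn, l));
 *	if (found != 0)
 *		... another thread linked an entry here first ...
 */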
1846*3446Smrj x86pte_t
18470Sstevel@tonic-gate x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new)
18480Sstevel@tonic-gate {
18490Sstevel@tonic-gate 	x86pte_t	pte;
18500Sstevel@tonic-gate 	x86pte_t	*ptep;
18510Sstevel@tonic-gate 
1852*3446Smrj 	ptep = x86pte_access_pagetable(ht, entry);
1853*3446Smrj 	pte = CAS_PTE(ptep, old, new);
18540Sstevel@tonic-gate 	x86pte_release_pagetable(ht);
18550Sstevel@tonic-gate 	return (pte);
18560Sstevel@tonic-gate }
18570Sstevel@tonic-gate 
18580Sstevel@tonic-gate /*
1859*3446Smrj  * Make sure the zero we wrote to a page table entry sticks in memory
1860*3446Smrj  * after invalidating all TLB entries on all CPUs.
18610Sstevel@tonic-gate  */
1862*3446Smrj static x86pte_t
1863*3446Smrj handle_tlbs(x86pte_t oldpte, x86pte_t *ptep, htable_t *ht, uint_t entry)
1864*3446Smrj {
1865*3446Smrj 	hat_t		*hat = ht->ht_hat;
1866*3446Smrj 	uintptr_t	addr = htable_e2va(ht, entry);
1867*3446Smrj 	x86pte_t	found;
18680Sstevel@tonic-gate 
1869*3446Smrj 	/*
1870*3446Smrj 	 * Was the PTE ever used? If not, there can't be any TLB entries.
1871*3446Smrj 	 */
1872*3446Smrj 	if ((oldpte & PT_REF) == 0)
1873*3446Smrj 		return (oldpte);
18740Sstevel@tonic-gate 
18750Sstevel@tonic-gate 	/*
1876*3446Smrj 	 * Do a full global TLB invalidation.
1877*3446Smrj 	 * We may have to loop until the new PTE in memory stays zero.
1878*3446Smrj 	 * Why? Because Intel/AMD don't document how the REF/MOD bits are
1879*3446Smrj 	 * copied back from the TLB to the PTE, sigh. We're protecting
1880*3446Smrj 	 * here against a blind write back of the MOD (and other) bits.
18810Sstevel@tonic-gate 	 */
1882*3446Smrj 	for (;;) {
1883*3446Smrj 		hat_tlb_inval(hat, addr);
1884*3446Smrj 
1885*3446Smrj 		/*
1886*3446Smrj 		 * Check for a stale writeback of an oldpte TLB entry.
1887*3446Smrj 		 * Done when the PTE stays zero.
1888*3446Smrj 		 */
1889*3446Smrj 		found = GET_PTE(ptep);
1890*3446Smrj 		if (found == 0)
1891*3446Smrj 			return (oldpte);
18920Sstevel@tonic-gate 
1893*3446Smrj 		/*
1894*3446Smrj 		 * The only acceptable PTE change must be from a TLB
1895*3446Smrj 		 * flush setting the MOD bit in, hence oldpte must
1896*3446Smrj 		 * have been writable.
1897*3446Smrj 		 */
1898*3446Smrj 		if (!(oldpte & PT_WRITABLE) || !(found & PT_MOD))
1899*3446Smrj 			break;
1900*3446Smrj 
1901*3446Smrj 		/*
1902*3446Smrj 		 * Did we see a complete writeback of oldpte?
1903*3446Smrj 		 * or
1904*3446Smrj 		 * Did we see the MOD bit set (plus possibly other
1905*3446Smrj 		 * bits rewritten) in a still invalid mapping?
1906*3446Smrj 		 */
1907*3446Smrj 		if (found == (oldpte | PT_MOD) ||
1908*3446Smrj 		    (!(found & PT_VALID) &&
1909*3446Smrj 		    (oldpte | found) == (oldpte | PT_MOD)))
1910*3446Smrj 			oldpte |= PT_MOD;
1911*3446Smrj 		else
1912*3446Smrj 			break;
1913*3446Smrj 
1914*3446Smrj 		(void) CAS_PTE(ptep, found, 0);
19150Sstevel@tonic-gate 	}
19160Sstevel@tonic-gate 
19170Sstevel@tonic-gate 	/*
1918*3446Smrj 	 * If we hit this, a processor attempted to set the DIRTY bit
1919*3446Smrj 	 * of a page table entry in a way we didn't anticipate.
19200Sstevel@tonic-gate 	 */
1921*3446Smrj 	panic("handle_tlbs(): unanticipated TLB shootdown scenario"
1922*3446Smrj 	    " oldpte=" FMT_PTE " found=" FMT_PTE, oldpte, found);
1923*3446Smrj 	/*LINTED*/
19240Sstevel@tonic-gate }
19250Sstevel@tonic-gate 
19260Sstevel@tonic-gate /*
1927*3446Smrj  * Invalidate a page table entry as long as it currently maps something that
1928*3446Smrj  * matches the value determined by expect.
1929*3446Smrj  *
1930*3446Smrj  * Also invalidates any TLB entries and returns the previous value of the PTE.
19310Sstevel@tonic-gate  */
1932*3446Smrj x86pte_t
1933*3446Smrj x86pte_inval(
1934*3446Smrj 	htable_t *ht,
1935*3446Smrj 	uint_t entry,
1936*3446Smrj 	x86pte_t expect,
1937*3446Smrj 	x86pte_t *pte_ptr)
19380Sstevel@tonic-gate {
1939*3446Smrj 	x86pte_t	*ptep;
1940*3446Smrj 	x86pte_t	oldpte;
1941*3446Smrj 	x86pte_t	found;
19420Sstevel@tonic-gate 
1943*3446Smrj 	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
1944*3446Smrj 	ASSERT(ht->ht_level != VLP_LEVEL);
1945*3446Smrj 	if (pte_ptr != NULL)
1946*3446Smrj 		ptep = pte_ptr;
1947*3446Smrj 	else
1948*3446Smrj 		ptep = x86pte_access_pagetable(ht, entry);
19490Sstevel@tonic-gate 
19500Sstevel@tonic-gate 	/*
1951*3446Smrj 	 * This loop deals with REF/MOD bits changing between the
1952*3446Smrj 	 * GET_PTE() and the CAS_PTE().
19530Sstevel@tonic-gate 	 */
1954*3446Smrj 	do {
1955*3446Smrj 		oldpte = GET_PTE(ptep);
1956*3446Smrj 		if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
1957*3446Smrj 			goto give_up;
1958*3446Smrj 		found = CAS_PTE(ptep, oldpte, 0);
1959*3446Smrj 	} while (found != oldpte);
1960*3446Smrj 	oldpte = handle_tlbs(oldpte, ptep, ht, entry);
19610Sstevel@tonic-gate 
1962*3446Smrj give_up:
1963*3446Smrj 	if (pte_ptr == NULL)
1964*3446Smrj 		x86pte_release_pagetable(ht);
1965*3446Smrj 	return (oldpte);
19660Sstevel@tonic-gate }
19670Sstevel@tonic-gate 
19680Sstevel@tonic-gate /*
1969*3446Smrj  * Change a page table entry if it currently matches the value in expect.
19700Sstevel@tonic-gate  */
19710Sstevel@tonic-gate x86pte_t
1972*3446Smrj x86pte_update(
1973*3446Smrj 	htable_t *ht,
1974*3446Smrj 	uint_t entry,
1975*3446Smrj 	x86pte_t expect,
1976*3446Smrj 	x86pte_t new)
19770Sstevel@tonic-gate {
19780Sstevel@tonic-gate 	x86pte_t	*ptep;
1979*3446Smrj 	x86pte_t	found;
19800Sstevel@tonic-gate 
1981*3446Smrj 	ASSERT(new != 0);
1982*3446Smrj 	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
1983*3446Smrj 	ASSERT(ht->ht_level != VLP_LEVEL);
19840Sstevel@tonic-gate 
1985*3446Smrj 	ptep = x86pte_access_pagetable(ht, entry);
1986*3446Smrj 	found = CAS_PTE(ptep, expect, new);
1987*3446Smrj 	if (found == expect) {
1988*3446Smrj 		hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));
19890Sstevel@tonic-gate 
1990*3446Smrj 		/*
1991*3446Smrj 		 * When removing write permission *and* clearing the
1992*3446Smrj 		 * MOD bit, check if a write happened via a stale
1993*3446Smrj 		 * TLB entry before the TLB shootdown finished.
1994*3446Smrj 		 *
1995*3446Smrj 		 * If it did happen, simply re-enable write permission and
1996*3446Smrj 		 * act like the original CAS failed.
1997*3446Smrj 		 */
1998*3446Smrj 		if ((expect & (PT_WRITABLE | PT_MOD)) == PT_WRITABLE &&
1999*3446Smrj 		    (new & (PT_WRITABLE | PT_MOD)) == 0 &&
2000*3446Smrj 		    (GET_PTE(ptep) & PT_MOD) != 0) {
2001*3446Smrj 			do {
2002*3446Smrj 				found = GET_PTE(ptep);
2003*3446Smrj 				found =
2004*3446Smrj 				    CAS_PTE(ptep, found, found | PT_WRITABLE);
2005*3446Smrj 			} while ((found & PT_WRITABLE) == 0);
2006*3446Smrj 		}
2007*3446Smrj 	}
20080Sstevel@tonic-gate 	x86pte_release_pagetable(ht);
2009*3446Smrj 	return (found);
20100Sstevel@tonic-gate }
20110Sstevel@tonic-gate 
20120Sstevel@tonic-gate /*
20130Sstevel@tonic-gate  * Copy page tables - this is just a little more complicated than the
20140Sstevel@tonic-gate  * previous routines. Note that it's also not atomic! It is also never
20150Sstevel@tonic-gate  * used for VLP pagetables.
20160Sstevel@tonic-gate  */
20170Sstevel@tonic-gate void
20180Sstevel@tonic-gate x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
20190Sstevel@tonic-gate {
20200Sstevel@tonic-gate 	caddr_t	src_va;
20210Sstevel@tonic-gate 	caddr_t dst_va;
20220Sstevel@tonic-gate 	size_t size;
2023*3446Smrj 	x86pte_t *pteptr;
2024*3446Smrj 	x86pte_t pte;
20250Sstevel@tonic-gate 
20260Sstevel@tonic-gate 	ASSERT(khat_running);
20270Sstevel@tonic-gate 	ASSERT(!(dest->ht_flags & HTABLE_VLP));
20280Sstevel@tonic-gate 	ASSERT(!(src->ht_flags & HTABLE_VLP));
20290Sstevel@tonic-gate 	ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN));
20300Sstevel@tonic-gate 	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));
20310Sstevel@tonic-gate 
20320Sstevel@tonic-gate 	/*
2033*3446Smrj 	 * Acquire access to the CPU pagetable windows for the dest and source.
20340Sstevel@tonic-gate 	 */
2035*3446Smrj 	dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);
2036*3446Smrj 	if (kpm_vbase) {
2037*3446Smrj 		src_va = (caddr_t)
2038*3446Smrj 		    PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry);
20390Sstevel@tonic-gate 	} else {
2040*3446Smrj 		uint_t x = PWIN_SRC(CPU->cpu_id);
20410Sstevel@tonic-gate 
20420Sstevel@tonic-gate 		/*
20430Sstevel@tonic-gate 		 * Finish defining the src pagetable mapping
20440Sstevel@tonic-gate 		 */
2045*3446Smrj 		src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
2046*3446Smrj 		pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx;
2047*3446Smrj 		pteptr = (x86pte_t *)PWIN_PTE_VA(x);
2048*3446Smrj 		if (mmu.pae_hat)
2049*3446Smrj 			*pteptr = pte;
2050*3446Smrj 		else
2051*3446Smrj 			*(x86pte32_t *)pteptr = pte;
2052*3446Smrj 		mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
20530Sstevel@tonic-gate 	}
20540Sstevel@tonic-gate 
20550Sstevel@tonic-gate 	/*
20560Sstevel@tonic-gate 	 * now do the copy
20570Sstevel@tonic-gate 	 */
20580Sstevel@tonic-gate 	size = count << mmu.pte_size_shift;
20590Sstevel@tonic-gate 	bcopy(src_va, dst_va, size);
20600Sstevel@tonic-gate 
20610Sstevel@tonic-gate 	x86pte_release_pagetable(dest);
20620Sstevel@tonic-gate }
20630Sstevel@tonic-gate 
20640Sstevel@tonic-gate /*
20650Sstevel@tonic-gate  * Zero page table entries - Note this doesn't use atomic stores!
20660Sstevel@tonic-gate  */
2067*3446Smrj static void
20680Sstevel@tonic-gate x86pte_zero(htable_t *dest, uint_t entry, uint_t count)
20690Sstevel@tonic-gate {
20700Sstevel@tonic-gate 	caddr_t dst_va;
20710Sstevel@tonic-gate 	size_t size;
20720Sstevel@tonic-gate 
20730Sstevel@tonic-gate 	/*
20740Sstevel@tonic-gate 	 * Map in the page table to be zeroed.
20750Sstevel@tonic-gate 	 */
20760Sstevel@tonic-gate 	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));
20770Sstevel@tonic-gate 	ASSERT(!(dest->ht_flags & HTABLE_VLP));
2078*3446Smrj 
2079*3446Smrj 	dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);
2080*3446Smrj 
20810Sstevel@tonic-gate 	size = count << mmu.pte_size_shift;
2082*3446Smrj 	ASSERT(size > BLOCKZEROALIGN);
2083*3446Smrj #ifdef __i386
2084*3446Smrj 	if ((x86_feature & X86_SSE2) == 0)
20850Sstevel@tonic-gate 		bzero(dst_va, size);
2086*3446Smrj 	else
2087*3446Smrj #endif
2088*3446Smrj 		block_zero_no_xmm(dst_va, size);
2089*3446Smrj 
20900Sstevel@tonic-gate 	x86pte_release_pagetable(dest);
20910Sstevel@tonic-gate }
20920Sstevel@tonic-gate 
20930Sstevel@tonic-gate /*
20940Sstevel@tonic-gate  * Called to ensure that all pagetables are in the system dump
20950Sstevel@tonic-gate  */
20960Sstevel@tonic-gate void
20970Sstevel@tonic-gate hat_dump(void)
20980Sstevel@tonic-gate {
20990Sstevel@tonic-gate 	hat_t *hat;
21000Sstevel@tonic-gate 	uint_t h;
21010Sstevel@tonic-gate 	htable_t *ht;
21020Sstevel@tonic-gate 
21030Sstevel@tonic-gate 	/*
21041747Sjosephb 	 * Dump all page tables
21050Sstevel@tonic-gate 	 */
21061747Sjosephb 	for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) {
21070Sstevel@tonic-gate 		for (h = 0; h < hat->hat_num_hash; ++h) {
21080Sstevel@tonic-gate 			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
21091747Sjosephb 				if ((ht->ht_flags & HTABLE_VLP) == 0)
21100Sstevel@tonic-gate 					dump_page(ht->ht_pfn);
21110Sstevel@tonic-gate 			}
21120Sstevel@tonic-gate 		}
21130Sstevel@tonic-gate 	}
21140Sstevel@tonic-gate }