/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - Hardware Address Translation management for i386 and amd64
 *
 * Implementation of the interfaces described in <common/vm/hat.h>
 *
 * Nearly all the details of how the hardware is managed should not be
 * visible outside this layer except for misc. machine specific functions
 * that work in conjunction with this code.
 *
 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
 */

#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/shm.h>
#include <sys/sysmacros.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/controlregs.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/bootinfo.h>
#include <sys/archsystm.h>

#include <vm/seg_kmem.h>
#include <vm/hat_i86.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/page.h>
#include <vm/seg_kp.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <vm/kboot_mmu.h>
#include <vm/seg_spt.h>

#include <sys/cmn_err.h>

/*
 * Basic parameters for hat operation.
 */
struct hat_mmu_info mmu;

/*
 * The page that is the kernel's top level pagetable.
 *
 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
 * on this 4K page for its top level page table. The remaining groups of
 * 4 entries are used for per processor copies of user VLP pagetables for
 * running threads.  See hat_switch() and reload_pae32() for details.
 *
 * vlp_page[0..3] - level==2 PTEs for kernel HAT
 * vlp_page[4..7] - level==2 PTEs for user thread on cpu 0
 * vlp_page[8..11] - level==2 PTEs for user thread on cpu 1
 * etc...
 */
static x86pte_t *vlp_page;
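
/*
 * Illustrative sketch (not part of the logic here): with mmu.pae_hat set,
 * the level==2 PTEs used while cpu N runs a user thread live at a fixed
 * offset within this page.  reload_pae32() below computes the slot as:
 *
 *	x86pte_t *dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
 *
 * Slot 0 (entries 0..3) always holds the kernel hat's copies.
 */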

/*
 * forward declaration of internal utility routines
 */
static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
	x86pte_t new);

/*
 * The kernel address space exists in all HATs. To implement this the
 * kernel reserves a fixed number of entries in the topmost level(s) of page
 * tables. The values are set up during startup and then copied to every user
 * hat created by hat_alloc(). This means that kernelbase must be:
 *
 *	  4Meg aligned for 32 bit kernels
 *	512Gig aligned for x86_64 64 bit kernel
 *
 * The hat_kernel_range_ts describe what needs to be copied from the kernel
 * hat to each user hat.
 */
typedef struct hat_kernel_range {
	level_t		hkr_level;
	uintptr_t	hkr_start_va;
	uintptr_t	hkr_end_va;	/* zero means to end of memory */
} hat_kernel_range_t;
#define	NUM_KERNEL_RANGE 2
static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
static int num_kernel_ranges;

uint_t use_boot_reserve = 1;	/* cleared after early boot process */
uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */

/*
 * A cpuset for all cpus. This is used for kernel address cross calls, since
 * the kernel addresses apply to all cpus.
 */
cpuset_t khat_cpuset;

/*
 * management stuff for hat structures
 */
kmutex_t	hat_list_lock;
kcondvar_t	hat_list_cv;
kmem_cache_t	*hat_cache;
kmem_cache_t	*hat_hash_cache;
kmem_cache_t	*vlp_hash_cache;

/*
 * Simple statistics
 */
struct hatstats hatstat;

/*
 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
 */
extern void atomic_orb(uchar_t *addr, uchar_t val);
extern void atomic_andb(uchar_t *addr, uchar_t val);

#define	PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
#define	PP_ISMOD(pp)		PP_GETRM(pp, P_MOD)
#define	PP_ISREF(pp)		PP_GETRM(pp, P_REF)
#define	PP_ISRO(pp)		PP_GETRM(pp, P_RO)

#define	PP_SETRM(pp, rm)	atomic_orb(&(pp->p_nrm), rm)
#define	PP_SETMOD(pp)		PP_SETRM(pp, P_MOD)
#define	PP_SETREF(pp)		PP_SETRM(pp, P_REF)
#define	PP_SETRO(pp)		PP_SETRM(pp, P_RO)

#define	PP_CLRRM(pp, rm)	atomic_andb(&(pp->p_nrm), ~(rm))
#define	PP_CLRMOD(pp)   	PP_CLRRM(pp, P_MOD)
#define	PP_CLRREF(pp)   	PP_CLRRM(pp, P_REF)
#define	PP_CLRRO(pp)    	PP_CLRRM(pp, P_RO)
#define	PP_CLRALL(pp)		PP_CLRRM(pp, P_MOD | P_REF | P_RO)
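
/*
 * Typical usage sketch: when syncing hardware ref/mod state back into a
 * page_t (cf. hati_sync_pte_to_page() below), the bits transfer as:
 *
 *	if (PTE_GET(pte, PT_MOD))
 *		PP_SETMOD(pp);
 *
 * PP_SETMOD() atomically ORs P_MOD into pp->p_nrm via atomic_orb().
 */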

/*
 * kmem cache constructor for struct hat
 */
/*ARGSUSED*/
static int
hati_constructor(void *buf, void *handle, int kmflags)
{
	hat_t	*hat = buf;

	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	bzero(hat->hat_pages_mapped,
	    sizeof (pgcnt_t) * (mmu.max_page_level + 1));
	hat->hat_ism_pgcnt = 0;
	hat->hat_stats = 0;
	hat->hat_flags = 0;
	CPUSET_ZERO(hat->hat_cpus);
	hat->hat_htable = NULL;
	hat->hat_ht_hash = NULL;
	return (0);
}

/*
 * Allocate a hat structure for as. We also create the top level
 * htable and initialize it to contain the kernel hat entries.
 */
hat_t *
hat_alloc(struct as *as)
{
	hat_t			*hat;
	htable_t		*ht;	/* top level htable */
	uint_t			use_vlp;
	uint_t			r;
	hat_kernel_range_t	*rp;
	uintptr_t		va;
	uintptr_t		eva;
	uint_t			start;
	uint_t			cnt;
	htable_t		*src;

	/*
	 * Once we start creating user process HATs we can enable
	 * the htable_steal() code.
	 */
	if (can_steal_post_boot == 0)
		can_steal_post_boot = 1;

	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
	hat->hat_as = as;
	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	ASSERT(hat->hat_flags == 0);

#if defined(__xpv)
	/*
	 * No VLP stuff on the hypervisor due to the 64-bit split top level
	 * page tables.  On 32-bit it's not needed as the hypervisor takes
	 * care of copying the top level PTEs to a below 4Gig page.
	 */
	use_vlp = 0;
#else	/* __xpv */
	/* 32 bit processes use a VLP style hat when running with PAE */
#if defined(__amd64)
	use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
#elif defined(__i386)
	use_vlp = mmu.pae_hat;
#endif
#endif	/* __xpv */
	if (use_vlp) {
		hat->hat_flags = HAT_VLP;
		bzero(hat->hat_vlp_ptes, VLP_SIZE);
	}

	/*
	 * Allocate the htable hash
	 */
	if ((hat->hat_flags & HAT_VLP)) {
		hat->hat_num_hash = mmu.vlp_hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
	} else {
		hat->hat_num_hash = mmu.hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
	}
	bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));

	/*
	 * Initialize Kernel HAT entries at the top of the top level page
	 * tables for the new hat.
	 */
	hat->hat_htable = NULL;
	hat->hat_ht_cached = NULL;
	XPV_DISALLOW_MIGRATE();
	ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
	hat->hat_htable = ht;

#if defined(__amd64)
	if (hat->hat_flags & HAT_VLP)
		goto init_done;
#endif

	for (r = 0; r < num_kernel_ranges; ++r) {
		rp = &kernel_ranges[r];
		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
		    va += cnt * LEVEL_SIZE(rp->hkr_level)) {

			if (rp->hkr_level == TOP_LEVEL(hat))
				ht = hat->hat_htable;
			else
				ht = htable_create(hat, va, rp->hkr_level,
				    NULL);

			start = htable_va2entry(va, ht);
			cnt = HTABLE_NUM_PTES(ht) - start;
			eva = va +
			    ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
			if (rp->hkr_end_va != 0 &&
			    (eva > rp->hkr_end_va || eva == 0))
				cnt = htable_va2entry(rp->hkr_end_va, ht) -
				    start;

#if defined(__i386) && !defined(__xpv)
			if (ht->ht_flags & HTABLE_VLP) {
				bcopy(&vlp_page[start],
				    &hat->hat_vlp_ptes[start],
				    cnt * sizeof (x86pte_t));
				continue;
			}
#endif
			src = htable_lookup(kas.a_hat, va, rp->hkr_level);
			ASSERT(src != NULL);
			x86pte_copy(src, ht, start, cnt);
			htable_release(src);
		}
	}

init_done:
	XPV_ALLOW_MIGRATE();

#if defined(__xpv)
	/*
	 * Pin top level page tables after initializing them
	 */
	xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
#if defined(__amd64)
	xen_pin(hat->hat_user_ptable, mmu.max_level);
#endif
#endif

	/*
	 * Put it at the start of the global list of all hats (used by stealing)
	 *
	 * kas.a_hat is not in the list but is instead used to find the
	 * first and last items in the list.
	 *
	 * - kas.a_hat->hat_next points to the start of the user hats.
	 *   The list ends where hat->hat_next == NULL
	 *
	 * - kas.a_hat->hat_prev points to the last of the user hats.
	 *   The list begins where hat->hat_prev == NULL
	 */
	mutex_enter(&hat_list_lock);
	hat->hat_prev = NULL;
	hat->hat_next = kas.a_hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat;
	else
		kas.a_hat->hat_prev = hat;
	kas.a_hat->hat_next = hat;
	mutex_exit(&hat_list_lock);

	return (hat);
}

/*
 * process has finished executing but as has not been cleaned up yet.
 */
/*ARGSUSED*/
void
hat_free_start(hat_t *hat)
{
	ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));

	/*
	 * If the hat is currently a stealing victim, wait for the stealing
	 * to finish.  Once we mark it as HAT_FREEING, htable_steal()
	 * won't look at its pagetables anymore.
	 */
	mutex_enter(&hat_list_lock);
	while (hat->hat_flags & HAT_VICTIM)
		cv_wait(&hat_list_cv, &hat_list_lock);
	hat->hat_flags |= HAT_FREEING;
	mutex_exit(&hat_list_lock);
}

/*
 * An address space is being destroyed, so we destroy the associated hat.
 */
void
hat_free_end(hat_t *hat)
{
	kmem_cache_t *cache;

	ASSERT(hat->hat_flags & HAT_FREEING);

	/*
	 * must not be running on the given hat
	 */
	ASSERT(CPU->cpu_current_hat != hat);

	/*
	 * Remove it from the list of HATs
	 */
	mutex_enter(&hat_list_lock);
	if (hat->hat_prev)
		hat->hat_prev->hat_next = hat->hat_next;
	else
		kas.a_hat->hat_next = hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat->hat_prev;
	else
		kas.a_hat->hat_prev = hat->hat_prev;
	mutex_exit(&hat_list_lock);
	hat->hat_next = hat->hat_prev = NULL;

#if defined(__xpv)
	/*
	 * On the hypervisor, unpin top level page table(s)
	 */
	xen_unpin(hat->hat_htable->ht_pfn);
#if defined(__amd64)
	xen_unpin(hat->hat_user_ptable);
#endif
#endif

	/*
	 * Make a pass through the htables freeing them all up.
	 */
	htable_purge_hat(hat);

	/*
	 * Decide which kmem cache the hash table came from, then free it.
	 */
	if (hat->hat_flags & HAT_VLP)
		cache = vlp_hash_cache;
	else
		cache = hat_hash_cache;
	kmem_cache_free(cache, hat->hat_ht_hash);
	hat->hat_ht_hash = NULL;

	hat->hat_flags = 0;
	kmem_cache_free(hat_cache, hat);
}

/*
 * round kernelbase down to a supported value to use for _userlimit
 *
 * userlimit must be aligned down to an entry in the top level htable.
 * The one exception is for 32 bit HAT's running PAE.
 */
uintptr_t
hat_kernelbase(uintptr_t va)
{
#if defined(__i386)
	va &= LEVEL_MASK(1);
#endif
	if (IN_VA_HOLE(va))
		panic("_userlimit %p will fall in VA hole\n", (void *)va);
	return (va);
}

/*
 * Initialize hat data structures based on processor MMU information.
 */
void
mmu_init(void)
{
	uint_t max_htables;
	uint_t pa_bits;
	uint_t va_bits;
	int i;

	/*
	 * If the CPU enabled the page table global bit, use it for the kernel.
	 * This is bit 7 in CR4 (PGE - Page Global Enable).
	 */
	if ((x86_feature & X86_PGE) != 0 && (getcr4() & CR4_PGE) != 0)
		mmu.pt_global = PT_GLOBAL;

	/*
	 * Detect NX and PAE usage.
	 */
	mmu.pae_hat = kbm_pae_support;
	if (kbm_nx_support)
		mmu.pt_nx = PT_NX;
	else
		mmu.pt_nx = 0;

	/*
	 * Use CPU info to set various MMU parameters
	 */
	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);

	if (va_bits < sizeof (void *) * NBBY) {
		mmu.hole_start = (1ul << (va_bits - 1));
		mmu.hole_end = 0ul - mmu.hole_start - 1;
	} else {
		mmu.hole_end = 0;
		mmu.hole_start = mmu.hole_end - 1;
	}
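
	/*
	 * Worked example (assuming va_bits == 48, as on typical amd64
	 * hardware):
	 *
	 *	mmu.hole_start = 1ull << 47 == 0x0000800000000000
	 *	mmu.hole_end = 0 - mmu.hole_start - 1 == 0xffff7fffffffffff
	 *
	 * i.e. the hole is everything between the canonical lower and
	 * upper halves of the 64 bit address space.
	 */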
#if defined(OPTERON_ERRATUM_121)
	/*
	 * If erratum 121 has already been detected at this time, hole_start
	 * contains the value to be subtracted from mmu.hole_start.
	 */
	ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
	hole_start = mmu.hole_start - hole_start;
#else
	hole_start = mmu.hole_start;
#endif
	hole_end = mmu.hole_end;

	mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
	if (mmu.pae_hat == 0 && pa_bits > 32)
		mmu.highest_pfn = PFN_4G - 1;

	if (mmu.pae_hat) {
		mmu.pte_size = 8;	/* 8 byte PTEs */
		mmu.pte_size_shift = 3;
	} else {
		mmu.pte_size = 4;	/* 4 byte PTEs */
		mmu.pte_size_shift = 2;
	}

	if (mmu.pae_hat && (x86_feature & X86_PAE) == 0)
		panic("Processor does not support PAE");

	if ((x86_feature & X86_CX8) == 0)
		panic("Processor does not support cmpxchg8b instruction");

	/*
	 * Initialize parameters based on the 64 or 32 bit kernels and
	 * for the 32 bit kernel decide if we should use PAE.
	 */
	if (kbm_largepage_support)
		mmu.max_page_level = 1;
	else
		mmu.max_page_level = 0;
	mmu_page_sizes = mmu.max_page_level + 1;
	mmu_exported_page_sizes = mmu_page_sizes;

#if defined(__amd64)

	mmu.num_level = 4;
	mmu.max_level = 3;
	mmu.ptes_per_table = 512;
	mmu.top_level_count = 512;

	mmu.level_shift[0] = 12;
	mmu.level_shift[1] = 21;
	mmu.level_shift[2] = 30;
	mmu.level_shift[3] = 39;

#elif defined(__i386)

	if (mmu.pae_hat) {
		mmu.num_level = 3;
		mmu.max_level = 2;
		mmu.ptes_per_table = 512;
		mmu.top_level_count = 4;

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 21;
		mmu.level_shift[2] = 30;

	} else {
		mmu.num_level = 2;
		mmu.max_level = 1;
		mmu.ptes_per_table = 1024;
		mmu.top_level_count = 1024;

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 22;
	}

#endif	/* __i386 */

	for (i = 0; i < mmu.num_level; ++i) {
		mmu.level_size[i] = 1UL << mmu.level_shift[i];
		mmu.level_offset[i] = mmu.level_size[i] - 1;
		mmu.level_mask[i] = ~mmu.level_offset[i];
	}
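
	/*
	 * With the amd64 shifts above this yields the familiar sizes:
	 *
	 *	level 0: 1UL << 12 ==   4K pages
	 *	level 1: 1UL << 21 ==   2M pages
	 *	level 2: 1UL << 30 ==   1G pagetable regions
	 *	level 3: 1UL << 39 == 512G pagetable regions
	 */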

	for (i = 0; i <= mmu.max_page_level; ++i) {
		mmu.pte_bits[i] = PT_VALID;
#if defined(__xpv) && defined(__amd64)
		mmu.pte_bits[i] |= PT_USER;
#endif
		if (i > 0)
			mmu.pte_bits[i] |= PT_PAGESIZE;
	}

	/*
	 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
	 */
	for (i = 1; i < mmu.num_level; ++i)
		mmu.ptp_bits[i] = PT_PTPBITS;

#if defined(__i386)
	mmu.ptp_bits[2] = PT_VALID;
#endif

	/*
	 * Compute how many hash table entries to have per process for htables.
	 * We start with 1 page's worth of entries.
	 *
	 * If physical memory is small, reduce the amount needed to cover it.
	 */
	max_htables = physmax / mmu.ptes_per_table;
	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
	while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
		mmu.hash_cnt >>= 1;
	mmu.vlp_hash_cnt = mmu.hash_cnt;
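
	/*
	 * Example: with 4K pages and 8 byte pointers the starting point is
	 * MMU_PAGESIZE / sizeof (htable_t *) == 512 buckets, halved while
	 * it still exceeds both 16 and the max_htables bound above.
	 */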

#if defined(__amd64)
	/*
	 * If running in 64 bits and physical memory is large,
	 * increase the size of the cache to cover all of memory for
	 * a 64 bit process.
	 */
#define	HASH_MAX_LENGTH 4
	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
		mmu.hash_cnt <<= 1;
#endif
}


/*
 * initialize hat data structures
 */
void
hat_init()
{
#if defined(__i386)
	/*
	 * _userlimit must be aligned correctly
	 */
	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
		halt("hat_init(): Unable to continue");
	}
#endif

	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * initialize kmem caches
	 */
	htable_init();
	hment_init();

	hat_cache = kmem_cache_create("hat_t",
	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
	    NULL, 0, 0);

	hat_hash_cache = kmem_cache_create("HatHash",
	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
	    NULL, 0, 0);

	/*
	 * VLP hats can use a smaller hash table size on large memory machines
	 */
	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
		vlp_hash_cache = hat_hash_cache;
	} else {
		vlp_hash_cache = kmem_cache_create("HatVlpHash",
		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
		    NULL, 0, 0);
	}

	/*
	 * Set up the kernel's hat
	 */
	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	kas.a_hat->hat_as = &kas;
	kas.a_hat->hat_flags = 0;
	AS_LOCK_EXIT(&kas, &kas.a_lock);

	CPUSET_ZERO(khat_cpuset);
	CPUSET_ADD(khat_cpuset, CPU->cpu_id);

	/*
	 * The kernel hat's next pointer serves as the head of the hat list.
	 * The kernel hat's prev pointer tracks the last hat on the list for
	 * htable_steal() to use.
	 */
	kas.a_hat->hat_next = NULL;
	kas.a_hat->hat_prev = NULL;

	/*
	 * Allocate an htable hash bucket for the kernel
	 * XX64 - tune for 64 bit procs
	 */
	kas.a_hat->hat_num_hash = mmu.hash_cnt;
	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));

	/*
	 * zero out the top level and cached htable pointers
	 */
	kas.a_hat->hat_ht_cached = NULL;
	kas.a_hat->hat_htable = NULL;

	/*
	 * Pre-allocate hrm_hashtab before enabling the collection of
	 * refmod statistics.  Allocating on the fly would mean us
	 * running the risk of suffering recursive mutex enters or
	 * deadlocks.
	 */
	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
	    KM_SLEEP);
}

/*
 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 *
 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 */
/*ARGSUSED*/
static void
hat_vlp_setup(struct cpu *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci = cpu->cpu_hat_info;
	pfn_t pfn;

	/*
	 * allocate the level==2 page table for the bottom most
	 * 512Gig of address space (this is where 32 bit apps live)
	 */
	ASSERT(hci != NULL);
	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);

	/*
	 * Allocate a top level pagetable and copy the kernel's
	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
	 */
	hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
	hci->hci_vlp_pfn =
	    hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
	ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
	bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);

	pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
	ASSERT(pfn != PFN_INVALID);
	hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
#endif /* __amd64 && !__xpv */
}

/*ARGSUSED*/
static void
hat_vlp_teardown(cpu_t *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci;

	if ((hci = cpu->cpu_hat_info) == NULL)
		return;
	if (hci->hci_vlp_l2ptes)
		kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
	if (hci->hci_vlp_l3ptes)
		kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
#endif
}

#define	NEXT_HKR(r, l, s, e) {			\
	kernel_ranges[r].hkr_level = l;		\
	kernel_ranges[r].hkr_start_va = s;	\
	kernel_ranges[r].hkr_end_va = e;	\
	++r;					\
}
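
/*
 * For example, on a metal amd64 kernel hat_init_finish() below records one
 * range covering all level==3 kernel entries:
 *
 *	NEXT_HKR(r, 3, kernelbase, 0);
 *
 * which leaves kernel_ranges[0] == { 3, kernelbase, 0 } and r == 1, where
 * an hkr_end_va of zero means "to the end of memory".
 */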

/*
 * Finish filling in the kernel hat.
 * Pre fill in all top level kernel page table entries for the kernel's
 * part of the address range.  From this point on we can't use any new
 * kernel large pages if they need PTE's at max_level
 *
 * create the kmap mappings.
 */
void
hat_init_finish(void)
{
	size_t		size;
	uint_t		r = 0;
	uintptr_t	va;
	hat_kernel_range_t *rp;


	/*
	 * We are now effectively running on the kernel hat.
	 * Clearing use_boot_reserve shuts off using the pre-allocated boot
	 * reserve for all HAT allocations.  From here on, the reserves are
	 * only used when avoiding recursion in kmem_alloc().
	 */
	use_boot_reserve = 0;
	htable_adjust_reserve();

	/*
	 * User HATs are initialized with copies of all kernel mappings in
	 * higher level page tables. Ensure that those entries exist.
	 */
#if defined(__amd64)

	NEXT_HKR(r, 3, kernelbase, 0);
#if defined(__xpv)
	NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
#endif

#elif defined(__i386)

#if !defined(__xpv)
	if (mmu.pae_hat) {
		va = kernelbase;
		if ((va & LEVEL_MASK(2)) != va) {
			va = P2ROUNDUP(va, LEVEL_SIZE(2));
			NEXT_HKR(r, 1, kernelbase, va);
		}
		if (va != 0)
			NEXT_HKR(r, 2, va, 0);
	} else
#endif /* __xpv */
		NEXT_HKR(r, 1, kernelbase, 0);

#endif /* __i386 */

	num_kernel_ranges = r;

	/*
	 * Create all the kernel pagetables that will have entries
	 * shared to user HATs.
	 */
	for (r = 0; r < num_kernel_ranges; ++r) {
		rp = &kernel_ranges[r];
		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
		    va += LEVEL_SIZE(rp->hkr_level)) {
			htable_t *ht;

			if (IN_HYPERVISOR_VA(va))
				continue;

			/* can/must skip if a page mapping already exists */
			if (rp->hkr_level <= mmu.max_page_level &&
			    (ht = htable_getpage(kas.a_hat, va, NULL)) !=
			    NULL) {
				htable_release(ht);
				continue;
			}

			(void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
			    NULL);
		}
	}

	/*
	 * 32 bit PAE metal kernels use only 4 of the 512 entries in the
	 * page holding the top level pagetable. We use the remainder for
	 * the "per CPU" page tables for VLP processes.
	 * Map the top level kernel pagetable into the kernel to make
	 * it easy to use bcopy to access these tables.
	 */
	if (mmu.pae_hat) {
		vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
		hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
		    kas.a_hat->hat_htable->ht_pfn,
#if !defined(__xpv)
		    PROT_WRITE |
#endif
		    PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
		    HAT_LOAD | HAT_LOAD_NOCONSIST);
	}
	hat_vlp_setup(CPU);

	/*
	 * Create kmap (cached mappings of kernel PTEs)
	 * for 32 bit we map from segmap_start .. ekernelheap
	 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
	 */
#if defined(__i386)
	size = (uintptr_t)ekernelheap - segmap_start;
#elif defined(__amd64)
	size = segmapsize;
#endif
	hat_kmap_init((uintptr_t)segmap_start, size);
}

/*
 * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
 * are 32 bit, so for safety we must use cas64() to install these.
 */
#ifdef __i386
static void
reload_pae32(hat_t *hat, cpu_t *cpu)
{
	x86pte_t *src;
	x86pte_t *dest;
	x86pte_t pte;
	int i;

	/*
	 * Load the 4 entries of the level 2 page table into this
	 * cpu's range of the vlp_page and point cr3 at them.
	 */
	ASSERT(mmu.pae_hat);
	src = hat->hat_vlp_ptes;
	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
	for (i = 0; i < VLP_NUM_PTES; ++i) {
		for (;;) {
			pte = dest[i];
			if (pte == src[i])
				break;
			if (cas64(dest + i, pte, src[i]) != src[i])
				break;
		}
	}
}
#endif

/*
 * Switch to a new active hat, maintaining bit masks to track active CPUs.
 *
 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
 * remains a 32-bit value.
 */
void
hat_switch(hat_t *hat)
{
	uint64_t	newcr3;
	cpu_t		*cpu = CPU;
	hat_t		*old = cpu->cpu_current_hat;

	/*
	 * set up this information first, so we don't miss any cross calls
	 */
	if (old != NULL) {
		if (old == hat)
			return;
		if (old != kas.a_hat)
			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
	}

	/*
	 * Add this CPU to the active set for this HAT.
	 */
	if (hat != kas.a_hat) {
		CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
	}
	cpu->cpu_current_hat = hat;

	/*
	 * now go ahead and load cr3
	 */
	if (hat->hat_flags & HAT_VLP) {
#if defined(__amd64)
		x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;

		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
		newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
#elif defined(__i386)
		reload_pae32(hat, cpu);
		newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
		    (cpu->cpu_id + 1) * VLP_SIZE;
#endif
	} else {
		newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
	}
#ifdef __xpv
	{
		struct mmuext_op t[2];
		uint_t retcnt;
		uint_t opcnt = 1;

		t[0].cmd = MMUEXT_NEW_BASEPTR;
		t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
#if defined(__amd64)
		/*
		 * There's an interesting problem here, as to what to
		 * actually specify when switching to the kernel hat.
		 * For now we'll reuse the kernel hat again.
		 */
		t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
		if (hat == kas.a_hat)
			t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
		else
			t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
		++opcnt;
#endif	/* __amd64 */
		if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmu_update() failed");
		ASSERT(retcnt == opcnt);

	}
#else
	setcr3(newcr3);
#endif
	ASSERT(cpu == CPU);
}

/*
 * Utility to return a valid x86pte_t from protections, pfn, and level number
 */
static x86pte_t
hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
{
	x86pte_t	pte;
	uint_t		cache_attr = attr & HAT_ORDER_MASK;

	pte = MAKEPTE(pfn, level);

	if (attr & PROT_WRITE)
		PTE_SET(pte, PT_WRITABLE);

	if (attr & PROT_USER)
		PTE_SET(pte, PT_USER);

	if (!(attr & PROT_EXEC))
		PTE_SET(pte, mmu.pt_nx);

	/*
	 * Set the software bits used to track ref/mod sync's and hments.
	 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
	 */
	if (flags & HAT_LOAD_NOCONSIST)
		PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
	else if (attr & HAT_NOSYNC)
		PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);

	/*
	 * Set the caching attributes in the PTE. The combination
	 * of attributes is poorly defined, so we pay attention
	 * to them in the given order.
	 *
	 * The test for HAT_STRICTORDER is different because it's defined
	 * as "0" - which was a stupid thing to do, but is too late to change!
	 */
	if (cache_attr == HAT_STRICTORDER) {
		PTE_SET(pte, PT_NOCACHE);
	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
		/* nothing to set */;
	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
		PTE_SET(pte, PT_NOCACHE);
		if (x86_feature & X86_PAT)
			PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
		else
			PTE_SET(pte, PT_WRITETHRU);
	} else {
		panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
	}

	return (pte);
}
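
/*
 * For instance, a writable, non-executable, normally cached user mapping
 * of a 4K page could be built as (sketch):
 *
 *	pte = hati_mkpte(pfn, PROT_READ | PROT_WRITE | PROT_USER |
 *	    HAT_STORECACHING_OK, 0, 0);
 *
 * yielding MAKEPTE(pfn, 0) with PT_WRITABLE and PT_USER set, mmu.pt_nx
 * set when NX is enabled, and no extra caching bits.
 */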

/*
 * Duplicate address translations of the parent to the child.
 * This function really isn't used anymore.
 */
/*ARGSUSED*/
int
hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
{
	ASSERT((uintptr_t)addr < kernelbase);
	ASSERT(new != kas.a_hat);
	ASSERT(old != kas.a_hat);
	return (0);
}

/*
 * Allocate any hat resources required for a process being swapped in.
 */
/*ARGSUSED*/
void
hat_swapin(hat_t *hat)
{
	/* do nothing - we let everything fault back in */
}

/*
 * Unload all translations associated with an address space of a process
 * that is being swapped out.
 */
void
hat_swapout(hat_t *hat)
{
	uintptr_t	vaddr = (uintptr_t)0;
	uintptr_t	eaddr = _userlimit;
	htable_t	*ht = NULL;
	level_t		l;

	XPV_DISALLOW_MIGRATE();
	/*
	 * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
	 * seg_spt and shared pagetables can't be swapped out.
	 * Take a look at segspt_shmswapout() - it's a big no-op.
	 *
	 * Instead we'll walk through all the address space and unload
	 * any mappings which we are sure are not shared, not locked.
	 */
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
		eaddr = (uintptr_t)hat->hat_as->a_userlimit;

	while (vaddr < eaddr) {
		(void) htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;

		ASSERT(!IN_VA_HOLE(vaddr));

		/*
		 * If the page table is shared skip its entire range.
		 * This code knows that only level 0 page tables are shared
		 */
		l = ht->ht_level;
		if (ht->ht_flags & HTABLE_SHARED_PFN) {
			ASSERT(l == 0);
			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
			htable_release(ht);
			ht = NULL;
			continue;
		}

		/*
		 * If the page table has no locked entries, unload this one.
		 */
		if (ht->ht_lock_cnt == 0)
			hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
			    HAT_UNLOAD_UNMAP);

		/*
		 * If we have a level 0 page table with locked entries,
		 * skip the entire page table, otherwise skip just one entry.
		 */
		if (ht->ht_lock_cnt > 0 && l == 0)
			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
		else
			vaddr += LEVEL_SIZE(l);
	}
	if (ht)
		htable_release(ht);

	/*
	 * We're in swapout because the system is low on memory, so
	 * go back and flush all the htables off the cached list.
	 */
	htable_purge_hat(hat);
	XPV_ALLOW_MIGRATE();
}

/*
 * returns number of bytes that have valid mappings in hat.
 */
size_t
hat_get_mapped_size(hat_t *hat)
{
	size_t total = 0;
	int l;

	for (l = 0; l <= mmu.max_page_level; l++)
		total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
	total += hat->hat_ism_pgcnt;

	return (total);
}

/*
 * enable/disable collection of stats for hat.
 */
int
hat_stats_enable(hat_t *hat)
{
	atomic_add_32(&hat->hat_stats, 1);
	return (1);
}

void
hat_stats_disable(hat_t *hat)
{
	atomic_add_32(&hat->hat_stats, -1);
}

/*
 * Utility to sync the ref/mod bits from a page table entry to the page_t
 * We must be holding the mapping list lock when this is called.
 */
static void
hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
{
	uint_t	rm = 0;
	pgcnt_t	pgcnt;

	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
		return;

	if (PTE_GET(pte, PT_REF))
		rm |= P_REF;

	if (PTE_GET(pte, PT_MOD))
		rm |= P_MOD;

	if (rm == 0)
		return;

	/*
	 * sync to all constituent pages of a large page
	 */
	ASSERT(x86_hm_held(pp));
	pgcnt = page_get_pagecnt(level);
	ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
	for (; pgcnt > 0; --pgcnt) {
		/*
		 * hat_page_demote() can't decrease
		 * pszc below this mapping size
		 * since this large mapping existed after we
		 * took mlist lock.
		 */
		ASSERT(pp->p_szc >= level);
		hat_page_setattr(pp, rm);
		++pp;
	}
}

/*
 * This is the set of PTE bits for PFN, permissions and caching
 * that are allowed to change on a HAT_LOAD_REMAP
 */
#define	PT_REMAP_BITS							\
	(PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |		\
	PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)

#define	REMAPASSERT(EX)	if (!(EX)) panic("hati_pte_map: " #EX)
/*
 * Do the low-level work to get a mapping entered into a HAT's pagetables
 * and in the mapping list of the associated page_t.
 */
static int
hati_pte_map(
	htable_t	*ht,
	uint_t		entry,
	page_t		*pp,
	x86pte_t	pte,
	int		flags,
	void		*pte_ptr)
{
	hat_t		*hat = ht->ht_hat;
	x86pte_t	old_pte;
	level_t		l = ht->ht_level;
	hment_t		*hm;
	uint_t		is_consist;
	int		rv = 0;

	/*
12420Sstevel@tonic-gate 	 * Is this a consistent (i.e. needs the mapping list lock) mapping?
12430Sstevel@tonic-gate 	 */
12440Sstevel@tonic-gate 	is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
12450Sstevel@tonic-gate 
12460Sstevel@tonic-gate 	/*
12470Sstevel@tonic-gate 	 * Track locked mapping count in the htable.  Do this first,
12480Sstevel@tonic-gate 	 * as we track locking even if there already is a mapping present.
12490Sstevel@tonic-gate 	 */
12500Sstevel@tonic-gate 	if ((flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat)
12510Sstevel@tonic-gate 		HTABLE_LOCK_INC(ht);
12520Sstevel@tonic-gate 
12530Sstevel@tonic-gate 	/*
12540Sstevel@tonic-gate 	 * Acquire the page's mapping list lock and get an hment to use.
12550Sstevel@tonic-gate 	 * Note that hment_prepare() might return NULL.
12560Sstevel@tonic-gate 	 */
12570Sstevel@tonic-gate 	if (is_consist) {
12580Sstevel@tonic-gate 		x86_hm_enter(pp);
12590Sstevel@tonic-gate 		hm = hment_prepare(ht, entry, pp);
12600Sstevel@tonic-gate 	}
12610Sstevel@tonic-gate 
12620Sstevel@tonic-gate 	/*
12630Sstevel@tonic-gate 	 * Set the new pte, retrieving the old one at the same time.
12640Sstevel@tonic-gate 	 */
12650Sstevel@tonic-gate 	old_pte = x86pte_set(ht, entry, pte, pte_ptr);
12660Sstevel@tonic-gate 
12670Sstevel@tonic-gate 	/*
12683446Smrj 	 * did we get a large page / page table collision?
12693446Smrj 	 */
12703446Smrj 	if (old_pte == LPAGE_ERROR) {
12713446Smrj 		rv = -1;
12723446Smrj 		goto done;
12733446Smrj 	}
12743446Smrj 
12753446Smrj 	/*
12760Sstevel@tonic-gate 	 * If the mapping didn't change there is nothing more to do.
12770Sstevel@tonic-gate 	 */
12783446Smrj 	if (PTE_EQUIV(pte, old_pte))
12793446Smrj 		goto done;
12800Sstevel@tonic-gate 
12810Sstevel@tonic-gate 	/*
12820Sstevel@tonic-gate 	 * Install a new mapping in the page's mapping list
12830Sstevel@tonic-gate 	 */
12840Sstevel@tonic-gate 	if (!PTE_ISVALID(old_pte)) {
12850Sstevel@tonic-gate 		if (is_consist) {
12860Sstevel@tonic-gate 			hment_assign(ht, entry, pp, hm);
12870Sstevel@tonic-gate 			x86_hm_exit(pp);
12880Sstevel@tonic-gate 		} else {
12890Sstevel@tonic-gate 			ASSERT(flags & HAT_LOAD_NOCONSIST);
12900Sstevel@tonic-gate 		}
12910Sstevel@tonic-gate 		HTABLE_INC(ht->ht_valid_cnt);
12920Sstevel@tonic-gate 		PGCNT_INC(hat, l);
12933446Smrj 		return (rv);
12940Sstevel@tonic-gate 	}
12950Sstevel@tonic-gate 
12960Sstevel@tonic-gate 	/*
12970Sstevel@tonic-gate 	 * Remaps are more complicated:
12980Sstevel@tonic-gate 	 *  - HAT_LOAD_REMAP must be specified if changing the pfn.
12990Sstevel@tonic-gate 	 *    We also require that NOCONSIST be specified.
13000Sstevel@tonic-gate 	 *  - Otherwise only permission or caching bits may change.
13010Sstevel@tonic-gate 	 */
13020Sstevel@tonic-gate 	if (!PTE_ISPAGE(old_pte, l))
13030Sstevel@tonic-gate 		panic("non-null/page mapping pte=" FMT_PTE, old_pte);
13040Sstevel@tonic-gate 
13050Sstevel@tonic-gate 	if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1306510Skchow 		REMAPASSERT(flags & HAT_LOAD_REMAP);
1307510Skchow 		REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
13083446Smrj 		REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1309510Skchow 		REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
13100Sstevel@tonic-gate 		    pf_is_memory(PTE2PFN(pte, l)));
1311510Skchow 		REMAPASSERT(!is_consist);
13120Sstevel@tonic-gate 	}
13130Sstevel@tonic-gate 
13140Sstevel@tonic-gate 	/*
13155084Sjohnlev 	 * We only let remaps change certain bits in the PTE.
13160Sstevel@tonic-gate 	 */
13175084Sjohnlev 	if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
13185084Sjohnlev 		panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
13195084Sjohnlev 		    old_pte, pte);
13200Sstevel@tonic-gate 
13210Sstevel@tonic-gate 	/*
13220Sstevel@tonic-gate 	 * We don't create any mapping list entries on a remap, so release
13230Sstevel@tonic-gate 	 * any allocated hment after we drop the mapping list lock.
13240Sstevel@tonic-gate 	 */
13253446Smrj done:
13260Sstevel@tonic-gate 	if (is_consist) {
13270Sstevel@tonic-gate 		x86_hm_exit(pp);
13280Sstevel@tonic-gate 		if (hm != NULL)
13290Sstevel@tonic-gate 			hment_free(hm);
13300Sstevel@tonic-gate 	}
13313446Smrj 	return (rv);
13320Sstevel@tonic-gate }
13330Sstevel@tonic-gate 
13340Sstevel@tonic-gate /*
13353446Smrj  * Internal routine to load a single page table entry. This only fails if
13363446Smrj  * we attempt to overwrite a page table link with a large page.
13370Sstevel@tonic-gate  */
13383446Smrj static int
13390Sstevel@tonic-gate hati_load_common(
13400Sstevel@tonic-gate 	hat_t		*hat,
13410Sstevel@tonic-gate 	uintptr_t	va,
13420Sstevel@tonic-gate 	page_t		*pp,
13430Sstevel@tonic-gate 	uint_t		attr,
13440Sstevel@tonic-gate 	uint_t		flags,
13450Sstevel@tonic-gate 	level_t		level,
13460Sstevel@tonic-gate 	pfn_t		pfn)
13470Sstevel@tonic-gate {
13480Sstevel@tonic-gate 	htable_t	*ht;
13490Sstevel@tonic-gate 	uint_t		entry;
13500Sstevel@tonic-gate 	x86pte_t	pte;
13513446Smrj 	int		rv = 0;
13520Sstevel@tonic-gate 
13534004Sjosephb 	/*
13544004Sjosephb 	 * The number 16 is arbitrary and is here to catch a recursion
13554004Sjosephb 	 * problem early, before we blow out the kernel stack.
13564004Sjosephb 	 */
13574004Sjosephb 	++curthread->t_hatdepth;
13584004Sjosephb 	ASSERT(curthread->t_hatdepth < 16);
13594004Sjosephb 
13600Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
13610Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
13620Sstevel@tonic-gate 
13630Sstevel@tonic-gate 	if (flags & HAT_LOAD_SHARE)
13640Sstevel@tonic-gate 		hat->hat_flags |= HAT_SHARED;
13650Sstevel@tonic-gate 
13660Sstevel@tonic-gate 	/*
13670Sstevel@tonic-gate 	 * Find the page table that maps this page if it already exists.
13680Sstevel@tonic-gate 	 */
13690Sstevel@tonic-gate 	ht = htable_lookup(hat, va, level);
13700Sstevel@tonic-gate 
13710Sstevel@tonic-gate 	/*
13724004Sjosephb 	 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
13730Sstevel@tonic-gate 	 */
13744004Sjosephb 	if (pp == NULL)
13750Sstevel@tonic-gate 		flags |= HAT_LOAD_NOCONSIST;
13760Sstevel@tonic-gate 
13770Sstevel@tonic-gate 	if (ht == NULL) {
13780Sstevel@tonic-gate 		ht = htable_create(hat, va, level, NULL);
13790Sstevel@tonic-gate 		ASSERT(ht != NULL);
13800Sstevel@tonic-gate 	}
13810Sstevel@tonic-gate 	entry = htable_va2entry(va, ht);
13820Sstevel@tonic-gate 
13830Sstevel@tonic-gate 	/*
13840Sstevel@tonic-gate 	 * a bunch of paranoid error checking
13850Sstevel@tonic-gate 	 */
13860Sstevel@tonic-gate 	ASSERT(ht->ht_busy > 0);
13870Sstevel@tonic-gate 	if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
13880Sstevel@tonic-gate 		panic("hati_load_common: bad htable %p, va %p", ht, (void *)va);
13890Sstevel@tonic-gate 	ASSERT(ht->ht_level == level);
13900Sstevel@tonic-gate 
13910Sstevel@tonic-gate 	/*
13920Sstevel@tonic-gate 	 * construct the new PTE
13930Sstevel@tonic-gate 	 */
13940Sstevel@tonic-gate 	if (hat == kas.a_hat)
13950Sstevel@tonic-gate 		attr &= ~PROT_USER;
13960Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, level, flags);
13970Sstevel@tonic-gate 	if (hat == kas.a_hat && va >= kernelbase)
13980Sstevel@tonic-gate 		PTE_SET(pte, mmu.pt_global);
13990Sstevel@tonic-gate 
14000Sstevel@tonic-gate 	/*
14010Sstevel@tonic-gate 	 * establish the mapping
14020Sstevel@tonic-gate 	 */
14033446Smrj 	rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
14040Sstevel@tonic-gate 
14050Sstevel@tonic-gate 	/*
14060Sstevel@tonic-gate 	 * release the htable and any reserves
14070Sstevel@tonic-gate 	 */
14080Sstevel@tonic-gate 	htable_release(ht);
14094004Sjosephb 	--curthread->t_hatdepth;
14103446Smrj 	return (rv);
14110Sstevel@tonic-gate }
14120Sstevel@tonic-gate 
14130Sstevel@tonic-gate /*
14140Sstevel@tonic-gate  * special case of hat_memload to deal with some kernel addrs for performance
14150Sstevel@tonic-gate  */
14160Sstevel@tonic-gate static void
14170Sstevel@tonic-gate hat_kmap_load(
14180Sstevel@tonic-gate 	caddr_t		addr,
14190Sstevel@tonic-gate 	page_t		*pp,
14200Sstevel@tonic-gate 	uint_t		attr,
14210Sstevel@tonic-gate 	uint_t		flags)
14220Sstevel@tonic-gate {
14230Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
14240Sstevel@tonic-gate 	x86pte_t	pte;
14250Sstevel@tonic-gate 	pfn_t		pfn = page_pptonum(pp);
14260Sstevel@tonic-gate 	pgcnt_t		pg_off = mmu_btop(va - mmu.kmap_addr);
14270Sstevel@tonic-gate 	htable_t	*ht;
14280Sstevel@tonic-gate 	uint_t		entry;
14290Sstevel@tonic-gate 	void		*pte_ptr;
14300Sstevel@tonic-gate 
14310Sstevel@tonic-gate 	/*
14320Sstevel@tonic-gate 	 * construct the requested PTE
14330Sstevel@tonic-gate 	 */
14340Sstevel@tonic-gate 	attr &= ~PROT_USER;
14350Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
14360Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, 0, flags);
14370Sstevel@tonic-gate 	PTE_SET(pte, mmu.pt_global);
14380Sstevel@tonic-gate 
14390Sstevel@tonic-gate 	/*
14400Sstevel@tonic-gate 	 * Figure out the pte_ptr and htable and use common code to finish up
14410Sstevel@tonic-gate 	 */
14420Sstevel@tonic-gate 	if (mmu.pae_hat)
14430Sstevel@tonic-gate 		pte_ptr = mmu.kmap_ptes + pg_off;
14440Sstevel@tonic-gate 	else
14450Sstevel@tonic-gate 		pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
14460Sstevel@tonic-gate 	ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
14470Sstevel@tonic-gate 	    LEVEL_SHIFT(1)];
14480Sstevel@tonic-gate 	entry = htable_va2entry(va, ht);
14494004Sjosephb 	++curthread->t_hatdepth;
14504004Sjosephb 	ASSERT(curthread->t_hatdepth < 16);
14513446Smrj 	(void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
14524004Sjosephb 	--curthread->t_hatdepth;
14530Sstevel@tonic-gate }
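/*
 * Worked example (not compiled in) of the kmap indexing above, assuming
 * 4K pages: for va == mmu.kmap_addr + 0x5000, pg_off is 5 and pte_ptr
 * points at the sixth PTE in the kmap PTE window.
 */
#if 0
	pgcnt_t pg_off = mmu_btop(va - mmu.kmap_addr);	/* 0x5000 >> 12 == 5 */
	x86pte_t *ptep = mmu.kmap_ptes + pg_off;	/* pae_hat layout */
#endif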
14540Sstevel@tonic-gate 
14550Sstevel@tonic-gate /*
14560Sstevel@tonic-gate  * hat_memload() - load a translation to the given page struct
14570Sstevel@tonic-gate  *
14580Sstevel@tonic-gate  * Flags for hat_memload/hat_devload/hat_*attr.
14590Sstevel@tonic-gate  *
14600Sstevel@tonic-gate  * 	HAT_LOAD	Default flags to load a translation to the page.
14610Sstevel@tonic-gate  *
14620Sstevel@tonic-gate  * 	HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
14630Sstevel@tonic-gate  *			and hat_devload().
14640Sstevel@tonic-gate  *
14650Sstevel@tonic-gate  *	HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
14663446Smrj  *			sets PT_NOCONSIST
14670Sstevel@tonic-gate  *
14680Sstevel@tonic-gate  *	HAT_LOAD_SHARE	A flag to hat_memload() to indicate that the h/w
14690Sstevel@tonic-gate  *			page tables that map some user pages (not kas) are
14700Sstevel@tonic-gate  *			shared by more than one process (e.g. ISM).
14710Sstevel@tonic-gate  *
14720Sstevel@tonic-gate  *	HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
14730Sstevel@tonic-gate  *
14740Sstevel@tonic-gate  *	HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
14750Sstevel@tonic-gate  *			point, it's setting up a mapping to allocate internal
14760Sstevel@tonic-gate  *			hat layer data structures.  This flag forces the hat
14770Sstevel@tonic-gate  *			layer to tap its reserves in order to prevent infinite
14780Sstevel@tonic-gate  *			recursion.
14790Sstevel@tonic-gate  *
14800Sstevel@tonic-gate  * The following is a protection attribute (like PROT_READ, etc.)
14810Sstevel@tonic-gate  *
14823446Smrj  *	HAT_NOSYNC	sets PT_NOSYNC - this mapping's ref/mod bits
14830Sstevel@tonic-gate  *			are never cleared.
14840Sstevel@tonic-gate  *
14850Sstevel@tonic-gate  * Installing a new valid PTE and creating the mapping list
14860Sstevel@tonic-gate  * entry are controlled under the same lock, which is derived
14870Sstevel@tonic-gate  * from the page_t being mapped.
14880Sstevel@tonic-gate  */
14890Sstevel@tonic-gate static uint_t supported_memload_flags =
14900Sstevel@tonic-gate 	HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
14910Sstevel@tonic-gate 	HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
14920Sstevel@tonic-gate 
14930Sstevel@tonic-gate void
14940Sstevel@tonic-gate hat_memload(
14950Sstevel@tonic-gate 	hat_t		*hat,
14960Sstevel@tonic-gate 	caddr_t		addr,
14970Sstevel@tonic-gate 	page_t		*pp,
14980Sstevel@tonic-gate 	uint_t		attr,
14990Sstevel@tonic-gate 	uint_t		flags)
15000Sstevel@tonic-gate {
15010Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
15020Sstevel@tonic-gate 	level_t		level = 0;
15030Sstevel@tonic-gate 	pfn_t		pfn = page_pptonum(pp);
15040Sstevel@tonic-gate 
15055084Sjohnlev 	XPV_DISALLOW_MIGRATE();
15060Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
15073446Smrj 	ASSERT(hat == kas.a_hat || va < _userlimit);
15080Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
15090Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
15100Sstevel@tonic-gate 	ASSERT((flags & supported_memload_flags) == flags);
15110Sstevel@tonic-gate 
15120Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
15130Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
15140Sstevel@tonic-gate 
15150Sstevel@tonic-gate 	/*
15160Sstevel@tonic-gate 	 * kernel address special case for performance.
15170Sstevel@tonic-gate 	 */
15180Sstevel@tonic-gate 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
15190Sstevel@tonic-gate 		ASSERT(hat == kas.a_hat);
15200Sstevel@tonic-gate 		hat_kmap_load(addr, pp, attr, flags);
15215084Sjohnlev 		XPV_ALLOW_MIGRATE();
15220Sstevel@tonic-gate 		return;
15230Sstevel@tonic-gate 	}
15240Sstevel@tonic-gate 
15250Sstevel@tonic-gate 	/*
15260Sstevel@tonic-gate 	 * This is used for memory with normal caching enabled, so
15270Sstevel@tonic-gate 	 * always set HAT_STORECACHING_OK.
15280Sstevel@tonic-gate 	 */
15290Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
15303446Smrj 	if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
15313446Smrj 		panic("unexpected hati_load_common() failure");
15325084Sjohnlev 	XPV_ALLOW_MIGRATE();
15330Sstevel@tonic-gate }
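/*
 * Usage sketch (not compiled in), assuming "hat", "va" and "pp" are a
 * valid hat, a page aligned address and a held page_t, with the as lock
 * held as the ASSERTs above require: load a locked writable translation,
 * then unload and unlock it in one call.
 */
#if 0
	hat_memload(hat, va, pp, PROT_READ | PROT_WRITE, HAT_LOAD_LOCK);
	...
	hat_unload(hat, va, MMU_PAGESIZE, HAT_UNLOAD_UNLOCK);
#endif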
15340Sstevel@tonic-gate 
15354528Spaulsan /* ARGSUSED */
15364528Spaulsan void
15374528Spaulsan hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
15384528Spaulsan     uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
15394528Spaulsan {
15404528Spaulsan 	hat_memload(hat, addr, pp, attr, flags);
15414528Spaulsan }
15424528Spaulsan 
15430Sstevel@tonic-gate /*
15440Sstevel@tonic-gate  * Load the given array of page structs using large pages when possible
15450Sstevel@tonic-gate  */
15460Sstevel@tonic-gate void
15470Sstevel@tonic-gate hat_memload_array(
15480Sstevel@tonic-gate 	hat_t		*hat,
15490Sstevel@tonic-gate 	caddr_t		addr,
15500Sstevel@tonic-gate 	size_t		len,
15510Sstevel@tonic-gate 	page_t		**pages,
15520Sstevel@tonic-gate 	uint_t		attr,
15530Sstevel@tonic-gate 	uint_t		flags)
15540Sstevel@tonic-gate {
15550Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
15560Sstevel@tonic-gate 	uintptr_t	eaddr = va + len;
15570Sstevel@tonic-gate 	level_t		level;
15580Sstevel@tonic-gate 	size_t		pgsize;
15590Sstevel@tonic-gate 	pgcnt_t		pgindx = 0;
15600Sstevel@tonic-gate 	pfn_t		pfn;
15610Sstevel@tonic-gate 	pgcnt_t		i;
15620Sstevel@tonic-gate 
15635084Sjohnlev 	XPV_DISALLOW_MIGRATE();
15640Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
15653446Smrj 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
15660Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
15670Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
15680Sstevel@tonic-gate 	ASSERT((flags & supported_memload_flags) == flags);
15690Sstevel@tonic-gate 
15700Sstevel@tonic-gate 	/*
15710Sstevel@tonic-gate 	 * memload is used for memory with full caching enabled, so
15720Sstevel@tonic-gate 	 * set HAT_STORECACHING_OK.
15730Sstevel@tonic-gate 	 */
15740Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
15750Sstevel@tonic-gate 
15760Sstevel@tonic-gate 	/*
15770Sstevel@tonic-gate 	 * handle all pages using largest possible pagesize
15780Sstevel@tonic-gate 	 */
15790Sstevel@tonic-gate 	while (va < eaddr) {
15800Sstevel@tonic-gate 		/*
15810Sstevel@tonic-gate 		 * decide what level mapping to use (ie. pagesize)
15820Sstevel@tonic-gate 		 */
15830Sstevel@tonic-gate 		pfn = page_pptonum(pages[pgindx]);
15840Sstevel@tonic-gate 		for (level = mmu.max_page_level; ; --level) {
15850Sstevel@tonic-gate 			pgsize = LEVEL_SIZE(level);
15860Sstevel@tonic-gate 			if (level == 0)
15870Sstevel@tonic-gate 				break;
15883446Smrj 
15890Sstevel@tonic-gate 			if (!IS_P2ALIGNED(va, pgsize) ||
15900Sstevel@tonic-gate 			    (eaddr - va) < pgsize ||
15913446Smrj 			    !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
15920Sstevel@tonic-gate 				continue;
15930Sstevel@tonic-gate 
15940Sstevel@tonic-gate 			/*
15950Sstevel@tonic-gate 			 * To use a large mapping of this size, all the
15960Sstevel@tonic-gate 			 * pages we are passed must be sequential subpages
15970Sstevel@tonic-gate 			 * of the large page.
15980Sstevel@tonic-gate 			 * hat_page_demote() can't change p_szc because
15990Sstevel@tonic-gate 			 * all pages are locked.
16000Sstevel@tonic-gate 			 */
16010Sstevel@tonic-gate 			if (pages[pgindx]->p_szc >= level) {
16020Sstevel@tonic-gate 				for (i = 0; i < mmu_btop(pgsize); ++i) {
16030Sstevel@tonic-gate 					if (pfn + i !=
16040Sstevel@tonic-gate 					    page_pptonum(pages[pgindx + i]))
16050Sstevel@tonic-gate 						break;
16060Sstevel@tonic-gate 					ASSERT(pages[pgindx + i]->p_szc >=
16070Sstevel@tonic-gate 					    level);
16080Sstevel@tonic-gate 					ASSERT(pages[pgindx] + i ==
16090Sstevel@tonic-gate 					    pages[pgindx + i]);
16100Sstevel@tonic-gate 				}
16110Sstevel@tonic-gate 				if (i == mmu_btop(pgsize))
16120Sstevel@tonic-gate 					break;
16130Sstevel@tonic-gate 			}
16140Sstevel@tonic-gate 		}
16150Sstevel@tonic-gate 
16160Sstevel@tonic-gate 		/*
16173446Smrj 		 * Load this page mapping. If the load fails, try a smaller
16183446Smrj 		 * pagesize.
16190Sstevel@tonic-gate 		 */
16200Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(va));
16213446Smrj 		while (hati_load_common(hat, va, pages[pgindx], attr,
16224381Sjosephb 		    flags, level, pfn) != 0) {
16233446Smrj 			if (level == 0)
16243446Smrj 				panic("unexpected hati_load_common() failure");
16253446Smrj 			--level;
16263446Smrj 			pgsize = LEVEL_SIZE(level);
16273446Smrj 		}
16280Sstevel@tonic-gate 
16290Sstevel@tonic-gate 		/*
16300Sstevel@tonic-gate 		 * move to next page
16310Sstevel@tonic-gate 		 */
16320Sstevel@tonic-gate 		va += pgsize;
16330Sstevel@tonic-gate 		pgindx += mmu_btop(pgsize);
16340Sstevel@tonic-gate 	}
16355084Sjohnlev 	XPV_ALLOW_MIGRATE();
16360Sstevel@tonic-gate }
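/*
 * Worked example (not compiled in) of the level selection above, assuming
 * amd64 with 4K base pages and a 2M level 1 page size: a va and pfn that
 * are both 2M aligned, with at least 2M of the request remaining, allow a
 * single level 1 PTE to replace 512 level 0 mappings.
 */
#if 0
	if (IS_P2ALIGNED(va, LEVEL_SIZE(1)) &&
	    (eaddr - va) >= LEVEL_SIZE(1) &&
	    IS_P2ALIGNED(pfn_to_pa(pfn), LEVEL_SIZE(1)))
		level = 1;	/* one 2M mapping covers 512 pages */
#endif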
16370Sstevel@tonic-gate 
16384528Spaulsan /* ARGSUSED */
16394528Spaulsan void
16404528Spaulsan hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
16414528Spaulsan     struct page **pps, uint_t attr, uint_t flags,
16424528Spaulsan     hat_region_cookie_t rcookie)
16434528Spaulsan {
16444528Spaulsan 	hat_memload_array(hat, addr, len, pps, attr, flags);
16454528Spaulsan }
16464528Spaulsan 
16470Sstevel@tonic-gate /*
16480Sstevel@tonic-gate  * void hat_devload(hat, addr, len, pf, attr, flags)
16490Sstevel@tonic-gate  *	load/lock the given page frame number
16500Sstevel@tonic-gate  *
16510Sstevel@tonic-gate  * Advisory ordering attributes. Apply only to device mappings.
16520Sstevel@tonic-gate  *
16530Sstevel@tonic-gate  * HAT_STRICTORDER: the CPU must issue the references in order, as the
16540Sstevel@tonic-gate  *	programmer specified.  This is the default.
16550Sstevel@tonic-gate  * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
16560Sstevel@tonic-gate  *	of reordering; store or load with store or load).
16570Sstevel@tonic-gate  * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
16580Sstevel@tonic-gate  *	to consecutive locations (for example, turn two consecutive byte
16590Sstevel@tonic-gate  *	stores into one halfword store), and it may batch individual loads
16600Sstevel@tonic-gate  *	(for example, turn two consecutive byte loads into one halfword load).
16610Sstevel@tonic-gate  *	This also implies re-ordering.
16620Sstevel@tonic-gate  * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
16630Sstevel@tonic-gate  *	until another store occurs.  The default is to fetch new data
16640Sstevel@tonic-gate  *	on every load.  This also implies merging.
16650Sstevel@tonic-gate  * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
16660Sstevel@tonic-gate  *	the device (perhaps with other data) at a later time.  The default is
16670Sstevel@tonic-gate  *	to push the data right away.  This also implies load caching.
16680Sstevel@tonic-gate  *
16690Sstevel@tonic-gate  * Equivalent of hat_memload(), but can be used for device memory where
16700Sstevel@tonic-gate  * there are no page_t's and we support additional flags (write merging, etc).
16710Sstevel@tonic-gate  * Note that we can have large page mappings with this interface.
16720Sstevel@tonic-gate  */
16730Sstevel@tonic-gate int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
16740Sstevel@tonic-gate 	HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
16750Sstevel@tonic-gate 	HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
16760Sstevel@tonic-gate 
16770Sstevel@tonic-gate void
16780Sstevel@tonic-gate hat_devload(
16790Sstevel@tonic-gate 	hat_t		*hat,
16800Sstevel@tonic-gate 	caddr_t		addr,
16810Sstevel@tonic-gate 	size_t		len,
16820Sstevel@tonic-gate 	pfn_t		pfn,
16830Sstevel@tonic-gate 	uint_t		attr,
16840Sstevel@tonic-gate 	int		flags)
16850Sstevel@tonic-gate {
16860Sstevel@tonic-gate 	uintptr_t	va = ALIGN2PAGE(addr);
16870Sstevel@tonic-gate 	uintptr_t	eva = va + len;
16880Sstevel@tonic-gate 	level_t		level;
16890Sstevel@tonic-gate 	size_t		pgsize;
16900Sstevel@tonic-gate 	page_t		*pp;
16910Sstevel@tonic-gate 	int		f;	/* per PTE copy of flags  - maybe modified */
16920Sstevel@tonic-gate 	uint_t		a;	/* per PTE copy of attr */
16930Sstevel@tonic-gate 
16945084Sjohnlev 	XPV_DISALLOW_MIGRATE();
16950Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
16963446Smrj 	ASSERT(hat == kas.a_hat || eva <= _userlimit);
16970Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
16980Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
16990Sstevel@tonic-gate 	ASSERT((flags & supported_devload_flags) == flags);
17000Sstevel@tonic-gate 
17010Sstevel@tonic-gate 	/*
17020Sstevel@tonic-gate 	 * handle all pages
17030Sstevel@tonic-gate 	 */
17040Sstevel@tonic-gate 	while (va < eva) {
17050Sstevel@tonic-gate 
17060Sstevel@tonic-gate 		/*
17070Sstevel@tonic-gate 		 * decide what level mapping to use (ie. pagesize)
17080Sstevel@tonic-gate 		 */
17090Sstevel@tonic-gate 		for (level = mmu.max_page_level; ; --level) {
17100Sstevel@tonic-gate 			pgsize = LEVEL_SIZE(level);
17110Sstevel@tonic-gate 			if (level == 0)
17120Sstevel@tonic-gate 				break;
17130Sstevel@tonic-gate 			if (IS_P2ALIGNED(va, pgsize) &&
17140Sstevel@tonic-gate 			    (eva - va) >= pgsize &&
17150Sstevel@tonic-gate 			    IS_P2ALIGNED(pfn, mmu_btop(pgsize)))
17160Sstevel@tonic-gate 				break;
17170Sstevel@tonic-gate 		}
17180Sstevel@tonic-gate 
17190Sstevel@tonic-gate 		/*
17203446Smrj 		 * If this is just memory then allow caching (this happens
17210Sstevel@tonic-gate 		 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
17223446Smrj 		 * to override that. If we don't have a page_t then make sure
17230Sstevel@tonic-gate 		 * NOCONSIST is set.
17240Sstevel@tonic-gate 		 */
17250Sstevel@tonic-gate 		a = attr;
17260Sstevel@tonic-gate 		f = flags;
17275084Sjohnlev 		if (!pf_is_memory(pfn))
17285084Sjohnlev 			f |= HAT_LOAD_NOCONSIST;
17295084Sjohnlev 		else if (!(a & HAT_PLAT_NOCACHE))
17305084Sjohnlev 			a |= HAT_STORECACHING_OK;
17315084Sjohnlev 
17325084Sjohnlev 		if (f & HAT_LOAD_NOCONSIST)
17330Sstevel@tonic-gate 			pp = NULL;
17345084Sjohnlev 		else
17355084Sjohnlev 			pp = page_numtopp_nolock(pfn);
17360Sstevel@tonic-gate 
17370Sstevel@tonic-gate 		/*
17380Sstevel@tonic-gate 		 * load this page mapping
17390Sstevel@tonic-gate 		 */
17400Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(va));
17413446Smrj 		while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
17423446Smrj 			if (level == 0)
17433446Smrj 				panic("unexpected hati_load_common() failure");
17443446Smrj 			--level;
17453446Smrj 			pgsize = LEVEL_SIZE(level);
17463446Smrj 		}
17470Sstevel@tonic-gate 
17480Sstevel@tonic-gate 		/*
17490Sstevel@tonic-gate 		 * move to next page
17500Sstevel@tonic-gate 		 */
17510Sstevel@tonic-gate 		va += pgsize;
17520Sstevel@tonic-gate 		pfn += mmu_btop(pgsize);
17530Sstevel@tonic-gate 	}
17545084Sjohnlev 	XPV_ALLOW_MIGRATE();
17550Sstevel@tonic-gate }
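/*
 * Usage sketch (not compiled in): map one page of device registers
 * uncached with strict ordering.  "mmio_va" and "mmio_pfn" are
 * hypothetical names used for illustration only.
 */
#if 0
	hat_devload(kas.a_hat, mmio_va, MMU_PAGESIZE, mmio_pfn,
	    PROT_READ | PROT_WRITE | HAT_PLAT_NOCACHE,
	    HAT_LOAD_LOCK | HAT_STRICTORDER);
#endif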
17560Sstevel@tonic-gate 
17570Sstevel@tonic-gate /*
17580Sstevel@tonic-gate  * void hat_unlock(hat, addr, len)
17590Sstevel@tonic-gate  *	unlock the mappings to a given range of addresses
17600Sstevel@tonic-gate  *
17610Sstevel@tonic-gate  * Locks are tracked by ht_lock_cnt in the htable.
17620Sstevel@tonic-gate  */
17630Sstevel@tonic-gate void
17640Sstevel@tonic-gate hat_unlock(hat_t *hat, caddr_t addr, size_t len)
17650Sstevel@tonic-gate {
17660Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
17670Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
17680Sstevel@tonic-gate 	htable_t	*ht = NULL;
17690Sstevel@tonic-gate 
17700Sstevel@tonic-gate 	/*
17710Sstevel@tonic-gate 	 * kernel entries are always locked; we don't track lock counts
17720Sstevel@tonic-gate 	 */
17733446Smrj 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
17740Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
17750Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
17760Sstevel@tonic-gate 	if (hat == kas.a_hat)
17770Sstevel@tonic-gate 		return;
17780Sstevel@tonic-gate 	if (eaddr > _userlimit)
17790Sstevel@tonic-gate 		panic("hat_unlock() address out of range - above _userlimit");
17800Sstevel@tonic-gate 
17815084Sjohnlev 	XPV_DISALLOW_MIGRATE();
17820Sstevel@tonic-gate 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
17830Sstevel@tonic-gate 	while (vaddr < eaddr) {
17840Sstevel@tonic-gate 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
17850Sstevel@tonic-gate 		if (ht == NULL)
17860Sstevel@tonic-gate 			break;
17870Sstevel@tonic-gate 
17880Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
17890Sstevel@tonic-gate 
17900Sstevel@tonic-gate 		if (ht->ht_lock_cnt < 1)
17910Sstevel@tonic-gate 			panic("hat_unlock(): lock_cnt < 1, "
17920Sstevel@tonic-gate 			    "htable=%p, vaddr=%p\n", ht, (caddr_t)vaddr);
17930Sstevel@tonic-gate 		HTABLE_LOCK_DEC(ht);
17940Sstevel@tonic-gate 
17950Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
17960Sstevel@tonic-gate 	}
17970Sstevel@tonic-gate 	if (ht)
17980Sstevel@tonic-gate 		htable_release(ht);
17995084Sjohnlev 	XPV_ALLOW_MIGRATE();
18000Sstevel@tonic-gate }
18010Sstevel@tonic-gate 
18024528Spaulsan /* ARGSUSED */
18034528Spaulsan void
18045075Spaulsan hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
18054528Spaulsan     hat_region_cookie_t rcookie)
18064528Spaulsan {
18074528Spaulsan 	panic("No shared region support on x86");
18084528Spaulsan }
18094528Spaulsan 
18105084Sjohnlev #if !defined(__xpv)
18110Sstevel@tonic-gate /*
18120Sstevel@tonic-gate  * Cross call service routine to demap a virtual page on
18130Sstevel@tonic-gate  * the current CPU or flush all mappings in the TLB.
18140Sstevel@tonic-gate  */
18150Sstevel@tonic-gate /*ARGSUSED*/
18160Sstevel@tonic-gate static int
18170Sstevel@tonic-gate hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
18180Sstevel@tonic-gate {
18190Sstevel@tonic-gate 	hat_t	*hat = (hat_t *)a1;
18200Sstevel@tonic-gate 	caddr_t	addr = (caddr_t)a2;
18210Sstevel@tonic-gate 
18220Sstevel@tonic-gate 	/*
18230Sstevel@tonic-gate 	 * If the target hat isn't the kernel and this CPU isn't operating
18240Sstevel@tonic-gate 	 * in the target hat, we can ignore the cross call.
18250Sstevel@tonic-gate 	 */
18260Sstevel@tonic-gate 	if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
18270Sstevel@tonic-gate 		return (0);
18280Sstevel@tonic-gate 
18290Sstevel@tonic-gate 	/*
18300Sstevel@tonic-gate 	 * For a normal address, we just flush one page mapping
18310Sstevel@tonic-gate 	 */
18320Sstevel@tonic-gate 	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
18333446Smrj 		mmu_tlbflush_entry(addr);
18340Sstevel@tonic-gate 		return (0);
18350Sstevel@tonic-gate 	}
18360Sstevel@tonic-gate 
18370Sstevel@tonic-gate 	/*
18380Sstevel@tonic-gate 	 * Otherwise we reload cr3 to effect a complete TLB flush.
18390Sstevel@tonic-gate 	 *
18400Sstevel@tonic-gate 	 * A reload of cr3 on a VLP process also means we must recopy
18410Sstevel@tonic-gate 	 * the pte values from the struct hat.
18420Sstevel@tonic-gate 	 */
18430Sstevel@tonic-gate 	if (hat->hat_flags & HAT_VLP) {
18440Sstevel@tonic-gate #if defined(__amd64)
18450Sstevel@tonic-gate 		x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
18460Sstevel@tonic-gate 
18470Sstevel@tonic-gate 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
18480Sstevel@tonic-gate #elif defined(__i386)
18490Sstevel@tonic-gate 		reload_pae32(hat, CPU);
18500Sstevel@tonic-gate #endif
18510Sstevel@tonic-gate 	}
18520Sstevel@tonic-gate 	reload_cr3();
18530Sstevel@tonic-gate 	return (0);
18540Sstevel@tonic-gate }
18550Sstevel@tonic-gate 
18560Sstevel@tonic-gate /*
18574191Sjosephb  * Flush all TLB entries, including global (i.e. kernel) ones.
18584191Sjosephb  */
18594191Sjosephb static void
18604191Sjosephb flush_all_tlb_entries(void)
18614191Sjosephb {
18624191Sjosephb 	ulong_t cr4 = getcr4();
18634191Sjosephb 
18644191Sjosephb 	if (cr4 & CR4_PGE) {
18654191Sjosephb 		setcr4(cr4 & ~(ulong_t)CR4_PGE);
18664191Sjosephb 		setcr4(cr4);
18674191Sjosephb 
18684191Sjosephb 		/*
18694191Sjosephb 		 * 32 bit PAE also needs to always reload_cr3()
18704191Sjosephb 		 */
18714191Sjosephb 		if (mmu.max_level == 2)
18724191Sjosephb 			reload_cr3();
18734191Sjosephb 	} else {
18744191Sjosephb 		reload_cr3();
18754191Sjosephb 	}
18764191Sjosephb }
18774191Sjosephb 
18784191Sjosephb #define	TLB_CPU_HALTED	(01ul)
18794191Sjosephb #define	TLB_INVAL_ALL	(02ul)
18804191Sjosephb #define	CAS_TLB_INFO(cpu, old, new)	\
18814191Sjosephb 	caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
18824191Sjosephb 
18834191Sjosephb /*
18844191Sjosephb  * Record that a CPU is going idle
18854191Sjosephb  */
18864191Sjosephb void
18874191Sjosephb tlb_going_idle(void)
18884191Sjosephb {
18894191Sjosephb 	atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
18904191Sjosephb }
18914191Sjosephb 
18924191Sjosephb /*
18934191Sjosephb  * Service a delayed TLB flush if coming out of being idle.
18944191Sjosephb  */
18954191Sjosephb void
18964191Sjosephb tlb_service(void)
18974191Sjosephb {
18984191Sjosephb 	ulong_t flags = getflags();
18994191Sjosephb 	ulong_t tlb_info;
19004191Sjosephb 	ulong_t found;
19014191Sjosephb 
19024191Sjosephb 	/*
19034191Sjosephb 	 * Be sure interrupts are off while doing this so that
19044191Sjosephb 	 * higher level interrupts correctly wait for flushes to finish.
19054191Sjosephb 	 */
19064191Sjosephb 	if (flags & PS_IE)
19074191Sjosephb 		flags = intr_clear();
19084191Sjosephb 
19094191Sjosephb 	/*
19104191Sjosephb 	 * We only have to do something if coming out of being idle.
19114191Sjosephb 	 */
19124191Sjosephb 	tlb_info = CPU->cpu_m.mcpu_tlb_info;
19134191Sjosephb 	if (tlb_info & TLB_CPU_HALTED) {
19144191Sjosephb 		ASSERT(CPU->cpu_current_hat == kas.a_hat);
19154191Sjosephb 
19164191Sjosephb 		/*
19174191Sjosephb 		 * Atomic clear and fetch of old state.
19184191Sjosephb 		 */
19194191Sjosephb 		while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
19204191Sjosephb 			ASSERT(found & TLB_CPU_HALTED);
19214191Sjosephb 			tlb_info = found;
19224191Sjosephb 			SMT_PAUSE();
19234191Sjosephb 		}
19244191Sjosephb 		if (tlb_info & TLB_INVAL_ALL)
19254191Sjosephb 			flush_all_tlb_entries();
19264191Sjosephb 	}
19274191Sjosephb 
19284191Sjosephb 	/*
19294191Sjosephb 	 * Restore interrupt enable control bit.
19304191Sjosephb 	 */
19314191Sjosephb 	if (flags & PS_IE)
19324191Sjosephb 		sti();
19334191Sjosephb }
19345084Sjohnlev #endif /* !__xpv */
19354191Sjosephb 
19364191Sjosephb /*
19370Sstevel@tonic-gate  * Internal routine to do cross calls to invalidate a range of pages on
19380Sstevel@tonic-gate  * all CPUs using a given hat.
19390Sstevel@tonic-gate  */
19400Sstevel@tonic-gate void
19413446Smrj hat_tlb_inval(hat_t *hat, uintptr_t va)
19420Sstevel@tonic-gate {
19430Sstevel@tonic-gate 	extern int	flushes_require_xcalls;	/* from mp_startup.c */
19440Sstevel@tonic-gate 	cpuset_t	justme;
19455084Sjohnlev 	cpuset_t	cpus_to_shootdown;
19465084Sjohnlev #ifndef __xpv
19474191Sjosephb 	cpuset_t	check_cpus;
19484191Sjosephb 	cpu_t		*cpup;
19494191Sjosephb 	int		c;
19505084Sjohnlev #endif
19510Sstevel@tonic-gate 
19520Sstevel@tonic-gate 	/*
19530Sstevel@tonic-gate 	 * If the hat is being destroyed, there are no more users, so
19540Sstevel@tonic-gate 	 * demap need not do anything.
19550Sstevel@tonic-gate 	 */
19560Sstevel@tonic-gate 	if (hat->hat_flags & HAT_FREEING)
19570Sstevel@tonic-gate 		return;
19580Sstevel@tonic-gate 
19590Sstevel@tonic-gate 	/*
19600Sstevel@tonic-gate 	 * If demapping from a shared pagetable, we had best demap the
19610Sstevel@tonic-gate 	 * entire set of user TLBs, since we don't know at what
19620Sstevel@tonic-gate 	 * addresses these were shared.
19630Sstevel@tonic-gate 	 */
19640Sstevel@tonic-gate 	if (hat->hat_flags & HAT_SHARED) {
19650Sstevel@tonic-gate 		hat = kas.a_hat;
19660Sstevel@tonic-gate 		va = DEMAP_ALL_ADDR;
19670Sstevel@tonic-gate 	}
19680Sstevel@tonic-gate 
19690Sstevel@tonic-gate 	/*
19700Sstevel@tonic-gate 	 * if not running with multiple CPUs, don't use cross calls
19710Sstevel@tonic-gate 	 */
19720Sstevel@tonic-gate 	if (panicstr || !flushes_require_xcalls) {
19735084Sjohnlev #ifdef __xpv
19745084Sjohnlev 		if (va == DEMAP_ALL_ADDR)
19755084Sjohnlev 			xen_flush_tlb();
19765084Sjohnlev 		else
19775084Sjohnlev 			xen_flush_va((caddr_t)va);
19785084Sjohnlev #else
19790Sstevel@tonic-gate 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
19805084Sjohnlev #endif
19810Sstevel@tonic-gate 		return;
19820Sstevel@tonic-gate 	}
19830Sstevel@tonic-gate 
19840Sstevel@tonic-gate 
19850Sstevel@tonic-gate 	/*
19863446Smrj 	 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
19873446Smrj 	 * Otherwise it's just CPUs currently executing in this hat.
19880Sstevel@tonic-gate 	 */
19890Sstevel@tonic-gate 	kpreempt_disable();
19900Sstevel@tonic-gate 	CPUSET_ONLY(justme, CPU->cpu_id);
19913446Smrj 	if (hat == kas.a_hat)
19923446Smrj 		cpus_to_shootdown = khat_cpuset;
19930Sstevel@tonic-gate 	else
19943446Smrj 		cpus_to_shootdown = hat->hat_cpus;
19953446Smrj 
19965084Sjohnlev #ifndef __xpv
19974191Sjosephb 	/*
19984191Sjosephb 	 * If any CPUs in the set are idle, just request a delayed flush
19994191Sjosephb 	 * and avoid waking them up.
20004191Sjosephb 	 */
20014191Sjosephb 	check_cpus = cpus_to_shootdown;
20024191Sjosephb 	for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
20034191Sjosephb 		ulong_t tlb_info;
20044191Sjosephb 
20054191Sjosephb 		if (!CPU_IN_SET(check_cpus, c))
20064191Sjosephb 			continue;
20074191Sjosephb 		CPUSET_DEL(check_cpus, c);
20084191Sjosephb 		cpup = cpu[c];
20094191Sjosephb 		if (cpup == NULL)
20104191Sjosephb 			continue;
20114191Sjosephb 
20124191Sjosephb 		tlb_info = cpup->cpu_m.mcpu_tlb_info;
20134191Sjosephb 		while (tlb_info == TLB_CPU_HALTED) {
20144191Sjosephb 			(void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
20154381Sjosephb 			    TLB_CPU_HALTED | TLB_INVAL_ALL);
20164191Sjosephb 			SMT_PAUSE();
20174191Sjosephb 			tlb_info = cpup->cpu_m.mcpu_tlb_info;
20184191Sjosephb 		}
20194191Sjosephb 		if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
20204191Sjosephb 			HATSTAT_INC(hs_tlb_inval_delayed);
20214191Sjosephb 			CPUSET_DEL(cpus_to_shootdown, c);
20224191Sjosephb 		}
20234191Sjosephb 	}
20245084Sjohnlev #endif
20254191Sjosephb 
20263446Smrj 	if (CPUSET_ISNULL(cpus_to_shootdown) ||
20273446Smrj 	    CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
20283446Smrj 
20295084Sjohnlev #ifdef __xpv
20305084Sjohnlev 		if (va == DEMAP_ALL_ADDR)
20315084Sjohnlev 			xen_flush_tlb();
20325084Sjohnlev 		else
20335084Sjohnlev 			xen_flush_va((caddr_t)va);
20345084Sjohnlev #else
20353446Smrj 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
20365084Sjohnlev #endif
20373446Smrj 
20383446Smrj 	} else {
20393446Smrj 
20403446Smrj 		CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
20415084Sjohnlev #ifdef __xpv
20425084Sjohnlev 		if (va == DEMAP_ALL_ADDR)
20435084Sjohnlev 			xen_gflush_tlb(cpus_to_shootdown);
20445084Sjohnlev 		else
20455084Sjohnlev 			xen_gflush_va((caddr_t)va, cpus_to_shootdown);
20465084Sjohnlev #else
20473446Smrj 		xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL, X_CALL_HIPRI,
20483446Smrj 		    cpus_to_shootdown, hati_demap_func);
20495084Sjohnlev #endif
20503446Smrj 
20513446Smrj 	}
20520Sstevel@tonic-gate 	kpreempt_enable();
20530Sstevel@tonic-gate }
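/*
 * Usage sketch (not compiled in): callers pass either a single page
 * aligned address or DEMAP_ALL_ADDR to flush the entire (non-global)
 * TLB context for the hat.
 */
#if 0
	hat_tlb_inval(hat, vaddr);		/* one page */
	hat_tlb_inval(hat, DEMAP_ALL_ADDR);	/* everything */
#endif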
20540Sstevel@tonic-gate 
20550Sstevel@tonic-gate /*
20560Sstevel@tonic-gate  * Interior routine for HAT_UNLOADs from hat_unload_callback(),
20570Sstevel@tonic-gate  * hat_kmap_unload(), or from hat_steal() code.  This routine doesn't
20580Sstevel@tonic-gate  * handle releasing of the htables.
20590Sstevel@tonic-gate  */
20600Sstevel@tonic-gate void
20610Sstevel@tonic-gate hat_pte_unmap(
20620Sstevel@tonic-gate 	htable_t	*ht,
20630Sstevel@tonic-gate 	uint_t		entry,
20640Sstevel@tonic-gate 	uint_t		flags,
20650Sstevel@tonic-gate 	x86pte_t	old_pte,
20660Sstevel@tonic-gate 	void		*pte_ptr)
20670Sstevel@tonic-gate {
20680Sstevel@tonic-gate 	hat_t		*hat = ht->ht_hat;
20690Sstevel@tonic-gate 	hment_t		*hm = NULL;
20700Sstevel@tonic-gate 	page_t		*pp = NULL;
20710Sstevel@tonic-gate 	level_t		l = ht->ht_level;
20720Sstevel@tonic-gate 	pfn_t		pfn;
20730Sstevel@tonic-gate 
20740Sstevel@tonic-gate 	/*
20750Sstevel@tonic-gate 	 * We always track the locking counts, even if nothing is unmapped
20760Sstevel@tonic-gate 	 */
20770Sstevel@tonic-gate 	if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
20780Sstevel@tonic-gate 		ASSERT(ht->ht_lock_cnt > 0);
20790Sstevel@tonic-gate 		HTABLE_LOCK_DEC(ht);
20800Sstevel@tonic-gate 	}
20810Sstevel@tonic-gate 
20820Sstevel@tonic-gate 	/*
20830Sstevel@tonic-gate 	 * Figure out which page's mapping list lock to acquire using the PFN
20840Sstevel@tonic-gate 	 * passed in the "old" PTE. We then attempt to invalidate the PTE.
20850Sstevel@tonic-gate 	 * If another thread, probably a hat_pageunload, has asynchronously
20860Sstevel@tonic-gate 	 * unmapped/remapped this address we'll loop here.
20870Sstevel@tonic-gate 	 */
20880Sstevel@tonic-gate 	ASSERT(ht->ht_busy > 0);
20890Sstevel@tonic-gate 	while (PTE_ISVALID(old_pte)) {
20900Sstevel@tonic-gate 		pfn = PTE2PFN(old_pte, l);
20913446Smrj 		if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
20920Sstevel@tonic-gate 			pp = NULL;
20930Sstevel@tonic-gate 		} else {
20945084Sjohnlev #ifdef __xpv
20955084Sjohnlev 			if (pfn == PFN_INVALID)
20965084Sjohnlev 				panic("Invalid PFN, but not PT_NOCONSIST");
20975084Sjohnlev #endif
20980Sstevel@tonic-gate 			pp = page_numtopp_nolock(pfn);
209947Sjosephb 			if (pp == NULL) {
210047Sjosephb 				panic("no page_t, not NOCONSIST: old_pte="
210147Sjosephb 				    FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
210247Sjosephb 				    old_pte, (uintptr_t)ht, entry,
210347Sjosephb 				    (uintptr_t)pte_ptr);
210447Sjosephb 			}
21050Sstevel@tonic-gate 			x86_hm_enter(pp);
21060Sstevel@tonic-gate 		}
210747Sjosephb 
210847Sjosephb 		/*
210947Sjosephb 		 * If freeing the address space, check only that the PTE
211047Sjosephb 		 * hasn't changed; since the mappings are no longer in use
211147Sjosephb 		 * by any thread, invalidation is unnecessary.
211247Sjosephb 		 * If not freeing, do a full invalidate.
21135084Sjohnlev 		 *
21145084Sjohnlev 		 * On the hypervisor we must always remove mappings, as a
21155084Sjohnlev 		 * writable mapping left behind could cause a page table
21165084Sjohnlev 		 * allocation to fail.
211747Sjosephb 		 */
21185084Sjohnlev #if !defined(__xpv)
211947Sjosephb 		if (hat->hat_flags & HAT_FREEING)
212047Sjosephb 			old_pte = x86pte_get(ht, entry);
212147Sjosephb 		else
21225084Sjohnlev #endif
21233446Smrj 			old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr);
21240Sstevel@tonic-gate 
21250Sstevel@tonic-gate 		/*
21260Sstevel@tonic-gate 		 * If the page hadn't changed we've unmapped it and can proceed
21270Sstevel@tonic-gate 		 */
21280Sstevel@tonic-gate 		if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
21290Sstevel@tonic-gate 			break;
21300Sstevel@tonic-gate 
21310Sstevel@tonic-gate 		/*
21320Sstevel@tonic-gate 		 * Otherwise, we'll have to retry with the current old_pte.
21330Sstevel@tonic-gate 		 * Drop the hment lock, since the pfn may have changed.
21340Sstevel@tonic-gate 		 */
21350Sstevel@tonic-gate 		if (pp != NULL) {
21360Sstevel@tonic-gate 			x86_hm_exit(pp);
21370Sstevel@tonic-gate 			pp = NULL;
21380Sstevel@tonic-gate 		} else {
21393446Smrj 			ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
21400Sstevel@tonic-gate 		}
21410Sstevel@tonic-gate 	}
21420Sstevel@tonic-gate 
21430Sstevel@tonic-gate 	/*
21440Sstevel@tonic-gate 	 * If the old mapping wasn't valid, there's nothing more to do
21450Sstevel@tonic-gate 	 */
21460Sstevel@tonic-gate 	if (!PTE_ISVALID(old_pte)) {
21470Sstevel@tonic-gate 		if (pp != NULL)
21480Sstevel@tonic-gate 			x86_hm_exit(pp);
21490Sstevel@tonic-gate 		return;
21500Sstevel@tonic-gate 	}
21510Sstevel@tonic-gate 
21520Sstevel@tonic-gate 	/*
21530Sstevel@tonic-gate 	 * Take care of syncing any MOD/REF bits and removing the hment.
21540Sstevel@tonic-gate 	 */
21550Sstevel@tonic-gate 	if (pp != NULL) {
21560Sstevel@tonic-gate 		if (!(flags & HAT_UNLOAD_NOSYNC))
21570Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, old_pte, l);
21580Sstevel@tonic-gate 		hm = hment_remove(pp, ht, entry);
21590Sstevel@tonic-gate 		x86_hm_exit(pp);
21600Sstevel@tonic-gate 		if (hm != NULL)
21610Sstevel@tonic-gate 			hment_free(hm);
21620Sstevel@tonic-gate 	}
21630Sstevel@tonic-gate 
21640Sstevel@tonic-gate 	/*
21650Sstevel@tonic-gate 	 * Handle bookkeeping in the htable and hat
21660Sstevel@tonic-gate 	 */
21670Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
21680Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
21690Sstevel@tonic-gate 	PGCNT_DEC(hat, l);
21700Sstevel@tonic-gate }
21710Sstevel@tonic-gate 
21720Sstevel@tonic-gate /*
21730Sstevel@tonic-gate  * very cheap unload implementation to special case some kernel addresses
21740Sstevel@tonic-gate  */
21750Sstevel@tonic-gate static void
21760Sstevel@tonic-gate hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
21770Sstevel@tonic-gate {
21780Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
21790Sstevel@tonic-gate 	uintptr_t	eva = va + len;
21803446Smrj 	pgcnt_t		pg_index;
21810Sstevel@tonic-gate 	htable_t	*ht;
21820Sstevel@tonic-gate 	uint_t		entry;
21833446Smrj 	x86pte_t	*pte_ptr;
21840Sstevel@tonic-gate 	x86pte_t	old_pte;
21850Sstevel@tonic-gate 
21860Sstevel@tonic-gate 	for (; va < eva; va += MMU_PAGESIZE) {
21870Sstevel@tonic-gate 		/*
21880Sstevel@tonic-gate 		 * Get the PTE
21890Sstevel@tonic-gate 		 */
21903446Smrj 		pg_index = mmu_btop(va - mmu.kmap_addr);
21913446Smrj 		pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
21923446Smrj 		old_pte = GET_PTE(pte_ptr);
21930Sstevel@tonic-gate 
21940Sstevel@tonic-gate 		/*
21950Sstevel@tonic-gate 		 * get the htable / entry
21960Sstevel@tonic-gate 		 */
21970Sstevel@tonic-gate 		ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
21980Sstevel@tonic-gate 		    >> LEVEL_SHIFT(1)];
21990Sstevel@tonic-gate 		entry = htable_va2entry(va, ht);
22000Sstevel@tonic-gate 
22010Sstevel@tonic-gate 		/*
22020Sstevel@tonic-gate 		 * use mostly common code to unmap it.
22030Sstevel@tonic-gate 		 */
22040Sstevel@tonic-gate 		hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr);
22050Sstevel@tonic-gate 	}
22060Sstevel@tonic-gate }
22070Sstevel@tonic-gate 
22080Sstevel@tonic-gate 
22090Sstevel@tonic-gate /*
22100Sstevel@tonic-gate  * unload a range of virtual address space (no callback)
22110Sstevel@tonic-gate  */
22120Sstevel@tonic-gate void
22130Sstevel@tonic-gate hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
22140Sstevel@tonic-gate {
22150Sstevel@tonic-gate 	uintptr_t va = (uintptr_t)addr;
22163446Smrj 
22175084Sjohnlev 	XPV_DISALLOW_MIGRATE();
22183446Smrj 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
22190Sstevel@tonic-gate 
22200Sstevel@tonic-gate 	/*
22210Sstevel@tonic-gate 	 * special case for performance.
22220Sstevel@tonic-gate 	 */
22230Sstevel@tonic-gate 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
22240Sstevel@tonic-gate 		ASSERT(hat == kas.a_hat);
22250Sstevel@tonic-gate 		hat_kmap_unload(addr, len, flags);
22263446Smrj 	} else {
22273446Smrj 		hat_unload_callback(hat, addr, len, flags, NULL);
22280Sstevel@tonic-gate 	}
22295084Sjohnlev 	XPV_ALLOW_MIGRATE();
22300Sstevel@tonic-gate }
22310Sstevel@tonic-gate 
22320Sstevel@tonic-gate /*
22330Sstevel@tonic-gate  * Do the callbacks for ranges being unloaded.
22340Sstevel@tonic-gate  */
22350Sstevel@tonic-gate typedef struct range_info {
22360Sstevel@tonic-gate 	uintptr_t	rng_va;
22370Sstevel@tonic-gate 	ulong_t		rng_cnt;
22380Sstevel@tonic-gate 	level_t		rng_level;
22390Sstevel@tonic-gate } range_info_t;
22400Sstevel@tonic-gate 
22410Sstevel@tonic-gate static void
22420Sstevel@tonic-gate handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
22430Sstevel@tonic-gate {
22440Sstevel@tonic-gate 	/*
22450Sstevel@tonic-gate 	 * do callbacks to upper level VM system
22460Sstevel@tonic-gate 	 */
22470Sstevel@tonic-gate 	while (cb != NULL && cnt > 0) {
22480Sstevel@tonic-gate 		--cnt;
22490Sstevel@tonic-gate 		cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
22500Sstevel@tonic-gate 		cb->hcb_end_addr = cb->hcb_start_addr;
22510Sstevel@tonic-gate 		cb->hcb_end_addr +=
22520Sstevel@tonic-gate 		    range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
22530Sstevel@tonic-gate 		cb->hcb_function(cb);
22540Sstevel@tonic-gate 	}
22550Sstevel@tonic-gate }
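/*
 * Worked example (not compiled in) of the range arithmetic above,
 * assuming 4K base pages: an entry with rng_level == 0 and rng_cnt == 8
 * yields a callback covering 8 << 12 == 32K starting at rng_va.
 */
#if 0
	range_info_t r = { va, 8, 0 };		/* 8 pages at level 0 */
	size_t cb_len = r.rng_cnt << LEVEL_SHIFT(r.rng_level);	/* 32K */
#endif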
22560Sstevel@tonic-gate 
22570Sstevel@tonic-gate /*
22580Sstevel@tonic-gate  * Unload a given range of addresses (has optional callback)
22590Sstevel@tonic-gate  *
22600Sstevel@tonic-gate  * Flags:
22610Sstevel@tonic-gate  * define	HAT_UNLOAD		0x00
22620Sstevel@tonic-gate  * define	HAT_UNLOAD_NOSYNC	0x02
22630Sstevel@tonic-gate  * define	HAT_UNLOAD_UNLOCK	0x04
22640Sstevel@tonic-gate  * define	HAT_UNLOAD_OTHER	0x08 - not used
22650Sstevel@tonic-gate  * define	HAT_UNLOAD_UNMAP	0x10 - same as HAT_UNLOAD
22660Sstevel@tonic-gate  */
22670Sstevel@tonic-gate #define	MAX_UNLOAD_CNT (8)
22680Sstevel@tonic-gate void
22690Sstevel@tonic-gate hat_unload_callback(
22700Sstevel@tonic-gate 	hat_t		*hat,
22710Sstevel@tonic-gate 	caddr_t		addr,
22720Sstevel@tonic-gate 	size_t		len,
22730Sstevel@tonic-gate 	uint_t		flags,
22740Sstevel@tonic-gate 	hat_callback_t	*cb)
22750Sstevel@tonic-gate {
22760Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
22770Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
22780Sstevel@tonic-gate 	htable_t	*ht = NULL;
22790Sstevel@tonic-gate 	uint_t		entry;
228047Sjosephb 	uintptr_t	contig_va = (uintptr_t)-1L;
22810Sstevel@tonic-gate 	range_info_t	r[MAX_UNLOAD_CNT];
22820Sstevel@tonic-gate 	uint_t		r_cnt = 0;
22830Sstevel@tonic-gate 	x86pte_t	old_pte;
22840Sstevel@tonic-gate 
22855084Sjohnlev 	XPV_DISALLOW_MIGRATE();
22863446Smrj 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
22870Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
22880Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
22890Sstevel@tonic-gate 
22903446Smrj 	/*
22913446Smrj 	 * Special case a single page being unloaded for speed. This happens
22923446Smrj 	 * quite frequently, COW faults after a fork() for example.
22933446Smrj 	 */
22943446Smrj 	if (cb == NULL && len == MMU_PAGESIZE) {
22953446Smrj 		ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
22963446Smrj 		if (ht != NULL) {
22973446Smrj 			if (PTE_ISVALID(old_pte))
22983446Smrj 				hat_pte_unmap(ht, entry, flags, old_pte, NULL);
22993446Smrj 			htable_release(ht);
23003446Smrj 		}
23015084Sjohnlev 		XPV_ALLOW_MIGRATE();
23023446Smrj 		return;
23033446Smrj 	}
23043446Smrj 
23050Sstevel@tonic-gate 	while (vaddr < eaddr) {
23060Sstevel@tonic-gate 		old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
23070Sstevel@tonic-gate 		if (ht == NULL)
23080Sstevel@tonic-gate 			break;
23090Sstevel@tonic-gate 
23100Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
23110Sstevel@tonic-gate 
23120Sstevel@tonic-gate 		if (vaddr < (uintptr_t)addr)
23130Sstevel@tonic-gate 			panic("hat_unload_callback(): unmap inside large page");
23140Sstevel@tonic-gate 
23150Sstevel@tonic-gate 		/*
23160Sstevel@tonic-gate 		 * We'll do the callbacks for contiguous ranges
23170Sstevel@tonic-gate 		 */
231847Sjosephb 		if (vaddr != contig_va ||
23190Sstevel@tonic-gate 		    (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
23200Sstevel@tonic-gate 			if (r_cnt == MAX_UNLOAD_CNT) {
23210Sstevel@tonic-gate 				handle_ranges(cb, r_cnt, r);
23220Sstevel@tonic-gate 				r_cnt = 0;
23230Sstevel@tonic-gate 			}
23240Sstevel@tonic-gate 			r[r_cnt].rng_va = vaddr;
23250Sstevel@tonic-gate 			r[r_cnt].rng_cnt = 0;
23260Sstevel@tonic-gate 			r[r_cnt].rng_level = ht->ht_level;
23270Sstevel@tonic-gate 			++r_cnt;
23280Sstevel@tonic-gate 		}
23290Sstevel@tonic-gate 
23300Sstevel@tonic-gate 		/*
23310Sstevel@tonic-gate 		 * Unload one mapping from the page tables.
23320Sstevel@tonic-gate 		 */
23330Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
23340Sstevel@tonic-gate 		hat_pte_unmap(ht, entry, flags, old_pte, NULL);
23350Sstevel@tonic-gate 		ASSERT(ht->ht_level <= mmu.max_page_level);
23360Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
233747Sjosephb 		contig_va = vaddr;
23380Sstevel@tonic-gate 		++r[r_cnt - 1].rng_cnt;
23390Sstevel@tonic-gate 	}
23400Sstevel@tonic-gate 	if (ht)
23410Sstevel@tonic-gate 		htable_release(ht);
23420Sstevel@tonic-gate 
23430Sstevel@tonic-gate 	/*
23440Sstevel@tonic-gate 	 * handle last range for callbacks
23450Sstevel@tonic-gate 	 */
23460Sstevel@tonic-gate 	if (r_cnt > 0)
23470Sstevel@tonic-gate 		handle_ranges(cb, r_cnt, r);
23485084Sjohnlev 	XPV_ALLOW_MIGRATE();
23490Sstevel@tonic-gate }
23500Sstevel@tonic-gate 
23510Sstevel@tonic-gate /*
23520Sstevel@tonic-gate  * synchronize mapping with software data structures
23530Sstevel@tonic-gate  *
23540Sstevel@tonic-gate  * This interface is currently only used by the working set monitor
23550Sstevel@tonic-gate  * driver.
23560Sstevel@tonic-gate  */
23570Sstevel@tonic-gate /*ARGSUSED*/
23580Sstevel@tonic-gate void
23590Sstevel@tonic-gate hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
23600Sstevel@tonic-gate {
23610Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
23620Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
23630Sstevel@tonic-gate 	htable_t	*ht = NULL;
23640Sstevel@tonic-gate 	uint_t		entry;
23650Sstevel@tonic-gate 	x86pte_t	pte;
23660Sstevel@tonic-gate 	x86pte_t	save_pte;
23670Sstevel@tonic-gate 	x86pte_t	new;
23680Sstevel@tonic-gate 	page_t		*pp;
23690Sstevel@tonic-gate 
23700Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(vaddr));
23710Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
23720Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
23733446Smrj 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
23740Sstevel@tonic-gate 
23755084Sjohnlev 	XPV_DISALLOW_MIGRATE();
23760Sstevel@tonic-gate 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
23770Sstevel@tonic-gate try_again:
23780Sstevel@tonic-gate 		pte = htable_walk(hat, &ht, &vaddr, eaddr);
23790Sstevel@tonic-gate 		if (ht == NULL)
23800Sstevel@tonic-gate 			break;
23810Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
23820Sstevel@tonic-gate 
23833446Smrj 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
23840Sstevel@tonic-gate 		    PTE_GET(pte, PT_REF | PT_MOD) == 0)
23850Sstevel@tonic-gate 			continue;
23860Sstevel@tonic-gate 
23870Sstevel@tonic-gate 		/*
23880Sstevel@tonic-gate 		 * We need to acquire the mapping list lock to protect
23890Sstevel@tonic-gate 		 * against hat_pageunload(), hat_unload(), etc.
23900Sstevel@tonic-gate 		 */
23910Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
23920Sstevel@tonic-gate 		if (pp == NULL)
23930Sstevel@tonic-gate 			break;
23940Sstevel@tonic-gate 		x86_hm_enter(pp);
23950Sstevel@tonic-gate 		save_pte = pte;
23960Sstevel@tonic-gate 		pte = x86pte_get(ht, entry);
23970Sstevel@tonic-gate 		if (pte != save_pte) {
23980Sstevel@tonic-gate 			x86_hm_exit(pp);
23990Sstevel@tonic-gate 			goto try_again;
24000Sstevel@tonic-gate 		}
24013446Smrj 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
24020Sstevel@tonic-gate 		    PTE_GET(pte, PT_REF | PT_MOD) == 0) {
24030Sstevel@tonic-gate 			x86_hm_exit(pp);
24040Sstevel@tonic-gate 			continue;
24050Sstevel@tonic-gate 		}
24060Sstevel@tonic-gate 
24070Sstevel@tonic-gate 		/*
24080Sstevel@tonic-gate 		 * Need to clear ref or mod bits. We may compete with
24090Sstevel@tonic-gate 		 * hardware updating the R/M bits and have to try again.
24100Sstevel@tonic-gate 		 */
24110Sstevel@tonic-gate 		if (flags == HAT_SYNC_ZERORM) {
24120Sstevel@tonic-gate 			new = pte;
24130Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD);
24140Sstevel@tonic-gate 			pte = hati_update_pte(ht, entry, pte, new);
24150Sstevel@tonic-gate 			if (pte != 0) {
24160Sstevel@tonic-gate 				x86_hm_exit(pp);
24170Sstevel@tonic-gate 				goto try_again;
24180Sstevel@tonic-gate 			}
24190Sstevel@tonic-gate 		} else {
24200Sstevel@tonic-gate 			/*
24210Sstevel@tonic-gate 			 * sync the PTE to the page_t
24220Sstevel@tonic-gate 			 */
24230Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
24240Sstevel@tonic-gate 		}
24250Sstevel@tonic-gate 		x86_hm_exit(pp);
24260Sstevel@tonic-gate 	}
24270Sstevel@tonic-gate 	if (ht)
24280Sstevel@tonic-gate 		htable_release(ht);
24295084Sjohnlev 	XPV_ALLOW_MIGRATE();
24300Sstevel@tonic-gate }
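/*
 * Usage sketch (not compiled in): a working set monitor clears the
 * ref/mod bits for a range, lets the workload run, then samples the
 * bits again on the pages of interest.
 */
#if 0
	hat_sync(as->a_hat, addr, len, HAT_SYNC_ZERORM);
#endif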
24310Sstevel@tonic-gate 
24320Sstevel@tonic-gate /*
24330Sstevel@tonic-gate  * void	hat_map(hat, addr, len, flags)
24340Sstevel@tonic-gate  */
24350Sstevel@tonic-gate /*ARGSUSED*/
24360Sstevel@tonic-gate void
24370Sstevel@tonic-gate hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
24380Sstevel@tonic-gate {
24390Sstevel@tonic-gate 	/* does nothing */
24400Sstevel@tonic-gate }
24410Sstevel@tonic-gate 
24420Sstevel@tonic-gate /*
24430Sstevel@tonic-gate  * uint_t hat_getattr(hat, addr, *attr)
24440Sstevel@tonic-gate  *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
24450Sstevel@tonic-gate  *	mapping and *attr is valid, nonzero if there was no mapping and
24460Sstevel@tonic-gate  *	*attr is not valid.
24470Sstevel@tonic-gate  */
24480Sstevel@tonic-gate uint_t
24490Sstevel@tonic-gate hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
24500Sstevel@tonic-gate {
24510Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
24520Sstevel@tonic-gate 	htable_t	*ht = NULL;
24530Sstevel@tonic-gate 	x86pte_t	pte;
24540Sstevel@tonic-gate 
24553446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
24560Sstevel@tonic-gate 
24570Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
24580Sstevel@tonic-gate 		return ((uint_t)-1);
24590Sstevel@tonic-gate 
24603446Smrj 	ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
24610Sstevel@tonic-gate 	if (ht == NULL)
24620Sstevel@tonic-gate 		return ((uint_t)-1);
24630Sstevel@tonic-gate 
24640Sstevel@tonic-gate 	if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
24650Sstevel@tonic-gate 		htable_release(ht);
24660Sstevel@tonic-gate 		return ((uint_t)-1);
24670Sstevel@tonic-gate 	}
24680Sstevel@tonic-gate 
24690Sstevel@tonic-gate 	*attr = PROT_READ;
24700Sstevel@tonic-gate 	if (PTE_GET(pte, PT_WRITABLE))
24710Sstevel@tonic-gate 		*attr |= PROT_WRITE;
24720Sstevel@tonic-gate 	if (PTE_GET(pte, PT_USER))
24730Sstevel@tonic-gate 		*attr |= PROT_USER;
24740Sstevel@tonic-gate 	if (!PTE_GET(pte, mmu.pt_nx))
24750Sstevel@tonic-gate 		*attr |= PROT_EXEC;
24763446Smrj 	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
24770Sstevel@tonic-gate 		*attr |= HAT_NOSYNC;
24780Sstevel@tonic-gate 	htable_release(ht);
24790Sstevel@tonic-gate 	return (0);
24800Sstevel@tonic-gate }
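/*
 * Usage sketch (not compiled in): test whether an address is currently
 * mapped writable, using the 0-on-success convention described above.
 */
#if 0
	uint_t attr;

	if (hat_getattr(hat, addr, &attr) == 0 && (attr & PROT_WRITE))
		/* addr is mapped and writable */;
#endif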
24810Sstevel@tonic-gate 
24820Sstevel@tonic-gate /*
24830Sstevel@tonic-gate  * hat_updateattr() applies the given attribute change to an existing mapping
24840Sstevel@tonic-gate  */
24850Sstevel@tonic-gate #define	HAT_LOAD_ATTR		1
24860Sstevel@tonic-gate #define	HAT_SET_ATTR		2
24870Sstevel@tonic-gate #define	HAT_CLR_ATTR		3
24880Sstevel@tonic-gate 
24890Sstevel@tonic-gate static void
24900Sstevel@tonic-gate hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
24910Sstevel@tonic-gate {
24920Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
24930Sstevel@tonic-gate 	uintptr_t	eaddr = (uintptr_t)addr + len;
24940Sstevel@tonic-gate 	htable_t	*ht = NULL;
24950Sstevel@tonic-gate 	uint_t		entry;
24960Sstevel@tonic-gate 	x86pte_t	oldpte, newpte;
24970Sstevel@tonic-gate 	page_t		*pp;
24980Sstevel@tonic-gate 
24995084Sjohnlev 	XPV_DISALLOW_MIGRATE();
25000Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
25010Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
25020Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
25030Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
25040Sstevel@tonic-gate 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
25050Sstevel@tonic-gate try_again:
25060Sstevel@tonic-gate 		oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
25070Sstevel@tonic-gate 		if (ht == NULL)
25080Sstevel@tonic-gate 			break;
25093446Smrj 		if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
25100Sstevel@tonic-gate 			continue;
25110Sstevel@tonic-gate 
25120Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
25130Sstevel@tonic-gate 		if (pp == NULL)
25140Sstevel@tonic-gate 			continue;
25150Sstevel@tonic-gate 		x86_hm_enter(pp);
25160Sstevel@tonic-gate 
25170Sstevel@tonic-gate 		newpte = oldpte;
25180Sstevel@tonic-gate 		/*
25190Sstevel@tonic-gate 		 * We found a page table entry in the desired range,
25200Sstevel@tonic-gate 		 * figure out the new attributes.
25210Sstevel@tonic-gate 		 */
25220Sstevel@tonic-gate 		if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
25230Sstevel@tonic-gate 			if ((attr & PROT_WRITE) &&
25240Sstevel@tonic-gate 			    !PTE_GET(oldpte, PT_WRITABLE))
25250Sstevel@tonic-gate 				newpte |= PT_WRITABLE;
25260Sstevel@tonic-gate 
25273446Smrj 			if ((attr & HAT_NOSYNC) &&
25283446Smrj 			    PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
25290Sstevel@tonic-gate 				newpte |= PT_NOSYNC;
25300Sstevel@tonic-gate 
25310Sstevel@tonic-gate 			if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
25320Sstevel@tonic-gate 				newpte &= ~mmu.pt_nx;
25330Sstevel@tonic-gate 		}
25340Sstevel@tonic-gate 
25350Sstevel@tonic-gate 		if (what == HAT_LOAD_ATTR) {
25360Sstevel@tonic-gate 			if (!(attr & PROT_WRITE) &&
25370Sstevel@tonic-gate 			    PTE_GET(oldpte, PT_WRITABLE))
25380Sstevel@tonic-gate 				newpte &= ~PT_WRITABLE;
25390Sstevel@tonic-gate 
25403446Smrj 			if (!(attr & HAT_NOSYNC) &&
25413446Smrj 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
25423446Smrj 				newpte &= ~PT_SOFTWARE;
25430Sstevel@tonic-gate 
25440Sstevel@tonic-gate 			if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
25450Sstevel@tonic-gate 				newpte |= mmu.pt_nx;
25460Sstevel@tonic-gate 		}
25470Sstevel@tonic-gate 
25480Sstevel@tonic-gate 		if (what == HAT_CLR_ATTR) {
25490Sstevel@tonic-gate 			if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
25500Sstevel@tonic-gate 				newpte &= ~PT_WRITABLE;
25510Sstevel@tonic-gate 
25523446Smrj 			if ((attr & HAT_NOSYNC) &&
25533446Smrj 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
25543446Smrj 				newpte &= ~PT_SOFTWARE;
25550Sstevel@tonic-gate 
25560Sstevel@tonic-gate 			if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
25570Sstevel@tonic-gate 				newpte |= mmu.pt_nx;
25580Sstevel@tonic-gate 		}
25590Sstevel@tonic-gate 
25600Sstevel@tonic-gate 		/*
25613446Smrj 		 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
25623446Smrj 		 * x86pte_set() depends on this.
25633446Smrj 		 */
25643446Smrj 		if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
25653446Smrj 			newpte |= PT_REF | PT_MOD;
25663446Smrj 
25673446Smrj 		/*
25680Sstevel@tonic-gate 		 * What about PROT_READ or others? This code only handles:
25690Sstevel@tonic-gate 		 * EXEC, WRITE, NOSYNC
25700Sstevel@tonic-gate 		 */
25710Sstevel@tonic-gate 
25720Sstevel@tonic-gate 		/*
25730Sstevel@tonic-gate 		 * If new PTE really changed, update the table.
25740Sstevel@tonic-gate 		 */
25750Sstevel@tonic-gate 		if (newpte != oldpte) {
25760Sstevel@tonic-gate 			entry = htable_va2entry(vaddr, ht);
25770Sstevel@tonic-gate 			oldpte = hati_update_pte(ht, entry, oldpte, newpte);
25780Sstevel@tonic-gate 			if (oldpte != 0) {
25790Sstevel@tonic-gate 				x86_hm_exit(pp);
25800Sstevel@tonic-gate 				goto try_again;
25810Sstevel@tonic-gate 			}
25820Sstevel@tonic-gate 		}
25830Sstevel@tonic-gate 		x86_hm_exit(pp);
25840Sstevel@tonic-gate 	}
25850Sstevel@tonic-gate 	if (ht)
25860Sstevel@tonic-gate 		htable_release(ht);
25875084Sjohnlev 	XPV_ALLOW_MIGRATE();
25880Sstevel@tonic-gate }
25890Sstevel@tonic-gate 
25900Sstevel@tonic-gate /*
25910Sstevel@tonic-gate  * Various wrappers for hat_updateattr()
25920Sstevel@tonic-gate  */
25930Sstevel@tonic-gate void
25940Sstevel@tonic-gate hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
25950Sstevel@tonic-gate {
25963446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
25970Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
25980Sstevel@tonic-gate }
25990Sstevel@tonic-gate 
26000Sstevel@tonic-gate void
26010Sstevel@tonic-gate hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
26020Sstevel@tonic-gate {
26033446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
26040Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
26050Sstevel@tonic-gate }
26060Sstevel@tonic-gate 
26070Sstevel@tonic-gate void
26080Sstevel@tonic-gate hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
26090Sstevel@tonic-gate {
26103446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
26110Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
26120Sstevel@tonic-gate }
26130Sstevel@tonic-gate 
26140Sstevel@tonic-gate void
26150Sstevel@tonic-gate hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
26160Sstevel@tonic-gate {
26173446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
26180Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
26190Sstevel@tonic-gate }
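/*
 * Hypothetical uses of the wrappers above (a sketch, not code from this
 * file): hat_setattr() adds attributes, hat_clrattr() removes them, and
 * hat_chgattr()/hat_chgprot() replace them outright via HAT_LOAD_ATTR.
 *
 *	hat_clrattr(hat, addr, len, PROT_WRITE);	(make read-only)
 *	hat_setattr(hat, addr, len, PROT_WRITE);	(writable again)
 *	hat_chgprot(hat, addr, len, PROT_READ | PROT_EXEC);
 */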
26200Sstevel@tonic-gate 
26210Sstevel@tonic-gate /*
26220Sstevel@tonic-gate  * size_t hat_getpagesize(hat, addr)
26230Sstevel@tonic-gate  *	returns pagesize in bytes for <hat, addr>. returns -1 if there is
26240Sstevel@tonic-gate  *	no mapping. This is an advisory call.
26250Sstevel@tonic-gate  */
26260Sstevel@tonic-gate ssize_t
26270Sstevel@tonic-gate hat_getpagesize(hat_t *hat, caddr_t addr)
26280Sstevel@tonic-gate {
26290Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
26300Sstevel@tonic-gate 	htable_t	*ht;
26310Sstevel@tonic-gate 	size_t		pagesize;
26320Sstevel@tonic-gate 
26333446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
26340Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
26350Sstevel@tonic-gate 		return (-1);
26360Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, NULL);
26370Sstevel@tonic-gate 	if (ht == NULL)
26380Sstevel@tonic-gate 		return (-1);
26390Sstevel@tonic-gate 	pagesize = LEVEL_SIZE(ht->ht_level);
26400Sstevel@tonic-gate 	htable_release(ht);
26410Sstevel@tonic-gate 	return (pagesize);
26420Sstevel@tonic-gate }
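/*
 * Illustrative only: a caller can use the returned size to detect a large
 * page, e.g. LEVEL_SIZE(1) is 512 * 4K = 2MB with PAE/amd64 pagetables.
 *
 *	if (hat_getpagesize(hat, addr) > MMU_PAGESIZE) {
 *		... addr is mapped by a large page ...
 *	}
 */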
26430Sstevel@tonic-gate 
26440Sstevel@tonic-gate 
26450Sstevel@tonic-gate 
26460Sstevel@tonic-gate /*
26470Sstevel@tonic-gate  * pfn_t hat_getpfnum(hat, addr)
26480Sstevel@tonic-gate  *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
26490Sstevel@tonic-gate  */
26500Sstevel@tonic-gate pfn_t
26510Sstevel@tonic-gate hat_getpfnum(hat_t *hat, caddr_t addr)
26520Sstevel@tonic-gate {
26530Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
26540Sstevel@tonic-gate 	htable_t	*ht;
26550Sstevel@tonic-gate 	uint_t		entry;
26560Sstevel@tonic-gate 	pfn_t		pfn = PFN_INVALID;
26570Sstevel@tonic-gate 
26583446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
26590Sstevel@tonic-gate 	if (khat_running == 0)
26603446Smrj 		return (PFN_INVALID);
26610Sstevel@tonic-gate 
26620Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
26630Sstevel@tonic-gate 		return (PFN_INVALID);
26640Sstevel@tonic-gate 
26655084Sjohnlev 	XPV_DISALLOW_MIGRATE();
26660Sstevel@tonic-gate 	/*
26670Sstevel@tonic-gate 	 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
26680Sstevel@tonic-gate 	 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
26690Sstevel@tonic-gate 	 * this up.
26700Sstevel@tonic-gate 	 */
26710Sstevel@tonic-gate 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
26720Sstevel@tonic-gate 		x86pte_t pte;
26733446Smrj 		pgcnt_t pg_index;
26743446Smrj 
26753446Smrj 		pg_index = mmu_btop(vaddr - mmu.kmap_addr);
26763446Smrj 		pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
26775084Sjohnlev 		if (PTE_ISVALID(pte))
26785084Sjohnlev 			/*LINTED [use of constant 0 causes a lint warning] */
26795084Sjohnlev 			pfn = PTE2PFN(pte, 0);
26805084Sjohnlev 		XPV_ALLOW_MIGRATE();
26815084Sjohnlev 		return (pfn);
26820Sstevel@tonic-gate 	}
26830Sstevel@tonic-gate 
26840Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, &entry);
26855084Sjohnlev 	if (ht == NULL) {
26865084Sjohnlev 		XPV_ALLOW_MIGRATE();
26870Sstevel@tonic-gate 		return (PFN_INVALID);
26885084Sjohnlev 	}
26890Sstevel@tonic-gate 	ASSERT(vaddr >= ht->ht_vaddr);
26900Sstevel@tonic-gate 	ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
26910Sstevel@tonic-gate 	pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
26920Sstevel@tonic-gate 	if (ht->ht_level > 0)
26930Sstevel@tonic-gate 		pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
26940Sstevel@tonic-gate 	htable_release(ht);
26955084Sjohnlev 	XPV_ALLOW_MIGRATE();
26960Sstevel@tonic-gate 	return (pfn);
26970Sstevel@tonic-gate }
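/*
 * Sketch of the common DDI-style use noted above (hypothetical caller):
 *
 *	pfn_t pfn = hat_getpfnum(kas.a_hat, kaddr);
 *
 *	if (pfn == PFN_INVALID) {
 *		... no valid mapping for kaddr ...
 *	}
 *
 * Kernel heap addresses take the kmap_ptes fast path above.
 */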
26980Sstevel@tonic-gate 
26990Sstevel@tonic-gate /*
27000Sstevel@tonic-gate  * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
27010Sstevel@tonic-gate  * Use hat_getpfnum(kas.a_hat, ...) instead.
27020Sstevel@tonic-gate  *
27030Sstevel@tonic-gate  * We'd like to return PFN_INVALID if the mappings have underlying page_t's
27040Sstevel@tonic-gate  * but can't right now due to the fact that some software has grown to use
27050Sstevel@tonic-gate  * this interface incorrectly. So for now when the interface is misused,
27060Sstevel@tonic-gate  * return a warning to the user that in the future it won't work in the
27070Sstevel@tonic-gate  * way they're abusing it, and carry on.
27080Sstevel@tonic-gate  *
27090Sstevel@tonic-gate  * Note that hat_getkpfnum() is never supported on amd64.
27100Sstevel@tonic-gate  */
27110Sstevel@tonic-gate #if !defined(__amd64)
27120Sstevel@tonic-gate pfn_t
27130Sstevel@tonic-gate hat_getkpfnum(caddr_t addr)
27140Sstevel@tonic-gate {
27150Sstevel@tonic-gate 	pfn_t	pfn;
27160Sstevel@tonic-gate 	int badcaller = 0;
27170Sstevel@tonic-gate 
27180Sstevel@tonic-gate 	if (khat_running == 0)
27190Sstevel@tonic-gate 		panic("hat_getkpfnum(): called too early\n");
27200Sstevel@tonic-gate 	if ((uintptr_t)addr < kernelbase)
27210Sstevel@tonic-gate 		return (PFN_INVALID);
27220Sstevel@tonic-gate 
27235084Sjohnlev 	XPV_DISALLOW_MIGRATE();
27240Sstevel@tonic-gate 	if (segkpm && IS_KPM_ADDR(addr)) {
27250Sstevel@tonic-gate 		badcaller = 1;
27260Sstevel@tonic-gate 		pfn = hat_kpm_va2pfn(addr);
27270Sstevel@tonic-gate 	} else {
27280Sstevel@tonic-gate 		pfn = hat_getpfnum(kas.a_hat, addr);
27290Sstevel@tonic-gate 		badcaller = pf_is_memory(pfn);
27300Sstevel@tonic-gate 	}
27310Sstevel@tonic-gate 
27320Sstevel@tonic-gate 	if (badcaller)
27330Sstevel@tonic-gate 		hat_getkpfnum_badcall(caller());
27345084Sjohnlev 	XPV_ALLOW_MIGRATE();
27350Sstevel@tonic-gate 	return (pfn);
27360Sstevel@tonic-gate }
27370Sstevel@tonic-gate #endif /* __amd64 */
27380Sstevel@tonic-gate 
27390Sstevel@tonic-gate /*
27400Sstevel@tonic-gate  * int hat_probe(hat, addr)
27410Sstevel@tonic-gate  *	return 0 if no valid mapping is present.  Faster version
27420Sstevel@tonic-gate  *	of hat_getattr() on certain architectures.
27430Sstevel@tonic-gate  */
27440Sstevel@tonic-gate int
27450Sstevel@tonic-gate hat_probe(hat_t *hat, caddr_t addr)
27460Sstevel@tonic-gate {
27470Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
27480Sstevel@tonic-gate 	uint_t		entry;
27490Sstevel@tonic-gate 	htable_t	*ht;
27500Sstevel@tonic-gate 	pgcnt_t		pg_off;
27510Sstevel@tonic-gate 
27523446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
27530Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
27540Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
27550Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
27560Sstevel@tonic-gate 		return (0);
27570Sstevel@tonic-gate 
27580Sstevel@tonic-gate 	/*
27590Sstevel@tonic-gate 	 * Most common use of hat_probe is from segmap. We special case it
27600Sstevel@tonic-gate 	 * for performance.
27610Sstevel@tonic-gate 	 */
27620Sstevel@tonic-gate 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
27630Sstevel@tonic-gate 		pg_off = mmu_btop(vaddr - mmu.kmap_addr);
27640Sstevel@tonic-gate 		if (mmu.pae_hat)
27650Sstevel@tonic-gate 			return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
27660Sstevel@tonic-gate 		else
27670Sstevel@tonic-gate 			return (PTE_ISVALID(
27680Sstevel@tonic-gate 			    ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
27690Sstevel@tonic-gate 	}
27700Sstevel@tonic-gate 
27710Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, &entry);
27720Sstevel@tonic-gate 	htable_release(ht);
27735084Sjohnlev 	return (ht != NULL);
27740Sstevel@tonic-gate }
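/*
 * Sketch of intended use (hypothetical caller): hat_probe() only reports
 * whether a valid mapping exists, so it suits callers like segmap that
 * don't need the attributes themselves.
 *
 *	if (hat_probe(hat, addr)) {
 *		... a valid mapping is already present ...
 *	}
 */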
27750Sstevel@tonic-gate 
27760Sstevel@tonic-gate /*
27774381Sjosephb  * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
27784381Sjosephb  */
27794381Sjosephb static int
27804381Sjosephb is_it_dism(hat_t *hat, caddr_t va)
27814381Sjosephb {
27824381Sjosephb 	struct seg *seg;
27834381Sjosephb 	struct shm_data *shmd;
27844381Sjosephb 	struct spt_data *sptd;
27854381Sjosephb 
27864381Sjosephb 	seg = as_findseg(hat->hat_as, va, 0);
27874381Sjosephb 	ASSERT(seg != NULL);
27884381Sjosephb 	ASSERT(seg->s_base <= va);
27894381Sjosephb 	shmd = (struct shm_data *)seg->s_data;
27904381Sjosephb 	ASSERT(shmd != NULL);
27914381Sjosephb 	sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
27924381Sjosephb 	ASSERT(sptd != NULL);
27934381Sjosephb 	if (sptd->spt_flags & SHM_PAGEABLE)
27944381Sjosephb 		return (1);
27954381Sjosephb 	return (0);
27964381Sjosephb }
27974381Sjosephb 
27984381Sjosephb /*
27994381Sjosephb  * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
28000Sstevel@tonic-gate  * except that we use the ism_hat's existing mappings to determine the pages
28014381Sjosephb  * and protections to use for this hat. If we find a full properly aligned
28024381Sjosephb  * and sized pagetable, we will attempt to share the pagetable itself.
28030Sstevel@tonic-gate  */
28040Sstevel@tonic-gate /*ARGSUSED*/
28050Sstevel@tonic-gate int
28060Sstevel@tonic-gate hat_share(
28070Sstevel@tonic-gate 	hat_t		*hat,
28080Sstevel@tonic-gate 	caddr_t		addr,
28090Sstevel@tonic-gate 	hat_t		*ism_hat,
28100Sstevel@tonic-gate 	caddr_t		src_addr,
28110Sstevel@tonic-gate 	size_t		len,	/* almost useless value, see below */
28120Sstevel@tonic-gate 	uint_t		ismszc)
28130Sstevel@tonic-gate {
28140Sstevel@tonic-gate 	uintptr_t	vaddr_start = (uintptr_t)addr;
28150Sstevel@tonic-gate 	uintptr_t	vaddr;
28160Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr_start + len;
28170Sstevel@tonic-gate 	uintptr_t	ism_addr_start = (uintptr_t)src_addr;
28180Sstevel@tonic-gate 	uintptr_t	ism_addr = ism_addr_start;
28190Sstevel@tonic-gate 	uintptr_t	e_ism_addr = ism_addr + len;
28200Sstevel@tonic-gate 	htable_t	*ism_ht = NULL;
28210Sstevel@tonic-gate 	htable_t	*ht;
28220Sstevel@tonic-gate 	x86pte_t	pte;
28230Sstevel@tonic-gate 	page_t		*pp;
28240Sstevel@tonic-gate 	pfn_t		pfn;
28250Sstevel@tonic-gate 	level_t		l;
28260Sstevel@tonic-gate 	pgcnt_t		pgcnt;
28270Sstevel@tonic-gate 	uint_t		prot;
28284381Sjosephb 	int		is_dism;
28294381Sjosephb 	int		flags;
28300Sstevel@tonic-gate 
28310Sstevel@tonic-gate 	/*
28320Sstevel@tonic-gate 	 * We might be asked to share an empty DISM hat by as_dup()
28330Sstevel@tonic-gate 	 */
28340Sstevel@tonic-gate 	ASSERT(hat != kas.a_hat);
28353446Smrj 	ASSERT(eaddr <= _userlimit);
28360Sstevel@tonic-gate 	if (!(ism_hat->hat_flags & HAT_SHARED)) {
28370Sstevel@tonic-gate 		ASSERT(hat_get_mapped_size(ism_hat) == 0);
28380Sstevel@tonic-gate 		return (0);
28390Sstevel@tonic-gate 	}
28405084Sjohnlev 	XPV_DISALLOW_MIGRATE();
28410Sstevel@tonic-gate 
28420Sstevel@tonic-gate 	/*
28430Sstevel@tonic-gate 	 * The SPT segment driver often passes us a size larger than there are
28440Sstevel@tonic-gate 	 * valid mappings. That's because it rounds the segment size up to a
28450Sstevel@tonic-gate 	 * large pagesize, even if the actual memory mapped by ism_hat is less.
28460Sstevel@tonic-gate 	 */
28470Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr_start));
28480Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(ism_addr_start));
28490Sstevel@tonic-gate 	ASSERT(ism_hat->hat_flags & HAT_SHARED);
28504381Sjosephb 	is_dism = is_it_dism(hat, addr);
28510Sstevel@tonic-gate 	while (ism_addr < e_ism_addr) {
28520Sstevel@tonic-gate 		/*
28530Sstevel@tonic-gate 		 * use htable_walk to get the next valid ISM mapping
28540Sstevel@tonic-gate 		 */
28550Sstevel@tonic-gate 		pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
28560Sstevel@tonic-gate 		if (ism_ht == NULL)
28570Sstevel@tonic-gate 			break;
28580Sstevel@tonic-gate 
28590Sstevel@tonic-gate 		/*
28604381Sjosephb 		 * First check to see if we already share the page table.
28614381Sjosephb 		 */
28624381Sjosephb 		l = ism_ht->ht_level;
28634381Sjosephb 		vaddr = vaddr_start + (ism_addr - ism_addr_start);
28644381Sjosephb 		ht = htable_lookup(hat, vaddr, l);
28654381Sjosephb 		if (ht != NULL) {
28664381Sjosephb 			if (ht->ht_flags & HTABLE_SHARED_PFN)
28674381Sjosephb 				goto shared;
28684381Sjosephb 			htable_release(ht);
28694381Sjosephb 			goto not_shared;
28704381Sjosephb 		}
28714381Sjosephb 
28724381Sjosephb 		/*
28734381Sjosephb 		 * Can't ever share top table.
28744381Sjosephb 		 */
28754381Sjosephb 		if (l == mmu.max_level)
28764381Sjosephb 			goto not_shared;
28774381Sjosephb 
28784381Sjosephb 		/*
28794381Sjosephb 		 * Avoid level mismatches later due to DISM faults.
28804381Sjosephb 		 */
28814381Sjosephb 		if (is_dism && l > 0)
28824381Sjosephb 			goto not_shared;
28834381Sjosephb 
28844381Sjosephb 		/*
28854381Sjosephb 		 * addresses and lengths must align
28864381Sjosephb 		 * table must be fully populated
28874381Sjosephb 		 * no lower level page tables
28884381Sjosephb 		 */
28894381Sjosephb 		if (ism_addr != ism_ht->ht_vaddr ||
28904381Sjosephb 		    (vaddr & LEVEL_OFFSET(l + 1)) != 0)
28914381Sjosephb 			goto not_shared;
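28914381Sjosephb 
		/*
		 * Worked example (assuming amd64 with 4K base pages and
		 * 512-entry pagetables): to share an l == 0 pagetable,
		 * both ism_addr and vaddr must be aligned on
		 * LEVEL_SIZE(1), i.e. 512 * 4K = 2MB, since the shared
		 * table maps exactly that much address space.
		 */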
28924381Sjosephb 
28934381Sjosephb 		/*
28944381Sjosephb 		 * The range of address space must cover a full table.
28950Sstevel@tonic-gate 		 */
2896*5159Sjohnlev 		if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
28974381Sjosephb 			goto not_shared;
28984381Sjosephb 
28994381Sjosephb 		/*
29004381Sjosephb 		 * All entries in the ISM page table must be leaf PTEs.
29014381Sjosephb 		 */
29024381Sjosephb 		if (l > 0) {
29034381Sjosephb 			int e;
29044381Sjosephb 
29054381Sjosephb 			/*
29064381Sjosephb 			 * We know the 0th is from htable_walk() above.
29074381Sjosephb 			 */
29084381Sjosephb 			for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
29094381Sjosephb 				x86pte_t pte;
29104381Sjosephb 				pte = x86pte_get(ism_ht, e);
29114381Sjosephb 				if (!PTE_ISPAGE(pte, l))
29124381Sjosephb 					goto not_shared;
29134381Sjosephb 			}
29144381Sjosephb 		}
29154381Sjosephb 
29164381Sjosephb 		/*
29174381Sjosephb 		 * share the page table
29184381Sjosephb 		 */
29194381Sjosephb 		ht = htable_create(hat, vaddr, l, ism_ht);
29204381Sjosephb shared:
29214381Sjosephb 		ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
29224381Sjosephb 		ASSERT(ht->ht_shares == ism_ht);
29234381Sjosephb 		hat->hat_ism_pgcnt +=
29244381Sjosephb 		    (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
29254381Sjosephb 		    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
29264381Sjosephb 		ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
29274381Sjosephb 		htable_release(ht);
29284381Sjosephb 		ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
29294381Sjosephb 		htable_release(ism_ht);
29304381Sjosephb 		ism_ht = NULL;
29314381Sjosephb 		continue;
29324381Sjosephb 
29334381Sjosephb not_shared:
29344381Sjosephb 		/*
29354381Sjosephb 		 * Unable to share the page table. Instead we will
29364381Sjosephb 		 * create new mappings from the values in the ISM mappings.
29374381Sjosephb 		 * Figure out what level size mappings to use.
29384381Sjosephb 		 */
29390Sstevel@tonic-gate 		for (l = ism_ht->ht_level; l > 0; --l) {
29400Sstevel@tonic-gate 			if (LEVEL_SIZE(l) <= eaddr - vaddr &&
29410Sstevel@tonic-gate 			    (vaddr & LEVEL_OFFSET(l)) == 0)
29420Sstevel@tonic-gate 				break;
29430Sstevel@tonic-gate 		}
29440Sstevel@tonic-gate 
29450Sstevel@tonic-gate 		/*
29460Sstevel@tonic-gate 		 * The ISM mapping might be larger than the share area;
29474381Sjosephb 		 * be careful to truncate it if needed.
29480Sstevel@tonic-gate 		 */
29490Sstevel@tonic-gate 		if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
29500Sstevel@tonic-gate 			pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
29510Sstevel@tonic-gate 		} else {
29520Sstevel@tonic-gate 			pgcnt = mmu_btop(eaddr - vaddr);
29530Sstevel@tonic-gate 			l = 0;
29540Sstevel@tonic-gate 		}
29550Sstevel@tonic-gate 
29560Sstevel@tonic-gate 		pfn = PTE2PFN(pte, ism_ht->ht_level);
29570Sstevel@tonic-gate 		ASSERT(pfn != PFN_INVALID);
29580Sstevel@tonic-gate 		while (pgcnt > 0) {
29590Sstevel@tonic-gate 			/*
29600Sstevel@tonic-gate 			 * Make a new pte for the PFN for this level.
29610Sstevel@tonic-gate 			 * Copy protections for the pte from the ISM pte.
29620Sstevel@tonic-gate 			 */
29630Sstevel@tonic-gate 			pp = page_numtopp_nolock(pfn);
29640Sstevel@tonic-gate 			ASSERT(pp != NULL);
29650Sstevel@tonic-gate 
29660Sstevel@tonic-gate 			prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
29670Sstevel@tonic-gate 			if (PTE_GET(pte, PT_WRITABLE))
29680Sstevel@tonic-gate 				prot |= PROT_WRITE;
29690Sstevel@tonic-gate 			if (!PTE_GET(pte, PT_NX))
29700Sstevel@tonic-gate 				prot |= PROT_EXEC;
29710Sstevel@tonic-gate 
29724381Sjosephb 			flags = HAT_LOAD;
29734381Sjosephb 			if (!is_dism)
29744381Sjosephb 				flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
29754381Sjosephb 			while (hati_load_common(hat, vaddr, pp, prot, flags,
29763446Smrj 			    l, pfn) != 0) {
29773446Smrj 				if (l == 0)
29783446Smrj 					panic("hati_load_common() failure");
29793446Smrj 				--l;
29803446Smrj 			}
29810Sstevel@tonic-gate 
29820Sstevel@tonic-gate 			vaddr += LEVEL_SIZE(l);
29830Sstevel@tonic-gate 			ism_addr += LEVEL_SIZE(l);
29840Sstevel@tonic-gate 			pfn += mmu_btop(LEVEL_SIZE(l));
29850Sstevel@tonic-gate 			pgcnt -= mmu_btop(LEVEL_SIZE(l));
29860Sstevel@tonic-gate 		}
29870Sstevel@tonic-gate 	}
29880Sstevel@tonic-gate 	if (ism_ht != NULL)
29890Sstevel@tonic-gate 		htable_release(ism_ht);
29905084Sjohnlev 	XPV_ALLOW_MIGRATE();
29910Sstevel@tonic-gate 	return (0);
29920Sstevel@tonic-gate }
29930Sstevel@tonic-gate 
29940Sstevel@tonic-gate 
29950Sstevel@tonic-gate /*
29960Sstevel@tonic-gate  * hat_unshare() is similar to hat_unload_callback(), but
29970Sstevel@tonic-gate  * we have to look for empty shared pagetables. Note that
29980Sstevel@tonic-gate  * hat_unshare() is always invoked against an entire segment.
29990Sstevel@tonic-gate  */
30000Sstevel@tonic-gate /*ARGSUSED*/
30010Sstevel@tonic-gate void
30020Sstevel@tonic-gate hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
30030Sstevel@tonic-gate {
30044654Sjosephb 	uint64_t	vaddr = (uintptr_t)addr;
30050Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
30060Sstevel@tonic-gate 	htable_t	*ht = NULL;
30070Sstevel@tonic-gate 	uint_t		need_demaps = 0;
30084381Sjosephb 	int		flags = HAT_UNLOAD_UNMAP;
30094381Sjosephb 	level_t		l;
30100Sstevel@tonic-gate 
30110Sstevel@tonic-gate 	ASSERT(hat != kas.a_hat);
30123446Smrj 	ASSERT(eaddr <= _userlimit);
30130Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
30140Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
30155084Sjohnlev 	XPV_DISALLOW_MIGRATE();
30160Sstevel@tonic-gate 
30170Sstevel@tonic-gate 	/*
30180Sstevel@tonic-gate 	 * First go through and remove any shared pagetables.
30190Sstevel@tonic-gate 	 *
30203446Smrj 	 * Note that it's ok to delay the TLB shootdown till the entire range is
30210Sstevel@tonic-gate 	 * finished, because if hat_pageunload() were to unload a shared
30223446Smrj 	 * pagetable page, its hat_tlb_inval() would do a global TLB invalidate.
30230Sstevel@tonic-gate 	 */
30244381Sjosephb 	l = mmu.max_page_level;
30254381Sjosephb 	if (l == mmu.max_level)
30264381Sjosephb 		--l;
30274381Sjosephb 	for (; l >= 0; --l) {
30284381Sjosephb 		for (vaddr = (uintptr_t)addr; vaddr < eaddr;
30294381Sjosephb 		    vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
30304381Sjosephb 			ASSERT(!IN_VA_HOLE(vaddr));
30314381Sjosephb 			/*
30324381Sjosephb 			 * find a pagetable that maps the current address
30334381Sjosephb 			 */
30344381Sjosephb 			ht = htable_lookup(hat, vaddr, l);
30354381Sjosephb 			if (ht == NULL)
30364381Sjosephb 				continue;
30370Sstevel@tonic-gate 			if (ht->ht_flags & HTABLE_SHARED_PFN) {
30380Sstevel@tonic-gate 				/*
30394381Sjosephb 				 * clear page count, set valid_cnt to 0,
30404381Sjosephb 				 * let htable_release() finish the job
30410Sstevel@tonic-gate 				 */
30424381Sjosephb 				hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
30434381Sjosephb 				    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
30440Sstevel@tonic-gate 				ht->ht_valid_cnt = 0;
30450Sstevel@tonic-gate 				need_demaps = 1;
30460Sstevel@tonic-gate 			}
30470Sstevel@tonic-gate 			htable_release(ht);
30480Sstevel@tonic-gate 		}
30490Sstevel@tonic-gate 	}
30500Sstevel@tonic-gate 
30510Sstevel@tonic-gate 	/*
30520Sstevel@tonic-gate 	 * flush the TLBs - since we're probably dealing with MANY mappings
30530Sstevel@tonic-gate 	 * we do just one CR3 reload.
30540Sstevel@tonic-gate 	 */
30550Sstevel@tonic-gate 	if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
30563446Smrj 		hat_tlb_inval(hat, DEMAP_ALL_ADDR);
30570Sstevel@tonic-gate 
30580Sstevel@tonic-gate 	/*
30590Sstevel@tonic-gate 	 * Now go back and clean up any unaligned mappings that
30600Sstevel@tonic-gate 	 * couldn't share pagetables.
30610Sstevel@tonic-gate 	 */
30624381Sjosephb 	if (!is_it_dism(hat, addr))
30634381Sjosephb 		flags |= HAT_UNLOAD_UNLOCK;
30644381Sjosephb 	hat_unload(hat, addr, len, flags);
30655084Sjohnlev 	XPV_ALLOW_MIGRATE();
30660Sstevel@tonic-gate }
30670Sstevel@tonic-gate 
30680Sstevel@tonic-gate 
30690Sstevel@tonic-gate /*
30700Sstevel@tonic-gate  * hat_reserve() does nothing
30710Sstevel@tonic-gate  */
30720Sstevel@tonic-gate /*ARGSUSED*/
30730Sstevel@tonic-gate void
30740Sstevel@tonic-gate hat_reserve(struct as *as, caddr_t addr, size_t len)
30750Sstevel@tonic-gate {
30760Sstevel@tonic-gate }
30770Sstevel@tonic-gate 
30780Sstevel@tonic-gate 
30790Sstevel@tonic-gate /*
30800Sstevel@tonic-gate  * Called when all mappings to a page should have write permission removed.
30810Sstevel@tonic-gate  * Mostly stolen from hat_pagesync().
30820Sstevel@tonic-gate  */
30830Sstevel@tonic-gate static void
30840Sstevel@tonic-gate hati_page_clrwrt(struct page *pp)
30850Sstevel@tonic-gate {
30860Sstevel@tonic-gate 	hment_t		*hm = NULL;
30870Sstevel@tonic-gate 	htable_t	*ht;
30880Sstevel@tonic-gate 	uint_t		entry;
30890Sstevel@tonic-gate 	x86pte_t	old;
30900Sstevel@tonic-gate 	x86pte_t	new;
30910Sstevel@tonic-gate 	uint_t		pszc = 0;
30920Sstevel@tonic-gate 
30935084Sjohnlev 	XPV_DISALLOW_MIGRATE();
30940Sstevel@tonic-gate next_size:
30950Sstevel@tonic-gate 	/*
30960Sstevel@tonic-gate 	 * walk thru the mapping list clearing write permission
30970Sstevel@tonic-gate 	 */
30980Sstevel@tonic-gate 	x86_hm_enter(pp);
30990Sstevel@tonic-gate 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
31000Sstevel@tonic-gate 		if (ht->ht_level < pszc)
31010Sstevel@tonic-gate 			continue;
31020Sstevel@tonic-gate 		old = x86pte_get(ht, entry);
31030Sstevel@tonic-gate 
31040Sstevel@tonic-gate 		for (;;) {
31050Sstevel@tonic-gate 			/*
31060Sstevel@tonic-gate 			 * Is this mapping of interest?
31070Sstevel@tonic-gate 			 */
31080Sstevel@tonic-gate 			if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
31090Sstevel@tonic-gate 			    PTE_GET(old, PT_WRITABLE) == 0)
31100Sstevel@tonic-gate 				break;
31110Sstevel@tonic-gate 
31120Sstevel@tonic-gate 			/*
31130Sstevel@tonic-gate 			 * Clear ref/mod writable bits. This requires cross
31140Sstevel@tonic-gate 			 * calls to ensure any executing TLBs see cleared bits.
31150Sstevel@tonic-gate 			 */
31160Sstevel@tonic-gate 			new = old;
31170Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
31180Sstevel@tonic-gate 			old = hati_update_pte(ht, entry, old, new);
31190Sstevel@tonic-gate 			if (old != 0)
31200Sstevel@tonic-gate 				continue;
31210Sstevel@tonic-gate 
31220Sstevel@tonic-gate 			break;
31230Sstevel@tonic-gate 		}
31240Sstevel@tonic-gate 	}
31250Sstevel@tonic-gate 	x86_hm_exit(pp);
31260Sstevel@tonic-gate 	while (pszc < pp->p_szc) {
31270Sstevel@tonic-gate 		page_t *tpp;
31280Sstevel@tonic-gate 		pszc++;
31290Sstevel@tonic-gate 		tpp = PP_GROUPLEADER(pp, pszc);
31300Sstevel@tonic-gate 		if (pp != tpp) {
31310Sstevel@tonic-gate 			pp = tpp;
31320Sstevel@tonic-gate 			goto next_size;
31330Sstevel@tonic-gate 		}
31340Sstevel@tonic-gate 	}
31355084Sjohnlev 	XPV_ALLOW_MIGRATE();
31360Sstevel@tonic-gate }
31370Sstevel@tonic-gate 
31380Sstevel@tonic-gate /*
31390Sstevel@tonic-gate  * void hat_page_setattr(pp, flag)
31400Sstevel@tonic-gate  * void hat_page_clrattr(pp, flag)
31410Sstevel@tonic-gate  *	used to set/clr ref/mod bits.
31420Sstevel@tonic-gate  */
31430Sstevel@tonic-gate void
31440Sstevel@tonic-gate hat_page_setattr(struct page *pp, uint_t flag)
31450Sstevel@tonic-gate {
31460Sstevel@tonic-gate 	vnode_t		*vp = pp->p_vnode;
31470Sstevel@tonic-gate 	kmutex_t	*vphm = NULL;
31480Sstevel@tonic-gate 	page_t		**listp;
31494324Sqiao 	int		noshuffle;
31504324Sqiao 
31514324Sqiao 	noshuffle = flag & P_NSH;
31524324Sqiao 	flag &= ~P_NSH;
31530Sstevel@tonic-gate 
31540Sstevel@tonic-gate 	if (PP_GETRM(pp, flag) == flag)
31550Sstevel@tonic-gate 		return;
31560Sstevel@tonic-gate 
31574324Sqiao 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
31584324Sqiao 	    !noshuffle) {
31590Sstevel@tonic-gate 		vphm = page_vnode_mutex(vp);
31600Sstevel@tonic-gate 		mutex_enter(vphm);
31610Sstevel@tonic-gate 	}
31620Sstevel@tonic-gate 
31630Sstevel@tonic-gate 	PP_SETRM(pp, flag);
31640Sstevel@tonic-gate 
31650Sstevel@tonic-gate 	if (vphm != NULL) {
31660Sstevel@tonic-gate 
31670Sstevel@tonic-gate 		/*
31680Sstevel@tonic-gate 		 * Some File Systems examine v_pages for NULL w/o
31690Sstevel@tonic-gate 		 * grabbing the vphm mutex. Must not let it become NULL when
31700Sstevel@tonic-gate 		 * pp is the only page on the list.
31710Sstevel@tonic-gate 		 */
31720Sstevel@tonic-gate 		if (pp->p_vpnext != pp) {
31730Sstevel@tonic-gate 			page_vpsub(&vp->v_pages, pp);
31740Sstevel@tonic-gate 			if (vp->v_pages != NULL)
31750Sstevel@tonic-gate 				listp = &vp->v_pages->p_vpprev->p_vpnext;
31760Sstevel@tonic-gate 			else
31770Sstevel@tonic-gate 				listp = &vp->v_pages;
31780Sstevel@tonic-gate 			page_vpadd(listp, pp);
31790Sstevel@tonic-gate 		}
31800Sstevel@tonic-gate 		mutex_exit(vphm);
31810Sstevel@tonic-gate 	}
31820Sstevel@tonic-gate }
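/*
 * Sketch (hypothetical caller): setting P_MOD on a VMODSORT vnode's page
 * normally reshuffles it on the v_pages list; OR in P_NSH to set the bit
 * without the shuffle.
 *
 *	hat_page_setattr(pp, P_MOD | P_NSH);
 */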
31830Sstevel@tonic-gate 
31840Sstevel@tonic-gate void
31850Sstevel@tonic-gate hat_page_clrattr(struct page *pp, uint_t flag)
31860Sstevel@tonic-gate {
31870Sstevel@tonic-gate 	vnode_t		*vp = pp->p_vnode;
31880Sstevel@tonic-gate 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
31890Sstevel@tonic-gate 
31900Sstevel@tonic-gate 	/*
31912999Sstans 	 * Caller is expected to hold page's io lock for VMODSORT to work
31922999Sstans 	 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
31932999Sstans 	 * bit is cleared.
31942999Sstans 	 * We don't assert this to avoid tripping some existing third party
31952999Sstans 	 * code. The dirty page is moved back to top of the v_page list
31962999Sstans 	 * after IO is done in pvn_write_done().
31970Sstevel@tonic-gate 	 */
31980Sstevel@tonic-gate 	PP_CLRRM(pp, flag);
31990Sstevel@tonic-gate 
32002999Sstans 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
32010Sstevel@tonic-gate 
32020Sstevel@tonic-gate 		/*
32030Sstevel@tonic-gate 		 * VMODSORT works by removing write permissions and getting
32040Sstevel@tonic-gate 		 * a fault when a page is made dirty. At this point
32050Sstevel@tonic-gate 		 * we need to remove write permission from all mappings
32060Sstevel@tonic-gate 		 * to this page.
32070Sstevel@tonic-gate 		 */
32080Sstevel@tonic-gate 		hati_page_clrwrt(pp);
32090Sstevel@tonic-gate 	}
32100Sstevel@tonic-gate }
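/*
 * Sketch (hypothetical caller, e.g. after an IO completes): clearing the
 * mod bit on a VMODSORT vnode's page also write-protects every mapping,
 * so the next store refaults and re-dirties the page.
 *
 *	hat_page_clrattr(pp, P_MOD);
 */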
32110Sstevel@tonic-gate 
32120Sstevel@tonic-gate /*
32130Sstevel@tonic-gate  *	If flag is specified, returns 0 if attribute is disabled
32140Sstevel@tonic-gate  *	and nonzero if enabled.  If flag specifies multiple attributes
32150Sstevel@tonic-gate  *	then returns 0 if ALL attributes are disabled.  This is an advisory
32160Sstevel@tonic-gate  *	call.
32170Sstevel@tonic-gate  */
32180Sstevel@tonic-gate uint_t
32190Sstevel@tonic-gate hat_page_getattr(struct page *pp, uint_t flag)
32200Sstevel@tonic-gate {
32210Sstevel@tonic-gate 	return (PP_GETRM(pp, flag));
32220Sstevel@tonic-gate }
32230Sstevel@tonic-gate 
32240Sstevel@tonic-gate 
32250Sstevel@tonic-gate /*
32260Sstevel@tonic-gate  * common code used by hat_pageunload() and hment_steal()
32270Sstevel@tonic-gate  */
32280Sstevel@tonic-gate hment_t *
32290Sstevel@tonic-gate hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
32300Sstevel@tonic-gate {
32310Sstevel@tonic-gate 	x86pte_t old_pte;
32320Sstevel@tonic-gate 	pfn_t pfn = pp->p_pagenum;
32330Sstevel@tonic-gate 	hment_t *hm;
32340Sstevel@tonic-gate 
32350Sstevel@tonic-gate 	/*
32360Sstevel@tonic-gate 	 * We need to acquire a hold on the htable in order to
32370Sstevel@tonic-gate 	 * do the invalidate. We know the htable must exist, since
32380Sstevel@tonic-gate  * unmaps don't release the htable until after removing any
32390Sstevel@tonic-gate 	 * hment. Having x86_hm_enter() keeps that from proceeding.
32400Sstevel@tonic-gate 	 */
32410Sstevel@tonic-gate 	htable_acquire(ht);
32420Sstevel@tonic-gate 
32430Sstevel@tonic-gate 	/*
32440Sstevel@tonic-gate 	 * Invalidate the PTE and remove the hment.
32450Sstevel@tonic-gate 	 */
32463446Smrj 	old_pte = x86pte_inval(ht, entry, 0, NULL);
324747Sjosephb 	if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
32483446Smrj 		panic("x86pte_inval() failure found PTE = " FMT_PTE
324947Sjosephb 		    " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
325047Sjosephb 		    old_pte, pfn, (uintptr_t)ht, entry);
325147Sjosephb 	}
32520Sstevel@tonic-gate 
32530Sstevel@tonic-gate 	/*
32540Sstevel@tonic-gate 	 * Clean up all the htable information for this mapping
32550Sstevel@tonic-gate 	 */
32560Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
32570Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
32580Sstevel@tonic-gate 	PGCNT_DEC(ht->ht_hat, ht->ht_level);
32590Sstevel@tonic-gate 
32600Sstevel@tonic-gate 	/*
32610Sstevel@tonic-gate 	 * sync ref/mod bits to the page_t
32620Sstevel@tonic-gate 	 */
32633446Smrj 	if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
32640Sstevel@tonic-gate 		hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
32650Sstevel@tonic-gate 
32660Sstevel@tonic-gate 	/*
32670Sstevel@tonic-gate 	 * Remove the mapping list entry for this page.
32680Sstevel@tonic-gate 	 */
32690Sstevel@tonic-gate 	hm = hment_remove(pp, ht, entry);
32700Sstevel@tonic-gate 
32710Sstevel@tonic-gate 	/*
32720Sstevel@tonic-gate 	 * drop the mapping list lock so that we might free the
32730Sstevel@tonic-gate 	 * hment and htable.
32740Sstevel@tonic-gate 	 */
32750Sstevel@tonic-gate 	x86_hm_exit(pp);
32760Sstevel@tonic-gate 	htable_release(ht);
32770Sstevel@tonic-gate 	return (hm);
32780Sstevel@tonic-gate }
32790Sstevel@tonic-gate 
32801841Spraks extern int	vpm_enable;
32810Sstevel@tonic-gate /*
32820Sstevel@tonic-gate  * Unload all translations to a page. If the page is a subpage of a large
32830Sstevel@tonic-gate  * page, the large page mappings are also removed.
32840Sstevel@tonic-gate  *
32850Sstevel@tonic-gate  * The forceflags are unused.
32860Sstevel@tonic-gate  */
32870Sstevel@tonic-gate 
32880Sstevel@tonic-gate /*ARGSUSED*/
32890Sstevel@tonic-gate static int
32900Sstevel@tonic-gate hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
32910Sstevel@tonic-gate {
32920Sstevel@tonic-gate 	page_t		*cur_pp = pp;
32930Sstevel@tonic-gate 	hment_t		*hm;
32940Sstevel@tonic-gate 	hment_t		*prev;
32950Sstevel@tonic-gate 	htable_t	*ht;
32960Sstevel@tonic-gate 	uint_t		entry;
32970Sstevel@tonic-gate 	level_t		level;
32980Sstevel@tonic-gate 
32995084Sjohnlev 	XPV_DISALLOW_MIGRATE();
33001841Spraks #if defined(__amd64)
33011841Spraks 	/*
33021841Spraks 	 * clear the vpm ref.
33031841Spraks 	 */
33041841Spraks 	if (vpm_enable) {
33051841Spraks 		pp->p_vpmref = 0;
33061841Spraks 	}
33071841Spraks #endif
33080Sstevel@tonic-gate 	/*
33090Sstevel@tonic-gate 	 * The loop with next_size handles pages with multiple pagesize mappings
33100Sstevel@tonic-gate 	 */
33110Sstevel@tonic-gate next_size:
33120Sstevel@tonic-gate 	for (;;) {
33130Sstevel@tonic-gate 
33140Sstevel@tonic-gate 		/*
33150Sstevel@tonic-gate 		 * Get a mapping list entry
33160Sstevel@tonic-gate 		 */
33170Sstevel@tonic-gate 		x86_hm_enter(cur_pp);
33180Sstevel@tonic-gate 		for (prev = NULL; ; prev = hm) {
33190Sstevel@tonic-gate 			hm = hment_walk(cur_pp, &ht, &entry, prev);
33200Sstevel@tonic-gate 			if (hm == NULL) {
33210Sstevel@tonic-gate 				x86_hm_exit(cur_pp);
33220Sstevel@tonic-gate 
33230Sstevel@tonic-gate 				/*
33240Sstevel@tonic-gate 				 * If not part of a larger page, we're done.
33250Sstevel@tonic-gate 				 */
33263446Smrj 				if (cur_pp->p_szc <= pg_szcd) {
33275084Sjohnlev 					XPV_ALLOW_MIGRATE();
33280Sstevel@tonic-gate 					return (0);
33293446Smrj 				}
33300Sstevel@tonic-gate 
33310Sstevel@tonic-gate 				/*
33320Sstevel@tonic-gate 				 * Else check the next larger page size.
33330Sstevel@tonic-gate 				 * hat_page_demote() may decrease p_szc
33340Sstevel@tonic-gate 				 * but that's ok; we'll just take an extra
33350Sstevel@tonic-gate 				 * trip to discover there are no larger mappings
33360Sstevel@tonic-gate 				 * and return.
33370Sstevel@tonic-gate 				 */
33380Sstevel@tonic-gate 				++pg_szcd;
33390Sstevel@tonic-gate 				cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
33400Sstevel@tonic-gate 				goto next_size;
33410Sstevel@tonic-gate 			}
33420Sstevel@tonic-gate 
33430Sstevel@tonic-gate 			/*
33440Sstevel@tonic-gate 			 * If this mapping size matches, remove it.
33450Sstevel@tonic-gate 			 */
33460Sstevel@tonic-gate 			level = ht->ht_level;
33470Sstevel@tonic-gate 			if (level == pg_szcd)
33480Sstevel@tonic-gate 				break;
33490Sstevel@tonic-gate 		}
33500Sstevel@tonic-gate 
33510Sstevel@tonic-gate 		/*
33520Sstevel@tonic-gate 		 * Remove the mapping list entry for this page.
33530Sstevel@tonic-gate 		 * Note this does the x86_hm_exit() for us.
33540Sstevel@tonic-gate 		 */
33550Sstevel@tonic-gate 		hm = hati_page_unmap(cur_pp, ht, entry);
33560Sstevel@tonic-gate 		if (hm != NULL)
33570Sstevel@tonic-gate 			hment_free(hm);
33580Sstevel@tonic-gate 	}
33590Sstevel@tonic-gate }
33600Sstevel@tonic-gate 
33610Sstevel@tonic-gate int
33620Sstevel@tonic-gate hat_pageunload(struct page *pp, uint_t forceflag)
33630Sstevel@tonic-gate {
33640Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp));
33650Sstevel@tonic-gate 	return (hati_pageunload(pp, 0, forceflag));
33660Sstevel@tonic-gate }
33670Sstevel@tonic-gate 
33680Sstevel@tonic-gate /*
33690Sstevel@tonic-gate  * Unload all large mappings to pp and reduce by 1 the p_szc field of every
33700Sstevel@tonic-gate  * large page level that included pp.
33710Sstevel@tonic-gate  *
33720Sstevel@tonic-gate  * pp must be locked EXCL. Even though no other constituent pages are locked
33730Sstevel@tonic-gate  * it's legal to unload large mappings to pp because all constituent pages of
33740Sstevel@tonic-gate  * large locked mappings have to be locked SHARED.  Therefore, if we have an
33750Sstevel@tonic-gate  * EXCL lock on one of the constituent pages, none of the large mappings to
33760Sstevel@tonic-gate  * pp are locked.
33770Sstevel@tonic-gate  *
33780Sstevel@tonic-gate  * Change (always decrease) p_szc field starting from the last constituent
33790Sstevel@tonic-gate  * page and ending with root constituent page so that root's pszc always shows
33800Sstevel@tonic-gate  * the area where hat_page_demote() may be active.
33810Sstevel@tonic-gate  *
33820Sstevel@tonic-gate  * This mechanism is only used for file system pages where it's not always
33830Sstevel@tonic-gate  * possible to get EXCL locks on all constituent pages to demote the size code
33840Sstevel@tonic-gate  * (as is done for anonymous or kernel large pages).
33850Sstevel@tonic-gate  */
33860Sstevel@tonic-gate void
33870Sstevel@tonic-gate hat_page_demote(page_t *pp)
33880Sstevel@tonic-gate {
33890Sstevel@tonic-gate 	uint_t		pszc;
33900Sstevel@tonic-gate 	uint_t		rszc;
33910Sstevel@tonic-gate 	uint_t		szc;
33920Sstevel@tonic-gate 	page_t		*rootpp;
33930Sstevel@tonic-gate 	page_t		*firstpp;
33940Sstevel@tonic-gate 	page_t		*lastpp;
33950Sstevel@tonic-gate 	pgcnt_t		pgcnt;
33960Sstevel@tonic-gate 
33970Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp));
33980Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
33990Sstevel@tonic-gate 	ASSERT(page_szc_lock_assert(pp));
34000Sstevel@tonic-gate 
34010Sstevel@tonic-gate 	if (pp->p_szc == 0)
34020Sstevel@tonic-gate 		return;
34030Sstevel@tonic-gate 
34040Sstevel@tonic-gate 	rootpp = PP_GROUPLEADER(pp, 1);
34050Sstevel@tonic-gate 	(void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
34060Sstevel@tonic-gate 
34070Sstevel@tonic-gate 	/*
34080Sstevel@tonic-gate 	 * all large mappings to pp are gone
34090Sstevel@tonic-gate 	 * and no new ones can be set up since pp is locked exclusively.
34100Sstevel@tonic-gate 	 *
34110Sstevel@tonic-gate 	 * Lock the root to make sure there's only one hat_page_demote()
34120Sstevel@tonic-gate 	 * outstanding within the area of this root's pszc.
34130Sstevel@tonic-gate 	 *
34140Sstevel@tonic-gate 	 * Second potential hat_page_demote() is already eliminated by upper
34150Sstevel@tonic-gate 	 * VM layer via page_szc_lock() but we don't rely on it and use our
34160Sstevel@tonic-gate 	 * own locking (so that upper layer locking can be changed without
34170Sstevel@tonic-gate 	 * assumptions that hat depends on upper layer VM to prevent multiple
34180Sstevel@tonic-gate 	 * hat_page_demote() to be issued simultaneously to the same large
34190Sstevel@tonic-gate 	 * page).
34200Sstevel@tonic-gate 	 */
34210Sstevel@tonic-gate again:
34220Sstevel@tonic-gate 	pszc = pp->p_szc;
34230Sstevel@tonic-gate 	if (pszc == 0)
34240Sstevel@tonic-gate 		return;
34250Sstevel@tonic-gate 	rootpp = PP_GROUPLEADER(pp, pszc);
34260Sstevel@tonic-gate 	x86_hm_enter(rootpp);
34270Sstevel@tonic-gate 	/*
34280Sstevel@tonic-gate 	 * If root's p_szc is different from pszc we raced with another
34290Sstevel@tonic-gate 	 * hat_page_demote().  Drop the lock and try to find the root again.
34300Sstevel@tonic-gate 	 * If root's p_szc is greater than pszc previous hat_page_demote() is
34310Sstevel@tonic-gate 	 * not done yet.  Take and release mlist lock of root's root to wait
34320Sstevel@tonic-gate 	 * for previous hat_page_demote() to complete.
34330Sstevel@tonic-gate 	 */
34340Sstevel@tonic-gate 	if ((rszc = rootpp->p_szc) != pszc) {
34350Sstevel@tonic-gate 		x86_hm_exit(rootpp);
34360Sstevel@tonic-gate 		if (rszc > pszc) {
34370Sstevel@tonic-gate 			/* p_szc of a locked non free page can't increase */
34380Sstevel@tonic-gate 			ASSERT(pp != rootpp);
34390Sstevel@tonic-gate 
34400Sstevel@tonic-gate 			rootpp = PP_GROUPLEADER(rootpp, rszc);
34410Sstevel@tonic-gate 			x86_hm_enter(rootpp);
34420Sstevel@tonic-gate 			x86_hm_exit(rootpp);
34430Sstevel@tonic-gate 		}
34440Sstevel@tonic-gate 		goto again;
34450Sstevel@tonic-gate 	}
34460Sstevel@tonic-gate 	ASSERT(pp->p_szc == pszc);
34470Sstevel@tonic-gate 
34480Sstevel@tonic-gate 	/*
34490Sstevel@tonic-gate 	 * Decrement by 1 p_szc of every constituent page of a region that
34500Sstevel@tonic-gate 	 * covered pp. For example if original szc is 3 it gets changed to 2
34510Sstevel@tonic-gate 	 * everywhere except in region 2 that covered pp. Region 2 that
34520Sstevel@tonic-gate 	 * covered pp gets demoted to 1 everywhere except in region 1 that
34530Sstevel@tonic-gate 	 * covered pp. The region 1 that covered pp is demoted to region
34540Sstevel@tonic-gate 	 * 0. It's done this way because from region 3 we removed level 3
34550Sstevel@tonic-gate 	 * mappings, from region 2 that covered pp we removed level 2 mappings
34560Sstevel@tonic-gate 	 * and from region 1 that covered pp we removed level 1 mappings.  All
34570Sstevel@tonic-gate 	 * changes are done from high pfn's to low pfn's so that roots
34580Sstevel@tonic-gate 	 * are changed last, allowing one to know the largest region where
34590Sstevel@tonic-gate 	 * hat_page_demote() is still active by only looking at the root page.
34600Sstevel@tonic-gate 	 *
34610Sstevel@tonic-gate 	 * This algorithm is implemented in 2 while loops. First loop changes
34620Sstevel@tonic-gate 	 * p_szc of pages to the right of pp's level 1 region and second
34630Sstevel@tonic-gate 	 * loop changes p_szc of pages of level 1 region that covers pp
34640Sstevel@tonic-gate 	 * and all pages to the left of level 1 region that covers pp.
34650Sstevel@tonic-gate 	 * In the first loop p_szc keeps dropping with every iteration
34660Sstevel@tonic-gate 	 * and in the second loop it keeps increasing with every iteration.
34670Sstevel@tonic-gate 	 *
34680Sstevel@tonic-gate 	 * First loop description: Demote pages to the right of pp outside of
34690Sstevel@tonic-gate 	 * level 1 region that covers pp.  In every iteration of the while
34700Sstevel@tonic-gate 	 * loop below find the last page of szc region and the first page of
34710Sstevel@tonic-gate 	 * (szc - 1) region that is immediately to the right of (szc - 1)
34720Sstevel@tonic-gate 	 * region that covers pp.  From last such page to first such page
34730Sstevel@tonic-gate 	 * change every page's szc to szc - 1. Decrement szc and continue
34740Sstevel@tonic-gate 	 * looping until szc is 1. If pp belongs to the last (szc - 1) region
34750Sstevel@tonic-gate 	 * of szc region skip to the next iteration.
34760Sstevel@tonic-gate 	 */
34770Sstevel@tonic-gate 	szc = pszc;
34780Sstevel@tonic-gate 	while (szc > 1) {
34790Sstevel@tonic-gate 		lastpp = PP_GROUPLEADER(pp, szc);
34800Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(szc);
34810Sstevel@tonic-gate 		lastpp += pgcnt - 1;
34820Sstevel@tonic-gate 		firstpp = PP_GROUPLEADER(pp, (szc - 1));
34830Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(szc - 1);
34840Sstevel@tonic-gate 		if (lastpp - firstpp < pgcnt) {
34850Sstevel@tonic-gate 			szc--;
34860Sstevel@tonic-gate 			continue;
34870Sstevel@tonic-gate 		}
34880Sstevel@tonic-gate 		firstpp += pgcnt;
34890Sstevel@tonic-gate 		while (lastpp != firstpp) {
34900Sstevel@tonic-gate 			ASSERT(lastpp->p_szc == pszc);
34910Sstevel@tonic-gate 			lastpp->p_szc = szc - 1;
34920Sstevel@tonic-gate 			lastpp--;
34930Sstevel@tonic-gate 		}
34940Sstevel@tonic-gate 		firstpp->p_szc = szc - 1;
34950Sstevel@tonic-gate 		szc--;
34960Sstevel@tonic-gate 	}
34970Sstevel@tonic-gate 
34980Sstevel@tonic-gate 	/*
34990Sstevel@tonic-gate 	 * Second loop description:
35000Sstevel@tonic-gate 	 * First iteration changes p_szc to 0 of every
35010Sstevel@tonic-gate 	 * page of level 1 region that covers pp.
35020Sstevel@tonic-gate 	 * Subsequent iterations find last page of szc region
35030Sstevel@tonic-gate 	 * immediately to the left of szc region that covered pp
35040Sstevel@tonic-gate 	 * and first page of (szc + 1) region that covers pp.
35050Sstevel@tonic-gate 	 * From last to first page change p_szc of every page to szc.
35060Sstevel@tonic-gate 	 * Increment szc and continue looping until szc is pszc.
35070Sstevel@tonic-gate 	 * If pp belongs to the first szc region of the (szc + 1) region
35080Sstevel@tonic-gate 	 * skip to the next iteration.
35090Sstevel@tonic-gate 	 *
35100Sstevel@tonic-gate 	 */
35110Sstevel@tonic-gate 	szc = 0;
35120Sstevel@tonic-gate 	while (szc < pszc) {
35130Sstevel@tonic-gate 		firstpp = PP_GROUPLEADER(pp, (szc + 1));
35140Sstevel@tonic-gate 		if (szc == 0) {
35150Sstevel@tonic-gate 			pgcnt = page_get_pagecnt(1);
35160Sstevel@tonic-gate 			lastpp = firstpp + (pgcnt - 1);
35170Sstevel@tonic-gate 		} else {
35180Sstevel@tonic-gate 			lastpp = PP_GROUPLEADER(pp, szc);
35190Sstevel@tonic-gate 			if (firstpp == lastpp) {
35200Sstevel@tonic-gate 				szc++;
35210Sstevel@tonic-gate 				continue;
35220Sstevel@tonic-gate 			}
35230Sstevel@tonic-gate 			lastpp--;
35240Sstevel@tonic-gate 			pgcnt = page_get_pagecnt(szc);
35250Sstevel@tonic-gate 		}
35260Sstevel@tonic-gate 		while (lastpp != firstpp) {
35270Sstevel@tonic-gate 			ASSERT(lastpp->p_szc == pszc);
35280Sstevel@tonic-gate 			lastpp->p_szc = szc;
35290Sstevel@tonic-gate 			lastpp--;
35300Sstevel@tonic-gate 		}
35310Sstevel@tonic-gate 		firstpp->p_szc = szc;
35320Sstevel@tonic-gate 		if (firstpp == rootpp)
35330Sstevel@tonic-gate 			break;
35340Sstevel@tonic-gate 		szc++;
35350Sstevel@tonic-gate 	}
35360Sstevel@tonic-gate 	x86_hm_exit(rootpp);
35370Sstevel@tonic-gate }
35380Sstevel@tonic-gate 
35390Sstevel@tonic-gate /*
35400Sstevel@tonic-gate  * get hw stats from hardware into page struct and reset hw stats
35410Sstevel@tonic-gate  * returns attributes of page
35420Sstevel@tonic-gate  * Flags for hat_pagesync, hat_getstat, hat_sync
35430Sstevel@tonic-gate  *
35440Sstevel@tonic-gate  * define	HAT_SYNC_ZERORM		0x01
35450Sstevel@tonic-gate  *
35460Sstevel@tonic-gate  * Additional flags for hat_pagesync
35470Sstevel@tonic-gate  *
35480Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_REF	0x02
35490Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_MOD	0x04
35500Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_RM	0x06
35510Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_SHARED	0x08
35520Sstevel@tonic-gate  */
35530Sstevel@tonic-gate uint_t
35540Sstevel@tonic-gate hat_pagesync(struct page *pp, uint_t flags)
35550Sstevel@tonic-gate {
35560Sstevel@tonic-gate 	hment_t		*hm = NULL;
35570Sstevel@tonic-gate 	htable_t	*ht;
35580Sstevel@tonic-gate 	uint_t		entry;
35590Sstevel@tonic-gate 	x86pte_t	old, save_old;
35600Sstevel@tonic-gate 	x86pte_t	new;
35610Sstevel@tonic-gate 	uchar_t		nrmbits = P_REF|P_MOD|P_RO;
35620Sstevel@tonic-gate 	extern ulong_t	po_share;
35630Sstevel@tonic-gate 	page_t		*save_pp = pp;
35640Sstevel@tonic-gate 	uint_t		pszc = 0;
35650Sstevel@tonic-gate 
35660Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(pp) || panicstr);
35670Sstevel@tonic-gate 
35680Sstevel@tonic-gate 	if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
35690Sstevel@tonic-gate 		return (pp->p_nrm & nrmbits);
35700Sstevel@tonic-gate 
35710Sstevel@tonic-gate 	if ((flags & HAT_SYNC_ZERORM) == 0) {
35720Sstevel@tonic-gate 
35730Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
35740Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
35750Sstevel@tonic-gate 
35760Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
35770Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
35780Sstevel@tonic-gate 
35790Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
35800Sstevel@tonic-gate 		    hat_page_getshare(pp) > po_share) {
35810Sstevel@tonic-gate 			if (PP_ISRO(pp))
35820Sstevel@tonic-gate 				PP_SETREF(pp);
35830Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
35840Sstevel@tonic-gate 		}
35850Sstevel@tonic-gate 	}
35860Sstevel@tonic-gate 
35875084Sjohnlev 	XPV_DISALLOW_MIGRATE();
35880Sstevel@tonic-gate next_size:
35890Sstevel@tonic-gate 	/*
35900Sstevel@tonic-gate 	 * walk thru the mapping list syncing (and clearing) ref/mod bits.
35910Sstevel@tonic-gate 	 */
35920Sstevel@tonic-gate 	x86_hm_enter(pp);
35930Sstevel@tonic-gate 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
35940Sstevel@tonic-gate 		if (ht->ht_level < pszc)
35950Sstevel@tonic-gate 			continue;
35960Sstevel@tonic-gate 		old = x86pte_get(ht, entry);
35970Sstevel@tonic-gate try_again:
35980Sstevel@tonic-gate 
35990Sstevel@tonic-gate 		ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
36000Sstevel@tonic-gate 
36010Sstevel@tonic-gate 		if (PTE_GET(old, PT_REF | PT_MOD) == 0)
36020Sstevel@tonic-gate 			continue;
36030Sstevel@tonic-gate 
36040Sstevel@tonic-gate 		save_old = old;
36050Sstevel@tonic-gate 		if ((flags & HAT_SYNC_ZERORM) != 0) {
36060Sstevel@tonic-gate 
36070Sstevel@tonic-gate 			/*
36080Sstevel@tonic-gate 			 * Need to clear ref or mod bits. Need to demap
36090Sstevel@tonic-gate 			 * to make sure any executing TLBs see cleared bits.
36100Sstevel@tonic-gate 			 */
36110Sstevel@tonic-gate 			new = old;
36120Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD);
36130Sstevel@tonic-gate 			old = hati_update_pte(ht, entry, old, new);
36140Sstevel@tonic-gate 			if (old != 0)
36150Sstevel@tonic-gate 				goto try_again;
36160Sstevel@tonic-gate 
36170Sstevel@tonic-gate 			old = save_old;
36180Sstevel@tonic-gate 		}
36190Sstevel@tonic-gate 
36200Sstevel@tonic-gate 		/*
36210Sstevel@tonic-gate 		 * Sync the PTE
36220Sstevel@tonic-gate 		 */
36233446Smrj 		if (!(flags & HAT_SYNC_ZERORM) &&
36243446Smrj 		    PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
36250Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, old, ht->ht_level);
36260Sstevel@tonic-gate 
36270Sstevel@tonic-gate 		/*
36280Sstevel@tonic-gate 		 * can stop short if we found a ref'd or mod'd page
36290Sstevel@tonic-gate 		 */
36300Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
36310Sstevel@tonic-gate 		    (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
36320Sstevel@tonic-gate 			x86_hm_exit(pp);
36333446Smrj 			goto done;
36340Sstevel@tonic-gate 		}
36350Sstevel@tonic-gate 	}
36360Sstevel@tonic-gate 	x86_hm_exit(pp);
36370Sstevel@tonic-gate 	while (pszc < pp->p_szc) {
36380Sstevel@tonic-gate 		page_t *tpp;
36390Sstevel@tonic-gate 		pszc++;
36400Sstevel@tonic-gate 		tpp = PP_GROUPLEADER(pp, pszc);
36410Sstevel@tonic-gate 		if (pp != tpp) {
36420Sstevel@tonic-gate 			pp = tpp;
36430Sstevel@tonic-gate 			goto next_size;
36440Sstevel@tonic-gate 		}
36450Sstevel@tonic-gate 	}
36463446Smrj done:
36475084Sjohnlev 	XPV_ALLOW_MIGRATE();
36480Sstevel@tonic-gate 	return (save_pp->p_nrm & nrmbits);
36490Sstevel@tonic-gate }
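/*
 * Sketch of a hypothetical pageout-style caller: collect and clear the
 * hardware ref/mod state in one pass over all mappings.
 *
 *	uint_t nrm = hat_pagesync(pp, HAT_SYNC_ZERORM);
 *
 *	if (nrm & P_MOD) {
 *		... the page was dirtied since the last sync ...
 *	}
 */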
36500Sstevel@tonic-gate 
36510Sstevel@tonic-gate /*
36520Sstevel@tonic-gate  * returns approx number of mappings to this pp.  A return of 0 implies
36530Sstevel@tonic-gate  * there are no mappings to the page.
36540Sstevel@tonic-gate  */
36550Sstevel@tonic-gate ulong_t
36560Sstevel@tonic-gate hat_page_getshare(page_t *pp)
36570Sstevel@tonic-gate {
36580Sstevel@tonic-gate 	uint_t cnt;
36590Sstevel@tonic-gate 	cnt = hment_mapcnt(pp);
36601841Spraks #if defined(__amd64)
36611841Spraks 	if (vpm_enable && pp->p_vpmref) {
36621841Spraks 		cnt += 1;
36631841Spraks 	}
36641841Spraks #endif
36650Sstevel@tonic-gate 	return (cnt);
36660Sstevel@tonic-gate }
36670Sstevel@tonic-gate 
36680Sstevel@tonic-gate /*
36694528Spaulsan  * Return 1 if the number of mappings exceeds sh_thresh. Return 0
36704528Spaulsan  * otherwise.
36714528Spaulsan  */
36724528Spaulsan int
36734528Spaulsan hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
36744528Spaulsan {
36754528Spaulsan 	return (hat_page_getshare(pp) > sh_thresh);
36764528Spaulsan }
36774528Spaulsan 
36784528Spaulsan /*
36790Sstevel@tonic-gate  * hat_softlock isn't supported anymore
36800Sstevel@tonic-gate  */
36810Sstevel@tonic-gate /*ARGSUSED*/
36820Sstevel@tonic-gate faultcode_t
36830Sstevel@tonic-gate hat_softlock(
36840Sstevel@tonic-gate 	hat_t *hat,
36850Sstevel@tonic-gate 	caddr_t addr,
36860Sstevel@tonic-gate 	size_t *len,
36870Sstevel@tonic-gate 	struct page **page_array,
36880Sstevel@tonic-gate 	uint_t flags)
36890Sstevel@tonic-gate {
36900Sstevel@tonic-gate 	return (FC_NOSUPPORT);
36910Sstevel@tonic-gate }
36920Sstevel@tonic-gate 
36930Sstevel@tonic-gate 
36940Sstevel@tonic-gate 
36950Sstevel@tonic-gate /*
36960Sstevel@tonic-gate  * Routine to expose supported HAT features to platform independent code.
36970Sstevel@tonic-gate  */
36980Sstevel@tonic-gate /*ARGSUSED*/
36990Sstevel@tonic-gate int
37000Sstevel@tonic-gate hat_supported(enum hat_features feature, void *arg)
37010Sstevel@tonic-gate {
37020Sstevel@tonic-gate 	switch (feature) {
37030Sstevel@tonic-gate 
37040Sstevel@tonic-gate 	case HAT_SHARED_PT:	/* this is really ISM */
37050Sstevel@tonic-gate 		return (1);
37060Sstevel@tonic-gate 
37070Sstevel@tonic-gate 	case HAT_DYNAMIC_ISM_UNMAP:
37080Sstevel@tonic-gate 		return (0);
37090Sstevel@tonic-gate 
37100Sstevel@tonic-gate 	case HAT_VMODSORT:
37110Sstevel@tonic-gate 		return (1);
37120Sstevel@tonic-gate 
37134528Spaulsan 	case HAT_SHARED_REGIONS:
37144528Spaulsan 		return (0);
37154528Spaulsan 
37160Sstevel@tonic-gate 	default:
37170Sstevel@tonic-gate 		panic("hat_supported() - unknown feature");
37180Sstevel@tonic-gate 	}
37190Sstevel@tonic-gate 	return (0);
37200Sstevel@tonic-gate }
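
/*
 * Illustrative sketch (not part of the original source): platform-
 * independent code typically gates ISM-style optimizations on
 * hat_supported(), e.g.:
 *
 *	if (hat_supported(HAT_SHARED_PT, NULL)) {
 *		// safe to share page tables for ISM segments
 *	}
 *
 * Note that an unrecognized feature value panics rather than returning
 * 0, so callers must pass only valid enum hat_features values.
 */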
37210Sstevel@tonic-gate 
37220Sstevel@tonic-gate /*
37230Sstevel@tonic-gate  * Called when a thread is exiting and has been switched to the kernel AS
37240Sstevel@tonic-gate  */
37250Sstevel@tonic-gate void
37260Sstevel@tonic-gate hat_thread_exit(kthread_t *thd)
37270Sstevel@tonic-gate {
37280Sstevel@tonic-gate 	ASSERT(thd->t_procp->p_as == &kas);
37295084Sjohnlev 	XPV_DISALLOW_MIGRATE();
37300Sstevel@tonic-gate 	hat_switch(thd->t_procp->p_as->a_hat);
37315084Sjohnlev 	XPV_ALLOW_MIGRATE();
37320Sstevel@tonic-gate }
37330Sstevel@tonic-gate 
37340Sstevel@tonic-gate /*
37350Sstevel@tonic-gate  * Set up the given brand-new hat structure as the new HAT on this CPU's MMU.
37360Sstevel@tonic-gate  */
37370Sstevel@tonic-gate /*ARGSUSED*/
37380Sstevel@tonic-gate void
37390Sstevel@tonic-gate hat_setup(hat_t *hat, int flags)
37400Sstevel@tonic-gate {
37415084Sjohnlev 	XPV_DISALLOW_MIGRATE();
37420Sstevel@tonic-gate 	kpreempt_disable();
37430Sstevel@tonic-gate 
37440Sstevel@tonic-gate 	hat_switch(hat);
37450Sstevel@tonic-gate 
37460Sstevel@tonic-gate 	kpreempt_enable();
37475084Sjohnlev 	XPV_ALLOW_MIGRATE();
37480Sstevel@tonic-gate }
37490Sstevel@tonic-gate 
37500Sstevel@tonic-gate /*
37510Sstevel@tonic-gate  * Prepare for a CPU private mapping for the given address.
37520Sstevel@tonic-gate  *
37530Sstevel@tonic-gate  * The address can only be used from a single CPU and can be remapped
37540Sstevel@tonic-gate  * using hat_mempte_remap().  Return the address of the PTE.
37550Sstevel@tonic-gate  *
37560Sstevel@tonic-gate  * We do the htable_create() if necessary and increment the valid count so
37570Sstevel@tonic-gate  * the htable can't disappear.  We also hat_devload() the page table into
37580Sstevel@tonic-gate  * the kernel so that the PTE can be accessed quickly.
37590Sstevel@tonic-gate  */
37603446Smrj hat_mempte_t
37613446Smrj hat_mempte_setup(caddr_t addr)
37620Sstevel@tonic-gate {
37630Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
37640Sstevel@tonic-gate 	htable_t	*ht;
37650Sstevel@tonic-gate 	uint_t		entry;
37660Sstevel@tonic-gate 	x86pte_t	oldpte;
37673446Smrj 	hat_mempte_t	p;
37680Sstevel@tonic-gate 
37690Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
37700Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
37714004Sjosephb 	++curthread->t_hatdepth;
37720Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
37730Sstevel@tonic-gate 	if (ht == NULL) {
37740Sstevel@tonic-gate 		ht = htable_create(kas.a_hat, va, 0, NULL);
37750Sstevel@tonic-gate 		entry = htable_va2entry(va, ht);
37760Sstevel@tonic-gate 		ASSERT(ht->ht_level == 0);
37770Sstevel@tonic-gate 		oldpte = x86pte_get(ht, entry);
37780Sstevel@tonic-gate 	}
37790Sstevel@tonic-gate 	if (PTE_ISVALID(oldpte))
37800Sstevel@tonic-gate 		panic("hat_mempte_setup(): address already mapped "
37810Sstevel@tonic-gate 		    "ht=%p, entry=%d, pte=" FMT_PTE, ht, entry, oldpte);
37820Sstevel@tonic-gate 
37830Sstevel@tonic-gate 	/*
37840Sstevel@tonic-gate 	 * increment ht_valid_cnt so that the pagetable can't disappear
37850Sstevel@tonic-gate 	 */
37860Sstevel@tonic-gate 	HTABLE_INC(ht->ht_valid_cnt);
37870Sstevel@tonic-gate 
37880Sstevel@tonic-gate 	/*
37893446Smrj 	 * return the PTE physical address to the caller.
37900Sstevel@tonic-gate 	 */
37910Sstevel@tonic-gate 	htable_release(ht);
37923446Smrj 	p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
37934004Sjosephb 	--curthread->t_hatdepth;
37943446Smrj 	return (p);
37950Sstevel@tonic-gate }
37960Sstevel@tonic-gate 
37970Sstevel@tonic-gate /*
37980Sstevel@tonic-gate  * Release a CPU private mapping for the given address.
37990Sstevel@tonic-gate  * We decrement the htable valid count so it might be destroyed.
38000Sstevel@tonic-gate  */
38013446Smrj /*ARGSUSED1*/
38020Sstevel@tonic-gate void
38033446Smrj hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
38040Sstevel@tonic-gate {
38050Sstevel@tonic-gate 	htable_t	*ht;
38060Sstevel@tonic-gate 
38070Sstevel@tonic-gate 	/*
38083446Smrj 	 * invalidate any leftover mapping and decrement the htable valid count
38090Sstevel@tonic-gate 	 */
38105084Sjohnlev #ifdef __xpv
38115084Sjohnlev 	if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
38125084Sjohnlev 	    UVMF_INVLPG | UVMF_LOCAL))
38135084Sjohnlev 		panic("HYPERVISOR_update_va_mapping() failed");
38145084Sjohnlev #else
38153446Smrj 	{
38163446Smrj 		x86pte_t *pteptr;
38173446Smrj 
38183446Smrj 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
38193446Smrj 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
38203446Smrj 		if (mmu.pae_hat)
38213446Smrj 			*pteptr = 0;
38223446Smrj 		else
38233446Smrj 			*(x86pte32_t *)pteptr = 0;
38243446Smrj 		mmu_tlbflush_entry(addr);
38253446Smrj 		x86pte_mapout();
38263446Smrj 	}
38275084Sjohnlev #endif
38283446Smrj 
38290Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
38300Sstevel@tonic-gate 	if (ht == NULL)
38310Sstevel@tonic-gate 		panic("hat_mempte_release(): invalid address");
38320Sstevel@tonic-gate 	ASSERT(ht->ht_level == 0);
38330Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
38340Sstevel@tonic-gate 	htable_release(ht);
38350Sstevel@tonic-gate }
38360Sstevel@tonic-gate 
38370Sstevel@tonic-gate /*
38380Sstevel@tonic-gate  * Apply a temporary CPU private mapping to a page. We flush the TLB only
38390Sstevel@tonic-gate  * on this CPU, so this must be called with preemption disabled.
38400Sstevel@tonic-gate  */
38410Sstevel@tonic-gate void
38420Sstevel@tonic-gate hat_mempte_remap(
38433446Smrj 	pfn_t		pfn,
38443446Smrj 	caddr_t		addr,
38453446Smrj 	hat_mempte_t	pte_pa,
38463446Smrj 	uint_t		attr,
38473446Smrj 	uint_t		flags)
38480Sstevel@tonic-gate {
38490Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
38500Sstevel@tonic-gate 	x86pte_t	pte;
38510Sstevel@tonic-gate 
38520Sstevel@tonic-gate 	/*
38530Sstevel@tonic-gate 	 * Remap the given PTE to the new page's PFN. Invalidate only
38540Sstevel@tonic-gate 	 * on this CPU.
38550Sstevel@tonic-gate 	 */
38560Sstevel@tonic-gate #ifdef DEBUG
38570Sstevel@tonic-gate 	htable_t	*ht;
38580Sstevel@tonic-gate 	uint_t		entry;
38590Sstevel@tonic-gate 
38600Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
38610Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
38620Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
38630Sstevel@tonic-gate 	ASSERT(ht != NULL);
38640Sstevel@tonic-gate 	ASSERT(ht->ht_level == 0);
38650Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
38663446Smrj 	ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
38670Sstevel@tonic-gate 	htable_release(ht);
38680Sstevel@tonic-gate #endif
38695084Sjohnlev 	XPV_DISALLOW_MIGRATE();
38700Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, 0, flags);
38715084Sjohnlev #ifdef __xpv
38725084Sjohnlev 	if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
38735084Sjohnlev 		panic("HYPERVISOR_update_va_mapping() failed");
38745084Sjohnlev #else
38753446Smrj 	{
38763446Smrj 		x86pte_t *pteptr;
38773446Smrj 
38783446Smrj 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
38793446Smrj 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
38803446Smrj 		if (mmu.pae_hat)
38813446Smrj 			*(x86pte_t *)pteptr = pte;
38823446Smrj 		else
38833446Smrj 			*(x86pte32_t *)pteptr = (x86pte32_t)pte;
38843446Smrj 		mmu_tlbflush_entry(addr);
38853446Smrj 		x86pte_mapout();
38863446Smrj 	}
38875084Sjohnlev #endif
38885084Sjohnlev 	XPV_ALLOW_MIGRATE();
38890Sstevel@tonic-gate }
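
/*
 * Illustrative sketch (not part of the original source): the expected
 * lifecycle of a CPU-private mapping using the three routines above.
 * The virtual address, PFN, and attribute/flag choices are hypothetical
 * placeholders.
 *
 *	caddr_t va = ...;			// page-aligned kernel VA
 *	pfn_t pfn = ...;			// page to map
 *	hat_mempte_t pte_pa;
 *
 *	pte_pa = hat_mempte_setup(va);		// reserve PTE, pin htable
 *
 *	kpreempt_disable();			// remap flushes only this
 *						// CPU's TLB
 *	hat_mempte_remap(pfn, va, pte_pa,
 *	    PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
 *	// ... use the mapping at va on this CPU only ...
 *	kpreempt_enable();
 *
 *	hat_mempte_release(va, pte_pa);		// invalidate and unpin
 */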
38900Sstevel@tonic-gate 
38910Sstevel@tonic-gate 
38920Sstevel@tonic-gate 
38930Sstevel@tonic-gate /*
38940Sstevel@tonic-gate  * Hat locking functions
38950Sstevel@tonic-gate  * XXX - these two functions are currently used by hatstats;
38960Sstevel@tonic-gate  * they could be removed by using a per-as mutex for hatstats.
38970Sstevel@tonic-gate  */
38980Sstevel@tonic-gate void
38990Sstevel@tonic-gate hat_enter(hat_t *hat)
39000Sstevel@tonic-gate {
39010Sstevel@tonic-gate 	mutex_enter(&hat->hat_mutex);
39020Sstevel@tonic-gate }
39030Sstevel@tonic-gate 
39040Sstevel@tonic-gate void
39050Sstevel@tonic-gate hat_exit(hat_t *hat)
39060Sstevel@tonic-gate {
39070Sstevel@tonic-gate 	mutex_exit(&hat->hat_mutex);
39080Sstevel@tonic-gate }
39090Sstevel@tonic-gate 
39100Sstevel@tonic-gate /*
39113446Smrj  * HAT part of cpu initialization.
39120Sstevel@tonic-gate  */
39130Sstevel@tonic-gate void
39140Sstevel@tonic-gate hat_cpu_online(struct cpu *cpup)
39150Sstevel@tonic-gate {
39160Sstevel@tonic-gate 	if (cpup != CPU) {
39173446Smrj 		x86pte_cpu_init(cpup);
39180Sstevel@tonic-gate 		hat_vlp_setup(cpup);
39190Sstevel@tonic-gate 	}
39200Sstevel@tonic-gate 	CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
39210Sstevel@tonic-gate }
39220Sstevel@tonic-gate 
39230Sstevel@tonic-gate /*
39243446Smrj  * HAT part of cpu deletion.
39253446Smrj  * (currently, we only call this after the CPU is safely passivated.)
39263446Smrj  */
39273446Smrj void
39283446Smrj hat_cpu_offline(struct cpu *cpup)
39293446Smrj {
39303446Smrj 	ASSERT(cpup != CPU);
39313446Smrj 
39323446Smrj 	CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
39333446Smrj 	x86pte_cpu_fini(cpup);
39343446Smrj 	hat_vlp_teardown(cpup);
39353446Smrj }
39363446Smrj 
39373446Smrj /*
39380Sstevel@tonic-gate  * Function called after all CPUs are brought online.
39390Sstevel@tonic-gate  * Used to remove low address boot mappings.
39400Sstevel@tonic-gate  */
39410Sstevel@tonic-gate void
39420Sstevel@tonic-gate clear_boot_mappings(uintptr_t low, uintptr_t high)
39430Sstevel@tonic-gate {
39440Sstevel@tonic-gate 	uintptr_t vaddr = low;
39450Sstevel@tonic-gate 	htable_t *ht = NULL;
39460Sstevel@tonic-gate 	level_t level;
39470Sstevel@tonic-gate 	uint_t entry;
39480Sstevel@tonic-gate 	x86pte_t pte;
39490Sstevel@tonic-gate 
39500Sstevel@tonic-gate 	/*
39510Sstevel@tonic-gate 	 * On the 1st CPU, we can unload the prom mappings; basically, we
39523446Smrj 	 * blow away all virtual mappings under _userlimit.
39530Sstevel@tonic-gate 	 */
39540Sstevel@tonic-gate 	while (vaddr < high) {
39550Sstevel@tonic-gate 		pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
39560Sstevel@tonic-gate 		if (ht == NULL)
39570Sstevel@tonic-gate 			break;
39580Sstevel@tonic-gate 
39590Sstevel@tonic-gate 		level = ht->ht_level;
39600Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
39610Sstevel@tonic-gate 		ASSERT(level <= mmu.max_page_level);
39620Sstevel@tonic-gate 		ASSERT(PTE_ISPAGE(pte, level));
39630Sstevel@tonic-gate 
39640Sstevel@tonic-gate 		/*
39650Sstevel@tonic-gate 		 * Unload the mapping from the page tables.
39660Sstevel@tonic-gate 		 */
39673446Smrj 		(void) x86pte_inval(ht, entry, 0, NULL);
39680Sstevel@tonic-gate 		ASSERT(ht->ht_valid_cnt > 0);
39690Sstevel@tonic-gate 		HTABLE_DEC(ht->ht_valid_cnt);
39700Sstevel@tonic-gate 		PGCNT_DEC(ht->ht_hat, ht->ht_level);
39710Sstevel@tonic-gate 
39720Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
39730Sstevel@tonic-gate 	}
39740Sstevel@tonic-gate 	if (ht)
39750Sstevel@tonic-gate 		htable_release(ht);
39760Sstevel@tonic-gate }
39770Sstevel@tonic-gate 
39780Sstevel@tonic-gate /*
39790Sstevel@tonic-gate  * Atomically update a new translation for a single page.  If the
39800Sstevel@tonic-gate  * currently installed PTE doesn't match the value we expect to find,
39810Sstevel@tonic-gate  * it's not updated and we return the PTE we found.
39820Sstevel@tonic-gate  *
39830Sstevel@tonic-gate  * If activating nosync or NOWRITE and the page was modified, we need to
39840Sstevel@tonic-gate  * sync with the page_t.  Also sync with the page_t if clearing ref/mod bits.
39850Sstevel@tonic-gate  */
39860Sstevel@tonic-gate static x86pte_t
39870Sstevel@tonic-gate hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
39880Sstevel@tonic-gate {
39890Sstevel@tonic-gate 	page_t		*pp;
39900Sstevel@tonic-gate 	uint_t		rm = 0;
39910Sstevel@tonic-gate 	x86pte_t	replaced;
39920Sstevel@tonic-gate 
39933446Smrj 	if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
39940Sstevel@tonic-gate 	    PTE_GET(expected, PT_MOD | PT_REF) &&
39950Sstevel@tonic-gate 	    (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
39964381Sjosephb 	    !PTE_GET(new, PT_MOD | PT_REF))) {
39970Sstevel@tonic-gate 
39983446Smrj 		ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
39990Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
40000Sstevel@tonic-gate 		ASSERT(pp != NULL);
40010Sstevel@tonic-gate 		if (PTE_GET(expected, PT_MOD))
40020Sstevel@tonic-gate 			rm |= P_MOD;
40030Sstevel@tonic-gate 		if (PTE_GET(expected, PT_REF))
40040Sstevel@tonic-gate 			rm |= P_REF;
40050Sstevel@tonic-gate 		PTE_CLR(new, PT_MOD | PT_REF);
40060Sstevel@tonic-gate 	}
40070Sstevel@tonic-gate 
40080Sstevel@tonic-gate 	replaced = x86pte_update(ht, entry, expected, new);
40090Sstevel@tonic-gate 	if (replaced != expected)
40100Sstevel@tonic-gate 		return (replaced);
40110Sstevel@tonic-gate 
40120Sstevel@tonic-gate 	if (rm) {
40130Sstevel@tonic-gate 		/*
40140Sstevel@tonic-gate 		 * sync to all constituent pages of a large page
40150Sstevel@tonic-gate 		 */
40160Sstevel@tonic-gate 		pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
40170Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
40180Sstevel@tonic-gate 		while (pgcnt-- > 0) {
40190Sstevel@tonic-gate 			/*
40200Sstevel@tonic-gate 			 * hat_page_demote() can't decrease
40210Sstevel@tonic-gate 			 * pszc below this mapping size
40220Sstevel@tonic-gate 			 * since large mapping existed after we
40230Sstevel@tonic-gate 			 * took mlist lock.
40240Sstevel@tonic-gate 			 */
40250Sstevel@tonic-gate 			ASSERT(pp->p_szc >= ht->ht_level);
40260Sstevel@tonic-gate 			hat_page_setattr(pp, rm);
40270Sstevel@tonic-gate 			++pp;
40280Sstevel@tonic-gate 		}
40290Sstevel@tonic-gate 	}
40300Sstevel@tonic-gate 
40310Sstevel@tonic-gate 	return (0);
40320Sstevel@tonic-gate }
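
/*
 * Illustrative sketch (not part of the original source): callers treat
 * hati_update_pte() as a compare-and-swap and retry when the installed
 * PTE did not match.  compute_new_pte() is a hypothetical helper
 * standing in for whatever transformation the caller performs.
 *
 *	x86pte_t old, new, found;
 *
 *	for (;;) {
 *		old = x86pte_get(ht, entry);	// current contents
 *		new = compute_new_pte(old);	// hypothetical helper
 *		found = hati_update_pte(ht, entry, old, new);
 *		if (found == 0)
 *			break;	// installed; ref/mod synced if needed
 *		// another CPU changed the PTE; re-read and retry
 *	}
 */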
40330Sstevel@tonic-gate 
40344528Spaulsan /* ARGSUSED */
40354528Spaulsan void
40365075Spaulsan hat_join_srd(struct hat *hat, vnode_t *evp)
40374528Spaulsan {
40384528Spaulsan }
40394528Spaulsan 
40404528Spaulsan /* ARGSUSED */
40414528Spaulsan hat_region_cookie_t
40425075Spaulsan hat_join_region(struct hat *hat,
40434528Spaulsan     caddr_t r_saddr,
40444528Spaulsan     size_t r_size,
40454528Spaulsan     void *r_obj,
40464528Spaulsan     u_offset_t r_objoff,
40474528Spaulsan     uchar_t r_perm,
40484528Spaulsan     uchar_t r_pgszc,
40494528Spaulsan     hat_rgn_cb_func_t r_cb_function,
40504528Spaulsan     uint_t flags)
40514528Spaulsan {
40524528Spaulsan 	panic("No shared region support on x86");
40534528Spaulsan 	return (HAT_INVALID_REGION_COOKIE);
40544528Spaulsan }
40554528Spaulsan 
40564528Spaulsan /* ARGSUSED */
40574528Spaulsan void
40585075Spaulsan hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
40594528Spaulsan {
40604528Spaulsan 	panic("No shared region support on x86");
40614528Spaulsan }
40624528Spaulsan 
40634528Spaulsan /* ARGSUSED */
40644528Spaulsan void
40655075Spaulsan hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
40664528Spaulsan {
40674528Spaulsan 	panic("No shared region support on x86");
40684528Spaulsan }
40694528Spaulsan 
40704528Spaulsan 
40710Sstevel@tonic-gate /*
40720Sstevel@tonic-gate  * Kernel Physical Mapping (kpm) facility
40730Sstevel@tonic-gate  *
40740Sstevel@tonic-gate  * Most of the routines needed to support segkpm are almost no-ops on the
40750Sstevel@tonic-gate  * x86 platform.  We map in the entire segment when it is created and leave
40760Sstevel@tonic-gate  * it mapped in, so there is no additional work required to set up and tear
40770Sstevel@tonic-gate  * down individual mappings.  All of these routines were created to support
40780Sstevel@tonic-gate  * SPARC platforms that have to avoid aliasing in their virtually indexed
40790Sstevel@tonic-gate  * caches.
40800Sstevel@tonic-gate  *
40810Sstevel@tonic-gate  * Most of the routines have sanity checks in them (e.g. verifying that the
40820Sstevel@tonic-gate  * passed-in page is locked).  We don't actually care about most of these
40830Sstevel@tonic-gate  * checks on x86, but we leave them in place to identify problems in the
40840Sstevel@tonic-gate  * upper levels.
40850Sstevel@tonic-gate  */
40860Sstevel@tonic-gate 
40870Sstevel@tonic-gate /*
40880Sstevel@tonic-gate  * Map in a locked page and return the vaddr.
40890Sstevel@tonic-gate  */
40900Sstevel@tonic-gate /*ARGSUSED*/
40910Sstevel@tonic-gate caddr_t
40920Sstevel@tonic-gate hat_kpm_mapin(struct page *pp, struct kpme *kpme)
40930Sstevel@tonic-gate {
40940Sstevel@tonic-gate 	caddr_t		vaddr;
40950Sstevel@tonic-gate 
40960Sstevel@tonic-gate #ifdef DEBUG
40970Sstevel@tonic-gate 	if (kpm_enable == 0) {
40980Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
40990Sstevel@tonic-gate 		return ((caddr_t)NULL);
41000Sstevel@tonic-gate 	}
41010Sstevel@tonic-gate 
41020Sstevel@tonic-gate 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
41030Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
41040Sstevel@tonic-gate 		return ((caddr_t)NULL);
41050Sstevel@tonic-gate 	}
41060Sstevel@tonic-gate #endif
41070Sstevel@tonic-gate 
41080Sstevel@tonic-gate 	vaddr = hat_kpm_page2va(pp, 1);
41090Sstevel@tonic-gate 
41100Sstevel@tonic-gate 	return (vaddr);
41110Sstevel@tonic-gate }
41120Sstevel@tonic-gate 
41130Sstevel@tonic-gate /*
41140Sstevel@tonic-gate  * Mapout a locked page.
41150Sstevel@tonic-gate  */
41160Sstevel@tonic-gate /*ARGSUSED*/
41170Sstevel@tonic-gate void
41180Sstevel@tonic-gate hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
41190Sstevel@tonic-gate {
41200Sstevel@tonic-gate #ifdef DEBUG
41210Sstevel@tonic-gate 	if (kpm_enable == 0) {
41220Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
41230Sstevel@tonic-gate 		return;
41240Sstevel@tonic-gate 	}
41250Sstevel@tonic-gate 
41260Sstevel@tonic-gate 	if (IS_KPM_ADDR(vaddr) == 0) {
41270Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
41280Sstevel@tonic-gate 		return;
41290Sstevel@tonic-gate 	}
41300Sstevel@tonic-gate 
41310Sstevel@tonic-gate 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
41320Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
41330Sstevel@tonic-gate 		return;
41340Sstevel@tonic-gate 	}
41350Sstevel@tonic-gate #endif
41360Sstevel@tonic-gate }
41370Sstevel@tonic-gate 
41380Sstevel@tonic-gate /*
41390Sstevel@tonic-gate  * Return the kpm virtual address for a specific pfn
41400Sstevel@tonic-gate  */
41410Sstevel@tonic-gate caddr_t
41420Sstevel@tonic-gate hat_kpm_pfn2va(pfn_t pfn)
41430Sstevel@tonic-gate {
41443446Smrj 	uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
41450Sstevel@tonic-gate 
41460Sstevel@tonic-gate 	return ((caddr_t)vaddr);
41470Sstevel@tonic-gate }
41480Sstevel@tonic-gate 
41490Sstevel@tonic-gate /*
41500Sstevel@tonic-gate  * Return the kpm virtual address for the page at pp.
41510Sstevel@tonic-gate  */
41520Sstevel@tonic-gate /*ARGSUSED*/
41530Sstevel@tonic-gate caddr_t
41540Sstevel@tonic-gate hat_kpm_page2va(struct page *pp, int checkswap)
41550Sstevel@tonic-gate {
41560Sstevel@tonic-gate 	return (hat_kpm_pfn2va(pp->p_pagenum));
41570Sstevel@tonic-gate }
41580Sstevel@tonic-gate 
41590Sstevel@tonic-gate /*
41600Sstevel@tonic-gate  * Return the page frame number for the kpm virtual address vaddr.
41610Sstevel@tonic-gate  */
41620Sstevel@tonic-gate pfn_t
41630Sstevel@tonic-gate hat_kpm_va2pfn(caddr_t vaddr)
41640Sstevel@tonic-gate {
41650Sstevel@tonic-gate 	pfn_t		pfn;
41660Sstevel@tonic-gate 
41670Sstevel@tonic-gate 	ASSERT(IS_KPM_ADDR(vaddr));
41680Sstevel@tonic-gate 
41690Sstevel@tonic-gate 	pfn = (pfn_t)btop(vaddr - kpm_vbase);
41700Sstevel@tonic-gate 
41710Sstevel@tonic-gate 	return (pfn);
41720Sstevel@tonic-gate }
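
/*
 * Illustrative sketch (not part of the original source): since segkpm
 * on x86 is a single linear window, pfn<->va translation is pure
 * arithmetic and the two conversions above invert each other:
 *
 *	pfn_t pfn = ...;			// any pfn backed by kpm
 *	caddr_t va = hat_kpm_pfn2va(pfn);	// kpm_vbase + mmu_ptob(pfn)
 *
 *	ASSERT(hat_kpm_va2pfn(va) == pfn);	// btop(va - kpm_vbase)
 */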
41730Sstevel@tonic-gate 
41740Sstevel@tonic-gate 
41750Sstevel@tonic-gate /*
41760Sstevel@tonic-gate  * Return the page for the kpm virtual address vaddr.
41770Sstevel@tonic-gate  */
41780Sstevel@tonic-gate page_t *
41790Sstevel@tonic-gate hat_kpm_vaddr2page(caddr_t vaddr)
41800Sstevel@tonic-gate {
41810Sstevel@tonic-gate 	pfn_t		pfn;
41820Sstevel@tonic-gate 
41830Sstevel@tonic-gate 	ASSERT(IS_KPM_ADDR(vaddr));
41840Sstevel@tonic-gate 
41850Sstevel@tonic-gate 	pfn = hat_kpm_va2pfn(vaddr);
41860Sstevel@tonic-gate 
41870Sstevel@tonic-gate 	return (page_numtopp_nolock(pfn));
41880Sstevel@tonic-gate }
41890Sstevel@tonic-gate 
41900Sstevel@tonic-gate /*
41910Sstevel@tonic-gate  * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
41920Sstevel@tonic-gate  * KPM page.  This should never happen on x86.
41930Sstevel@tonic-gate  */
41940Sstevel@tonic-gate int
41950Sstevel@tonic-gate hat_kpm_fault(hat_t *hat, caddr_t vaddr)
41960Sstevel@tonic-gate {
41970Sstevel@tonic-gate 	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p", hat, vaddr);
41980Sstevel@tonic-gate 
41990Sstevel@tonic-gate 	return (0);
42000Sstevel@tonic-gate }
42010Sstevel@tonic-gate 
42020Sstevel@tonic-gate /*ARGSUSED*/
42030Sstevel@tonic-gate void
42040Sstevel@tonic-gate hat_kpm_mseghash_clear(int nentries)
42050Sstevel@tonic-gate {}
42060Sstevel@tonic-gate 
42070Sstevel@tonic-gate /*ARGSUSED*/
42080Sstevel@tonic-gate void
42090Sstevel@tonic-gate hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
42100Sstevel@tonic-gate {}
42115084Sjohnlev 
42125084Sjohnlev #ifdef __xpv
42135084Sjohnlev /*
42145084Sjohnlev  * There are specific Hypervisor calls to establish and remove mappings
42155084Sjohnlev  * to grant table references and the privcmd driver. We have to ensure
42165084Sjohnlev  * that a page table actually exists.
42175084Sjohnlev  */
42185084Sjohnlev void
42195084Sjohnlev hat_prepare_mapping(hat_t *hat, caddr_t addr)
42205084Sjohnlev {
42215084Sjohnlev 	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
42225084Sjohnlev 	(void) htable_create(hat, (uintptr_t)addr, 0, NULL);
42235084Sjohnlev }
42245084Sjohnlev 
42255084Sjohnlev void
42265084Sjohnlev hat_release_mapping(hat_t *hat, caddr_t addr)
42275084Sjohnlev {
42285084Sjohnlev 	htable_t *ht;
42295084Sjohnlev 
42305084Sjohnlev 	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
42315084Sjohnlev 	ht = htable_lookup(hat, (uintptr_t)addr, 0);
42325084Sjohnlev 	ASSERT(ht != NULL);
42335084Sjohnlev 	ASSERT(ht->ht_busy >= 2);
42345084Sjohnlev 	htable_release(ht);
42355084Sjohnlev 	htable_release(ht);
42365084Sjohnlev }
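
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * xpv consumer (e.g. a grant-table or privcmd mapping path) brackets its
 * hypervisor mapping calls with the two routines above, so that a
 * level-0 page table is guaranteed to exist for the VA:
 *
 *	hat_prepare_mapping(kas.a_hat, va);
 *	// ... establish the hypervisor mapping (e.g. a grant-table
 *	// map operation) at va ...
 *	// ... later, remove the hypervisor mapping ...
 *	hat_release_mapping(kas.a_hat, va);
 */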
42375084Sjohnlev #endif