xref: /onnv-gate/usr/src/uts/i86pc/vm/hat_i86.c (revision 5466:8e6f5dfe7459)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51747Sjosephb  * Common Development and Distribution License (the "License").
61747Sjosephb  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
223446Smrj  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
270Sstevel@tonic-gate 
280Sstevel@tonic-gate /*
290Sstevel@tonic-gate  * VM - Hardware Address Translation management for i386 and amd64
300Sstevel@tonic-gate  *
310Sstevel@tonic-gate  * Implementation of the interfaces described in <common/vm/hat.h>
320Sstevel@tonic-gate  *
330Sstevel@tonic-gate  * Nearly all the details of how the hardware is managed should not be
340Sstevel@tonic-gate  * visible outside this layer except for misc. machine specific functions
350Sstevel@tonic-gate  * that work in conjunction with this code.
360Sstevel@tonic-gate  *
370Sstevel@tonic-gate  * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
380Sstevel@tonic-gate  */
390Sstevel@tonic-gate 
400Sstevel@tonic-gate #include <sys/machparam.h>
410Sstevel@tonic-gate #include <sys/machsystm.h>
420Sstevel@tonic-gate #include <sys/mman.h>
430Sstevel@tonic-gate #include <sys/types.h>
440Sstevel@tonic-gate #include <sys/systm.h>
450Sstevel@tonic-gate #include <sys/cpuvar.h>
460Sstevel@tonic-gate #include <sys/thread.h>
470Sstevel@tonic-gate #include <sys/proc.h>
480Sstevel@tonic-gate #include <sys/cpu.h>
490Sstevel@tonic-gate #include <sys/kmem.h>
500Sstevel@tonic-gate #include <sys/disp.h>
510Sstevel@tonic-gate #include <sys/shm.h>
520Sstevel@tonic-gate #include <sys/sysmacros.h>
530Sstevel@tonic-gate #include <sys/machparam.h>
540Sstevel@tonic-gate #include <sys/vmem.h>
550Sstevel@tonic-gate #include <sys/vmsystm.h>
560Sstevel@tonic-gate #include <sys/promif.h>
570Sstevel@tonic-gate #include <sys/var.h>
580Sstevel@tonic-gate #include <sys/x86_archext.h>
590Sstevel@tonic-gate #include <sys/atomic.h>
600Sstevel@tonic-gate #include <sys/bitmap.h>
613446Smrj #include <sys/controlregs.h>
623446Smrj #include <sys/bootconf.h>
633446Smrj #include <sys/bootsvcs.h>
643446Smrj #include <sys/bootinfo.h>
654191Sjosephb #include <sys/archsystm.h>
660Sstevel@tonic-gate 
670Sstevel@tonic-gate #include <vm/seg_kmem.h>
680Sstevel@tonic-gate #include <vm/hat_i86.h>
690Sstevel@tonic-gate #include <vm/as.h>
700Sstevel@tonic-gate #include <vm/seg.h>
710Sstevel@tonic-gate #include <vm/page.h>
720Sstevel@tonic-gate #include <vm/seg_kp.h>
730Sstevel@tonic-gate #include <vm/seg_kpm.h>
740Sstevel@tonic-gate #include <vm/vm_dep.h>
755084Sjohnlev #ifdef __xpv
765084Sjohnlev #include <sys/hypervisor.h>
775084Sjohnlev #endif
783446Smrj #include <vm/kboot_mmu.h>
794381Sjosephb #include <vm/seg_spt.h>
800Sstevel@tonic-gate 
810Sstevel@tonic-gate #include <sys/cmn_err.h>
820Sstevel@tonic-gate 
830Sstevel@tonic-gate /*
840Sstevel@tonic-gate  * Basic parameters for hat operation.
850Sstevel@tonic-gate  */
860Sstevel@tonic-gate struct hat_mmu_info mmu;
870Sstevel@tonic-gate 
880Sstevel@tonic-gate /*
890Sstevel@tonic-gate  * The page that is the kernel's top level pagetable.
900Sstevel@tonic-gate  *
915084Sjohnlev  * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
920Sstevel@tonic-gate  * on this 4K page for its top level page table. The remaining groups of
930Sstevel@tonic-gate  * 4 entries are used for per processor copies of user VLP pagetables for
940Sstevel@tonic-gate  * running threads.  See hat_switch() and reload_pae32() for details.
950Sstevel@tonic-gate  *
965084Sjohnlev  * vlp_page[0..3] - level==2 PTEs for kernel HAT
975084Sjohnlev  * vlp_page[4..7] - level==2 PTEs for user thread on cpu 0
985084Sjohnlev  * vlp_page[8..11]  - level==2 PTE for user thread on cpu 1
995084Sjohnlev  * etc...
1000Sstevel@tonic-gate  */
1010Sstevel@tonic-gate static x86pte_t *vlp_page;
1020Sstevel@tonic-gate 
1030Sstevel@tonic-gate /*
1040Sstevel@tonic-gate  * forward declaration of internal utility routines
1050Sstevel@tonic-gate  */
1060Sstevel@tonic-gate static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
1070Sstevel@tonic-gate 	x86pte_t new);
1080Sstevel@tonic-gate 
1090Sstevel@tonic-gate /*
1100Sstevel@tonic-gate  * The kernel address space exists in all HATs. To implement this the
1115084Sjohnlev  * kernel reserves a fixed number of entries in the topmost level(s) of page
1125084Sjohnlev  * tables. The values are setup during startup and then copied to every user
1135084Sjohnlev  * hat created by hat_alloc(). This means that kernelbase must be:
1140Sstevel@tonic-gate  *
1150Sstevel@tonic-gate  *	  4Meg aligned for 32 bit kernels
1160Sstevel@tonic-gate  *	512Gig aligned for x86_64 64 bit kernel
1170Sstevel@tonic-gate  *
1185084Sjohnlev  * The hat_kernel_range_ts describe what needs to be copied from kernel hat
1195084Sjohnlev  * to each user hat.
1200Sstevel@tonic-gate  */
1215084Sjohnlev typedef struct hat_kernel_range {
1225084Sjohnlev 	level_t		hkr_level;
1235084Sjohnlev 	uintptr_t	hkr_start_va;
1245084Sjohnlev 	uintptr_t	hkr_end_va;	/* zero means to end of memory */
1255084Sjohnlev } hat_kernel_range_t;
1265084Sjohnlev #define	NUM_KERNEL_RANGE 2
1275084Sjohnlev static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
1285084Sjohnlev static int num_kernel_ranges;
1290Sstevel@tonic-gate 
1300Sstevel@tonic-gate uint_t use_boot_reserve = 1;	/* cleared after early boot process */
1310Sstevel@tonic-gate uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */
1320Sstevel@tonic-gate 
1335349Skchow /* export 1g page size to user applications if set */
134*5466Skchow int	enable_1gpg = 1;
1355349Skchow 
1365349Skchow #ifdef DEBUG
1375349Skchow uint_t	map1gcnt;
1385349Skchow #endif
1395349Skchow 
1405349Skchow 
1410Sstevel@tonic-gate /*
1420Sstevel@tonic-gate  * A cpuset for all cpus. This is used for kernel address cross calls, since
1430Sstevel@tonic-gate  * the kernel addresses apply to all cpus.
1440Sstevel@tonic-gate  */
1450Sstevel@tonic-gate cpuset_t khat_cpuset;
1460Sstevel@tonic-gate 
1470Sstevel@tonic-gate /*
1480Sstevel@tonic-gate  * management stuff for hat structures
1490Sstevel@tonic-gate  */
1500Sstevel@tonic-gate kmutex_t	hat_list_lock;
1510Sstevel@tonic-gate kcondvar_t	hat_list_cv;
1520Sstevel@tonic-gate kmem_cache_t	*hat_cache;
1530Sstevel@tonic-gate kmem_cache_t	*hat_hash_cache;
1540Sstevel@tonic-gate kmem_cache_t	*vlp_hash_cache;
1550Sstevel@tonic-gate 
1560Sstevel@tonic-gate /*
1570Sstevel@tonic-gate  * Simple statistics
1580Sstevel@tonic-gate  */
1590Sstevel@tonic-gate struct hatstats hatstat;
1600Sstevel@tonic-gate 
1610Sstevel@tonic-gate /*
1625316Sjohnlev  * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
1635316Sjohnlev  * correctly.  For such hypervisors we must set PT_USER for kernel
1645316Sjohnlev  * entries ourselves (normally the emulation would set PT_USER for
1655316Sjohnlev  * kernel entries and PT_USER|PT_GLOBAL for user entries).  pt_kern is
1665316Sjohnlev  * thus set appropriately.  Note that dboot/kbm is OK, as only the full
1675316Sjohnlev  * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
1685316Sjohnlev  * incorrect.
1695316Sjohnlev  */
1705316Sjohnlev int pt_kern;
1715316Sjohnlev 
1725316Sjohnlev /*
1730Sstevel@tonic-gate  * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
1740Sstevel@tonic-gate  */
1750Sstevel@tonic-gate extern void atomic_orb(uchar_t *addr, uchar_t val);
1760Sstevel@tonic-gate extern void atomic_andb(uchar_t *addr, uchar_t val);
1770Sstevel@tonic-gate 
1780Sstevel@tonic-gate #define	PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
1790Sstevel@tonic-gate #define	PP_ISMOD(pp)		PP_GETRM(pp, P_MOD)
1800Sstevel@tonic-gate #define	PP_ISREF(pp)		PP_GETRM(pp, P_REF)
1810Sstevel@tonic-gate #define	PP_ISRO(pp)		PP_GETRM(pp, P_RO)
1820Sstevel@tonic-gate 
1830Sstevel@tonic-gate #define	PP_SETRM(pp, rm)	atomic_orb(&(pp->p_nrm), rm)
1840Sstevel@tonic-gate #define	PP_SETMOD(pp)		PP_SETRM(pp, P_MOD)
1850Sstevel@tonic-gate #define	PP_SETREF(pp)		PP_SETRM(pp, P_REF)
1860Sstevel@tonic-gate #define	PP_SETRO(pp)		PP_SETRM(pp, P_RO)
1870Sstevel@tonic-gate 
1880Sstevel@tonic-gate #define	PP_CLRRM(pp, rm)	atomic_andb(&(pp->p_nrm), ~(rm))
1890Sstevel@tonic-gate #define	PP_CLRMOD(pp)   	PP_CLRRM(pp, P_MOD)
1900Sstevel@tonic-gate #define	PP_CLRREF(pp)   	PP_CLRRM(pp, P_REF)
1910Sstevel@tonic-gate #define	PP_CLRRO(pp)    	PP_CLRRM(pp, P_RO)
1920Sstevel@tonic-gate #define	PP_CLRALL(pp)		PP_CLRRM(pp, P_MOD | P_REF | P_RO)
1930Sstevel@tonic-gate 
1940Sstevel@tonic-gate /*
1950Sstevel@tonic-gate  * kmem cache constructor for struct hat
1960Sstevel@tonic-gate  */
1970Sstevel@tonic-gate /*ARGSUSED*/
1980Sstevel@tonic-gate static int
1990Sstevel@tonic-gate hati_constructor(void *buf, void *handle, int kmflags)
2000Sstevel@tonic-gate {
2010Sstevel@tonic-gate 	hat_t	*hat = buf;
2020Sstevel@tonic-gate 
2030Sstevel@tonic-gate 	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
2040Sstevel@tonic-gate 	bzero(hat->hat_pages_mapped,
2050Sstevel@tonic-gate 	    sizeof (pgcnt_t) * (mmu.max_page_level + 1));
2064381Sjosephb 	hat->hat_ism_pgcnt = 0;
2070Sstevel@tonic-gate 	hat->hat_stats = 0;
2080Sstevel@tonic-gate 	hat->hat_flags = 0;
2090Sstevel@tonic-gate 	CPUSET_ZERO(hat->hat_cpus);
2100Sstevel@tonic-gate 	hat->hat_htable = NULL;
2110Sstevel@tonic-gate 	hat->hat_ht_hash = NULL;
2120Sstevel@tonic-gate 	return (0);
2130Sstevel@tonic-gate }
2140Sstevel@tonic-gate 
2150Sstevel@tonic-gate /*
2160Sstevel@tonic-gate  * Allocate a hat structure for as. We also create the top level
2170Sstevel@tonic-gate  * htable and initialize it to contain the kernel hat entries.
2180Sstevel@tonic-gate  */
2190Sstevel@tonic-gate hat_t *
2200Sstevel@tonic-gate hat_alloc(struct as *as)
2210Sstevel@tonic-gate {
2225084Sjohnlev 	hat_t			*hat;
2235084Sjohnlev 	htable_t		*ht;	/* top level htable */
2245084Sjohnlev 	uint_t			use_vlp;
2255084Sjohnlev 	uint_t			r;
2265084Sjohnlev 	hat_kernel_range_t	*rp;
2275084Sjohnlev 	uintptr_t		va;
2285084Sjohnlev 	uintptr_t		eva;
2295084Sjohnlev 	uint_t			start;
2305084Sjohnlev 	uint_t			cnt;
2315084Sjohnlev 	htable_t		*src;
2320Sstevel@tonic-gate 
2330Sstevel@tonic-gate 	/*
2340Sstevel@tonic-gate 	 * Once we start creating user process HATs we can enable
2350Sstevel@tonic-gate 	 * the htable_steal() code.
2360Sstevel@tonic-gate 	 */
2370Sstevel@tonic-gate 	if (can_steal_post_boot == 0)
2380Sstevel@tonic-gate 		can_steal_post_boot = 1;
2390Sstevel@tonic-gate 
2400Sstevel@tonic-gate 	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
2410Sstevel@tonic-gate 	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
2420Sstevel@tonic-gate 	hat->hat_as = as;
2430Sstevel@tonic-gate 	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
2440Sstevel@tonic-gate 	ASSERT(hat->hat_flags == 0);
2450Sstevel@tonic-gate 
2465084Sjohnlev #if defined(__xpv)
2470Sstevel@tonic-gate 	/*
2485084Sjohnlev 	 * No VLP stuff on the hypervisor due to the 64-bit split top level
2495084Sjohnlev 	 * page tables.  On 32-bit it's not needed as the hypervisor takes
2505084Sjohnlev 	 * care of copying the top level PTEs to a below 4Gig page.
2510Sstevel@tonic-gate 	 */
2525084Sjohnlev 	use_vlp = 0;
2535084Sjohnlev #else	/* __xpv */
2545084Sjohnlev 	/* 32 bit processes uses a VLP style hat when running with PAE */
2550Sstevel@tonic-gate #if defined(__amd64)
2560Sstevel@tonic-gate 	use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
2570Sstevel@tonic-gate #elif defined(__i386)
2580Sstevel@tonic-gate 	use_vlp = mmu.pae_hat;
2590Sstevel@tonic-gate #endif
2605084Sjohnlev #endif	/* __xpv */
2610Sstevel@tonic-gate 	if (use_vlp) {
2620Sstevel@tonic-gate 		hat->hat_flags = HAT_VLP;
2630Sstevel@tonic-gate 		bzero(hat->hat_vlp_ptes, VLP_SIZE);
2640Sstevel@tonic-gate 	}
2650Sstevel@tonic-gate 
2660Sstevel@tonic-gate 	/*
2670Sstevel@tonic-gate 	 * Allocate the htable hash
2680Sstevel@tonic-gate 	 */
2690Sstevel@tonic-gate 	if ((hat->hat_flags & HAT_VLP)) {
2700Sstevel@tonic-gate 		hat->hat_num_hash = mmu.vlp_hash_cnt;
2710Sstevel@tonic-gate 		hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
2720Sstevel@tonic-gate 	} else {
2730Sstevel@tonic-gate 		hat->hat_num_hash = mmu.hash_cnt;
2740Sstevel@tonic-gate 		hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
2750Sstevel@tonic-gate 	}
2760Sstevel@tonic-gate 	bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
2770Sstevel@tonic-gate 
2780Sstevel@tonic-gate 	/*
2790Sstevel@tonic-gate 	 * Initialize Kernel HAT entries at the top of the top level page
2805084Sjohnlev 	 * tables for the new hat.
2810Sstevel@tonic-gate 	 */
2820Sstevel@tonic-gate 	hat->hat_htable = NULL;
2830Sstevel@tonic-gate 	hat->hat_ht_cached = NULL;
2845084Sjohnlev 	XPV_DISALLOW_MIGRATE();
2850Sstevel@tonic-gate 	ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
2865084Sjohnlev 	hat->hat_htable = ht;
2875084Sjohnlev 
2885084Sjohnlev #if defined(__amd64)
2895084Sjohnlev 	if (hat->hat_flags & HAT_VLP)
2905084Sjohnlev 		goto init_done;
2910Sstevel@tonic-gate #endif
2925084Sjohnlev 
2935084Sjohnlev 	for (r = 0; r < num_kernel_ranges; ++r) {
2945084Sjohnlev 		rp = &kernel_ranges[r];
2955084Sjohnlev 		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
2965084Sjohnlev 		    va += cnt * LEVEL_SIZE(rp->hkr_level)) {
2975084Sjohnlev 
2985084Sjohnlev 			if (rp->hkr_level == TOP_LEVEL(hat))
2995084Sjohnlev 				ht = hat->hat_htable;
3005084Sjohnlev 			else
3015084Sjohnlev 				ht = htable_create(hat, va, rp->hkr_level,
3025084Sjohnlev 				    NULL);
3035084Sjohnlev 
3045084Sjohnlev 			start = htable_va2entry(va, ht);
3055084Sjohnlev 			cnt = HTABLE_NUM_PTES(ht) - start;
3065084Sjohnlev 			eva = va +
3075084Sjohnlev 			    ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
3085084Sjohnlev 			if (rp->hkr_end_va != 0 &&
3095084Sjohnlev 			    (eva > rp->hkr_end_va || eva == 0))
3105084Sjohnlev 				cnt = htable_va2entry(rp->hkr_end_va, ht) -
3115084Sjohnlev 				    start;
3125084Sjohnlev 
3135084Sjohnlev #if defined(__i386) && !defined(__xpv)
3145084Sjohnlev 			if (ht->ht_flags & HTABLE_VLP) {
3155084Sjohnlev 				bcopy(&vlp_page[start],
3165084Sjohnlev 				    &hat->hat_vlp_ptes[start],
3175084Sjohnlev 				    cnt * sizeof (x86pte_t));
3185084Sjohnlev 				continue;
3195084Sjohnlev 			}
3205084Sjohnlev #endif
3215084Sjohnlev 			src = htable_lookup(kas.a_hat, va, rp->hkr_level);
3225084Sjohnlev 			ASSERT(src != NULL);
3235084Sjohnlev 			x86pte_copy(src, ht, start, cnt);
3245084Sjohnlev 			htable_release(src);
3255084Sjohnlev 		}
3265084Sjohnlev 	}
3275084Sjohnlev 
3285084Sjohnlev init_done:
3295084Sjohnlev 	XPV_ALLOW_MIGRATE();
3305084Sjohnlev 
3315084Sjohnlev #if defined(__xpv)
3320Sstevel@tonic-gate 	/*
3335084Sjohnlev 	 * Pin top level page tables after initializing them
3340Sstevel@tonic-gate 	 */
3355084Sjohnlev 	xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
3365084Sjohnlev #if defined(__amd64)
3375084Sjohnlev 	xen_pin(hat->hat_user_ptable, mmu.max_level);
3385084Sjohnlev #endif
3390Sstevel@tonic-gate #endif
3400Sstevel@tonic-gate 
3410Sstevel@tonic-gate 	/*
3421747Sjosephb 	 * Put it at the start of the global list of all hats (used by stealing)
3431747Sjosephb 	 *
3441747Sjosephb 	 * kas.a_hat is not in the list but is instead used to find the
3451747Sjosephb 	 * first and last items in the list.
3461747Sjosephb 	 *
3471747Sjosephb 	 * - kas.a_hat->hat_next points to the start of the user hats.
3481747Sjosephb 	 *   The list ends where hat->hat_next == NULL
3491747Sjosephb 	 *
3501747Sjosephb 	 * - kas.a_hat->hat_prev points to the last of the user hats.
3511747Sjosephb 	 *   The list begins where hat->hat_prev == NULL
3520Sstevel@tonic-gate 	 */
3530Sstevel@tonic-gate 	mutex_enter(&hat_list_lock);
3541747Sjosephb 	hat->hat_prev = NULL;
3551747Sjosephb 	hat->hat_next = kas.a_hat->hat_next;
3561747Sjosephb 	if (hat->hat_next)
3571747Sjosephb 		hat->hat_next->hat_prev = hat;
3581747Sjosephb 	else
3591747Sjosephb 		kas.a_hat->hat_prev = hat;
3600Sstevel@tonic-gate 	kas.a_hat->hat_next = hat;
3610Sstevel@tonic-gate 	mutex_exit(&hat_list_lock);
3620Sstevel@tonic-gate 
3630Sstevel@tonic-gate 	return (hat);
3640Sstevel@tonic-gate }
3650Sstevel@tonic-gate 
3660Sstevel@tonic-gate /*
3670Sstevel@tonic-gate  * process has finished executing but as has not been cleaned up yet.
3680Sstevel@tonic-gate  */
3690Sstevel@tonic-gate /*ARGSUSED*/
3700Sstevel@tonic-gate void
3710Sstevel@tonic-gate hat_free_start(hat_t *hat)
3720Sstevel@tonic-gate {
3730Sstevel@tonic-gate 	ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));
3741747Sjosephb 
3751747Sjosephb 	/*
3761747Sjosephb 	 * If the hat is currently a stealing victim, wait for the stealing
3771747Sjosephb 	 * to finish.  Once we mark it as HAT_FREEING, htable_steal()
3781747Sjosephb 	 * won't look at its pagetables anymore.
3791747Sjosephb 	 */
3800Sstevel@tonic-gate 	mutex_enter(&hat_list_lock);
3811747Sjosephb 	while (hat->hat_flags & HAT_VICTIM)
3821747Sjosephb 		cv_wait(&hat_list_cv, &hat_list_lock);
3830Sstevel@tonic-gate 	hat->hat_flags |= HAT_FREEING;
3840Sstevel@tonic-gate 	mutex_exit(&hat_list_lock);
3850Sstevel@tonic-gate }
3860Sstevel@tonic-gate 
3870Sstevel@tonic-gate /*
3880Sstevel@tonic-gate  * An address space is being destroyed, so we destroy the associated hat.
3890Sstevel@tonic-gate  */
3900Sstevel@tonic-gate void
3910Sstevel@tonic-gate hat_free_end(hat_t *hat)
3920Sstevel@tonic-gate {
3930Sstevel@tonic-gate 	kmem_cache_t *cache;
3940Sstevel@tonic-gate 
3950Sstevel@tonic-gate 	ASSERT(hat->hat_flags & HAT_FREEING);
3960Sstevel@tonic-gate 
3970Sstevel@tonic-gate 	/*
3980Sstevel@tonic-gate 	 * must not be running on the given hat
3990Sstevel@tonic-gate 	 */
4000Sstevel@tonic-gate 	ASSERT(CPU->cpu_current_hat != hat);
4010Sstevel@tonic-gate 
4020Sstevel@tonic-gate 	/*
4031747Sjosephb 	 * Remove it from the list of HATs
4040Sstevel@tonic-gate 	 */
4050Sstevel@tonic-gate 	mutex_enter(&hat_list_lock);
4061747Sjosephb 	if (hat->hat_prev)
4071747Sjosephb 		hat->hat_prev->hat_next = hat->hat_next;
4081747Sjosephb 	else
4090Sstevel@tonic-gate 		kas.a_hat->hat_next = hat->hat_next;
4101747Sjosephb 	if (hat->hat_next)
4111747Sjosephb 		hat->hat_next->hat_prev = hat->hat_prev;
4121747Sjosephb 	else
4131747Sjosephb 		kas.a_hat->hat_prev = hat->hat_prev;
4140Sstevel@tonic-gate 	mutex_exit(&hat_list_lock);
4151747Sjosephb 	hat->hat_next = hat->hat_prev = NULL;
4160Sstevel@tonic-gate 
4175084Sjohnlev #if defined(__xpv)
4185084Sjohnlev 	/*
4195084Sjohnlev 	 * On the hypervisor, unpin top level page table(s)
4205084Sjohnlev 	 */
4215084Sjohnlev 	xen_unpin(hat->hat_htable->ht_pfn);
4225084Sjohnlev #if defined(__amd64)
4235084Sjohnlev 	xen_unpin(hat->hat_user_ptable);
4245084Sjohnlev #endif
4255084Sjohnlev #endif
4265084Sjohnlev 
4270Sstevel@tonic-gate 	/*
4280Sstevel@tonic-gate 	 * Make a pass through the htables freeing them all up.
4290Sstevel@tonic-gate 	 */
4300Sstevel@tonic-gate 	htable_purge_hat(hat);
4310Sstevel@tonic-gate 
4320Sstevel@tonic-gate 	/*
4330Sstevel@tonic-gate 	 * Decide which kmem cache the hash table came from, then free it.
4340Sstevel@tonic-gate 	 */
4350Sstevel@tonic-gate 	if (hat->hat_flags & HAT_VLP)
4360Sstevel@tonic-gate 		cache = vlp_hash_cache;
4370Sstevel@tonic-gate 	else
4380Sstevel@tonic-gate 		cache = hat_hash_cache;
4390Sstevel@tonic-gate 	kmem_cache_free(cache, hat->hat_ht_hash);
4400Sstevel@tonic-gate 	hat->hat_ht_hash = NULL;
4410Sstevel@tonic-gate 
4420Sstevel@tonic-gate 	hat->hat_flags = 0;
4430Sstevel@tonic-gate 	kmem_cache_free(hat_cache, hat);
4440Sstevel@tonic-gate }
4450Sstevel@tonic-gate 
4460Sstevel@tonic-gate /*
4470Sstevel@tonic-gate  * round kernelbase down to a supported value to use for _userlimit
4480Sstevel@tonic-gate  *
4490Sstevel@tonic-gate  * userlimit must be aligned down to an entry in the top level htable.
4500Sstevel@tonic-gate  * The one exception is for 32 bit HAT's running PAE.
4510Sstevel@tonic-gate  */
4520Sstevel@tonic-gate uintptr_t
4530Sstevel@tonic-gate hat_kernelbase(uintptr_t va)
4540Sstevel@tonic-gate {
4550Sstevel@tonic-gate #if defined(__i386)
4560Sstevel@tonic-gate 	va &= LEVEL_MASK(1);
4570Sstevel@tonic-gate #endif
4580Sstevel@tonic-gate 	if (IN_VA_HOLE(va))
4590Sstevel@tonic-gate 		panic("_userlimit %p will fall in VA hole\n", (void *)va);
4600Sstevel@tonic-gate 	return (va);
4610Sstevel@tonic-gate }
4620Sstevel@tonic-gate 
4630Sstevel@tonic-gate /*
4640Sstevel@tonic-gate  * Initialize hat data structures based on processor MMU information.
4650Sstevel@tonic-gate  */
4660Sstevel@tonic-gate void
4670Sstevel@tonic-gate mmu_init(void)
4680Sstevel@tonic-gate {
4690Sstevel@tonic-gate 	uint_t max_htables;
4700Sstevel@tonic-gate 	uint_t pa_bits;
4710Sstevel@tonic-gate 	uint_t va_bits;
4720Sstevel@tonic-gate 	int i;
4730Sstevel@tonic-gate 
4740Sstevel@tonic-gate 	/*
4753446Smrj 	 * If CPU enabled the page table global bit, use it for the kernel
4763446Smrj 	 * This is bit 7 in CR4 (PGE - Page Global Enable).
4770Sstevel@tonic-gate 	 */
4783446Smrj 	if ((x86_feature & X86_PGE) != 0 && (getcr4() & CR4_PGE) != 0)
4790Sstevel@tonic-gate 		mmu.pt_global = PT_GLOBAL;
4800Sstevel@tonic-gate 
4810Sstevel@tonic-gate 	/*
4823446Smrj 	 * Detect NX and PAE usage.
4830Sstevel@tonic-gate 	 */
4843446Smrj 	mmu.pae_hat = kbm_pae_support;
4853446Smrj 	if (kbm_nx_support)
4860Sstevel@tonic-gate 		mmu.pt_nx = PT_NX;
4873446Smrj 	else
4880Sstevel@tonic-gate 		mmu.pt_nx = 0;
4890Sstevel@tonic-gate 
4900Sstevel@tonic-gate 	/*
4910Sstevel@tonic-gate 	 * Use CPU info to set various MMU parameters
4920Sstevel@tonic-gate 	 */
4930Sstevel@tonic-gate 	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
4940Sstevel@tonic-gate 
4950Sstevel@tonic-gate 	if (va_bits < sizeof (void *) * NBBY) {
4960Sstevel@tonic-gate 		mmu.hole_start = (1ul << (va_bits - 1));
4970Sstevel@tonic-gate 		mmu.hole_end = 0ul - mmu.hole_start - 1;
4980Sstevel@tonic-gate 	} else {
4990Sstevel@tonic-gate 		mmu.hole_end = 0;
5000Sstevel@tonic-gate 		mmu.hole_start = mmu.hole_end - 1;
5010Sstevel@tonic-gate 	}
5020Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_121)
5030Sstevel@tonic-gate 	/*
5040Sstevel@tonic-gate 	 * If erratum 121 has already been detected at this time, hole_start
5050Sstevel@tonic-gate 	 * contains the value to be subtracted from mmu.hole_start.
5060Sstevel@tonic-gate 	 */
5070Sstevel@tonic-gate 	ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
5080Sstevel@tonic-gate 	hole_start = mmu.hole_start - hole_start;
5090Sstevel@tonic-gate #else
5100Sstevel@tonic-gate 	hole_start = mmu.hole_start;
5110Sstevel@tonic-gate #endif
5120Sstevel@tonic-gate 	hole_end = mmu.hole_end;
5130Sstevel@tonic-gate 
5140Sstevel@tonic-gate 	mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
5150Sstevel@tonic-gate 	if (mmu.pae_hat == 0 && pa_bits > 32)
5160Sstevel@tonic-gate 		mmu.highest_pfn = PFN_4G - 1;
5170Sstevel@tonic-gate 
5180Sstevel@tonic-gate 	if (mmu.pae_hat) {
5190Sstevel@tonic-gate 		mmu.pte_size = 8;	/* 8 byte PTEs */
5200Sstevel@tonic-gate 		mmu.pte_size_shift = 3;
5210Sstevel@tonic-gate 	} else {
5220Sstevel@tonic-gate 		mmu.pte_size = 4;	/* 4 byte PTEs */
5230Sstevel@tonic-gate 		mmu.pte_size_shift = 2;
5240Sstevel@tonic-gate 	}
5250Sstevel@tonic-gate 
5260Sstevel@tonic-gate 	if (mmu.pae_hat && (x86_feature & X86_PAE) == 0)
5270Sstevel@tonic-gate 		panic("Processor does not support PAE");
5280Sstevel@tonic-gate 
5290Sstevel@tonic-gate 	if ((x86_feature & X86_CX8) == 0)
5300Sstevel@tonic-gate 		panic("Processor does not support cmpxchg8b instruction");
5310Sstevel@tonic-gate 
5320Sstevel@tonic-gate 	/*
5330Sstevel@tonic-gate 	 * Initialize parameters based on the 64 or 32 bit kernels and
5340Sstevel@tonic-gate 	 * for the 32 bit kernel decide if we should use PAE.
5350Sstevel@tonic-gate 	 */
5365349Skchow 	if (kbm_largepage_support) {
5375349Skchow 		if (x86_feature & X86_1GPG) {
5385349Skchow 			mmu.max_page_level = 2;
5395349Skchow 			mmu.umax_page_level = (enable_1gpg) ? 2 : 1;
5405349Skchow 		} else {
5415349Skchow 			mmu.max_page_level = 1;
5425349Skchow 			mmu.umax_page_level = 1;
5435349Skchow 		}
5445349Skchow 	} else {
5450Sstevel@tonic-gate 		mmu.max_page_level = 0;
5465349Skchow 		mmu.umax_page_level = 0;
5475349Skchow 	}
5480Sstevel@tonic-gate 	mmu_page_sizes = mmu.max_page_level + 1;
5495349Skchow 	mmu_exported_page_sizes = mmu.umax_page_level + 1;
5505349Skchow 
5515349Skchow 	/* restrict legacy applications from using pagesizes 1g and above */
5525349Skchow 	mmu_legacy_page_sizes =
5535349Skchow 	    (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;
5545349Skchow 
5550Sstevel@tonic-gate 
5560Sstevel@tonic-gate #if defined(__amd64)
5570Sstevel@tonic-gate 
5580Sstevel@tonic-gate 	mmu.num_level = 4;
5590Sstevel@tonic-gate 	mmu.max_level = 3;
5600Sstevel@tonic-gate 	mmu.ptes_per_table = 512;
5610Sstevel@tonic-gate 	mmu.top_level_count = 512;
5620Sstevel@tonic-gate 
5630Sstevel@tonic-gate 	mmu.level_shift[0] = 12;
5640Sstevel@tonic-gate 	mmu.level_shift[1] = 21;
5650Sstevel@tonic-gate 	mmu.level_shift[2] = 30;
5660Sstevel@tonic-gate 	mmu.level_shift[3] = 39;
5670Sstevel@tonic-gate 
5680Sstevel@tonic-gate #elif defined(__i386)
5690Sstevel@tonic-gate 
5700Sstevel@tonic-gate 	if (mmu.pae_hat) {
5710Sstevel@tonic-gate 		mmu.num_level = 3;
5720Sstevel@tonic-gate 		mmu.max_level = 2;
5730Sstevel@tonic-gate 		mmu.ptes_per_table = 512;
5740Sstevel@tonic-gate 		mmu.top_level_count = 4;
5750Sstevel@tonic-gate 
5760Sstevel@tonic-gate 		mmu.level_shift[0] = 12;
5770Sstevel@tonic-gate 		mmu.level_shift[1] = 21;
5780Sstevel@tonic-gate 		mmu.level_shift[2] = 30;
5790Sstevel@tonic-gate 
5800Sstevel@tonic-gate 	} else {
5810Sstevel@tonic-gate 		mmu.num_level = 2;
5820Sstevel@tonic-gate 		mmu.max_level = 1;
5830Sstevel@tonic-gate 		mmu.ptes_per_table = 1024;
5840Sstevel@tonic-gate 		mmu.top_level_count = 1024;
5850Sstevel@tonic-gate 
5860Sstevel@tonic-gate 		mmu.level_shift[0] = 12;
5870Sstevel@tonic-gate 		mmu.level_shift[1] = 22;
5880Sstevel@tonic-gate 	}
5890Sstevel@tonic-gate 
5900Sstevel@tonic-gate #endif	/* __i386 */
5910Sstevel@tonic-gate 
5920Sstevel@tonic-gate 	for (i = 0; i < mmu.num_level; ++i) {
5930Sstevel@tonic-gate 		mmu.level_size[i] = 1UL << mmu.level_shift[i];
5940Sstevel@tonic-gate 		mmu.level_offset[i] = mmu.level_size[i] - 1;
5950Sstevel@tonic-gate 		mmu.level_mask[i] = ~mmu.level_offset[i];
5960Sstevel@tonic-gate 	}
5970Sstevel@tonic-gate 
5983446Smrj 	for (i = 0; i <= mmu.max_page_level; ++i) {
5995316Sjohnlev 		mmu.pte_bits[i] = PT_VALID | pt_kern;
6003446Smrj 		if (i > 0)
6013446Smrj 			mmu.pte_bits[i] |= PT_PAGESIZE;
6023446Smrj 	}
6030Sstevel@tonic-gate 
6040Sstevel@tonic-gate 	/*
6050Sstevel@tonic-gate 	 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
6060Sstevel@tonic-gate 	 */
6070Sstevel@tonic-gate 	for (i = 1; i < mmu.num_level; ++i)
6080Sstevel@tonic-gate 		mmu.ptp_bits[i] = PT_PTPBITS;
6093446Smrj 
6100Sstevel@tonic-gate #if defined(__i386)
6110Sstevel@tonic-gate 	mmu.ptp_bits[2] = PT_VALID;
6120Sstevel@tonic-gate #endif
6130Sstevel@tonic-gate 
6140Sstevel@tonic-gate 	/*
6150Sstevel@tonic-gate 	 * Compute how many hash table entries to have per process for htables.
6160Sstevel@tonic-gate 	 * We start with 1 page's worth of entries.
6170Sstevel@tonic-gate 	 *
6180Sstevel@tonic-gate 	 * If physical memory is small, reduce the amount need to cover it.
6190Sstevel@tonic-gate 	 */
6200Sstevel@tonic-gate 	max_htables = physmax / mmu.ptes_per_table;
6210Sstevel@tonic-gate 	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
6220Sstevel@tonic-gate 	while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
6230Sstevel@tonic-gate 		mmu.hash_cnt >>= 1;
6240Sstevel@tonic-gate 	mmu.vlp_hash_cnt = mmu.hash_cnt;
6250Sstevel@tonic-gate 
6260Sstevel@tonic-gate #if defined(__amd64)
6270Sstevel@tonic-gate 	/*
6280Sstevel@tonic-gate 	 * If running in 64 bits and physical memory is large,
6290Sstevel@tonic-gate 	 * increase the size of the cache to cover all of memory for
6300Sstevel@tonic-gate 	 * a 64 bit process.
6310Sstevel@tonic-gate 	 */
6320Sstevel@tonic-gate #define	HASH_MAX_LENGTH 4
6330Sstevel@tonic-gate 	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
6340Sstevel@tonic-gate 		mmu.hash_cnt <<= 1;
6350Sstevel@tonic-gate #endif
6360Sstevel@tonic-gate }
6370Sstevel@tonic-gate 
6380Sstevel@tonic-gate 
6390Sstevel@tonic-gate /*
6400Sstevel@tonic-gate  * initialize hat data structures
6410Sstevel@tonic-gate  */
6420Sstevel@tonic-gate void
6430Sstevel@tonic-gate hat_init()
6440Sstevel@tonic-gate {
6450Sstevel@tonic-gate #if defined(__i386)
6460Sstevel@tonic-gate 	/*
6470Sstevel@tonic-gate 	 * _userlimit must be aligned correctly
6480Sstevel@tonic-gate 	 */
6490Sstevel@tonic-gate 	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
6500Sstevel@tonic-gate 		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
6510Sstevel@tonic-gate 		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
6520Sstevel@tonic-gate 		halt("hat_init(): Unable to continue");
6530Sstevel@tonic-gate 	}
6540Sstevel@tonic-gate #endif
6550Sstevel@tonic-gate 
6560Sstevel@tonic-gate 	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
6570Sstevel@tonic-gate 
6580Sstevel@tonic-gate 	/*
6590Sstevel@tonic-gate 	 * initialize kmem caches
6600Sstevel@tonic-gate 	 */
6610Sstevel@tonic-gate 	htable_init();
6620Sstevel@tonic-gate 	hment_init();
6630Sstevel@tonic-gate 
6640Sstevel@tonic-gate 	hat_cache = kmem_cache_create("hat_t",
6650Sstevel@tonic-gate 	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
6660Sstevel@tonic-gate 	    NULL, 0, 0);
6670Sstevel@tonic-gate 
6680Sstevel@tonic-gate 	hat_hash_cache = kmem_cache_create("HatHash",
6690Sstevel@tonic-gate 	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
6700Sstevel@tonic-gate 	    NULL, 0, 0);
6710Sstevel@tonic-gate 
6720Sstevel@tonic-gate 	/*
6730Sstevel@tonic-gate 	 * VLP hats can use a smaller hash table size on large memroy machines
6740Sstevel@tonic-gate 	 */
6750Sstevel@tonic-gate 	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
6760Sstevel@tonic-gate 		vlp_hash_cache = hat_hash_cache;
6770Sstevel@tonic-gate 	} else {
6780Sstevel@tonic-gate 		vlp_hash_cache = kmem_cache_create("HatVlpHash",
6790Sstevel@tonic-gate 		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
6800Sstevel@tonic-gate 		    NULL, 0, 0);
6810Sstevel@tonic-gate 	}
6820Sstevel@tonic-gate 
6830Sstevel@tonic-gate 	/*
6840Sstevel@tonic-gate 	 * Set up the kernel's hat
6850Sstevel@tonic-gate 	 */
6860Sstevel@tonic-gate 	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
6870Sstevel@tonic-gate 	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
6880Sstevel@tonic-gate 	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
6890Sstevel@tonic-gate 	kas.a_hat->hat_as = &kas;
6900Sstevel@tonic-gate 	kas.a_hat->hat_flags = 0;
6910Sstevel@tonic-gate 	AS_LOCK_EXIT(&kas, &kas.a_lock);
6920Sstevel@tonic-gate 
6930Sstevel@tonic-gate 	CPUSET_ZERO(khat_cpuset);
6940Sstevel@tonic-gate 	CPUSET_ADD(khat_cpuset, CPU->cpu_id);
6950Sstevel@tonic-gate 
6960Sstevel@tonic-gate 	/*
6970Sstevel@tonic-gate 	 * The kernel hat's next pointer serves as the head of the hat list .
6981747Sjosephb 	 * The kernel hat's prev pointer tracks the last hat on the list for
6991747Sjosephb 	 * htable_steal() to use.
7000Sstevel@tonic-gate 	 */
7010Sstevel@tonic-gate 	kas.a_hat->hat_next = NULL;
7021747Sjosephb 	kas.a_hat->hat_prev = NULL;
7030Sstevel@tonic-gate 
7040Sstevel@tonic-gate 	/*
7050Sstevel@tonic-gate 	 * Allocate an htable hash bucket for the kernel
7060Sstevel@tonic-gate 	 * XX64 - tune for 64 bit procs
7070Sstevel@tonic-gate 	 */
7080Sstevel@tonic-gate 	kas.a_hat->hat_num_hash = mmu.hash_cnt;
7090Sstevel@tonic-gate 	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
7100Sstevel@tonic-gate 	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
7110Sstevel@tonic-gate 
7120Sstevel@tonic-gate 	/*
7130Sstevel@tonic-gate 	 * zero out the top level and cached htable pointers
7140Sstevel@tonic-gate 	 */
7150Sstevel@tonic-gate 	kas.a_hat->hat_ht_cached = NULL;
7160Sstevel@tonic-gate 	kas.a_hat->hat_htable = NULL;
7173258Strevtom 
7183258Strevtom 	/*
7193258Strevtom 	 * Pre-allocate hrm_hashtab before enabling the collection of
7203258Strevtom 	 * refmod statistics.  Allocating on the fly would mean us
7213258Strevtom 	 * running the risk of suffering recursive mutex enters or
7223258Strevtom 	 * deadlocks.
7233258Strevtom 	 */
7243258Strevtom 	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
7253258Strevtom 	    KM_SLEEP);
7260Sstevel@tonic-gate }
7270Sstevel@tonic-gate 
7280Sstevel@tonic-gate /*
7290Sstevel@tonic-gate  * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
7300Sstevel@tonic-gate  *
7310Sstevel@tonic-gate  * Each CPU has a set of 2 pagetables that are reused for any 32 bit
7320Sstevel@tonic-gate  * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
7330Sstevel@tonic-gate  * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
7340Sstevel@tonic-gate  */
7350Sstevel@tonic-gate /*ARGSUSED*/
7360Sstevel@tonic-gate static void
7370Sstevel@tonic-gate hat_vlp_setup(struct cpu *cpu)
7380Sstevel@tonic-gate {
7395084Sjohnlev #if defined(__amd64) && !defined(__xpv)
7400Sstevel@tonic-gate 	struct hat_cpu_info *hci = cpu->cpu_hat_info;
7410Sstevel@tonic-gate 	pfn_t pfn;
7420Sstevel@tonic-gate 
7430Sstevel@tonic-gate 	/*
7440Sstevel@tonic-gate 	 * allocate the level==2 page table for the bottom most
7450Sstevel@tonic-gate 	 * 512Gig of address space (this is where 32 bit apps live)
7460Sstevel@tonic-gate 	 */
7470Sstevel@tonic-gate 	ASSERT(hci != NULL);
7480Sstevel@tonic-gate 	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
7490Sstevel@tonic-gate 
7500Sstevel@tonic-gate 	/*
7510Sstevel@tonic-gate 	 * Allocate a top level pagetable and copy the kernel's
7520Sstevel@tonic-gate 	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
7530Sstevel@tonic-gate 	 */
7540Sstevel@tonic-gate 	hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
7550Sstevel@tonic-gate 	hci->hci_vlp_pfn =
7560Sstevel@tonic-gate 	    hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
7570Sstevel@tonic-gate 	ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
7585084Sjohnlev 	bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);
7590Sstevel@tonic-gate 
7600Sstevel@tonic-gate 	pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
7610Sstevel@tonic-gate 	ASSERT(pfn != PFN_INVALID);
7620Sstevel@tonic-gate 	hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
7635084Sjohnlev #endif /* __amd64 && !__xpv */
7640Sstevel@tonic-gate }
7650Sstevel@tonic-gate 
7663446Smrj /*ARGSUSED*/
7673446Smrj static void
7683446Smrj hat_vlp_teardown(cpu_t *cpu)
7693446Smrj {
7705084Sjohnlev #if defined(__amd64) && !defined(__xpv)
7713446Smrj 	struct hat_cpu_info *hci;
7723446Smrj 
7733446Smrj 	if ((hci = cpu->cpu_hat_info) == NULL)
7743446Smrj 		return;
7753446Smrj 	if (hci->hci_vlp_l2ptes)
7763446Smrj 		kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
7773446Smrj 	if (hci->hci_vlp_l3ptes)
7783446Smrj 		kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
7795084Sjohnlev #endif
7805084Sjohnlev }
7815084Sjohnlev 
7825084Sjohnlev #define	NEXT_HKR(r, l, s, e) {			\
7835084Sjohnlev 	kernel_ranges[r].hkr_level = l;		\
7845084Sjohnlev 	kernel_ranges[r].hkr_start_va = s;	\
7855084Sjohnlev 	kernel_ranges[r].hkr_end_va = e;	\
7865084Sjohnlev 	++r;					\
7873446Smrj }
7883446Smrj 
7890Sstevel@tonic-gate /*
7900Sstevel@tonic-gate  * Finish filling in the kernel hat.
7910Sstevel@tonic-gate  * Pre fill in all top level kernel page table entries for the kernel's
7920Sstevel@tonic-gate  * part of the address range.  From this point on we can't use any new
7930Sstevel@tonic-gate  * kernel large pages if they need PTE's at max_level
7943446Smrj  *
7953446Smrj  * create the kmap mappings.
7960Sstevel@tonic-gate  */
7970Sstevel@tonic-gate void
7980Sstevel@tonic-gate hat_init_finish(void)
7990Sstevel@tonic-gate {
8003446Smrj 	size_t		size;
8015084Sjohnlev 	uint_t		r = 0;
8025084Sjohnlev 	uintptr_t	va;
8035084Sjohnlev 	hat_kernel_range_t *rp;
8045084Sjohnlev 
8050Sstevel@tonic-gate 
8060Sstevel@tonic-gate 	/*
8070Sstevel@tonic-gate 	 * We are now effectively running on the kernel hat.
8080Sstevel@tonic-gate 	 * Clearing use_boot_reserve shuts off using the pre-allocated boot
8090Sstevel@tonic-gate 	 * reserve for all HAT allocations.  From here on, the reserves are
8105084Sjohnlev 	 * only used when avoiding recursion in kmem_alloc().
8110Sstevel@tonic-gate 	 */
8120Sstevel@tonic-gate 	use_boot_reserve = 0;
8130Sstevel@tonic-gate 	htable_adjust_reserve();
8140Sstevel@tonic-gate 
8150Sstevel@tonic-gate 	/*
8165084Sjohnlev 	 * User HATs are initialized with copies of all kernel mappings in
8175084Sjohnlev 	 * higher level page tables. Ensure that those entries exist.
8185084Sjohnlev 	 */
8195084Sjohnlev #if defined(__amd64)
8205084Sjohnlev 
8215084Sjohnlev 	NEXT_HKR(r, 3, kernelbase, 0);
8225084Sjohnlev #if defined(__xpv)
8235084Sjohnlev 	NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
8245084Sjohnlev #endif
8255084Sjohnlev 
8265084Sjohnlev #elif defined(__i386)
8275084Sjohnlev 
8285084Sjohnlev #if !defined(__xpv)
8295084Sjohnlev 	if (mmu.pae_hat) {
8305084Sjohnlev 		va = kernelbase;
8315084Sjohnlev 		if ((va & LEVEL_MASK(2)) != va) {
8325084Sjohnlev 			va = P2ROUNDUP(va, LEVEL_SIZE(2));
8335084Sjohnlev 			NEXT_HKR(r, 1, kernelbase, va);
8345084Sjohnlev 		}
8355084Sjohnlev 		if (va != 0)
8365084Sjohnlev 			NEXT_HKR(r, 2, va, 0);
8375084Sjohnlev 	} else
8385084Sjohnlev #endif /* __xpv */
8395084Sjohnlev 		NEXT_HKR(r, 1, kernelbase, 0);
8405084Sjohnlev 
8415084Sjohnlev #endif /* __i386 */
8425084Sjohnlev 
8435084Sjohnlev 	num_kernel_ranges = r;
8445084Sjohnlev 
8455084Sjohnlev 	/*
8465084Sjohnlev 	 * Create all the kernel pagetables that will have entries
8475084Sjohnlev 	 * shared to user HATs.
8485084Sjohnlev 	 */
8495084Sjohnlev 	for (r = 0; r < num_kernel_ranges; ++r) {
8505084Sjohnlev 		rp = &kernel_ranges[r];
8515084Sjohnlev 		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
8525084Sjohnlev 		    va += LEVEL_SIZE(rp->hkr_level)) {
8535084Sjohnlev 			htable_t *ht;
8545084Sjohnlev 
8555084Sjohnlev 			if (IN_HYPERVISOR_VA(va))
8565084Sjohnlev 				continue;
8575084Sjohnlev 
8585084Sjohnlev 			/* can/must skip if a page mapping already exists */
8595084Sjohnlev 			if (rp->hkr_level <= mmu.max_page_level &&
8605084Sjohnlev 			    (ht = htable_getpage(kas.a_hat, va, NULL)) !=
8615084Sjohnlev 			    NULL) {
8625084Sjohnlev 				htable_release(ht);
8635084Sjohnlev 				continue;
8645084Sjohnlev 			}
8655084Sjohnlev 
8665084Sjohnlev 			(void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
8675084Sjohnlev 			    NULL);
8685084Sjohnlev 		}
8695084Sjohnlev 	}
8705084Sjohnlev 
8715084Sjohnlev 	/*
8725084Sjohnlev 	 * 32 bit PAE metal kernels use only 4 of the 512 entries in the
8735084Sjohnlev 	 * page holding the top level pagetable. We use the remainder for
8745084Sjohnlev 	 * the "per CPU" page tables for VLP processes.
8755084Sjohnlev 	 * Map the top level kernel pagetable into the kernel to make
8765084Sjohnlev 	 * it easy to use bcopy access these tables.
8770Sstevel@tonic-gate 	 */
8780Sstevel@tonic-gate 	if (mmu.pae_hat) {
8790Sstevel@tonic-gate 		vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
8800Sstevel@tonic-gate 		hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
8810Sstevel@tonic-gate 		    kas.a_hat->hat_htable->ht_pfn,
8825084Sjohnlev #if !defined(__xpv)
8833446Smrj 		    PROT_WRITE |
8845084Sjohnlev #endif
8853446Smrj 		    PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
8860Sstevel@tonic-gate 		    HAT_LOAD | HAT_LOAD_NOCONSIST);
8870Sstevel@tonic-gate 	}
8880Sstevel@tonic-gate 	hat_vlp_setup(CPU);
8893446Smrj 
8903446Smrj 	/*
8913446Smrj 	 * Create kmap (cached mappings of kernel PTEs)
8923446Smrj 	 * for 32 bit we map from segmap_start .. ekernelheap
8933446Smrj 	 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
8943446Smrj 	 */
8953446Smrj #if defined(__i386)
8963446Smrj 	size = (uintptr_t)ekernelheap - segmap_start;
8973446Smrj #elif defined(__amd64)
8983446Smrj 	size = segmapsize;
8993446Smrj #endif
9003446Smrj 	hat_kmap_init((uintptr_t)segmap_start, size);
9010Sstevel@tonic-gate }
9020Sstevel@tonic-gate 
9030Sstevel@tonic-gate /*
9040Sstevel@tonic-gate  * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
9050Sstevel@tonic-gate  * are 32 bit, so for safety we must use cas64() to install these.
9060Sstevel@tonic-gate  */
9070Sstevel@tonic-gate #ifdef __i386
9080Sstevel@tonic-gate static void
9090Sstevel@tonic-gate reload_pae32(hat_t *hat, cpu_t *cpu)
9100Sstevel@tonic-gate {
9110Sstevel@tonic-gate 	x86pte_t *src;
9120Sstevel@tonic-gate 	x86pte_t *dest;
9130Sstevel@tonic-gate 	x86pte_t pte;
9140Sstevel@tonic-gate 	int i;
9150Sstevel@tonic-gate 
9160Sstevel@tonic-gate 	/*
9170Sstevel@tonic-gate 	 * Load the 4 entries of the level 2 page table into this
9180Sstevel@tonic-gate 	 * cpu's range of the vlp_page and point cr3 at them.
9190Sstevel@tonic-gate 	 */
9200Sstevel@tonic-gate 	ASSERT(mmu.pae_hat);
9210Sstevel@tonic-gate 	src = hat->hat_vlp_ptes;
9220Sstevel@tonic-gate 	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
9230Sstevel@tonic-gate 	for (i = 0; i < VLP_NUM_PTES; ++i) {
9240Sstevel@tonic-gate 		for (;;) {
9250Sstevel@tonic-gate 			pte = dest[i];
9260Sstevel@tonic-gate 			if (pte == src[i])
9270Sstevel@tonic-gate 				break;
9280Sstevel@tonic-gate 			if (cas64(dest + i, pte, src[i]) != src[i])
9290Sstevel@tonic-gate 				break;
9300Sstevel@tonic-gate 		}
9310Sstevel@tonic-gate 	}
9320Sstevel@tonic-gate }
9330Sstevel@tonic-gate #endif
9340Sstevel@tonic-gate 
9350Sstevel@tonic-gate /*
9360Sstevel@tonic-gate  * Switch to a new active hat, maintaining bit masks to track active CPUs.
9375084Sjohnlev  *
9385084Sjohnlev  * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
9395084Sjohnlev  * remains a 32-bit value.
9400Sstevel@tonic-gate  */
9410Sstevel@tonic-gate void
9420Sstevel@tonic-gate hat_switch(hat_t *hat)
9430Sstevel@tonic-gate {
9445084Sjohnlev 	uint64_t	newcr3;
9450Sstevel@tonic-gate 	cpu_t		*cpu = CPU;
9460Sstevel@tonic-gate 	hat_t		*old = cpu->cpu_current_hat;
9470Sstevel@tonic-gate 
9480Sstevel@tonic-gate 	/*
9490Sstevel@tonic-gate 	 * set up this information first, so we don't miss any cross calls
9500Sstevel@tonic-gate 	 */
9510Sstevel@tonic-gate 	if (old != NULL) {
9520Sstevel@tonic-gate 		if (old == hat)
9530Sstevel@tonic-gate 			return;
9540Sstevel@tonic-gate 		if (old != kas.a_hat)
9550Sstevel@tonic-gate 			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
9560Sstevel@tonic-gate 	}
9570Sstevel@tonic-gate 
9580Sstevel@tonic-gate 	/*
9594191Sjosephb 	 * Add this CPU to the active set for this HAT.
9600Sstevel@tonic-gate 	 */
9610Sstevel@tonic-gate 	if (hat != kas.a_hat) {
9620Sstevel@tonic-gate 		CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
9630Sstevel@tonic-gate 	}
9640Sstevel@tonic-gate 	cpu->cpu_current_hat = hat;
9650Sstevel@tonic-gate 
9660Sstevel@tonic-gate 	/*
9670Sstevel@tonic-gate 	 * now go ahead and load cr3
9680Sstevel@tonic-gate 	 */
9690Sstevel@tonic-gate 	if (hat->hat_flags & HAT_VLP) {
9700Sstevel@tonic-gate #if defined(__amd64)
9710Sstevel@tonic-gate 		x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
9720Sstevel@tonic-gate 
9730Sstevel@tonic-gate 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
9740Sstevel@tonic-gate 		newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
9750Sstevel@tonic-gate #elif defined(__i386)
9760Sstevel@tonic-gate 		reload_pae32(hat, cpu);
9770Sstevel@tonic-gate 		newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
9780Sstevel@tonic-gate 		    (cpu->cpu_id + 1) * VLP_SIZE;
9790Sstevel@tonic-gate #endif
9800Sstevel@tonic-gate 	} else {
9815084Sjohnlev 		newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
9820Sstevel@tonic-gate 	}
9835084Sjohnlev #ifdef __xpv
9845084Sjohnlev 	{
9855084Sjohnlev 		struct mmuext_op t[2];
9865084Sjohnlev 		uint_t retcnt;
9875084Sjohnlev 		uint_t opcnt = 1;
9885084Sjohnlev 
9895084Sjohnlev 		t[0].cmd = MMUEXT_NEW_BASEPTR;
9905084Sjohnlev 		t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
9915084Sjohnlev #if defined(__amd64)
9925084Sjohnlev 		/*
9935084Sjohnlev 		 * There's an interesting problem here, as to what to
9945084Sjohnlev 		 * actually specify when switching to the kernel hat.
9955084Sjohnlev 		 * For now we'll reuse the kernel hat again.
9965084Sjohnlev 		 */
9975084Sjohnlev 		t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
9985084Sjohnlev 		if (hat == kas.a_hat)
9995084Sjohnlev 			t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
10005084Sjohnlev 		else
10015084Sjohnlev 			t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
10025084Sjohnlev 		++opcnt;
10035084Sjohnlev #endif	/* __amd64 */
10045084Sjohnlev 		if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
10055084Sjohnlev 			panic("HYPERVISOR_mmu_update() failed");
10065084Sjohnlev 		ASSERT(retcnt == opcnt);
10075084Sjohnlev 
10085084Sjohnlev 	}
10095084Sjohnlev #else
10100Sstevel@tonic-gate 	setcr3(newcr3);
10115084Sjohnlev #endif
10120Sstevel@tonic-gate 	ASSERT(cpu == CPU);
10130Sstevel@tonic-gate }
10140Sstevel@tonic-gate 
10150Sstevel@tonic-gate /*
10160Sstevel@tonic-gate  * Utility to return a valid x86pte_t from protections, pfn, and level number
10170Sstevel@tonic-gate  */
10180Sstevel@tonic-gate static x86pte_t
10190Sstevel@tonic-gate hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
10200Sstevel@tonic-gate {
10210Sstevel@tonic-gate 	x86pte_t	pte;
10220Sstevel@tonic-gate 	uint_t		cache_attr = attr & HAT_ORDER_MASK;
10230Sstevel@tonic-gate 
10240Sstevel@tonic-gate 	pte = MAKEPTE(pfn, level);
10250Sstevel@tonic-gate 
10260Sstevel@tonic-gate 	if (attr & PROT_WRITE)
10270Sstevel@tonic-gate 		PTE_SET(pte, PT_WRITABLE);
10280Sstevel@tonic-gate 
10290Sstevel@tonic-gate 	if (attr & PROT_USER)
10300Sstevel@tonic-gate 		PTE_SET(pte, PT_USER);
10310Sstevel@tonic-gate 
10320Sstevel@tonic-gate 	if (!(attr & PROT_EXEC))
10330Sstevel@tonic-gate 		PTE_SET(pte, mmu.pt_nx);
10340Sstevel@tonic-gate 
10350Sstevel@tonic-gate 	/*
10363446Smrj 	 * Set the software bits used track ref/mod sync's and hments.
10373446Smrj 	 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
10380Sstevel@tonic-gate 	 */
10390Sstevel@tonic-gate 	if (flags & HAT_LOAD_NOCONSIST)
10403446Smrj 		PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
10413446Smrj 	else if (attr & HAT_NOSYNC)
10423446Smrj 		PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);
10430Sstevel@tonic-gate 
10440Sstevel@tonic-gate 	/*
10450Sstevel@tonic-gate 	 * Set the caching attributes in the PTE. The combination
10460Sstevel@tonic-gate 	 * of attributes are poorly defined, so we pay attention
10470Sstevel@tonic-gate 	 * to them in the given order.
10480Sstevel@tonic-gate 	 *
10490Sstevel@tonic-gate 	 * The test for HAT_STRICTORDER is different because it's defined
10500Sstevel@tonic-gate 	 * as "0" - which was a stupid thing to do, but is too late to change!
10510Sstevel@tonic-gate 	 */
10520Sstevel@tonic-gate 	if (cache_attr == HAT_STRICTORDER) {
10530Sstevel@tonic-gate 		PTE_SET(pte, PT_NOCACHE);
10540Sstevel@tonic-gate 	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
10550Sstevel@tonic-gate 	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
10560Sstevel@tonic-gate 		/* nothing to set */;
10570Sstevel@tonic-gate 	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
10580Sstevel@tonic-gate 		PTE_SET(pte, PT_NOCACHE);
10590Sstevel@tonic-gate 		if (x86_feature & X86_PAT)
10600Sstevel@tonic-gate 			PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
10610Sstevel@tonic-gate 		else
10620Sstevel@tonic-gate 			PTE_SET(pte, PT_WRITETHRU);
10630Sstevel@tonic-gate 	} else {
10640Sstevel@tonic-gate 		panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
10650Sstevel@tonic-gate 	}
10660Sstevel@tonic-gate 
10670Sstevel@tonic-gate 	return (pte);
10680Sstevel@tonic-gate }
10690Sstevel@tonic-gate 
10700Sstevel@tonic-gate /*
10710Sstevel@tonic-gate  * Duplicate address translations of the parent to the child.
10720Sstevel@tonic-gate  * This function really isn't used anymore.
10730Sstevel@tonic-gate  */
10740Sstevel@tonic-gate /*ARGSUSED*/
10750Sstevel@tonic-gate int
10760Sstevel@tonic-gate hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
10770Sstevel@tonic-gate {
10780Sstevel@tonic-gate 	ASSERT((uintptr_t)addr < kernelbase);
10790Sstevel@tonic-gate 	ASSERT(new != kas.a_hat);
10800Sstevel@tonic-gate 	ASSERT(old != kas.a_hat);
10810Sstevel@tonic-gate 	return (0);
10820Sstevel@tonic-gate }
10830Sstevel@tonic-gate 
10840Sstevel@tonic-gate /*
10850Sstevel@tonic-gate  * Allocate any hat resources required for a process being swapped in.
10860Sstevel@tonic-gate  */
10870Sstevel@tonic-gate /*ARGSUSED*/
10880Sstevel@tonic-gate void
10890Sstevel@tonic-gate hat_swapin(hat_t *hat)
10900Sstevel@tonic-gate {
10910Sstevel@tonic-gate 	/* do nothing - we let everything fault back in */
10920Sstevel@tonic-gate }
10930Sstevel@tonic-gate 
10940Sstevel@tonic-gate /*
10950Sstevel@tonic-gate  * Unload all translations associated with an address space of a process
10960Sstevel@tonic-gate  * that is being swapped out.
10970Sstevel@tonic-gate  */
10980Sstevel@tonic-gate void
10990Sstevel@tonic-gate hat_swapout(hat_t *hat)
11000Sstevel@tonic-gate {
11010Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)0;
11020Sstevel@tonic-gate 	uintptr_t	eaddr = _userlimit;
11030Sstevel@tonic-gate 	htable_t	*ht = NULL;
11040Sstevel@tonic-gate 	level_t		l;
11050Sstevel@tonic-gate 
11065084Sjohnlev 	XPV_DISALLOW_MIGRATE();
11070Sstevel@tonic-gate 	/*
11080Sstevel@tonic-gate 	 * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
11090Sstevel@tonic-gate 	 * seg_spt and shared pagetables can't be swapped out.
11100Sstevel@tonic-gate 	 * Take a look at segspt_shmswapout() - it's a big no-op.
11110Sstevel@tonic-gate 	 *
11120Sstevel@tonic-gate 	 * Instead we'll walk through all the address space and unload
11130Sstevel@tonic-gate 	 * any mappings which we are sure are not shared, not locked.
11140Sstevel@tonic-gate 	 */
11150Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
11160Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
11170Sstevel@tonic-gate 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
11180Sstevel@tonic-gate 	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
11190Sstevel@tonic-gate 		eaddr = (uintptr_t)hat->hat_as->a_userlimit;
11200Sstevel@tonic-gate 
11210Sstevel@tonic-gate 	while (vaddr < eaddr) {
11220Sstevel@tonic-gate 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
11230Sstevel@tonic-gate 		if (ht == NULL)
11240Sstevel@tonic-gate 			break;
11250Sstevel@tonic-gate 
11260Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
11270Sstevel@tonic-gate 
11280Sstevel@tonic-gate 		/*
11290Sstevel@tonic-gate 		 * If the page table is shared skip its entire range.
11300Sstevel@tonic-gate 		 * This code knows that only level 0 page tables are shared
11310Sstevel@tonic-gate 		 */
11320Sstevel@tonic-gate 		l = ht->ht_level;
11330Sstevel@tonic-gate 		if (ht->ht_flags & HTABLE_SHARED_PFN) {
11340Sstevel@tonic-gate 			ASSERT(l == 0);
11350Sstevel@tonic-gate 			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
11360Sstevel@tonic-gate 			htable_release(ht);
11370Sstevel@tonic-gate 			ht = NULL;
11380Sstevel@tonic-gate 			continue;
11390Sstevel@tonic-gate 		}
11400Sstevel@tonic-gate 
11410Sstevel@tonic-gate 		/*
11420Sstevel@tonic-gate 		 * If the page table has no locked entries, unload this one.
11430Sstevel@tonic-gate 		 */
11440Sstevel@tonic-gate 		if (ht->ht_lock_cnt == 0)
11450Sstevel@tonic-gate 			hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
11460Sstevel@tonic-gate 			    HAT_UNLOAD_UNMAP);
11470Sstevel@tonic-gate 
11480Sstevel@tonic-gate 		/*
11490Sstevel@tonic-gate 		 * If we have a level 0 page table with locked entries,
11500Sstevel@tonic-gate 		 * skip the entire page table, otherwise skip just one entry.
11510Sstevel@tonic-gate 		 */
11520Sstevel@tonic-gate 		if (ht->ht_lock_cnt > 0 && l == 0)
11530Sstevel@tonic-gate 			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
11540Sstevel@tonic-gate 		else
11550Sstevel@tonic-gate 			vaddr += LEVEL_SIZE(l);
11560Sstevel@tonic-gate 	}
11570Sstevel@tonic-gate 	if (ht)
11580Sstevel@tonic-gate 		htable_release(ht);
11590Sstevel@tonic-gate 
11600Sstevel@tonic-gate 	/*
11610Sstevel@tonic-gate 	 * We're in swapout because the system is low on memory, so
11620Sstevel@tonic-gate 	 * go back and flush all the htables off the cached list.
11630Sstevel@tonic-gate 	 */
11640Sstevel@tonic-gate 	htable_purge_hat(hat);
11655084Sjohnlev 	XPV_ALLOW_MIGRATE();
11660Sstevel@tonic-gate }
11670Sstevel@tonic-gate 
11680Sstevel@tonic-gate /*
11690Sstevel@tonic-gate  * returns number of bytes that have valid mappings in hat.
11700Sstevel@tonic-gate  */
11710Sstevel@tonic-gate size_t
11720Sstevel@tonic-gate hat_get_mapped_size(hat_t *hat)
11730Sstevel@tonic-gate {
11740Sstevel@tonic-gate 	size_t total = 0;
11750Sstevel@tonic-gate 	int l;
11760Sstevel@tonic-gate 
11770Sstevel@tonic-gate 	for (l = 0; l <= mmu.max_page_level; l++)
11780Sstevel@tonic-gate 		total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
11794381Sjosephb 	total += hat->hat_ism_pgcnt;
11800Sstevel@tonic-gate 
11810Sstevel@tonic-gate 	return (total);
11820Sstevel@tonic-gate }
11830Sstevel@tonic-gate 
11840Sstevel@tonic-gate /*
11850Sstevel@tonic-gate  * enable/disable collection of stats for hat.
11860Sstevel@tonic-gate  */
11870Sstevel@tonic-gate int
11880Sstevel@tonic-gate hat_stats_enable(hat_t *hat)
11890Sstevel@tonic-gate {
11900Sstevel@tonic-gate 	atomic_add_32(&hat->hat_stats, 1);
11910Sstevel@tonic-gate 	return (1);
11920Sstevel@tonic-gate }
11930Sstevel@tonic-gate 
11940Sstevel@tonic-gate void
11950Sstevel@tonic-gate hat_stats_disable(hat_t *hat)
11960Sstevel@tonic-gate {
11970Sstevel@tonic-gate 	atomic_add_32(&hat->hat_stats, -1);
11980Sstevel@tonic-gate }
11990Sstevel@tonic-gate 
12000Sstevel@tonic-gate /*
12010Sstevel@tonic-gate  * Utility to sync the ref/mod bits from a page table entry to the page_t
12020Sstevel@tonic-gate  * We must be holding the mapping list lock when this is called.
12030Sstevel@tonic-gate  */
12040Sstevel@tonic-gate static void
12050Sstevel@tonic-gate hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
12060Sstevel@tonic-gate {
12070Sstevel@tonic-gate 	uint_t	rm = 0;
12080Sstevel@tonic-gate 	pgcnt_t	pgcnt;
12090Sstevel@tonic-gate 
12103446Smrj 	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
12110Sstevel@tonic-gate 		return;
12120Sstevel@tonic-gate 
12130Sstevel@tonic-gate 	if (PTE_GET(pte, PT_REF))
12140Sstevel@tonic-gate 		rm |= P_REF;
12150Sstevel@tonic-gate 
12160Sstevel@tonic-gate 	if (PTE_GET(pte, PT_MOD))
12170Sstevel@tonic-gate 		rm |= P_MOD;
12180Sstevel@tonic-gate 
12190Sstevel@tonic-gate 	if (rm == 0)
12200Sstevel@tonic-gate 		return;
12210Sstevel@tonic-gate 
12220Sstevel@tonic-gate 	/*
12230Sstevel@tonic-gate 	 * sync to all constituent pages of a large page
12240Sstevel@tonic-gate 	 */
12250Sstevel@tonic-gate 	ASSERT(x86_hm_held(pp));
12260Sstevel@tonic-gate 	pgcnt = page_get_pagecnt(level);
12270Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
12280Sstevel@tonic-gate 	for (; pgcnt > 0; --pgcnt) {
12290Sstevel@tonic-gate 		/*
12300Sstevel@tonic-gate 		 * hat_page_demote() can't decrease
12310Sstevel@tonic-gate 		 * pszc below this mapping size
12320Sstevel@tonic-gate 		 * since this large mapping existed after we
12330Sstevel@tonic-gate 		 * took mlist lock.
12340Sstevel@tonic-gate 		 */
12350Sstevel@tonic-gate 		ASSERT(pp->p_szc >= level);
12360Sstevel@tonic-gate 		hat_page_setattr(pp, rm);
12370Sstevel@tonic-gate 		++pp;
12380Sstevel@tonic-gate 	}
12390Sstevel@tonic-gate }
12400Sstevel@tonic-gate 
12410Sstevel@tonic-gate /*
12420Sstevel@tonic-gate  * This the set of PTE bits for PFN, permissions and caching
12435084Sjohnlev  * that are allowed to change on a HAT_LOAD_REMAP
12440Sstevel@tonic-gate  */
12450Sstevel@tonic-gate #define	PT_REMAP_BITS							\
12460Sstevel@tonic-gate 	(PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |		\
12475084Sjohnlev 	PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
12480Sstevel@tonic-gate 
1249510Skchow #define	REMAPASSERT(EX)	if (!(EX)) panic("hati_pte_map: " #EX)
12500Sstevel@tonic-gate /*
12510Sstevel@tonic-gate  * Do the low-level work to get a mapping entered into a HAT's pagetables
12520Sstevel@tonic-gate  * and in the mapping list of the associated page_t.
12530Sstevel@tonic-gate  */
12543446Smrj static int
12550Sstevel@tonic-gate hati_pte_map(
12560Sstevel@tonic-gate 	htable_t	*ht,
12570Sstevel@tonic-gate 	uint_t		entry,
12580Sstevel@tonic-gate 	page_t		*pp,
12590Sstevel@tonic-gate 	x86pte_t	pte,
12600Sstevel@tonic-gate 	int		flags,
12610Sstevel@tonic-gate 	void		*pte_ptr)
12620Sstevel@tonic-gate {
12630Sstevel@tonic-gate 	hat_t		*hat = ht->ht_hat;
12640Sstevel@tonic-gate 	x86pte_t	old_pte;
12650Sstevel@tonic-gate 	level_t		l = ht->ht_level;
12660Sstevel@tonic-gate 	hment_t		*hm;
12670Sstevel@tonic-gate 	uint_t		is_consist;
12683446Smrj 	int		rv = 0;
12690Sstevel@tonic-gate 
12700Sstevel@tonic-gate 	/*
12710Sstevel@tonic-gate 	 * Is this a consistant (ie. need mapping list lock) mapping?
12720Sstevel@tonic-gate 	 */
12730Sstevel@tonic-gate 	is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
12740Sstevel@tonic-gate 
12750Sstevel@tonic-gate 	/*
12760Sstevel@tonic-gate 	 * Track locked mapping count in the htable.  Do this first,
12770Sstevel@tonic-gate 	 * as we track locking even if there already is a mapping present.
12780Sstevel@tonic-gate 	 */
12790Sstevel@tonic-gate 	if ((flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat)
12800Sstevel@tonic-gate 		HTABLE_LOCK_INC(ht);
12810Sstevel@tonic-gate 
12820Sstevel@tonic-gate 	/*
12830Sstevel@tonic-gate 	 * Acquire the page's mapping list lock and get an hment to use.
12840Sstevel@tonic-gate 	 * Note that hment_prepare() might return NULL.
12850Sstevel@tonic-gate 	 */
12860Sstevel@tonic-gate 	if (is_consist) {
12870Sstevel@tonic-gate 		x86_hm_enter(pp);
12880Sstevel@tonic-gate 		hm = hment_prepare(ht, entry, pp);
12890Sstevel@tonic-gate 	}
12900Sstevel@tonic-gate 
12910Sstevel@tonic-gate 	/*
12920Sstevel@tonic-gate 	 * Set the new pte, retrieving the old one at the same time.
12930Sstevel@tonic-gate 	 */
12940Sstevel@tonic-gate 	old_pte = x86pte_set(ht, entry, pte, pte_ptr);
12950Sstevel@tonic-gate 
12960Sstevel@tonic-gate 	/*
12973446Smrj 	 * did we get a large page / page table collision?
12983446Smrj 	 */
12993446Smrj 	if (old_pte == LPAGE_ERROR) {
13003446Smrj 		rv = -1;
13013446Smrj 		goto done;
13023446Smrj 	}
13033446Smrj 
13043446Smrj 	/*
13050Sstevel@tonic-gate 	 * If the mapping didn't change there is nothing more to do.
13060Sstevel@tonic-gate 	 */
13073446Smrj 	if (PTE_EQUIV(pte, old_pte))
13083446Smrj 		goto done;
13090Sstevel@tonic-gate 
13100Sstevel@tonic-gate 	/*
13110Sstevel@tonic-gate 	 * Install a new mapping in the page's mapping list
13120Sstevel@tonic-gate 	 */
13130Sstevel@tonic-gate 	if (!PTE_ISVALID(old_pte)) {
13140Sstevel@tonic-gate 		if (is_consist) {
13150Sstevel@tonic-gate 			hment_assign(ht, entry, pp, hm);
13160Sstevel@tonic-gate 			x86_hm_exit(pp);
13170Sstevel@tonic-gate 		} else {
13180Sstevel@tonic-gate 			ASSERT(flags & HAT_LOAD_NOCONSIST);
13190Sstevel@tonic-gate 		}
13205349Skchow #if defined(__amd64)
13215349Skchow 		if (ht->ht_flags & HTABLE_VLP) {
13225349Skchow 			cpu_t *cpu = CPU;
13235349Skchow 			x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
13245349Skchow 			VLP_COPY(hat->hat_vlp_ptes, vlpptep);
13255349Skchow 		}
13265349Skchow #endif
13270Sstevel@tonic-gate 		HTABLE_INC(ht->ht_valid_cnt);
13280Sstevel@tonic-gate 		PGCNT_INC(hat, l);
13293446Smrj 		return (rv);
13300Sstevel@tonic-gate 	}
13310Sstevel@tonic-gate 
13320Sstevel@tonic-gate 	/*
13330Sstevel@tonic-gate 	 * Remap's are more complicated:
13340Sstevel@tonic-gate 	 *  - HAT_LOAD_REMAP must be specified if changing the pfn.
13350Sstevel@tonic-gate 	 *    We also require that NOCONSIST be specified.
13360Sstevel@tonic-gate 	 *  - Otherwise only permission or caching bits may change.
13370Sstevel@tonic-gate 	 */
13380Sstevel@tonic-gate 	if (!PTE_ISPAGE(old_pte, l))
13390Sstevel@tonic-gate 		panic("non-null/page mapping pte=" FMT_PTE, old_pte);
13400Sstevel@tonic-gate 
13410Sstevel@tonic-gate 	if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1342510Skchow 		REMAPASSERT(flags & HAT_LOAD_REMAP);
1343510Skchow 		REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
13443446Smrj 		REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1345510Skchow 		REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
13460Sstevel@tonic-gate 		    pf_is_memory(PTE2PFN(pte, l)));
1347510Skchow 		REMAPASSERT(!is_consist);
13480Sstevel@tonic-gate 	}
13490Sstevel@tonic-gate 
13500Sstevel@tonic-gate 	/*
13515084Sjohnlev 	 * We only let remaps change the certain bits in the PTE.
13520Sstevel@tonic-gate 	 */
13535084Sjohnlev 	if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
13545084Sjohnlev 		panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
13555084Sjohnlev 		    old_pte, pte);
13560Sstevel@tonic-gate 
13570Sstevel@tonic-gate 	/*
13580Sstevel@tonic-gate 	 * We don't create any mapping list entries on a remap, so release
13590Sstevel@tonic-gate 	 * any allocated hment after we drop the mapping list lock.
13600Sstevel@tonic-gate 	 */
13613446Smrj done:
13620Sstevel@tonic-gate 	if (is_consist) {
13630Sstevel@tonic-gate 		x86_hm_exit(pp);
13640Sstevel@tonic-gate 		if (hm != NULL)
13650Sstevel@tonic-gate 			hment_free(hm);
13660Sstevel@tonic-gate 	}
13673446Smrj 	return (rv);
13680Sstevel@tonic-gate }
13690Sstevel@tonic-gate 
13700Sstevel@tonic-gate /*
13713446Smrj  * Internal routine to load a single page table entry. This only fails if
13723446Smrj  * we attempt to overwrite a page table link with a large page.
13730Sstevel@tonic-gate  */
13743446Smrj static int
13750Sstevel@tonic-gate hati_load_common(
13760Sstevel@tonic-gate 	hat_t		*hat,
13770Sstevel@tonic-gate 	uintptr_t	va,
13780Sstevel@tonic-gate 	page_t		*pp,
13790Sstevel@tonic-gate 	uint_t		attr,
13800Sstevel@tonic-gate 	uint_t		flags,
13810Sstevel@tonic-gate 	level_t		level,
13820Sstevel@tonic-gate 	pfn_t		pfn)
13830Sstevel@tonic-gate {
13840Sstevel@tonic-gate 	htable_t	*ht;
13850Sstevel@tonic-gate 	uint_t		entry;
13860Sstevel@tonic-gate 	x86pte_t	pte;
13873446Smrj 	int		rv = 0;
13880Sstevel@tonic-gate 
13894004Sjosephb 	/*
13904004Sjosephb 	 * The number 16 is arbitrary and here to catch a recursion problem
13914004Sjosephb 	 * early before we blow out the kernel stack.
13924004Sjosephb 	 */
13934004Sjosephb 	++curthread->t_hatdepth;
13944004Sjosephb 	ASSERT(curthread->t_hatdepth < 16);
13954004Sjosephb 
13960Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
13970Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
13980Sstevel@tonic-gate 
13990Sstevel@tonic-gate 	if (flags & HAT_LOAD_SHARE)
14000Sstevel@tonic-gate 		hat->hat_flags |= HAT_SHARED;
14010Sstevel@tonic-gate 
14020Sstevel@tonic-gate 	/*
14030Sstevel@tonic-gate 	 * Find the page table that maps this page if it already exists.
14040Sstevel@tonic-gate 	 */
14050Sstevel@tonic-gate 	ht = htable_lookup(hat, va, level);
14060Sstevel@tonic-gate 
14070Sstevel@tonic-gate 	/*
14084004Sjosephb 	 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
14090Sstevel@tonic-gate 	 */
14104004Sjosephb 	if (pp == NULL)
14110Sstevel@tonic-gate 		flags |= HAT_LOAD_NOCONSIST;
14120Sstevel@tonic-gate 
14130Sstevel@tonic-gate 	if (ht == NULL) {
14140Sstevel@tonic-gate 		ht = htable_create(hat, va, level, NULL);
14150Sstevel@tonic-gate 		ASSERT(ht != NULL);
14160Sstevel@tonic-gate 	}
14170Sstevel@tonic-gate 	entry = htable_va2entry(va, ht);
14180Sstevel@tonic-gate 
14190Sstevel@tonic-gate 	/*
14200Sstevel@tonic-gate 	 * a bunch of paranoid error checking
14210Sstevel@tonic-gate 	 */
14220Sstevel@tonic-gate 	ASSERT(ht->ht_busy > 0);
14230Sstevel@tonic-gate 	if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
14240Sstevel@tonic-gate 		panic("hati_load_common: bad htable %p, va %p", ht, (void *)va);
14250Sstevel@tonic-gate 	ASSERT(ht->ht_level == level);
14260Sstevel@tonic-gate 
14270Sstevel@tonic-gate 	/*
14280Sstevel@tonic-gate 	 * construct the new PTE
14290Sstevel@tonic-gate 	 */
14300Sstevel@tonic-gate 	if (hat == kas.a_hat)
14310Sstevel@tonic-gate 		attr &= ~PROT_USER;
14320Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, level, flags);
14330Sstevel@tonic-gate 	if (hat == kas.a_hat && va >= kernelbase)
14340Sstevel@tonic-gate 		PTE_SET(pte, mmu.pt_global);
14350Sstevel@tonic-gate 
14360Sstevel@tonic-gate 	/*
14370Sstevel@tonic-gate 	 * establish the mapping
14380Sstevel@tonic-gate 	 */
14393446Smrj 	rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
14400Sstevel@tonic-gate 
14410Sstevel@tonic-gate 	/*
14420Sstevel@tonic-gate 	 * release the htable and any reserves
14430Sstevel@tonic-gate 	 */
14440Sstevel@tonic-gate 	htable_release(ht);
14454004Sjosephb 	--curthread->t_hatdepth;
14463446Smrj 	return (rv);
14470Sstevel@tonic-gate }
14480Sstevel@tonic-gate 
14490Sstevel@tonic-gate /*
14500Sstevel@tonic-gate  * special case of hat_memload to deal with some kernel addrs for performance
14510Sstevel@tonic-gate  */
14520Sstevel@tonic-gate static void
14530Sstevel@tonic-gate hat_kmap_load(
14540Sstevel@tonic-gate 	caddr_t		addr,
14550Sstevel@tonic-gate 	page_t		*pp,
14560Sstevel@tonic-gate 	uint_t		attr,
14570Sstevel@tonic-gate 	uint_t		flags)
14580Sstevel@tonic-gate {
14590Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
14600Sstevel@tonic-gate 	x86pte_t	pte;
14610Sstevel@tonic-gate 	pfn_t		pfn = page_pptonum(pp);
14620Sstevel@tonic-gate 	pgcnt_t		pg_off = mmu_btop(va - mmu.kmap_addr);
14630Sstevel@tonic-gate 	htable_t	*ht;
14640Sstevel@tonic-gate 	uint_t		entry;
14650Sstevel@tonic-gate 	void		*pte_ptr;
14660Sstevel@tonic-gate 
14670Sstevel@tonic-gate 	/*
14680Sstevel@tonic-gate 	 * construct the requested PTE
14690Sstevel@tonic-gate 	 */
14700Sstevel@tonic-gate 	attr &= ~PROT_USER;
14710Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
14720Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, 0, flags);
14730Sstevel@tonic-gate 	PTE_SET(pte, mmu.pt_global);
14740Sstevel@tonic-gate 
14750Sstevel@tonic-gate 	/*
14760Sstevel@tonic-gate 	 * Figure out the pte_ptr and htable and use common code to finish up
14770Sstevel@tonic-gate 	 */
14780Sstevel@tonic-gate 	if (mmu.pae_hat)
14790Sstevel@tonic-gate 		pte_ptr = mmu.kmap_ptes + pg_off;
14800Sstevel@tonic-gate 	else
14810Sstevel@tonic-gate 		pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
14820Sstevel@tonic-gate 	ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
14830Sstevel@tonic-gate 	    LEVEL_SHIFT(1)];
14840Sstevel@tonic-gate 	entry = htable_va2entry(va, ht);
14854004Sjosephb 	++curthread->t_hatdepth;
14864004Sjosephb 	ASSERT(curthread->t_hatdepth < 16);
14873446Smrj 	(void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
14884004Sjosephb 	--curthread->t_hatdepth;
14890Sstevel@tonic-gate }
14900Sstevel@tonic-gate 
14910Sstevel@tonic-gate /*
14920Sstevel@tonic-gate  * hat_memload() - load a translation to the given page struct
14930Sstevel@tonic-gate  *
14940Sstevel@tonic-gate  * Flags for hat_memload/hat_devload/hat_*attr.
14950Sstevel@tonic-gate  *
14960Sstevel@tonic-gate  * 	HAT_LOAD	Default flags to load a translation to the page.
14970Sstevel@tonic-gate  *
14980Sstevel@tonic-gate  * 	HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
14990Sstevel@tonic-gate  *			and hat_devload().
15000Sstevel@tonic-gate  *
15010Sstevel@tonic-gate  *	HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
15023446Smrj  *			sets PT_NOCONSIST
15030Sstevel@tonic-gate  *
15040Sstevel@tonic-gate  *	HAT_LOAD_SHARE	A flag to hat_memload() to indicate h/w page tables
15050Sstevel@tonic-gate  *			that map some user pages (not kas) is shared by more
15060Sstevel@tonic-gate  *			than one process (eg. ISM).
15070Sstevel@tonic-gate  *
15080Sstevel@tonic-gate  *	HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
15090Sstevel@tonic-gate  *
15100Sstevel@tonic-gate  *	HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
15110Sstevel@tonic-gate  *			point, it's setting up mapping to allocate internal
15120Sstevel@tonic-gate  *			hat layer data structures.  This flag forces hat layer
15130Sstevel@tonic-gate  *			to tap its reserves in order to prevent infinite
15140Sstevel@tonic-gate  *			recursion.
15150Sstevel@tonic-gate  *
15160Sstevel@tonic-gate  * The following is a protection attribute (like PROT_READ, etc.)
15170Sstevel@tonic-gate  *
15183446Smrj  *	HAT_NOSYNC	set PT_NOSYNC - this mapping's ref/mod bits
15190Sstevel@tonic-gate  *			are never cleared.
15200Sstevel@tonic-gate  *
15210Sstevel@tonic-gate  * Installing new valid PTE's and creation of the mapping list
15220Sstevel@tonic-gate  * entry are controlled under the same lock. It's derived from the
15230Sstevel@tonic-gate  * page_t being mapped.
15240Sstevel@tonic-gate  */
15250Sstevel@tonic-gate static uint_t supported_memload_flags =
15260Sstevel@tonic-gate 	HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
15270Sstevel@tonic-gate 	HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
15280Sstevel@tonic-gate 
15290Sstevel@tonic-gate void
15300Sstevel@tonic-gate hat_memload(
15310Sstevel@tonic-gate 	hat_t		*hat,
15320Sstevel@tonic-gate 	caddr_t		addr,
15330Sstevel@tonic-gate 	page_t		*pp,
15340Sstevel@tonic-gate 	uint_t		attr,
15350Sstevel@tonic-gate 	uint_t		flags)
15360Sstevel@tonic-gate {
15370Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
15380Sstevel@tonic-gate 	level_t		level = 0;
15390Sstevel@tonic-gate 	pfn_t		pfn = page_pptonum(pp);
15400Sstevel@tonic-gate 
15415084Sjohnlev 	XPV_DISALLOW_MIGRATE();
15420Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
15433446Smrj 	ASSERT(hat == kas.a_hat || va < _userlimit);
15440Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
15450Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
15460Sstevel@tonic-gate 	ASSERT((flags & supported_memload_flags) == flags);
15470Sstevel@tonic-gate 
15480Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
15490Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
15500Sstevel@tonic-gate 
15510Sstevel@tonic-gate 	/*
15520Sstevel@tonic-gate 	 * kernel address special case for performance.
15530Sstevel@tonic-gate 	 */
15540Sstevel@tonic-gate 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
15550Sstevel@tonic-gate 		ASSERT(hat == kas.a_hat);
15560Sstevel@tonic-gate 		hat_kmap_load(addr, pp, attr, flags);
15575084Sjohnlev 		XPV_ALLOW_MIGRATE();
15580Sstevel@tonic-gate 		return;
15590Sstevel@tonic-gate 	}
15600Sstevel@tonic-gate 
15610Sstevel@tonic-gate 	/*
15620Sstevel@tonic-gate 	 * This is used for memory with normal caching enabled, so
15630Sstevel@tonic-gate 	 * always set HAT_STORECACHING_OK.
15640Sstevel@tonic-gate 	 */
15650Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
15663446Smrj 	if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
15673446Smrj 		panic("unexpected hati_load_common() failure");
15685084Sjohnlev 	XPV_ALLOW_MIGRATE();
15690Sstevel@tonic-gate }
15700Sstevel@tonic-gate 
15714528Spaulsan /* ARGSUSED */
15724528Spaulsan void
15734528Spaulsan hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
15744528Spaulsan     uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
15754528Spaulsan {
15764528Spaulsan 	hat_memload(hat, addr, pp, attr, flags);
15774528Spaulsan }
15784528Spaulsan 
15790Sstevel@tonic-gate /*
15800Sstevel@tonic-gate  * Load the given array of page structs using large pages when possible
15810Sstevel@tonic-gate  */
15820Sstevel@tonic-gate void
15830Sstevel@tonic-gate hat_memload_array(
15840Sstevel@tonic-gate 	hat_t		*hat,
15850Sstevel@tonic-gate 	caddr_t		addr,
15860Sstevel@tonic-gate 	size_t		len,
15870Sstevel@tonic-gate 	page_t		**pages,
15880Sstevel@tonic-gate 	uint_t		attr,
15890Sstevel@tonic-gate 	uint_t		flags)
15900Sstevel@tonic-gate {
15910Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
15920Sstevel@tonic-gate 	uintptr_t	eaddr = va + len;
15930Sstevel@tonic-gate 	level_t		level;
15940Sstevel@tonic-gate 	size_t		pgsize;
15950Sstevel@tonic-gate 	pgcnt_t		pgindx = 0;
15960Sstevel@tonic-gate 	pfn_t		pfn;
15970Sstevel@tonic-gate 	pgcnt_t		i;
15980Sstevel@tonic-gate 
15995084Sjohnlev 	XPV_DISALLOW_MIGRATE();
16000Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
16013446Smrj 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
16020Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
16030Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
16040Sstevel@tonic-gate 	ASSERT((flags & supported_memload_flags) == flags);
16050Sstevel@tonic-gate 
16060Sstevel@tonic-gate 	/*
16070Sstevel@tonic-gate 	 * memload is used for memory with full caching enabled, so
16080Sstevel@tonic-gate 	 * set HAT_STORECACHING_OK.
16090Sstevel@tonic-gate 	 */
16100Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
16110Sstevel@tonic-gate 
16120Sstevel@tonic-gate 	/*
16130Sstevel@tonic-gate 	 * handle all pages using largest possible pagesize
16140Sstevel@tonic-gate 	 */
16150Sstevel@tonic-gate 	while (va < eaddr) {
16160Sstevel@tonic-gate 		/*
16170Sstevel@tonic-gate 		 * decide what level mapping to use (ie. pagesize)
16180Sstevel@tonic-gate 		 */
16190Sstevel@tonic-gate 		pfn = page_pptonum(pages[pgindx]);
16200Sstevel@tonic-gate 		for (level = mmu.max_page_level; ; --level) {
16210Sstevel@tonic-gate 			pgsize = LEVEL_SIZE(level);
16220Sstevel@tonic-gate 			if (level == 0)
16230Sstevel@tonic-gate 				break;
16243446Smrj 
16250Sstevel@tonic-gate 			if (!IS_P2ALIGNED(va, pgsize) ||
16260Sstevel@tonic-gate 			    (eaddr - va) < pgsize ||
16273446Smrj 			    !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
16280Sstevel@tonic-gate 				continue;
16290Sstevel@tonic-gate 
16300Sstevel@tonic-gate 			/*
16310Sstevel@tonic-gate 			 * To use a large mapping of this size, all the
16320Sstevel@tonic-gate 			 * pages we are passed must be sequential subpages
16330Sstevel@tonic-gate 			 * of the large page.
16340Sstevel@tonic-gate 			 * hat_page_demote() can't change p_szc because
16350Sstevel@tonic-gate 			 * all pages are locked.
16360Sstevel@tonic-gate 			 */
16370Sstevel@tonic-gate 			if (pages[pgindx]->p_szc >= level) {
16380Sstevel@tonic-gate 				for (i = 0; i < mmu_btop(pgsize); ++i) {
16390Sstevel@tonic-gate 					if (pfn + i !=
16400Sstevel@tonic-gate 					    page_pptonum(pages[pgindx + i]))
16410Sstevel@tonic-gate 						break;
16420Sstevel@tonic-gate 					ASSERT(pages[pgindx + i]->p_szc >=
16430Sstevel@tonic-gate 					    level);
16440Sstevel@tonic-gate 					ASSERT(pages[pgindx] + i ==
16450Sstevel@tonic-gate 					    pages[pgindx + i]);
16460Sstevel@tonic-gate 				}
16475349Skchow 				if (i == mmu_btop(pgsize)) {
16485349Skchow #ifdef DEBUG
16495349Skchow 					if (level == 2)
16505349Skchow 						map1gcnt++;
16515349Skchow #endif
16520Sstevel@tonic-gate 					break;
16535349Skchow 				}
16540Sstevel@tonic-gate 			}
16550Sstevel@tonic-gate 		}
16560Sstevel@tonic-gate 
16570Sstevel@tonic-gate 		/*
16583446Smrj 		 * Load this page mapping. If the load fails, try a smaller
16593446Smrj 		 * pagesize.
16600Sstevel@tonic-gate 		 */
16610Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(va));
16623446Smrj 		while (hati_load_common(hat, va, pages[pgindx], attr,
16634381Sjosephb 		    flags, level, pfn) != 0) {
16643446Smrj 			if (level == 0)
16653446Smrj 				panic("unexpected hati_load_common() failure");
16663446Smrj 			--level;
16673446Smrj 			pgsize = LEVEL_SIZE(level);
16683446Smrj 		}
16690Sstevel@tonic-gate 
16700Sstevel@tonic-gate 		/*
16710Sstevel@tonic-gate 		 * move to next page
16720Sstevel@tonic-gate 		 */
16730Sstevel@tonic-gate 		va += pgsize;
16740Sstevel@tonic-gate 		pgindx += mmu_btop(pgsize);
16750Sstevel@tonic-gate 	}
16765084Sjohnlev 	XPV_ALLOW_MIGRATE();
16770Sstevel@tonic-gate }
16780Sstevel@tonic-gate 
16794528Spaulsan /* ARGSUSED */
16804528Spaulsan void
16814528Spaulsan hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
16824528Spaulsan     struct page **pps, uint_t attr, uint_t flags,
16834528Spaulsan     hat_region_cookie_t rcookie)
16844528Spaulsan {
16854528Spaulsan 	hat_memload_array(hat, addr, len, pps, attr, flags);
16864528Spaulsan }
16874528Spaulsan 
16880Sstevel@tonic-gate /*
16890Sstevel@tonic-gate  * void hat_devload(hat, addr, len, pf, attr, flags)
16900Sstevel@tonic-gate  *	load/lock the given page frame number
16910Sstevel@tonic-gate  *
16920Sstevel@tonic-gate  * Advisory ordering attributes. Apply only to device mappings.
16930Sstevel@tonic-gate  *
16940Sstevel@tonic-gate  * HAT_STRICTORDER: the CPU must issue the references in order, as the
16950Sstevel@tonic-gate  *	programmer specified.  This is the default.
16960Sstevel@tonic-gate  * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
16970Sstevel@tonic-gate  *	of reordering; store or load with store or load).
16980Sstevel@tonic-gate  * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
16990Sstevel@tonic-gate  *	to consecutive locations (for example, turn two consecutive byte
17000Sstevel@tonic-gate  *	stores into one halfword store), and it may batch individual loads
17010Sstevel@tonic-gate  *	(for example, turn two consecutive byte loads into one halfword load).
17020Sstevel@tonic-gate  *	This also implies re-ordering.
17030Sstevel@tonic-gate  * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
17040Sstevel@tonic-gate  *	until another store occurs.  The default is to fetch new data
17050Sstevel@tonic-gate  *	on every load.  This also implies merging.
17060Sstevel@tonic-gate  * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
17070Sstevel@tonic-gate  *	the device (perhaps with other data) at a later time.  The default is
17080Sstevel@tonic-gate  *	to push the data right away.  This also implies load caching.
17090Sstevel@tonic-gate  *
17100Sstevel@tonic-gate  * Equivalent of hat_memload(), but can be used for device memory where
17110Sstevel@tonic-gate  * there are no page_t's and we support additional flags (write merging, etc).
17120Sstevel@tonic-gate  * Note that we can have large page mappings with this interface.
17130Sstevel@tonic-gate  */
17140Sstevel@tonic-gate int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
17150Sstevel@tonic-gate 	HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
17160Sstevel@tonic-gate 	HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
17170Sstevel@tonic-gate 
17180Sstevel@tonic-gate void
17190Sstevel@tonic-gate hat_devload(
17200Sstevel@tonic-gate 	hat_t		*hat,
17210Sstevel@tonic-gate 	caddr_t		addr,
17220Sstevel@tonic-gate 	size_t		len,
17230Sstevel@tonic-gate 	pfn_t		pfn,
17240Sstevel@tonic-gate 	uint_t		attr,
17250Sstevel@tonic-gate 	int		flags)
17260Sstevel@tonic-gate {
17270Sstevel@tonic-gate 	uintptr_t	va = ALIGN2PAGE(addr);
17280Sstevel@tonic-gate 	uintptr_t	eva = va + len;
17290Sstevel@tonic-gate 	level_t		level;
17300Sstevel@tonic-gate 	size_t		pgsize;
17310Sstevel@tonic-gate 	page_t		*pp;
17320Sstevel@tonic-gate 	int		f;	/* per PTE copy of flags  - maybe modified */
17330Sstevel@tonic-gate 	uint_t		a;	/* per PTE copy of attr */
17340Sstevel@tonic-gate 
17355084Sjohnlev 	XPV_DISALLOW_MIGRATE();
17360Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
17373446Smrj 	ASSERT(hat == kas.a_hat || eva <= _userlimit);
17380Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
17390Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
17400Sstevel@tonic-gate 	ASSERT((flags & supported_devload_flags) == flags);
17410Sstevel@tonic-gate 
17420Sstevel@tonic-gate 	/*
17430Sstevel@tonic-gate 	 * handle all pages
17440Sstevel@tonic-gate 	 */
17450Sstevel@tonic-gate 	while (va < eva) {
17460Sstevel@tonic-gate 
17470Sstevel@tonic-gate 		/*
17480Sstevel@tonic-gate 		 * decide what level mapping to use (ie. pagesize)
17490Sstevel@tonic-gate 		 */
17500Sstevel@tonic-gate 		for (level = mmu.max_page_level; ; --level) {
17510Sstevel@tonic-gate 			pgsize = LEVEL_SIZE(level);
17520Sstevel@tonic-gate 			if (level == 0)
17530Sstevel@tonic-gate 				break;
17540Sstevel@tonic-gate 			if (IS_P2ALIGNED(va, pgsize) &&
17550Sstevel@tonic-gate 			    (eva - va) >= pgsize &&
17565349Skchow 			    IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
17575349Skchow #ifdef DEBUG
17585349Skchow 				if (level == 2)
17595349Skchow 					map1gcnt++;
17605349Skchow #endif
17610Sstevel@tonic-gate 				break;
17625349Skchow 			}
17630Sstevel@tonic-gate 		}
17640Sstevel@tonic-gate 
17650Sstevel@tonic-gate 		/*
17663446Smrj 		 * If this is just memory then allow caching (this happens
17670Sstevel@tonic-gate 		 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
17683446Smrj 		 * to override that. If we don't have a page_t then make sure
17690Sstevel@tonic-gate 		 * NOCONSIST is set.
17700Sstevel@tonic-gate 		 */
17710Sstevel@tonic-gate 		a = attr;
17720Sstevel@tonic-gate 		f = flags;
17735084Sjohnlev 		if (!pf_is_memory(pfn))
17745084Sjohnlev 			f |= HAT_LOAD_NOCONSIST;
17755084Sjohnlev 		else if (!(a & HAT_PLAT_NOCACHE))
17765084Sjohnlev 			a |= HAT_STORECACHING_OK;
17775084Sjohnlev 
17785084Sjohnlev 		if (f & HAT_LOAD_NOCONSIST)
17790Sstevel@tonic-gate 			pp = NULL;
17805084Sjohnlev 		else
17815084Sjohnlev 			pp = page_numtopp_nolock(pfn);
17820Sstevel@tonic-gate 
17830Sstevel@tonic-gate 		/*
17840Sstevel@tonic-gate 		 * load this page mapping
17850Sstevel@tonic-gate 		 */
17860Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(va));
17873446Smrj 		while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
17883446Smrj 			if (level == 0)
17893446Smrj 				panic("unexpected hati_load_common() failure");
17903446Smrj 			--level;
17913446Smrj 			pgsize = LEVEL_SIZE(level);
17923446Smrj 		}
17930Sstevel@tonic-gate 
17940Sstevel@tonic-gate 		/*
17950Sstevel@tonic-gate 		 * move to next page
17960Sstevel@tonic-gate 		 */
17970Sstevel@tonic-gate 		va += pgsize;
17980Sstevel@tonic-gate 		pfn += mmu_btop(pgsize);
17990Sstevel@tonic-gate 	}
18005084Sjohnlev 	XPV_ALLOW_MIGRATE();
18010Sstevel@tonic-gate }
18020Sstevel@tonic-gate 
18030Sstevel@tonic-gate /*
18040Sstevel@tonic-gate  * void hat_unlock(hat, addr, len)
18050Sstevel@tonic-gate  *	unlock the mappings to a given range of addresses
18060Sstevel@tonic-gate  *
18070Sstevel@tonic-gate  * Locks are tracked by ht_lock_cnt in the htable.
18080Sstevel@tonic-gate  */
18090Sstevel@tonic-gate void
18100Sstevel@tonic-gate hat_unlock(hat_t *hat, caddr_t addr, size_t len)
18110Sstevel@tonic-gate {
18120Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
18130Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
18140Sstevel@tonic-gate 	htable_t	*ht = NULL;
18150Sstevel@tonic-gate 
18160Sstevel@tonic-gate 	/*
18170Sstevel@tonic-gate 	 * kernel entries are always locked, we don't track lock counts
18180Sstevel@tonic-gate 	 */
18193446Smrj 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
18200Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
18210Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
18220Sstevel@tonic-gate 	if (hat == kas.a_hat)
18230Sstevel@tonic-gate 		return;
18240Sstevel@tonic-gate 	if (eaddr > _userlimit)
18250Sstevel@tonic-gate 		panic("hat_unlock() address out of range - above _userlimit");
18260Sstevel@tonic-gate 
18275084Sjohnlev 	XPV_DISALLOW_MIGRATE();
18280Sstevel@tonic-gate 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
18290Sstevel@tonic-gate 	while (vaddr < eaddr) {
18300Sstevel@tonic-gate 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
18310Sstevel@tonic-gate 		if (ht == NULL)
18320Sstevel@tonic-gate 			break;
18330Sstevel@tonic-gate 
18340Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
18350Sstevel@tonic-gate 
18360Sstevel@tonic-gate 		if (ht->ht_lock_cnt < 1)
18370Sstevel@tonic-gate 			panic("hat_unlock(): lock_cnt < 1, "
18380Sstevel@tonic-gate 			    "htable=%p, vaddr=%p\n", ht, (caddr_t)vaddr);
18390Sstevel@tonic-gate 		HTABLE_LOCK_DEC(ht);
18400Sstevel@tonic-gate 
18410Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
18420Sstevel@tonic-gate 	}
18430Sstevel@tonic-gate 	if (ht)
18440Sstevel@tonic-gate 		htable_release(ht);
18455084Sjohnlev 	XPV_ALLOW_MIGRATE();
18460Sstevel@tonic-gate }
18470Sstevel@tonic-gate 
18484528Spaulsan /* ARGSUSED */
18494528Spaulsan void
18505075Spaulsan hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
18514528Spaulsan     hat_region_cookie_t rcookie)
18524528Spaulsan {
18534528Spaulsan 	panic("No shared region support on x86");
18544528Spaulsan }
18554528Spaulsan 
18565084Sjohnlev #if !defined(__xpv)
18570Sstevel@tonic-gate /*
18580Sstevel@tonic-gate  * Cross call service routine to demap a virtual page on
18590Sstevel@tonic-gate  * the current CPU or flush all mappings in TLB.
18600Sstevel@tonic-gate  */
18610Sstevel@tonic-gate /*ARGSUSED*/
18620Sstevel@tonic-gate static int
18630Sstevel@tonic-gate hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
18640Sstevel@tonic-gate {
18650Sstevel@tonic-gate 	hat_t	*hat = (hat_t *)a1;
18660Sstevel@tonic-gate 	caddr_t	addr = (caddr_t)a2;
18670Sstevel@tonic-gate 
18680Sstevel@tonic-gate 	/*
18690Sstevel@tonic-gate 	 * If the target hat isn't the kernel and this CPU isn't operating
18700Sstevel@tonic-gate 	 * in the target hat, we can ignore the cross call.
18710Sstevel@tonic-gate 	 */
18720Sstevel@tonic-gate 	if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
18730Sstevel@tonic-gate 		return (0);
18740Sstevel@tonic-gate 
18750Sstevel@tonic-gate 	/*
18760Sstevel@tonic-gate 	 * For a normal address, we just flush one page mapping
18770Sstevel@tonic-gate 	 */
18780Sstevel@tonic-gate 	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
18793446Smrj 		mmu_tlbflush_entry(addr);
18800Sstevel@tonic-gate 		return (0);
18810Sstevel@tonic-gate 	}
18820Sstevel@tonic-gate 
18830Sstevel@tonic-gate 	/*
18840Sstevel@tonic-gate 	 * Otherwise we reload cr3 to effect a complete TLB flush.
18850Sstevel@tonic-gate 	 *
18860Sstevel@tonic-gate 	 * A reload of cr3 on a VLP process also means we must also recopy in
18870Sstevel@tonic-gate 	 * the pte values from the struct hat
18880Sstevel@tonic-gate 	 */
18890Sstevel@tonic-gate 	if (hat->hat_flags & HAT_VLP) {
18900Sstevel@tonic-gate #if defined(__amd64)
18910Sstevel@tonic-gate 		x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
18920Sstevel@tonic-gate 
18930Sstevel@tonic-gate 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
18940Sstevel@tonic-gate #elif defined(__i386)
18950Sstevel@tonic-gate 		reload_pae32(hat, CPU);
18960Sstevel@tonic-gate #endif
18970Sstevel@tonic-gate 	}
18980Sstevel@tonic-gate 	reload_cr3();
18990Sstevel@tonic-gate 	return (0);
19000Sstevel@tonic-gate }
19010Sstevel@tonic-gate 
19020Sstevel@tonic-gate /*
19034191Sjosephb  * Flush all TLB entries, including global (ie. kernel) ones.
19044191Sjosephb  */
19054191Sjosephb static void
19064191Sjosephb flush_all_tlb_entries(void)
19074191Sjosephb {
19084191Sjosephb 	ulong_t cr4 = getcr4();
19094191Sjosephb 
19104191Sjosephb 	if (cr4 & CR4_PGE) {
19114191Sjosephb 		setcr4(cr4 & ~(ulong_t)CR4_PGE);
19124191Sjosephb 		setcr4(cr4);
19134191Sjosephb 
19144191Sjosephb 		/*
19154191Sjosephb 		 * 32 bit PAE also needs to always reload_cr3()
19164191Sjosephb 		 */
19174191Sjosephb 		if (mmu.max_level == 2)
19184191Sjosephb 			reload_cr3();
19194191Sjosephb 	} else {
19204191Sjosephb 		reload_cr3();
19214191Sjosephb 	}
19224191Sjosephb }
19234191Sjosephb 
19244191Sjosephb #define	TLB_CPU_HALTED	(01ul)
19254191Sjosephb #define	TLB_INVAL_ALL	(02ul)
19264191Sjosephb #define	CAS_TLB_INFO(cpu, old, new)	\
19274191Sjosephb 	caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
19284191Sjosephb 
19294191Sjosephb /*
19304191Sjosephb  * Record that a CPU is going idle
19314191Sjosephb  */
19324191Sjosephb void
19334191Sjosephb tlb_going_idle(void)
19344191Sjosephb {
19354191Sjosephb 	atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
19364191Sjosephb }
19374191Sjosephb 
19384191Sjosephb /*
19394191Sjosephb  * Service a delayed TLB flush if coming out of being idle.
19404191Sjosephb  */
19414191Sjosephb void
19424191Sjosephb tlb_service(void)
19434191Sjosephb {
19444191Sjosephb 	ulong_t flags = getflags();
19454191Sjosephb 	ulong_t tlb_info;
19464191Sjosephb 	ulong_t found;
19474191Sjosephb 
19484191Sjosephb 	/*
19494191Sjosephb 	 * Be sure interrupts are off while doing this so that
19504191Sjosephb 	 * higher level interrupts correctly wait for flushes to finish.
19514191Sjosephb 	 */
19524191Sjosephb 	if (flags & PS_IE)
19534191Sjosephb 		flags = intr_clear();
19544191Sjosephb 
19554191Sjosephb 	/*
19564191Sjosephb 	 * We only have to do something if coming out of being idle.
19574191Sjosephb 	 */
19584191Sjosephb 	tlb_info = CPU->cpu_m.mcpu_tlb_info;
19594191Sjosephb 	if (tlb_info & TLB_CPU_HALTED) {
19604191Sjosephb 		ASSERT(CPU->cpu_current_hat == kas.a_hat);
19614191Sjosephb 
19624191Sjosephb 		/*
19634191Sjosephb 		 * Atomic clear and fetch of old state.
19644191Sjosephb 		 */
19654191Sjosephb 		while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
19664191Sjosephb 			ASSERT(found & TLB_CPU_HALTED);
19674191Sjosephb 			tlb_info = found;
19684191Sjosephb 			SMT_PAUSE();
19694191Sjosephb 		}
19704191Sjosephb 		if (tlb_info & TLB_INVAL_ALL)
19714191Sjosephb 			flush_all_tlb_entries();
19724191Sjosephb 	}
19734191Sjosephb 
19744191Sjosephb 	/*
19754191Sjosephb 	 * Restore interrupt enable control bit.
19764191Sjosephb 	 */
19774191Sjosephb 	if (flags & PS_IE)
19784191Sjosephb 		sti();
19794191Sjosephb }
19805084Sjohnlev #endif /* !__xpv */
19814191Sjosephb 
19824191Sjosephb /*
19830Sstevel@tonic-gate  * Internal routine to do cross calls to invalidate a range of pages on
19840Sstevel@tonic-gate  * all CPUs using a given hat.
19850Sstevel@tonic-gate  */
19860Sstevel@tonic-gate void
19873446Smrj hat_tlb_inval(hat_t *hat, uintptr_t va)
19880Sstevel@tonic-gate {
19890Sstevel@tonic-gate 	extern int	flushes_require_xcalls;	/* from mp_startup.c */
19900Sstevel@tonic-gate 	cpuset_t	justme;
19915084Sjohnlev 	cpuset_t	cpus_to_shootdown;
19925084Sjohnlev #ifndef __xpv
19934191Sjosephb 	cpuset_t	check_cpus;
19944191Sjosephb 	cpu_t		*cpup;
19954191Sjosephb 	int		c;
19965084Sjohnlev #endif
19970Sstevel@tonic-gate 
19980Sstevel@tonic-gate 	/*
19990Sstevel@tonic-gate 	 * If the hat is being destroyed, there are no more users, so
20000Sstevel@tonic-gate 	 * demap need not do anything.
20010Sstevel@tonic-gate 	 */
20020Sstevel@tonic-gate 	if (hat->hat_flags & HAT_FREEING)
20030Sstevel@tonic-gate 		return;
20040Sstevel@tonic-gate 
20050Sstevel@tonic-gate 	/*
20060Sstevel@tonic-gate 	 * If demapping from a shared pagetable, we best demap the
20070Sstevel@tonic-gate 	 * entire set of user TLBs, since we don't know what addresses
20080Sstevel@tonic-gate 	 * these were shared at.
20090Sstevel@tonic-gate 	 */
20100Sstevel@tonic-gate 	if (hat->hat_flags & HAT_SHARED) {
20110Sstevel@tonic-gate 		hat = kas.a_hat;
20120Sstevel@tonic-gate 		va = DEMAP_ALL_ADDR;
20130Sstevel@tonic-gate 	}
20140Sstevel@tonic-gate 
20150Sstevel@tonic-gate 	/*
20160Sstevel@tonic-gate 	 * if not running with multiple CPUs, don't use cross calls
20170Sstevel@tonic-gate 	 */
20180Sstevel@tonic-gate 	if (panicstr || !flushes_require_xcalls) {
20195084Sjohnlev #ifdef __xpv
20205084Sjohnlev 		if (va == DEMAP_ALL_ADDR)
20215084Sjohnlev 			xen_flush_tlb();
20225084Sjohnlev 		else
20235084Sjohnlev 			xen_flush_va((caddr_t)va);
20245084Sjohnlev #else
20250Sstevel@tonic-gate 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
20265084Sjohnlev #endif
20270Sstevel@tonic-gate 		return;
20280Sstevel@tonic-gate 	}
20290Sstevel@tonic-gate 
20300Sstevel@tonic-gate 
20310Sstevel@tonic-gate 	/*
20323446Smrj 	 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
20333446Smrj 	 * Otherwise it's just CPUs currently executing in this hat.
20340Sstevel@tonic-gate 	 */
20350Sstevel@tonic-gate 	kpreempt_disable();
20360Sstevel@tonic-gate 	CPUSET_ONLY(justme, CPU->cpu_id);
20373446Smrj 	if (hat == kas.a_hat)
20383446Smrj 		cpus_to_shootdown = khat_cpuset;
20390Sstevel@tonic-gate 	else
20403446Smrj 		cpus_to_shootdown = hat->hat_cpus;
20413446Smrj 
20425084Sjohnlev #ifndef __xpv
20434191Sjosephb 	/*
20444191Sjosephb 	 * If any CPUs in the set are idle, just request a delayed flush
20454191Sjosephb 	 * and avoid waking them up.
20464191Sjosephb 	 */
20474191Sjosephb 	check_cpus = cpus_to_shootdown;
20484191Sjosephb 	for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
20494191Sjosephb 		ulong_t tlb_info;
20504191Sjosephb 
20514191Sjosephb 		if (!CPU_IN_SET(check_cpus, c))
20524191Sjosephb 			continue;
20534191Sjosephb 		CPUSET_DEL(check_cpus, c);
20544191Sjosephb 		cpup = cpu[c];
20554191Sjosephb 		if (cpup == NULL)
20564191Sjosephb 			continue;
20574191Sjosephb 
20584191Sjosephb 		tlb_info = cpup->cpu_m.mcpu_tlb_info;
20594191Sjosephb 		while (tlb_info == TLB_CPU_HALTED) {
20604191Sjosephb 			(void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
20614381Sjosephb 			    TLB_CPU_HALTED | TLB_INVAL_ALL);
20624191Sjosephb 			SMT_PAUSE();
20634191Sjosephb 			tlb_info = cpup->cpu_m.mcpu_tlb_info;
20644191Sjosephb 		}
20654191Sjosephb 		if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
20664191Sjosephb 			HATSTAT_INC(hs_tlb_inval_delayed);
20674191Sjosephb 			CPUSET_DEL(cpus_to_shootdown, c);
20684191Sjosephb 		}
20694191Sjosephb 	}
20705084Sjohnlev #endif
20714191Sjosephb 
20723446Smrj 	if (CPUSET_ISNULL(cpus_to_shootdown) ||
20733446Smrj 	    CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
20743446Smrj 
20755084Sjohnlev #ifdef __xpv
20765084Sjohnlev 		if (va == DEMAP_ALL_ADDR)
20775084Sjohnlev 			xen_flush_tlb();
20785084Sjohnlev 		else
20795084Sjohnlev 			xen_flush_va((caddr_t)va);
20805084Sjohnlev #else
20813446Smrj 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
20825084Sjohnlev #endif
20833446Smrj 
20843446Smrj 	} else {
20853446Smrj 
20863446Smrj 		CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
20875084Sjohnlev #ifdef __xpv
20885084Sjohnlev 		if (va == DEMAP_ALL_ADDR)
20895084Sjohnlev 			xen_gflush_tlb(cpus_to_shootdown);
20905084Sjohnlev 		else
20915084Sjohnlev 			xen_gflush_va((caddr_t)va, cpus_to_shootdown);
20925084Sjohnlev #else
20933446Smrj 		xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL, X_CALL_HIPRI,
20943446Smrj 		    cpus_to_shootdown, hati_demap_func);
20955084Sjohnlev #endif
20963446Smrj 
20973446Smrj 	}
20980Sstevel@tonic-gate 	kpreempt_enable();
20990Sstevel@tonic-gate }
21000Sstevel@tonic-gate 
21010Sstevel@tonic-gate /*
21020Sstevel@tonic-gate  * Interior routine for HAT_UNLOADs from hat_unload_callback(),
21030Sstevel@tonic-gate  * hat_kmap_unload() OR from hat_steal() code.  This routine doesn't
21040Sstevel@tonic-gate  * handle releasing of the htables.
21050Sstevel@tonic-gate  */
21060Sstevel@tonic-gate void
21070Sstevel@tonic-gate hat_pte_unmap(
21080Sstevel@tonic-gate 	htable_t	*ht,
21090Sstevel@tonic-gate 	uint_t		entry,
21100Sstevel@tonic-gate 	uint_t		flags,
21110Sstevel@tonic-gate 	x86pte_t	old_pte,
21120Sstevel@tonic-gate 	void		*pte_ptr)
21130Sstevel@tonic-gate {
21140Sstevel@tonic-gate 	hat_t		*hat = ht->ht_hat;
21150Sstevel@tonic-gate 	hment_t		*hm = NULL;
21160Sstevel@tonic-gate 	page_t		*pp = NULL;
21170Sstevel@tonic-gate 	level_t		l = ht->ht_level;
21180Sstevel@tonic-gate 	pfn_t		pfn;
21190Sstevel@tonic-gate 
21200Sstevel@tonic-gate 	/*
21210Sstevel@tonic-gate 	 * We always track the locking counts, even if nothing is unmapped
21220Sstevel@tonic-gate 	 */
21230Sstevel@tonic-gate 	if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
21240Sstevel@tonic-gate 		ASSERT(ht->ht_lock_cnt > 0);
21250Sstevel@tonic-gate 		HTABLE_LOCK_DEC(ht);
21260Sstevel@tonic-gate 	}
21270Sstevel@tonic-gate 
21280Sstevel@tonic-gate 	/*
21290Sstevel@tonic-gate 	 * Figure out which page's mapping list lock to acquire using the PFN
21300Sstevel@tonic-gate 	 * passed in "old" PTE. We then attempt to invalidate the PTE.
21310Sstevel@tonic-gate 	 * If another thread, probably a hat_pageunload, has asynchronously
21320Sstevel@tonic-gate 	 * unmapped/remapped this address we'll loop here.
21330Sstevel@tonic-gate 	 */
21340Sstevel@tonic-gate 	ASSERT(ht->ht_busy > 0);
21350Sstevel@tonic-gate 	while (PTE_ISVALID(old_pte)) {
21360Sstevel@tonic-gate 		pfn = PTE2PFN(old_pte, l);
21373446Smrj 		if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
21380Sstevel@tonic-gate 			pp = NULL;
21390Sstevel@tonic-gate 		} else {
21405084Sjohnlev #ifdef __xpv
21415084Sjohnlev 			if (pfn == PFN_INVALID)
21425084Sjohnlev 				panic("Invalid PFN, but not PT_NOCONSIST");
21435084Sjohnlev #endif
21440Sstevel@tonic-gate 			pp = page_numtopp_nolock(pfn);
214547Sjosephb 			if (pp == NULL) {
214647Sjosephb 				panic("no page_t, not NOCONSIST: old_pte="
214747Sjosephb 				    FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
214847Sjosephb 				    old_pte, (uintptr_t)ht, entry,
214947Sjosephb 				    (uintptr_t)pte_ptr);
215047Sjosephb 			}
21510Sstevel@tonic-gate 			x86_hm_enter(pp);
21520Sstevel@tonic-gate 		}
215347Sjosephb 
215447Sjosephb 		/*
215547Sjosephb 		 * If freeing the address space, check that the PTE
215647Sjosephb 		 * hasn't changed, as the mappings are no longer in use by
215747Sjosephb 		 * any thread, invalidation is unnecessary.
215847Sjosephb 		 * If not freeing, do a full invalidate.
21595084Sjohnlev 		 *
21605084Sjohnlev 		 * On the hypervisor we must always remove mappings, as a
21615084Sjohnlev 		 * writable mapping left behind could cause a page table
21625084Sjohnlev 		 * allocation to fail.
216347Sjosephb 		 */
21645084Sjohnlev #if !defined(__xpv)
216547Sjosephb 		if (hat->hat_flags & HAT_FREEING)
216647Sjosephb 			old_pte = x86pte_get(ht, entry);
216747Sjosephb 		else
21685084Sjohnlev #endif
21693446Smrj 			old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr);
21700Sstevel@tonic-gate 
21710Sstevel@tonic-gate 		/*
21720Sstevel@tonic-gate 		 * If the page hadn't changed we've unmapped it and can proceed
21730Sstevel@tonic-gate 		 */
21740Sstevel@tonic-gate 		if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
21750Sstevel@tonic-gate 			break;
21760Sstevel@tonic-gate 
21770Sstevel@tonic-gate 		/*
21780Sstevel@tonic-gate 		 * Otherwise, we'll have to retry with the current old_pte.
21790Sstevel@tonic-gate 		 * Drop the hment lock, since the pfn may have changed.
21800Sstevel@tonic-gate 		 */
21810Sstevel@tonic-gate 		if (pp != NULL) {
21820Sstevel@tonic-gate 			x86_hm_exit(pp);
21830Sstevel@tonic-gate 			pp = NULL;
21840Sstevel@tonic-gate 		} else {
21853446Smrj 			ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
21860Sstevel@tonic-gate 		}
21870Sstevel@tonic-gate 	}
21880Sstevel@tonic-gate 
21890Sstevel@tonic-gate 	/*
21900Sstevel@tonic-gate 	 * If the old mapping wasn't valid, there's nothing more to do
21910Sstevel@tonic-gate 	 */
21920Sstevel@tonic-gate 	if (!PTE_ISVALID(old_pte)) {
21930Sstevel@tonic-gate 		if (pp != NULL)
21940Sstevel@tonic-gate 			x86_hm_exit(pp);
21950Sstevel@tonic-gate 		return;
21960Sstevel@tonic-gate 	}
21970Sstevel@tonic-gate 
21980Sstevel@tonic-gate 	/*
21990Sstevel@tonic-gate 	 * Take care of syncing any MOD/REF bits and removing the hment.
22000Sstevel@tonic-gate 	 */
22010Sstevel@tonic-gate 	if (pp != NULL) {
22020Sstevel@tonic-gate 		if (!(flags & HAT_UNLOAD_NOSYNC))
22030Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, old_pte, l);
22040Sstevel@tonic-gate 		hm = hment_remove(pp, ht, entry);
22050Sstevel@tonic-gate 		x86_hm_exit(pp);
22060Sstevel@tonic-gate 		if (hm != NULL)
22070Sstevel@tonic-gate 			hment_free(hm);
22080Sstevel@tonic-gate 	}
22090Sstevel@tonic-gate 
22100Sstevel@tonic-gate 	/*
22110Sstevel@tonic-gate 	 * Handle book keeping in the htable and hat
22120Sstevel@tonic-gate 	 */
22130Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
22140Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
22150Sstevel@tonic-gate 	PGCNT_DEC(hat, l);
22160Sstevel@tonic-gate }
22170Sstevel@tonic-gate 
22180Sstevel@tonic-gate /*
22190Sstevel@tonic-gate  * very cheap unload implementation to special case some kernel addresses
22200Sstevel@tonic-gate  */
22210Sstevel@tonic-gate static void
22220Sstevel@tonic-gate hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
22230Sstevel@tonic-gate {
22240Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
22250Sstevel@tonic-gate 	uintptr_t	eva = va + len;
22263446Smrj 	pgcnt_t		pg_index;
22270Sstevel@tonic-gate 	htable_t	*ht;
22280Sstevel@tonic-gate 	uint_t		entry;
22293446Smrj 	x86pte_t	*pte_ptr;
22300Sstevel@tonic-gate 	x86pte_t	old_pte;
22310Sstevel@tonic-gate 
22320Sstevel@tonic-gate 	for (; va < eva; va += MMU_PAGESIZE) {
22330Sstevel@tonic-gate 		/*
22340Sstevel@tonic-gate 		 * Get the PTE
22350Sstevel@tonic-gate 		 */
22363446Smrj 		pg_index = mmu_btop(va - mmu.kmap_addr);
22373446Smrj 		pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
22383446Smrj 		old_pte = GET_PTE(pte_ptr);
22390Sstevel@tonic-gate 
22400Sstevel@tonic-gate 		/*
22410Sstevel@tonic-gate 		 * get the htable / entry
22420Sstevel@tonic-gate 		 */
22430Sstevel@tonic-gate 		ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
22440Sstevel@tonic-gate 		    >> LEVEL_SHIFT(1)];
22450Sstevel@tonic-gate 		entry = htable_va2entry(va, ht);
22460Sstevel@tonic-gate 
22470Sstevel@tonic-gate 		/*
22480Sstevel@tonic-gate 		 * use mostly common code to unmap it.
22490Sstevel@tonic-gate 		 */
22500Sstevel@tonic-gate 		hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr);
22510Sstevel@tonic-gate 	}
22520Sstevel@tonic-gate }
22530Sstevel@tonic-gate 
22540Sstevel@tonic-gate 
22550Sstevel@tonic-gate /*
22560Sstevel@tonic-gate  * unload a range of virtual address space (no callback)
22570Sstevel@tonic-gate  */
22580Sstevel@tonic-gate void
22590Sstevel@tonic-gate hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
22600Sstevel@tonic-gate {
22610Sstevel@tonic-gate 	uintptr_t va = (uintptr_t)addr;
22623446Smrj 
22635084Sjohnlev 	XPV_DISALLOW_MIGRATE();
22643446Smrj 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
22650Sstevel@tonic-gate 
22660Sstevel@tonic-gate 	/*
22670Sstevel@tonic-gate 	 * special case for performance.
22680Sstevel@tonic-gate 	 */
22690Sstevel@tonic-gate 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
22700Sstevel@tonic-gate 		ASSERT(hat == kas.a_hat);
22710Sstevel@tonic-gate 		hat_kmap_unload(addr, len, flags);
22723446Smrj 	} else {
22733446Smrj 		hat_unload_callback(hat, addr, len, flags, NULL);
22740Sstevel@tonic-gate 	}
22755084Sjohnlev 	XPV_ALLOW_MIGRATE();
22760Sstevel@tonic-gate }
22770Sstevel@tonic-gate 
22780Sstevel@tonic-gate /*
22790Sstevel@tonic-gate  * Do the callbacks for ranges being unloaded.
22800Sstevel@tonic-gate  */
22810Sstevel@tonic-gate typedef struct range_info {
22820Sstevel@tonic-gate 	uintptr_t	rng_va;
22830Sstevel@tonic-gate 	ulong_t		rng_cnt;
22840Sstevel@tonic-gate 	level_t		rng_level;
22850Sstevel@tonic-gate } range_info_t;
22860Sstevel@tonic-gate 
22870Sstevel@tonic-gate static void
22880Sstevel@tonic-gate handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
22890Sstevel@tonic-gate {
22900Sstevel@tonic-gate 	/*
22910Sstevel@tonic-gate 	 * do callbacks to upper level VM system
22920Sstevel@tonic-gate 	 */
22930Sstevel@tonic-gate 	while (cb != NULL && cnt > 0) {
22940Sstevel@tonic-gate 		--cnt;
22950Sstevel@tonic-gate 		cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
22960Sstevel@tonic-gate 		cb->hcb_end_addr = cb->hcb_start_addr;
22970Sstevel@tonic-gate 		cb->hcb_end_addr +=
22980Sstevel@tonic-gate 		    range[cnt].rng_cnt << LEVEL_SIZE(range[cnt].rng_level);
22990Sstevel@tonic-gate 		cb->hcb_function(cb);
23000Sstevel@tonic-gate 	}
23010Sstevel@tonic-gate }
23020Sstevel@tonic-gate 
23030Sstevel@tonic-gate /*
23040Sstevel@tonic-gate  * Unload a given range of addresses (has optional callback)
23050Sstevel@tonic-gate  *
23060Sstevel@tonic-gate  * Flags:
23070Sstevel@tonic-gate  * define	HAT_UNLOAD		0x00
23080Sstevel@tonic-gate  * define	HAT_UNLOAD_NOSYNC	0x02
23090Sstevel@tonic-gate  * define	HAT_UNLOAD_UNLOCK	0x04
23100Sstevel@tonic-gate  * define	HAT_UNLOAD_OTHER	0x08 - not used
23110Sstevel@tonic-gate  * define	HAT_UNLOAD_UNMAP	0x10 - same as HAT_UNLOAD
23120Sstevel@tonic-gate  */
23130Sstevel@tonic-gate #define	MAX_UNLOAD_CNT (8)
23140Sstevel@tonic-gate void
23150Sstevel@tonic-gate hat_unload_callback(
23160Sstevel@tonic-gate 	hat_t		*hat,
23170Sstevel@tonic-gate 	caddr_t		addr,
23180Sstevel@tonic-gate 	size_t		len,
23190Sstevel@tonic-gate 	uint_t		flags,
23200Sstevel@tonic-gate 	hat_callback_t	*cb)
23210Sstevel@tonic-gate {
23220Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
23230Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
23240Sstevel@tonic-gate 	htable_t	*ht = NULL;
23250Sstevel@tonic-gate 	uint_t		entry;
232647Sjosephb 	uintptr_t	contig_va = (uintptr_t)-1L;
23270Sstevel@tonic-gate 	range_info_t	r[MAX_UNLOAD_CNT];
23280Sstevel@tonic-gate 	uint_t		r_cnt = 0;
23290Sstevel@tonic-gate 	x86pte_t	old_pte;
23300Sstevel@tonic-gate 
23315084Sjohnlev 	XPV_DISALLOW_MIGRATE();
23323446Smrj 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
23330Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
23340Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
23350Sstevel@tonic-gate 
23363446Smrj 	/*
23373446Smrj 	 * Special case a single page being unloaded for speed. This happens
23383446Smrj 	 * quite frequently, COW faults after a fork() for example.
23393446Smrj 	 */
23403446Smrj 	if (cb == NULL && len == MMU_PAGESIZE) {
23413446Smrj 		ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
23423446Smrj 		if (ht != NULL) {
23433446Smrj 			if (PTE_ISVALID(old_pte))
23443446Smrj 				hat_pte_unmap(ht, entry, flags, old_pte, NULL);
23453446Smrj 			htable_release(ht);
23463446Smrj 		}
23475084Sjohnlev 		XPV_ALLOW_MIGRATE();
23483446Smrj 		return;
23493446Smrj 	}
23503446Smrj 
23510Sstevel@tonic-gate 	while (vaddr < eaddr) {
23520Sstevel@tonic-gate 		old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
23530Sstevel@tonic-gate 		if (ht == NULL)
23540Sstevel@tonic-gate 			break;
23550Sstevel@tonic-gate 
23560Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
23570Sstevel@tonic-gate 
23580Sstevel@tonic-gate 		if (vaddr < (uintptr_t)addr)
23590Sstevel@tonic-gate 			panic("hat_unload_callback(): unmap inside large page");
23600Sstevel@tonic-gate 
23610Sstevel@tonic-gate 		/*
23620Sstevel@tonic-gate 		 * We'll do the call backs for contiguous ranges
23630Sstevel@tonic-gate 		 */
236447Sjosephb 		if (vaddr != contig_va ||
23650Sstevel@tonic-gate 		    (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
23660Sstevel@tonic-gate 			if (r_cnt == MAX_UNLOAD_CNT) {
23670Sstevel@tonic-gate 				handle_ranges(cb, r_cnt, r);
23680Sstevel@tonic-gate 				r_cnt = 0;
23690Sstevel@tonic-gate 			}
23700Sstevel@tonic-gate 			r[r_cnt].rng_va = vaddr;
23710Sstevel@tonic-gate 			r[r_cnt].rng_cnt = 0;
23720Sstevel@tonic-gate 			r[r_cnt].rng_level = ht->ht_level;
23730Sstevel@tonic-gate 			++r_cnt;
23740Sstevel@tonic-gate 		}
23750Sstevel@tonic-gate 
23760Sstevel@tonic-gate 		/*
23770Sstevel@tonic-gate 		 * Unload one mapping from the page tables.
23780Sstevel@tonic-gate 		 */
23790Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
23800Sstevel@tonic-gate 		hat_pte_unmap(ht, entry, flags, old_pte, NULL);
23810Sstevel@tonic-gate 		ASSERT(ht->ht_level <= mmu.max_page_level);
23820Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
238347Sjosephb 		contig_va = vaddr;
23840Sstevel@tonic-gate 		++r[r_cnt - 1].rng_cnt;
23850Sstevel@tonic-gate 	}
23860Sstevel@tonic-gate 	if (ht)
23870Sstevel@tonic-gate 		htable_release(ht);
23880Sstevel@tonic-gate 
23890Sstevel@tonic-gate 	/*
23900Sstevel@tonic-gate 	 * handle last range for callbacks
23910Sstevel@tonic-gate 	 */
23920Sstevel@tonic-gate 	if (r_cnt > 0)
23930Sstevel@tonic-gate 		handle_ranges(cb, r_cnt, r);
23945084Sjohnlev 	XPV_ALLOW_MIGRATE();
23950Sstevel@tonic-gate }
23960Sstevel@tonic-gate 
23970Sstevel@tonic-gate /*
23980Sstevel@tonic-gate  * synchronize mapping with software data structures
23990Sstevel@tonic-gate  *
24000Sstevel@tonic-gate  * This interface is currently only used by the working set monitor
24010Sstevel@tonic-gate  * driver.
24020Sstevel@tonic-gate  */
24030Sstevel@tonic-gate /*ARGSUSED*/
24040Sstevel@tonic-gate void
24050Sstevel@tonic-gate hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
24060Sstevel@tonic-gate {
24070Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
24080Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
24090Sstevel@tonic-gate 	htable_t	*ht = NULL;
24100Sstevel@tonic-gate 	uint_t		entry;
24110Sstevel@tonic-gate 	x86pte_t	pte;
24120Sstevel@tonic-gate 	x86pte_t	save_pte;
24130Sstevel@tonic-gate 	x86pte_t	new;
24140Sstevel@tonic-gate 	page_t		*pp;
24150Sstevel@tonic-gate 
24160Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(vaddr));
24170Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
24180Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
24193446Smrj 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
24200Sstevel@tonic-gate 
24215084Sjohnlev 	XPV_DISALLOW_MIGRATE();
24220Sstevel@tonic-gate 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
24230Sstevel@tonic-gate try_again:
24240Sstevel@tonic-gate 		pte = htable_walk(hat, &ht, &vaddr, eaddr);
24250Sstevel@tonic-gate 		if (ht == NULL)
24260Sstevel@tonic-gate 			break;
24270Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
24280Sstevel@tonic-gate 
24293446Smrj 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
24300Sstevel@tonic-gate 		    PTE_GET(pte, PT_REF | PT_MOD) == 0)
24310Sstevel@tonic-gate 			continue;
24320Sstevel@tonic-gate 
24330Sstevel@tonic-gate 		/*
24340Sstevel@tonic-gate 		 * We need to acquire the mapping list lock to protect
24350Sstevel@tonic-gate 		 * against hat_pageunload(), hat_unload(), etc.
24360Sstevel@tonic-gate 		 */
24370Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
24380Sstevel@tonic-gate 		if (pp == NULL)
24390Sstevel@tonic-gate 			break;
24400Sstevel@tonic-gate 		x86_hm_enter(pp);
24410Sstevel@tonic-gate 		save_pte = pte;
24420Sstevel@tonic-gate 		pte = x86pte_get(ht, entry);
24430Sstevel@tonic-gate 		if (pte != save_pte) {
24440Sstevel@tonic-gate 			x86_hm_exit(pp);
24450Sstevel@tonic-gate 			goto try_again;
24460Sstevel@tonic-gate 		}
24473446Smrj 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
24480Sstevel@tonic-gate 		    PTE_GET(pte, PT_REF | PT_MOD) == 0) {
24490Sstevel@tonic-gate 			x86_hm_exit(pp);
24500Sstevel@tonic-gate 			continue;
24510Sstevel@tonic-gate 		}
24520Sstevel@tonic-gate 
24530Sstevel@tonic-gate 		/*
24540Sstevel@tonic-gate 		 * Need to clear ref or mod bits. We may compete with
24550Sstevel@tonic-gate 		 * hardware updating the R/M bits and have to try again.
24560Sstevel@tonic-gate 		 */
24570Sstevel@tonic-gate 		if (flags == HAT_SYNC_ZERORM) {
24580Sstevel@tonic-gate 			new = pte;
24590Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD);
24600Sstevel@tonic-gate 			pte = hati_update_pte(ht, entry, pte, new);
24610Sstevel@tonic-gate 			if (pte != 0) {
24620Sstevel@tonic-gate 				x86_hm_exit(pp);
24630Sstevel@tonic-gate 				goto try_again;
24640Sstevel@tonic-gate 			}
24650Sstevel@tonic-gate 		} else {
24660Sstevel@tonic-gate 			/*
24670Sstevel@tonic-gate 			 * sync the PTE to the page_t
24680Sstevel@tonic-gate 			 */
24690Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
24700Sstevel@tonic-gate 		}
24710Sstevel@tonic-gate 		x86_hm_exit(pp);
24720Sstevel@tonic-gate 	}
24730Sstevel@tonic-gate 	if (ht)
24740Sstevel@tonic-gate 		htable_release(ht);
24755084Sjohnlev 	XPV_ALLOW_MIGRATE();
24760Sstevel@tonic-gate }
24770Sstevel@tonic-gate 
24780Sstevel@tonic-gate /*
24790Sstevel@tonic-gate  * void	hat_map(hat, addr, len, flags)
24800Sstevel@tonic-gate  */
24810Sstevel@tonic-gate /*ARGSUSED*/
24820Sstevel@tonic-gate void
24830Sstevel@tonic-gate hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
24840Sstevel@tonic-gate {
24850Sstevel@tonic-gate 	/* does nothing */
24860Sstevel@tonic-gate }
24870Sstevel@tonic-gate 
24880Sstevel@tonic-gate /*
24890Sstevel@tonic-gate  * uint_t hat_getattr(hat, addr, *attr)
24900Sstevel@tonic-gate  *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
24910Sstevel@tonic-gate  *	mapping and *attr is valid, nonzero if there was no mapping and
24920Sstevel@tonic-gate  *	*attr is not valid.
24930Sstevel@tonic-gate  */
24940Sstevel@tonic-gate uint_t
24950Sstevel@tonic-gate hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
24960Sstevel@tonic-gate {
24970Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
24980Sstevel@tonic-gate 	htable_t	*ht = NULL;
24990Sstevel@tonic-gate 	x86pte_t	pte;
25000Sstevel@tonic-gate 
25013446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
25020Sstevel@tonic-gate 
25030Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
25040Sstevel@tonic-gate 		return ((uint_t)-1);
25050Sstevel@tonic-gate 
25063446Smrj 	ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
25070Sstevel@tonic-gate 	if (ht == NULL)
25080Sstevel@tonic-gate 		return ((uint_t)-1);
25090Sstevel@tonic-gate 
25100Sstevel@tonic-gate 	if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
25110Sstevel@tonic-gate 		htable_release(ht);
25120Sstevel@tonic-gate 		return ((uint_t)-1);
25130Sstevel@tonic-gate 	}
25140Sstevel@tonic-gate 
25150Sstevel@tonic-gate 	*attr = PROT_READ;
25160Sstevel@tonic-gate 	if (PTE_GET(pte, PT_WRITABLE))
25170Sstevel@tonic-gate 		*attr |= PROT_WRITE;
25180Sstevel@tonic-gate 	if (PTE_GET(pte, PT_USER))
25190Sstevel@tonic-gate 		*attr |= PROT_USER;
25200Sstevel@tonic-gate 	if (!PTE_GET(pte, mmu.pt_nx))
25210Sstevel@tonic-gate 		*attr |= PROT_EXEC;
25223446Smrj 	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
25230Sstevel@tonic-gate 		*attr |= HAT_NOSYNC;
25240Sstevel@tonic-gate 	htable_release(ht);
25250Sstevel@tonic-gate 	return (0);
25260Sstevel@tonic-gate }
25270Sstevel@tonic-gate 
25280Sstevel@tonic-gate /*
25290Sstevel@tonic-gate  * hat_updateattr() applies the given attribute change to an existing mapping
25300Sstevel@tonic-gate  */
25310Sstevel@tonic-gate #define	HAT_LOAD_ATTR		1
25320Sstevel@tonic-gate #define	HAT_SET_ATTR		2
25330Sstevel@tonic-gate #define	HAT_CLR_ATTR		3
25340Sstevel@tonic-gate 
25350Sstevel@tonic-gate static void
25360Sstevel@tonic-gate hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
25370Sstevel@tonic-gate {
25380Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
25390Sstevel@tonic-gate 	uintptr_t	eaddr = (uintptr_t)addr + len;
25400Sstevel@tonic-gate 	htable_t	*ht = NULL;
25410Sstevel@tonic-gate 	uint_t		entry;
25420Sstevel@tonic-gate 	x86pte_t	oldpte, newpte;
25430Sstevel@tonic-gate 	page_t		*pp;
25440Sstevel@tonic-gate 
25455084Sjohnlev 	XPV_DISALLOW_MIGRATE();
25460Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
25470Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
25480Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
25490Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
25500Sstevel@tonic-gate 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
25510Sstevel@tonic-gate try_again:
25520Sstevel@tonic-gate 		oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
25530Sstevel@tonic-gate 		if (ht == NULL)
25540Sstevel@tonic-gate 			break;
25553446Smrj 		if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
25560Sstevel@tonic-gate 			continue;
25570Sstevel@tonic-gate 
25580Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
25590Sstevel@tonic-gate 		if (pp == NULL)
25600Sstevel@tonic-gate 			continue;
25610Sstevel@tonic-gate 		x86_hm_enter(pp);
25620Sstevel@tonic-gate 
25630Sstevel@tonic-gate 		newpte = oldpte;
25640Sstevel@tonic-gate 		/*
25650Sstevel@tonic-gate 		 * We found a page table entry in the desired range,
25660Sstevel@tonic-gate 		 * figure out the new attributes.
25670Sstevel@tonic-gate 		 */
25680Sstevel@tonic-gate 		if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
25690Sstevel@tonic-gate 			if ((attr & PROT_WRITE) &&
25700Sstevel@tonic-gate 			    !PTE_GET(oldpte, PT_WRITABLE))
25710Sstevel@tonic-gate 				newpte |= PT_WRITABLE;
25720Sstevel@tonic-gate 
25733446Smrj 			if ((attr & HAT_NOSYNC) &&
25743446Smrj 			    PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
25750Sstevel@tonic-gate 				newpte |= PT_NOSYNC;
25760Sstevel@tonic-gate 
25770Sstevel@tonic-gate 			if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
25780Sstevel@tonic-gate 				newpte &= ~mmu.pt_nx;
25790Sstevel@tonic-gate 		}
25800Sstevel@tonic-gate 
25810Sstevel@tonic-gate 		if (what == HAT_LOAD_ATTR) {
25820Sstevel@tonic-gate 			if (!(attr & PROT_WRITE) &&
25830Sstevel@tonic-gate 			    PTE_GET(oldpte, PT_WRITABLE))
25840Sstevel@tonic-gate 				newpte &= ~PT_WRITABLE;
25850Sstevel@tonic-gate 
25863446Smrj 			if (!(attr & HAT_NOSYNC) &&
25873446Smrj 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
25883446Smrj 				newpte &= ~PT_SOFTWARE;
25890Sstevel@tonic-gate 
25900Sstevel@tonic-gate 			if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
25910Sstevel@tonic-gate 				newpte |= mmu.pt_nx;
25920Sstevel@tonic-gate 		}
25930Sstevel@tonic-gate 
25940Sstevel@tonic-gate 		if (what == HAT_CLR_ATTR) {
25950Sstevel@tonic-gate 			if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
25960Sstevel@tonic-gate 				newpte &= ~PT_WRITABLE;
25970Sstevel@tonic-gate 
25983446Smrj 			if ((attr & HAT_NOSYNC) &&
25993446Smrj 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
26003446Smrj 				newpte &= ~PT_SOFTWARE;
26010Sstevel@tonic-gate 
26020Sstevel@tonic-gate 			if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
26030Sstevel@tonic-gate 				newpte |= mmu.pt_nx;
26040Sstevel@tonic-gate 		}
26050Sstevel@tonic-gate 
26060Sstevel@tonic-gate 		/*
26073446Smrj 		 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
26083446Smrj 		 * x86pte_set() depends on this.
26093446Smrj 		 */
26103446Smrj 		if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
26113446Smrj 			newpte |= PT_REF | PT_MOD;
26123446Smrj 
26133446Smrj 		/*
26140Sstevel@tonic-gate 		 * what about PROT_READ or others? this code only handles:
26150Sstevel@tonic-gate 		 * EXEC, WRITE, NOSYNC
26160Sstevel@tonic-gate 		 */
26170Sstevel@tonic-gate 
26180Sstevel@tonic-gate 		/*
26190Sstevel@tonic-gate 		 * If new PTE really changed, update the table.
26200Sstevel@tonic-gate 		 */
26210Sstevel@tonic-gate 		if (newpte != oldpte) {
26220Sstevel@tonic-gate 			entry = htable_va2entry(vaddr, ht);
26230Sstevel@tonic-gate 			oldpte = hati_update_pte(ht, entry, oldpte, newpte);
26240Sstevel@tonic-gate 			if (oldpte != 0) {
26250Sstevel@tonic-gate 				x86_hm_exit(pp);
26260Sstevel@tonic-gate 				goto try_again;
26270Sstevel@tonic-gate 			}
26280Sstevel@tonic-gate 		}
26290Sstevel@tonic-gate 		x86_hm_exit(pp);
26300Sstevel@tonic-gate 	}
26310Sstevel@tonic-gate 	if (ht)
26320Sstevel@tonic-gate 		htable_release(ht);
26335084Sjohnlev 	XPV_ALLOW_MIGRATE();
26340Sstevel@tonic-gate }
26350Sstevel@tonic-gate 
26360Sstevel@tonic-gate /*
26370Sstevel@tonic-gate  * Various wrappers for hat_updateattr()
26380Sstevel@tonic-gate  */
26390Sstevel@tonic-gate void
26400Sstevel@tonic-gate hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
26410Sstevel@tonic-gate {
26423446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
26430Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
26440Sstevel@tonic-gate }
26450Sstevel@tonic-gate 
26460Sstevel@tonic-gate void
26470Sstevel@tonic-gate hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
26480Sstevel@tonic-gate {
26493446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
26500Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
26510Sstevel@tonic-gate }
26520Sstevel@tonic-gate 
26530Sstevel@tonic-gate void
26540Sstevel@tonic-gate hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
26550Sstevel@tonic-gate {
26563446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
26570Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
26580Sstevel@tonic-gate }
26590Sstevel@tonic-gate 
26600Sstevel@tonic-gate void
26610Sstevel@tonic-gate hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
26620Sstevel@tonic-gate {
26633446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
26640Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
26650Sstevel@tonic-gate }
26660Sstevel@tonic-gate 
26670Sstevel@tonic-gate /*
26680Sstevel@tonic-gate  * size_t hat_getpagesize(hat, addr)
26690Sstevel@tonic-gate  *	returns pagesize in bytes for <hat, addr>. returns -1 of there is
26700Sstevel@tonic-gate  *	no mapping. This is an advisory call.
26710Sstevel@tonic-gate  */
26720Sstevel@tonic-gate ssize_t
26730Sstevel@tonic-gate hat_getpagesize(hat_t *hat, caddr_t addr)
26740Sstevel@tonic-gate {
26750Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
26760Sstevel@tonic-gate 	htable_t	*ht;
26770Sstevel@tonic-gate 	size_t		pagesize;
26780Sstevel@tonic-gate 
26793446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
26800Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
26810Sstevel@tonic-gate 		return (-1);
26820Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, NULL);
26830Sstevel@tonic-gate 	if (ht == NULL)
26840Sstevel@tonic-gate 		return (-1);
26850Sstevel@tonic-gate 	pagesize = LEVEL_SIZE(ht->ht_level);
26860Sstevel@tonic-gate 	htable_release(ht);
26870Sstevel@tonic-gate 	return (pagesize);
26880Sstevel@tonic-gate }
26890Sstevel@tonic-gate 
26900Sstevel@tonic-gate 
26910Sstevel@tonic-gate 
26920Sstevel@tonic-gate /*
26930Sstevel@tonic-gate  * pfn_t hat_getpfnum(hat, addr)
26940Sstevel@tonic-gate  *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
26950Sstevel@tonic-gate  */
26960Sstevel@tonic-gate pfn_t
26970Sstevel@tonic-gate hat_getpfnum(hat_t *hat, caddr_t addr)
26980Sstevel@tonic-gate {
26990Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
27000Sstevel@tonic-gate 	htable_t	*ht;
27010Sstevel@tonic-gate 	uint_t		entry;
27020Sstevel@tonic-gate 	pfn_t		pfn = PFN_INVALID;
27030Sstevel@tonic-gate 
27043446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
27050Sstevel@tonic-gate 	if (khat_running == 0)
27063446Smrj 		return (PFN_INVALID);
27070Sstevel@tonic-gate 
27080Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
27090Sstevel@tonic-gate 		return (PFN_INVALID);
27100Sstevel@tonic-gate 
27115084Sjohnlev 	XPV_DISALLOW_MIGRATE();
27120Sstevel@tonic-gate 	/*
27130Sstevel@tonic-gate 	 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
27140Sstevel@tonic-gate 	 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
27150Sstevel@tonic-gate 	 * this up.
27160Sstevel@tonic-gate 	 */
27170Sstevel@tonic-gate 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
27180Sstevel@tonic-gate 		x86pte_t pte;
27193446Smrj 		pgcnt_t pg_index;
27203446Smrj 
27213446Smrj 		pg_index = mmu_btop(vaddr - mmu.kmap_addr);
27223446Smrj 		pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
27235084Sjohnlev 		if (PTE_ISVALID(pte))
27245084Sjohnlev 			/*LINTED [use of constant 0 causes a lint warning] */
27255084Sjohnlev 			pfn = PTE2PFN(pte, 0);
27265084Sjohnlev 		XPV_ALLOW_MIGRATE();
27275084Sjohnlev 		return (pfn);
27280Sstevel@tonic-gate 	}
27290Sstevel@tonic-gate 
27300Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, &entry);
27315084Sjohnlev 	if (ht == NULL) {
27325084Sjohnlev 		XPV_ALLOW_MIGRATE();
27330Sstevel@tonic-gate 		return (PFN_INVALID);
27345084Sjohnlev 	}
27350Sstevel@tonic-gate 	ASSERT(vaddr >= ht->ht_vaddr);
27360Sstevel@tonic-gate 	ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
27370Sstevel@tonic-gate 	pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
27380Sstevel@tonic-gate 	if (ht->ht_level > 0)
27390Sstevel@tonic-gate 		pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
27400Sstevel@tonic-gate 	htable_release(ht);
27415084Sjohnlev 	XPV_ALLOW_MIGRATE();
27420Sstevel@tonic-gate 	return (pfn);
27430Sstevel@tonic-gate }
27440Sstevel@tonic-gate 
27450Sstevel@tonic-gate /*
27460Sstevel@tonic-gate  * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
27470Sstevel@tonic-gate  * Use hat_getpfnum(kas.a_hat, ...) instead.
27480Sstevel@tonic-gate  *
27490Sstevel@tonic-gate  * We'd like to return PFN_INVALID if the mappings have underlying page_t's
27500Sstevel@tonic-gate  * but can't right now due to the fact that some software has grown to use
27510Sstevel@tonic-gate  * this interface incorrectly. So for now when the interface is misused,
27520Sstevel@tonic-gate  * return a warning to the user that in the future it won't work in the
27530Sstevel@tonic-gate  * way they're abusing it, and carry on.
27540Sstevel@tonic-gate  *
27550Sstevel@tonic-gate  * Note that hat_getkpfnum() is never supported on amd64.
27560Sstevel@tonic-gate  */
27570Sstevel@tonic-gate #if !defined(__amd64)
27580Sstevel@tonic-gate pfn_t
27590Sstevel@tonic-gate hat_getkpfnum(caddr_t addr)
27600Sstevel@tonic-gate {
27610Sstevel@tonic-gate 	pfn_t	pfn;
27620Sstevel@tonic-gate 	int badcaller = 0;
27630Sstevel@tonic-gate 
27640Sstevel@tonic-gate 	if (khat_running == 0)
27650Sstevel@tonic-gate 		panic("hat_getkpfnum(): called too early\n");
27660Sstevel@tonic-gate 	if ((uintptr_t)addr < kernelbase)
27670Sstevel@tonic-gate 		return (PFN_INVALID);
27680Sstevel@tonic-gate 
27695084Sjohnlev 	XPV_DISALLOW_MIGRATE();
27700Sstevel@tonic-gate 	if (segkpm && IS_KPM_ADDR(addr)) {
27710Sstevel@tonic-gate 		badcaller = 1;
27720Sstevel@tonic-gate 		pfn = hat_kpm_va2pfn(addr);
27730Sstevel@tonic-gate 	} else {
27740Sstevel@tonic-gate 		pfn = hat_getpfnum(kas.a_hat, addr);
27750Sstevel@tonic-gate 		badcaller = pf_is_memory(pfn);
27760Sstevel@tonic-gate 	}
27770Sstevel@tonic-gate 
27780Sstevel@tonic-gate 	if (badcaller)
27790Sstevel@tonic-gate 		hat_getkpfnum_badcall(caller());
27805084Sjohnlev 	XPV_ALLOW_MIGRATE();
27810Sstevel@tonic-gate 	return (pfn);
27820Sstevel@tonic-gate }
27830Sstevel@tonic-gate #endif /* __amd64 */
27840Sstevel@tonic-gate 
27850Sstevel@tonic-gate /*
27860Sstevel@tonic-gate  * int hat_probe(hat, addr)
27870Sstevel@tonic-gate  *	return 0 if no valid mapping is present.  Faster version
27880Sstevel@tonic-gate  *	of hat_getattr in certain architectures.
27890Sstevel@tonic-gate  */
27900Sstevel@tonic-gate int
27910Sstevel@tonic-gate hat_probe(hat_t *hat, caddr_t addr)
27920Sstevel@tonic-gate {
27930Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
27940Sstevel@tonic-gate 	uint_t		entry;
27950Sstevel@tonic-gate 	htable_t	*ht;
27960Sstevel@tonic-gate 	pgcnt_t		pg_off;
27970Sstevel@tonic-gate 
27983446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
27990Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
28000Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
28010Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
28020Sstevel@tonic-gate 		return (0);
28030Sstevel@tonic-gate 
28040Sstevel@tonic-gate 	/*
28050Sstevel@tonic-gate 	 * Most common use of hat_probe is from segmap. We special case it
28060Sstevel@tonic-gate 	 * for performance.
28070Sstevel@tonic-gate 	 */
28080Sstevel@tonic-gate 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
28090Sstevel@tonic-gate 		pg_off = mmu_btop(vaddr - mmu.kmap_addr);
28100Sstevel@tonic-gate 		if (mmu.pae_hat)
28110Sstevel@tonic-gate 			return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
28120Sstevel@tonic-gate 		else
28130Sstevel@tonic-gate 			return (PTE_ISVALID(
28140Sstevel@tonic-gate 			    ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
28150Sstevel@tonic-gate 	}
28160Sstevel@tonic-gate 
28170Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, &entry);
28180Sstevel@tonic-gate 	htable_release(ht);
28195084Sjohnlev 	return (ht != NULL);
28200Sstevel@tonic-gate }
28210Sstevel@tonic-gate 
28220Sstevel@tonic-gate /*
28234381Sjosephb  * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
28244381Sjosephb  */
28254381Sjosephb static int
28264381Sjosephb is_it_dism(hat_t *hat, caddr_t va)
28274381Sjosephb {
28284381Sjosephb 	struct seg *seg;
28294381Sjosephb 	struct shm_data *shmd;
28304381Sjosephb 	struct spt_data *sptd;
28314381Sjosephb 
28324381Sjosephb 	seg = as_findseg(hat->hat_as, va, 0);
28334381Sjosephb 	ASSERT(seg != NULL);
28344381Sjosephb 	ASSERT(seg->s_base <= va);
28354381Sjosephb 	shmd = (struct shm_data *)seg->s_data;
28364381Sjosephb 	ASSERT(shmd != NULL);
28374381Sjosephb 	sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
28384381Sjosephb 	ASSERT(sptd != NULL);
28394381Sjosephb 	if (sptd->spt_flags & SHM_PAGEABLE)
28404381Sjosephb 		return (1);
28414381Sjosephb 	return (0);
28424381Sjosephb }
28434381Sjosephb 
28444381Sjosephb /*
28454381Sjosephb  * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
28460Sstevel@tonic-gate  * except that we use the ism_hat's existing mappings to determine the pages
28474381Sjosephb  * and protections to use for this hat. If we find a full properly aligned
28484381Sjosephb  * and sized pagetable, we will attempt to share the pagetable itself.
28490Sstevel@tonic-gate  */
28500Sstevel@tonic-gate /*ARGSUSED*/
28510Sstevel@tonic-gate int
28520Sstevel@tonic-gate hat_share(
28530Sstevel@tonic-gate 	hat_t		*hat,
28540Sstevel@tonic-gate 	caddr_t		addr,
28550Sstevel@tonic-gate 	hat_t		*ism_hat,
28560Sstevel@tonic-gate 	caddr_t		src_addr,
28570Sstevel@tonic-gate 	size_t		len,	/* almost useless value, see below.. */
28580Sstevel@tonic-gate 	uint_t		ismszc)
28590Sstevel@tonic-gate {
28600Sstevel@tonic-gate 	uintptr_t	vaddr_start = (uintptr_t)addr;
28610Sstevel@tonic-gate 	uintptr_t	vaddr;
28620Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr_start + len;
28630Sstevel@tonic-gate 	uintptr_t	ism_addr_start = (uintptr_t)src_addr;
28640Sstevel@tonic-gate 	uintptr_t	ism_addr = ism_addr_start;
28650Sstevel@tonic-gate 	uintptr_t	e_ism_addr = ism_addr + len;
28660Sstevel@tonic-gate 	htable_t	*ism_ht = NULL;
28670Sstevel@tonic-gate 	htable_t	*ht;
28680Sstevel@tonic-gate 	x86pte_t	pte;
28690Sstevel@tonic-gate 	page_t		*pp;
28700Sstevel@tonic-gate 	pfn_t		pfn;
28710Sstevel@tonic-gate 	level_t		l;
28720Sstevel@tonic-gate 	pgcnt_t		pgcnt;
28730Sstevel@tonic-gate 	uint_t		prot;
28744381Sjosephb 	int		is_dism;
28754381Sjosephb 	int		flags;
28760Sstevel@tonic-gate 
28770Sstevel@tonic-gate 	/*
28780Sstevel@tonic-gate 	 * We might be asked to share an empty DISM hat by as_dup()
28790Sstevel@tonic-gate 	 */
28800Sstevel@tonic-gate 	ASSERT(hat != kas.a_hat);
28813446Smrj 	ASSERT(eaddr <= _userlimit);
28820Sstevel@tonic-gate 	if (!(ism_hat->hat_flags & HAT_SHARED)) {
28830Sstevel@tonic-gate 		ASSERT(hat_get_mapped_size(ism_hat) == 0);
28840Sstevel@tonic-gate 		return (0);
28850Sstevel@tonic-gate 	}
28865084Sjohnlev 	XPV_DISALLOW_MIGRATE();
28870Sstevel@tonic-gate 
28880Sstevel@tonic-gate 	/*
28890Sstevel@tonic-gate 	 * The SPT segment driver often passes us a size larger than there are
28900Sstevel@tonic-gate 	 * valid mappings. That's because it rounds the segment size up to a
28910Sstevel@tonic-gate 	 * large pagesize, even if the actual memory mapped by ism_hat is less.
28920Sstevel@tonic-gate 	 */
28930Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr_start));
28940Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(ism_addr_start));
28950Sstevel@tonic-gate 	ASSERT(ism_hat->hat_flags & HAT_SHARED);
28964381Sjosephb 	is_dism = is_it_dism(hat, addr);
28970Sstevel@tonic-gate 	while (ism_addr < e_ism_addr) {
28980Sstevel@tonic-gate 		/*
28990Sstevel@tonic-gate 		 * use htable_walk to get the next valid ISM mapping
29000Sstevel@tonic-gate 		 */
29010Sstevel@tonic-gate 		pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
29020Sstevel@tonic-gate 		if (ism_ht == NULL)
29030Sstevel@tonic-gate 			break;
29040Sstevel@tonic-gate 
29050Sstevel@tonic-gate 		/*
29064381Sjosephb 		 * First check to see if we already share the page table.
29074381Sjosephb 		 */
29084381Sjosephb 		l = ism_ht->ht_level;
29094381Sjosephb 		vaddr = vaddr_start + (ism_addr - ism_addr_start);
29104381Sjosephb 		ht = htable_lookup(hat, vaddr, l);
29114381Sjosephb 		if (ht != NULL) {
29124381Sjosephb 			if (ht->ht_flags & HTABLE_SHARED_PFN)
29134381Sjosephb 				goto shared;
29144381Sjosephb 			htable_release(ht);
29154381Sjosephb 			goto not_shared;
29164381Sjosephb 		}
29174381Sjosephb 
29184381Sjosephb 		/*
29194381Sjosephb 		 * Can't ever share top table.
29204381Sjosephb 		 */
29214381Sjosephb 		if (l == mmu.max_level)
29224381Sjosephb 			goto not_shared;
29234381Sjosephb 
29244381Sjosephb 		/*
29254381Sjosephb 		 * Avoid level mismatches later due to DISM faults.
29264381Sjosephb 		 */
29274381Sjosephb 		if (is_dism && l > 0)
29284381Sjosephb 			goto not_shared;
29294381Sjosephb 
29304381Sjosephb 		/*
29314381Sjosephb 		 * addresses and lengths must align
29324381Sjosephb 		 * table must be fully populated
29334381Sjosephb 		 * no lower level page tables
29344381Sjosephb 		 */
29354381Sjosephb 		if (ism_addr != ism_ht->ht_vaddr ||
29364381Sjosephb 		    (vaddr & LEVEL_OFFSET(l + 1)) != 0)
29374381Sjosephb 			goto not_shared;
29384381Sjosephb 
29394381Sjosephb 		/*
29404381Sjosephb 		 * The range of address space must cover a full table.
29410Sstevel@tonic-gate 		 */
29425159Sjohnlev 		if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
29434381Sjosephb 			goto not_shared;
29444381Sjosephb 
29454381Sjosephb 		/*
29464381Sjosephb 		 * All entries in the ISM page table must be leaf PTEs.
29474381Sjosephb 		 */
29484381Sjosephb 		if (l > 0) {
29494381Sjosephb 			int e;
29504381Sjosephb 
29514381Sjosephb 			/*
29524381Sjosephb 			 * We know the 0th is from htable_walk() above.
29534381Sjosephb 			 */
29544381Sjosephb 			for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
29554381Sjosephb 				x86pte_t pte;
29564381Sjosephb 				pte = x86pte_get(ism_ht, e);
29574381Sjosephb 				if (!PTE_ISPAGE(pte, l))
29584381Sjosephb 					goto not_shared;
29594381Sjosephb 			}
29604381Sjosephb 		}
29614381Sjosephb 
29624381Sjosephb 		/*
29634381Sjosephb 		 * share the page table
29644381Sjosephb 		 */
29654381Sjosephb 		ht = htable_create(hat, vaddr, l, ism_ht);
29664381Sjosephb shared:
29674381Sjosephb 		ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
29684381Sjosephb 		ASSERT(ht->ht_shares == ism_ht);
29694381Sjosephb 		hat->hat_ism_pgcnt +=
29704381Sjosephb 		    (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
29714381Sjosephb 		    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
29724381Sjosephb 		ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
29734381Sjosephb 		htable_release(ht);
29744381Sjosephb 		ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
29754381Sjosephb 		htable_release(ism_ht);
29764381Sjosephb 		ism_ht = NULL;
29774381Sjosephb 		continue;
29784381Sjosephb 
29794381Sjosephb not_shared:
29804381Sjosephb 		/*
29814381Sjosephb 		 * Unable to share the page table. Instead we will
29824381Sjosephb 		 * create new mappings from the values in the ISM mappings.
29834381Sjosephb 		 * Figure out what level size mappings to use;
29844381Sjosephb 		 */
29850Sstevel@tonic-gate 		for (l = ism_ht->ht_level; l > 0; --l) {
29860Sstevel@tonic-gate 			if (LEVEL_SIZE(l) <= eaddr - vaddr &&
29870Sstevel@tonic-gate 			    (vaddr & LEVEL_OFFSET(l)) == 0)
29880Sstevel@tonic-gate 				break;
29890Sstevel@tonic-gate 		}
29900Sstevel@tonic-gate 
29910Sstevel@tonic-gate 		/*
29920Sstevel@tonic-gate 		 * The ISM mapping might be larger than the share area,
29934381Sjosephb 		 * be careful to truncate it if needed.
29940Sstevel@tonic-gate 		 */
29950Sstevel@tonic-gate 		if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
29960Sstevel@tonic-gate 			pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
29970Sstevel@tonic-gate 		} else {
29980Sstevel@tonic-gate 			pgcnt = mmu_btop(eaddr - vaddr);
29990Sstevel@tonic-gate 			l = 0;
30000Sstevel@tonic-gate 		}
30010Sstevel@tonic-gate 
30020Sstevel@tonic-gate 		pfn = PTE2PFN(pte, ism_ht->ht_level);
30030Sstevel@tonic-gate 		ASSERT(pfn != PFN_INVALID);
30040Sstevel@tonic-gate 		while (pgcnt > 0) {
30050Sstevel@tonic-gate 			/*
30060Sstevel@tonic-gate 			 * Make a new pte for the PFN for this level.
30070Sstevel@tonic-gate 			 * Copy protections for the pte from the ISM pte.
30080Sstevel@tonic-gate 			 */
30090Sstevel@tonic-gate 			pp = page_numtopp_nolock(pfn);
30100Sstevel@tonic-gate 			ASSERT(pp != NULL);
30110Sstevel@tonic-gate 
30120Sstevel@tonic-gate 			prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
30130Sstevel@tonic-gate 			if (PTE_GET(pte, PT_WRITABLE))
30140Sstevel@tonic-gate 				prot |= PROT_WRITE;
30150Sstevel@tonic-gate 			if (!PTE_GET(pte, PT_NX))
30160Sstevel@tonic-gate 				prot |= PROT_EXEC;
30170Sstevel@tonic-gate 
30184381Sjosephb 			flags = HAT_LOAD;
30194381Sjosephb 			if (!is_dism)
30204381Sjosephb 				flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
30214381Sjosephb 			while (hati_load_common(hat, vaddr, pp, prot, flags,
30223446Smrj 			    l, pfn) != 0) {
30233446Smrj 				if (l == 0)
30243446Smrj 					panic("hati_load_common() failure");
30253446Smrj 				--l;
30263446Smrj 			}
30270Sstevel@tonic-gate 
30280Sstevel@tonic-gate 			vaddr += LEVEL_SIZE(l);
30290Sstevel@tonic-gate 			ism_addr += LEVEL_SIZE(l);
30300Sstevel@tonic-gate 			pfn += mmu_btop(LEVEL_SIZE(l));
30310Sstevel@tonic-gate 			pgcnt -= mmu_btop(LEVEL_SIZE(l));
30320Sstevel@tonic-gate 		}
30330Sstevel@tonic-gate 	}
30340Sstevel@tonic-gate 	if (ism_ht != NULL)
30350Sstevel@tonic-gate 		htable_release(ism_ht);
30365084Sjohnlev 	XPV_ALLOW_MIGRATE();
30370Sstevel@tonic-gate 	return (0);
30380Sstevel@tonic-gate }
30390Sstevel@tonic-gate 
30400Sstevel@tonic-gate 
30410Sstevel@tonic-gate /*
30420Sstevel@tonic-gate  * hat_unshare() is similar to hat_unload_callback(), but
30430Sstevel@tonic-gate  * we have to look for empty shared pagetables. Note that
30440Sstevel@tonic-gate  * hat_unshare() is always invoked against an entire segment.
30450Sstevel@tonic-gate  */
30460Sstevel@tonic-gate /*ARGSUSED*/
30470Sstevel@tonic-gate void
30480Sstevel@tonic-gate hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
30490Sstevel@tonic-gate {
30504654Sjosephb 	uint64_t	vaddr = (uintptr_t)addr;
30510Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
30520Sstevel@tonic-gate 	htable_t	*ht = NULL;
30530Sstevel@tonic-gate 	uint_t		need_demaps = 0;
30544381Sjosephb 	int		flags = HAT_UNLOAD_UNMAP;
30554381Sjosephb 	level_t		l;
30560Sstevel@tonic-gate 
30570Sstevel@tonic-gate 	ASSERT(hat != kas.a_hat);
30583446Smrj 	ASSERT(eaddr <= _userlimit);
30590Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
30600Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
30615084Sjohnlev 	XPV_DISALLOW_MIGRATE();
30620Sstevel@tonic-gate 
30630Sstevel@tonic-gate 	/*
30640Sstevel@tonic-gate 	 * First go through and remove any shared pagetables.
30650Sstevel@tonic-gate 	 *
30663446Smrj 	 * Note that it's ok to delay the TLB shootdown till the entire range is
30670Sstevel@tonic-gate 	 * finished, because if hat_pageunload() were to unload a shared
30683446Smrj 	 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate.
30690Sstevel@tonic-gate 	 */
30704381Sjosephb 	l = mmu.max_page_level;
30714381Sjosephb 	if (l == mmu.max_level)
30724381Sjosephb 		--l;
30734381Sjosephb 	for (; l >= 0; --l) {
30744381Sjosephb 		for (vaddr = (uintptr_t)addr; vaddr < eaddr;
30754381Sjosephb 		    vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
30764381Sjosephb 			ASSERT(!IN_VA_HOLE(vaddr));
30774381Sjosephb 			/*
30784381Sjosephb 			 * find a pagetable that maps the current address
30794381Sjosephb 			 */
30804381Sjosephb 			ht = htable_lookup(hat, vaddr, l);
30814381Sjosephb 			if (ht == NULL)
30824381Sjosephb 				continue;
30830Sstevel@tonic-gate 			if (ht->ht_flags & HTABLE_SHARED_PFN) {
30840Sstevel@tonic-gate 				/*
30854381Sjosephb 				 * clear page count, set valid_cnt to 0,
30864381Sjosephb 				 * let htable_release() finish the job
30870Sstevel@tonic-gate 				 */
30884381Sjosephb 				hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
30894381Sjosephb 				    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
30900Sstevel@tonic-gate 				ht->ht_valid_cnt = 0;
30910Sstevel@tonic-gate 				need_demaps = 1;
30920Sstevel@tonic-gate 			}
30930Sstevel@tonic-gate 			htable_release(ht);
30940Sstevel@tonic-gate 		}
30950Sstevel@tonic-gate 	}
30960Sstevel@tonic-gate 
30970Sstevel@tonic-gate 	/*
30980Sstevel@tonic-gate 	 * flush the TLBs - since we're probably dealing with MANY mappings
30990Sstevel@tonic-gate 	 * we do just one CR3 reload.
31000Sstevel@tonic-gate 	 */
31010Sstevel@tonic-gate 	if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
31023446Smrj 		hat_tlb_inval(hat, DEMAP_ALL_ADDR);
31030Sstevel@tonic-gate 
31040Sstevel@tonic-gate 	/*
31050Sstevel@tonic-gate 	 * Now go back and clean up any unaligned mappings that
31060Sstevel@tonic-gate 	 * couldn't share pagetables.
31070Sstevel@tonic-gate 	 */
31084381Sjosephb 	if (!is_it_dism(hat, addr))
31094381Sjosephb 		flags |= HAT_UNLOAD_UNLOCK;
31104381Sjosephb 	hat_unload(hat, addr, len, flags);
31115084Sjohnlev 	XPV_ALLOW_MIGRATE();
31120Sstevel@tonic-gate }
31130Sstevel@tonic-gate 
31140Sstevel@tonic-gate 
31150Sstevel@tonic-gate /*
31160Sstevel@tonic-gate  * hat_reserve() does nothing
31170Sstevel@tonic-gate  */
31180Sstevel@tonic-gate /*ARGSUSED*/
31190Sstevel@tonic-gate void
31200Sstevel@tonic-gate hat_reserve(struct as *as, caddr_t addr, size_t len)
31210Sstevel@tonic-gate {
31220Sstevel@tonic-gate }
31230Sstevel@tonic-gate 
31240Sstevel@tonic-gate 
31250Sstevel@tonic-gate /*
31260Sstevel@tonic-gate  * Called when all mappings to a page should have write permission removed.
31270Sstevel@tonic-gate  * Mostly stolem from hat_pagesync()
31280Sstevel@tonic-gate  */
31290Sstevel@tonic-gate static void
31300Sstevel@tonic-gate hati_page_clrwrt(struct page *pp)
31310Sstevel@tonic-gate {
31320Sstevel@tonic-gate 	hment_t		*hm = NULL;
31330Sstevel@tonic-gate 	htable_t	*ht;
31340Sstevel@tonic-gate 	uint_t		entry;
31350Sstevel@tonic-gate 	x86pte_t	old;
31360Sstevel@tonic-gate 	x86pte_t	new;
31370Sstevel@tonic-gate 	uint_t		pszc = 0;
31380Sstevel@tonic-gate 
31395084Sjohnlev 	XPV_DISALLOW_MIGRATE();
31400Sstevel@tonic-gate next_size:
31410Sstevel@tonic-gate 	/*
31420Sstevel@tonic-gate 	 * walk thru the mapping list clearing write permission
31430Sstevel@tonic-gate 	 */
31440Sstevel@tonic-gate 	x86_hm_enter(pp);
31450Sstevel@tonic-gate 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
31460Sstevel@tonic-gate 		if (ht->ht_level < pszc)
31470Sstevel@tonic-gate 			continue;
31480Sstevel@tonic-gate 		old = x86pte_get(ht, entry);
31490Sstevel@tonic-gate 
31500Sstevel@tonic-gate 		for (;;) {
31510Sstevel@tonic-gate 			/*
31520Sstevel@tonic-gate 			 * Is this mapping of interest?
31530Sstevel@tonic-gate 			 */
31540Sstevel@tonic-gate 			if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
31550Sstevel@tonic-gate 			    PTE_GET(old, PT_WRITABLE) == 0)
31560Sstevel@tonic-gate 				break;
31570Sstevel@tonic-gate 
31580Sstevel@tonic-gate 			/*
31590Sstevel@tonic-gate 			 * Clear ref/mod writable bits. This requires cross
31600Sstevel@tonic-gate 			 * calls to ensure any executing TLBs see cleared bits.
31610Sstevel@tonic-gate 			 */
31620Sstevel@tonic-gate 			new = old;
31630Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
31640Sstevel@tonic-gate 			old = hati_update_pte(ht, entry, old, new);
31650Sstevel@tonic-gate 			if (old != 0)
31660Sstevel@tonic-gate 				continue;
31670Sstevel@tonic-gate 
31680Sstevel@tonic-gate 			break;
31690Sstevel@tonic-gate 		}
31700Sstevel@tonic-gate 	}
31710Sstevel@tonic-gate 	x86_hm_exit(pp);
31720Sstevel@tonic-gate 	while (pszc < pp->p_szc) {
31730Sstevel@tonic-gate 		page_t *tpp;
31740Sstevel@tonic-gate 		pszc++;
31750Sstevel@tonic-gate 		tpp = PP_GROUPLEADER(pp, pszc);
31760Sstevel@tonic-gate 		if (pp != tpp) {
31770Sstevel@tonic-gate 			pp = tpp;
31780Sstevel@tonic-gate 			goto next_size;
31790Sstevel@tonic-gate 		}
31800Sstevel@tonic-gate 	}
31815084Sjohnlev 	XPV_ALLOW_MIGRATE();
31820Sstevel@tonic-gate }
31830Sstevel@tonic-gate 
31840Sstevel@tonic-gate /*
31850Sstevel@tonic-gate  * void hat_page_setattr(pp, flag)
31860Sstevel@tonic-gate  * void hat_page_clrattr(pp, flag)
31870Sstevel@tonic-gate  *	used to set/clr ref/mod bits.
31880Sstevel@tonic-gate  */
31890Sstevel@tonic-gate void
31900Sstevel@tonic-gate hat_page_setattr(struct page *pp, uint_t flag)
31910Sstevel@tonic-gate {
31920Sstevel@tonic-gate 	vnode_t		*vp = pp->p_vnode;
31930Sstevel@tonic-gate 	kmutex_t	*vphm = NULL;
31940Sstevel@tonic-gate 	page_t		**listp;
31954324Sqiao 	int		noshuffle;
31964324Sqiao 
31974324Sqiao 	noshuffle = flag & P_NSH;
31984324Sqiao 	flag &= ~P_NSH;
31990Sstevel@tonic-gate 
32000Sstevel@tonic-gate 	if (PP_GETRM(pp, flag) == flag)
32010Sstevel@tonic-gate 		return;
32020Sstevel@tonic-gate 
32034324Sqiao 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
32044324Sqiao 	    !noshuffle) {
32050Sstevel@tonic-gate 		vphm = page_vnode_mutex(vp);
32060Sstevel@tonic-gate 		mutex_enter(vphm);
32070Sstevel@tonic-gate 	}
32080Sstevel@tonic-gate 
32090Sstevel@tonic-gate 	PP_SETRM(pp, flag);
32100Sstevel@tonic-gate 
32110Sstevel@tonic-gate 	if (vphm != NULL) {
32120Sstevel@tonic-gate 
32130Sstevel@tonic-gate 		/*
32140Sstevel@tonic-gate 		 * Some File Systems examine v_pages for NULL w/o
32150Sstevel@tonic-gate 		 * grabbing the vphm mutex. Must not let it become NULL when
32160Sstevel@tonic-gate 		 * pp is the only page on the list.
32170Sstevel@tonic-gate 		 */
32180Sstevel@tonic-gate 		if (pp->p_vpnext != pp) {
32190Sstevel@tonic-gate 			page_vpsub(&vp->v_pages, pp);
32200Sstevel@tonic-gate 			if (vp->v_pages != NULL)
32210Sstevel@tonic-gate 				listp = &vp->v_pages->p_vpprev->p_vpnext;
32220Sstevel@tonic-gate 			else
32230Sstevel@tonic-gate 				listp = &vp->v_pages;
32240Sstevel@tonic-gate 			page_vpadd(listp, pp);
32250Sstevel@tonic-gate 		}
32260Sstevel@tonic-gate 		mutex_exit(vphm);
32270Sstevel@tonic-gate 	}
32280Sstevel@tonic-gate }
32290Sstevel@tonic-gate 
32300Sstevel@tonic-gate void
32310Sstevel@tonic-gate hat_page_clrattr(struct page *pp, uint_t flag)
32320Sstevel@tonic-gate {
32330Sstevel@tonic-gate 	vnode_t		*vp = pp->p_vnode;
32340Sstevel@tonic-gate 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
32350Sstevel@tonic-gate 
32360Sstevel@tonic-gate 	/*
32372999Sstans 	 * Caller is expected to hold page's io lock for VMODSORT to work
32382999Sstans 	 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
32392999Sstans 	 * bit is cleared.
32402999Sstans 	 * We don't have assert to avoid tripping some existing third party
32412999Sstans 	 * code. The dirty page is moved back to top of the v_page list
32422999Sstans 	 * after IO is done in pvn_write_done().
32430Sstevel@tonic-gate 	 */
32440Sstevel@tonic-gate 	PP_CLRRM(pp, flag);
32450Sstevel@tonic-gate 
32462999Sstans 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
32470Sstevel@tonic-gate 
32480Sstevel@tonic-gate 		/*
32490Sstevel@tonic-gate 		 * VMODSORT works by removing write permissions and getting
32500Sstevel@tonic-gate 		 * a fault when a page is made dirty. At this point
32510Sstevel@tonic-gate 		 * we need to remove write permission from all mappings
32520Sstevel@tonic-gate 		 * to this page.
32530Sstevel@tonic-gate 		 */
32540Sstevel@tonic-gate 		hati_page_clrwrt(pp);
32550Sstevel@tonic-gate 	}
32560Sstevel@tonic-gate }
32570Sstevel@tonic-gate 
32580Sstevel@tonic-gate /*
32590Sstevel@tonic-gate  *	If flag is specified, returns 0 if attribute is disabled
32600Sstevel@tonic-gate  *	and non zero if enabled.  If flag specifes multiple attributs
32610Sstevel@tonic-gate  *	then returns 0 if ALL atriibutes are disabled.  This is an advisory
32620Sstevel@tonic-gate  *	call.
32630Sstevel@tonic-gate  */
32640Sstevel@tonic-gate uint_t
32650Sstevel@tonic-gate hat_page_getattr(struct page *pp, uint_t flag)
32660Sstevel@tonic-gate {
32670Sstevel@tonic-gate 	return (PP_GETRM(pp, flag));
32680Sstevel@tonic-gate }
32690Sstevel@tonic-gate 
32700Sstevel@tonic-gate 
32710Sstevel@tonic-gate /*
32720Sstevel@tonic-gate  * common code used by hat_pageunload() and hment_steal()
32730Sstevel@tonic-gate  */
32740Sstevel@tonic-gate hment_t *
32750Sstevel@tonic-gate hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
32760Sstevel@tonic-gate {
32770Sstevel@tonic-gate 	x86pte_t old_pte;
32780Sstevel@tonic-gate 	pfn_t pfn = pp->p_pagenum;
32790Sstevel@tonic-gate 	hment_t *hm;
32800Sstevel@tonic-gate 
32810Sstevel@tonic-gate 	/*
32820Sstevel@tonic-gate 	 * We need to acquire a hold on the htable in order to
32830Sstevel@tonic-gate 	 * do the invalidate. We know the htable must exist, since
32840Sstevel@tonic-gate 	 * unmap's don't release the htable until after removing any
32850Sstevel@tonic-gate 	 * hment. Having x86_hm_enter() keeps that from proceeding.
32860Sstevel@tonic-gate 	 */
32870Sstevel@tonic-gate 	htable_acquire(ht);
32880Sstevel@tonic-gate 
32890Sstevel@tonic-gate 	/*
32900Sstevel@tonic-gate 	 * Invalidate the PTE and remove the hment.
32910Sstevel@tonic-gate 	 */
32923446Smrj 	old_pte = x86pte_inval(ht, entry, 0, NULL);
329347Sjosephb 	if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
32943446Smrj 		panic("x86pte_inval() failure found PTE = " FMT_PTE
329547Sjosephb 		    " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
329647Sjosephb 		    old_pte, pfn, (uintptr_t)ht, entry);
329747Sjosephb 	}
32980Sstevel@tonic-gate 
32990Sstevel@tonic-gate 	/*
33000Sstevel@tonic-gate 	 * Clean up all the htable information for this mapping
33010Sstevel@tonic-gate 	 */
33020Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
33030Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
33040Sstevel@tonic-gate 	PGCNT_DEC(ht->ht_hat, ht->ht_level);
33050Sstevel@tonic-gate 
33060Sstevel@tonic-gate 	/*
33070Sstevel@tonic-gate 	 * sync ref/mod bits to the page_t
33080Sstevel@tonic-gate 	 */
33093446Smrj 	if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
33100Sstevel@tonic-gate 		hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
33110Sstevel@tonic-gate 
33120Sstevel@tonic-gate 	/*
33130Sstevel@tonic-gate 	 * Remove the mapping list entry for this page.
33140Sstevel@tonic-gate 	 */
33150Sstevel@tonic-gate 	hm = hment_remove(pp, ht, entry);
33160Sstevel@tonic-gate 
33170Sstevel@tonic-gate 	/*
33180Sstevel@tonic-gate 	 * drop the mapping list lock so that we might free the
33190Sstevel@tonic-gate 	 * hment and htable.
33200Sstevel@tonic-gate 	 */
33210Sstevel@tonic-gate 	x86_hm_exit(pp);
33220Sstevel@tonic-gate 	htable_release(ht);
33230Sstevel@tonic-gate 	return (hm);
33240Sstevel@tonic-gate }
33250Sstevel@tonic-gate 
33261841Spraks extern int	vpm_enable;
33270Sstevel@tonic-gate /*
33280Sstevel@tonic-gate  * Unload all translations to a page. If the page is a subpage of a large
33290Sstevel@tonic-gate  * page, the large page mappings are also removed.
33300Sstevel@tonic-gate  *
33310Sstevel@tonic-gate  * The forceflags are unused.
33320Sstevel@tonic-gate  */
33330Sstevel@tonic-gate 
33340Sstevel@tonic-gate /*ARGSUSED*/
33350Sstevel@tonic-gate static int
33360Sstevel@tonic-gate hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
33370Sstevel@tonic-gate {
33380Sstevel@tonic-gate 	page_t		*cur_pp = pp;
33390Sstevel@tonic-gate 	hment_t		*hm;
33400Sstevel@tonic-gate 	hment_t		*prev;
33410Sstevel@tonic-gate 	htable_t	*ht;
33420Sstevel@tonic-gate 	uint_t		entry;
33430Sstevel@tonic-gate 	level_t		level;
33440Sstevel@tonic-gate 
33455084Sjohnlev 	XPV_DISALLOW_MIGRATE();
33461841Spraks #if defined(__amd64)
33471841Spraks 	/*
33481841Spraks 	 * clear the vpm ref.
33491841Spraks 	 */
33501841Spraks 	if (vpm_enable) {
33511841Spraks 		pp->p_vpmref = 0;
33521841Spraks 	}
33531841Spraks #endif
33540Sstevel@tonic-gate 	/*
33550Sstevel@tonic-gate 	 * The loop with next_size handles pages with multiple pagesize mappings
33560Sstevel@tonic-gate 	 */
33570Sstevel@tonic-gate next_size:
33580Sstevel@tonic-gate 	for (;;) {
33590Sstevel@tonic-gate 
33600Sstevel@tonic-gate 		/*
33610Sstevel@tonic-gate 		 * Get a mapping list entry
33620Sstevel@tonic-gate 		 */
33630Sstevel@tonic-gate 		x86_hm_enter(cur_pp);
33640Sstevel@tonic-gate 		for (prev = NULL; ; prev = hm) {
33650Sstevel@tonic-gate 			hm = hment_walk(cur_pp, &ht, &entry, prev);
33660Sstevel@tonic-gate 			if (hm == NULL) {
33670Sstevel@tonic-gate 				x86_hm_exit(cur_pp);
33680Sstevel@tonic-gate 
33690Sstevel@tonic-gate 				/*
33700Sstevel@tonic-gate 				 * If not part of a larger page, we're done.
33710Sstevel@tonic-gate 				 */
33723446Smrj 				if (cur_pp->p_szc <= pg_szcd) {
33735084Sjohnlev 					XPV_ALLOW_MIGRATE();
33740Sstevel@tonic-gate 					return (0);
33753446Smrj 				}
33760Sstevel@tonic-gate 
33770Sstevel@tonic-gate 				/*
33780Sstevel@tonic-gate 				 * Else check the next larger page size.
33790Sstevel@tonic-gate 				 * hat_page_demote() may decrease p_szc
33800Sstevel@tonic-gate 				 * but that's ok we'll just take an extra
33810Sstevel@tonic-gate 				 * trip discover there're no larger mappings
33820Sstevel@tonic-gate 				 * and return.
33830Sstevel@tonic-gate 				 */
33840Sstevel@tonic-gate 				++pg_szcd;
33850Sstevel@tonic-gate 				cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
33860Sstevel@tonic-gate 				goto next_size;
33870Sstevel@tonic-gate 			}
33880Sstevel@tonic-gate 
33890Sstevel@tonic-gate 			/*
33900Sstevel@tonic-gate 			 * If this mapping size matches, remove it.
33910Sstevel@tonic-gate 			 */
33920Sstevel@tonic-gate 			level = ht->ht_level;
33930Sstevel@tonic-gate 			if (level == pg_szcd)
33940Sstevel@tonic-gate 				break;
33950Sstevel@tonic-gate 		}
33960Sstevel@tonic-gate 
33970Sstevel@tonic-gate 		/*
33980Sstevel@tonic-gate 		 * Remove the mapping list entry for this page.
33990Sstevel@tonic-gate 		 * Note this does the x86_hm_exit() for us.
34000Sstevel@tonic-gate 		 */
34010Sstevel@tonic-gate 		hm = hati_page_unmap(cur_pp, ht, entry);
34020Sstevel@tonic-gate 		if (hm != NULL)
34030Sstevel@tonic-gate 			hment_free(hm);
34040Sstevel@tonic-gate 	}
34050Sstevel@tonic-gate }
34060Sstevel@tonic-gate 
34070Sstevel@tonic-gate int
34080Sstevel@tonic-gate hat_pageunload(struct page *pp, uint_t forceflag)
34090Sstevel@tonic-gate {
34100Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp));
34110Sstevel@tonic-gate 	return (hati_pageunload(pp, 0, forceflag));
34120Sstevel@tonic-gate }
34130Sstevel@tonic-gate 
34140Sstevel@tonic-gate /*
34150Sstevel@tonic-gate  * Unload all large mappings to pp and reduce by 1 p_szc field of every large
34160Sstevel@tonic-gate  * page level that included pp.
34170Sstevel@tonic-gate  *
34180Sstevel@tonic-gate  * pp must be locked EXCL. Even though no other constituent pages are locked
34190Sstevel@tonic-gate  * it's legal to unload large mappings to pp because all constituent pages of
34200Sstevel@tonic-gate  * large locked mappings have to be locked SHARED.  therefore if we have EXCL
34210Sstevel@tonic-gate  * lock on one of constituent pages none of the large mappings to pp are
34220Sstevel@tonic-gate  * locked.
34230Sstevel@tonic-gate  *
34240Sstevel@tonic-gate  * Change (always decrease) p_szc field starting from the last constituent
34250Sstevel@tonic-gate  * page and ending with root constituent page so that root's pszc always shows
34260Sstevel@tonic-gate  * the area where hat_page_demote() may be active.
34270Sstevel@tonic-gate  *
34280Sstevel@tonic-gate  * This mechanism is only used for file system pages where it's not always
34290Sstevel@tonic-gate  * possible to get EXCL locks on all constituent pages to demote the size code
34300Sstevel@tonic-gate  * (as is done for anonymous or kernel large pages).
34310Sstevel@tonic-gate  */
34320Sstevel@tonic-gate void
34330Sstevel@tonic-gate hat_page_demote(page_t *pp)
34340Sstevel@tonic-gate {
34350Sstevel@tonic-gate 	uint_t		pszc;
34360Sstevel@tonic-gate 	uint_t		rszc;
34370Sstevel@tonic-gate 	uint_t		szc;
34380Sstevel@tonic-gate 	page_t		*rootpp;
34390Sstevel@tonic-gate 	page_t		*firstpp;
34400Sstevel@tonic-gate 	page_t		*lastpp;
34410Sstevel@tonic-gate 	pgcnt_t		pgcnt;
34420Sstevel@tonic-gate 
34430Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp));
34440Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
34450Sstevel@tonic-gate 	ASSERT(page_szc_lock_assert(pp));
34460Sstevel@tonic-gate 
34470Sstevel@tonic-gate 	if (pp->p_szc == 0)
34480Sstevel@tonic-gate 		return;
34490Sstevel@tonic-gate 
34500Sstevel@tonic-gate 	rootpp = PP_GROUPLEADER(pp, 1);
34510Sstevel@tonic-gate 	(void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
34520Sstevel@tonic-gate 
34530Sstevel@tonic-gate 	/*
34540Sstevel@tonic-gate 	 * all large mappings to pp are gone
34550Sstevel@tonic-gate 	 * and no new can be setup since pp is locked exclusively.
34560Sstevel@tonic-gate 	 *
34570Sstevel@tonic-gate 	 * Lock the root to make sure there's only one hat_page_demote()
34580Sstevel@tonic-gate 	 * outstanding within the area of this root's pszc.
34590Sstevel@tonic-gate 	 *
34600Sstevel@tonic-gate 	 * Second potential hat_page_demote() is already eliminated by upper
34610Sstevel@tonic-gate 	 * VM layer via page_szc_lock() but we don't rely on it and use our
34620Sstevel@tonic-gate 	 * own locking (so that upper layer locking can be changed without
34630Sstevel@tonic-gate 	 * assumptions that hat depends on upper layer VM to prevent multiple
34640Sstevel@tonic-gate 	 * hat_page_demote() to be issued simultaneously to the same large
34650Sstevel@tonic-gate 	 * page).
34660Sstevel@tonic-gate 	 */
34670Sstevel@tonic-gate again:
34680Sstevel@tonic-gate 	pszc = pp->p_szc;
34690Sstevel@tonic-gate 	if (pszc == 0)
34700Sstevel@tonic-gate 		return;
34710Sstevel@tonic-gate 	rootpp = PP_GROUPLEADER(pp, pszc);
34720Sstevel@tonic-gate 	x86_hm_enter(rootpp);
34730Sstevel@tonic-gate 	/*
34740Sstevel@tonic-gate 	 * If root's p_szc is different from pszc we raced with another
34750Sstevel@tonic-gate 	 * hat_page_demote().  Drop the lock and try to find the root again.
34760Sstevel@tonic-gate 	 * If root's p_szc is greater than pszc previous hat_page_demote() is
34770Sstevel@tonic-gate 	 * not done yet.  Take and release mlist lock of root's root to wait
34780Sstevel@tonic-gate 	 * for previous hat_page_demote() to complete.
34790Sstevel@tonic-gate 	 */
34800Sstevel@tonic-gate 	if ((rszc = rootpp->p_szc) != pszc) {
34810Sstevel@tonic-gate 		x86_hm_exit(rootpp);
34820Sstevel@tonic-gate 		if (rszc > pszc) {
34830Sstevel@tonic-gate 			/* p_szc of a locked non free page can't increase */
34840Sstevel@tonic-gate 			ASSERT(pp != rootpp);
34850Sstevel@tonic-gate 
34860Sstevel@tonic-gate 			rootpp = PP_GROUPLEADER(rootpp, rszc);
34870Sstevel@tonic-gate 			x86_hm_enter(rootpp);
34880Sstevel@tonic-gate 			x86_hm_exit(rootpp);
34890Sstevel@tonic-gate 		}
34900Sstevel@tonic-gate 		goto again;
34910Sstevel@tonic-gate 	}
34920Sstevel@tonic-gate 	ASSERT(pp->p_szc == pszc);
34930Sstevel@tonic-gate 
34940Sstevel@tonic-gate 	/*
34950Sstevel@tonic-gate 	 * Decrement by 1 p_szc of every constituent page of a region that
34960Sstevel@tonic-gate 	 * covered pp. For example if original szc is 3 it gets changed to 2
34970Sstevel@tonic-gate 	 * everywhere except in region 2 that covered pp. Region 2 that
34980Sstevel@tonic-gate 	 * covered pp gets demoted to 1 everywhere except in region 1 that
34990Sstevel@tonic-gate 	 * covered pp. The region 1 that covered pp is demoted to region
35000Sstevel@tonic-gate 	 * 0. It's done this way because from region 3 we removed level 3
35010Sstevel@tonic-gate 	 * mappings, from region 2 that covered pp we removed level 2 mappings
35020Sstevel@tonic-gate 	 * and from region 1 that covered pp we removed level 1 mappings.  All
35030Sstevel@tonic-gate 	 * changes are done from from high pfn's to low pfn's so that roots
35040Sstevel@tonic-gate 	 * are changed last allowing one to know the largest region where
35050Sstevel@tonic-gate 	 * hat_page_demote() is stil active by only looking at the root page.
35060Sstevel@tonic-gate 	 *
35070Sstevel@tonic-gate 	 * This algorithm is implemented in 2 while loops. First loop changes
35080Sstevel@tonic-gate 	 * p_szc of pages to the right of pp's level 1 region and second
35090Sstevel@tonic-gate 	 * loop changes p_szc of pages of level 1 region that covers pp
35100Sstevel@tonic-gate 	 * and all pages to the left of level 1 region that covers pp.
35110Sstevel@tonic-gate 	 * In the first loop p_szc keeps dropping with every iteration
35120Sstevel@tonic-gate 	 * and in the second loop it keeps increasing with every iteration.
35130Sstevel@tonic-gate 	 *
35140Sstevel@tonic-gate 	 * First loop description: Demote pages to the right of pp outside of
35150Sstevel@tonic-gate 	 * level 1 region that covers pp.  In every iteration of the while
35160Sstevel@tonic-gate 	 * loop below find the last page of szc region and the first page of
35170Sstevel@tonic-gate 	 * (szc - 1) region that is immediately to the right of (szc - 1)
35180Sstevel@tonic-gate 	 * region that covers pp.  From last such page to first such page
35190Sstevel@tonic-gate 	 * change every page's szc to szc - 1. Decrement szc and continue
35200Sstevel@tonic-gate 	 * looping until szc is 1. If pp belongs to the last (szc - 1) region
35210Sstevel@tonic-gate 	 * of szc region skip to the next iteration.
35220Sstevel@tonic-gate 	 */
35230Sstevel@tonic-gate 	szc = pszc;
35240Sstevel@tonic-gate 	while (szc > 1) {
35250Sstevel@tonic-gate 		lastpp = PP_GROUPLEADER(pp, szc);
35260Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(szc);
35270Sstevel@tonic-gate 		lastpp += pgcnt - 1;
35280Sstevel@tonic-gate 		firstpp = PP_GROUPLEADER(pp, (szc - 1));
35290Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(szc - 1);
35300Sstevel@tonic-gate 		if (lastpp - firstpp < pgcnt) {
35310Sstevel@tonic-gate 			szc--;
35320Sstevel@tonic-gate 			continue;
35330Sstevel@tonic-gate 		}
35340Sstevel@tonic-gate 		firstpp += pgcnt;
35350Sstevel@tonic-gate 		while (lastpp != firstpp) {
35360Sstevel@tonic-gate 			ASSERT(lastpp->p_szc == pszc);
35370Sstevel@tonic-gate 			lastpp->p_szc = szc - 1;
35380Sstevel@tonic-gate 			lastpp--;
35390Sstevel@tonic-gate 		}
35400Sstevel@tonic-gate 		firstpp->p_szc = szc - 1;
35410Sstevel@tonic-gate 		szc--;
35420Sstevel@tonic-gate 	}
35430Sstevel@tonic-gate 
35440Sstevel@tonic-gate 	/*
35450Sstevel@tonic-gate 	 * Second loop description:
35460Sstevel@tonic-gate 	 * First iteration changes p_szc to 0 of every
35470Sstevel@tonic-gate 	 * page of level 1 region that covers pp.
35480Sstevel@tonic-gate 	 * Subsequent iterations find last page of szc region
35490Sstevel@tonic-gate 	 * immediately to the left of szc region that covered pp
35500Sstevel@tonic-gate 	 * and first page of (szc + 1) region that covers pp.
35510Sstevel@tonic-gate 	 * From last to first page change p_szc of every page to szc.
35520Sstevel@tonic-gate 	 * Increment szc and continue looping until szc is pszc.
35530Sstevel@tonic-gate 	 * If pp belongs to the fist szc region of (szc + 1) region
35540Sstevel@tonic-gate 	 * skip to the next iteration.
35550Sstevel@tonic-gate 	 *
35560Sstevel@tonic-gate 	 */
35570Sstevel@tonic-gate 	szc = 0;
35580Sstevel@tonic-gate 	while (szc < pszc) {
35590Sstevel@tonic-gate 		firstpp = PP_GROUPLEADER(pp, (szc + 1));
35600Sstevel@tonic-gate 		if (szc == 0) {
35610Sstevel@tonic-gate 			pgcnt = page_get_pagecnt(1);
35620Sstevel@tonic-gate 			lastpp = firstpp + (pgcnt - 1);
35630Sstevel@tonic-gate 		} else {
35640Sstevel@tonic-gate 			lastpp = PP_GROUPLEADER(pp, szc);
35650Sstevel@tonic-gate 			if (firstpp == lastpp) {
35660Sstevel@tonic-gate 				szc++;
35670Sstevel@tonic-gate 				continue;
35680Sstevel@tonic-gate 			}
35690Sstevel@tonic-gate 			lastpp--;
35700Sstevel@tonic-gate 			pgcnt = page_get_pagecnt(szc);
35710Sstevel@tonic-gate 		}
35720Sstevel@tonic-gate 		while (lastpp != firstpp) {
35730Sstevel@tonic-gate 			ASSERT(lastpp->p_szc == pszc);
35740Sstevel@tonic-gate 			lastpp->p_szc = szc;
35750Sstevel@tonic-gate 			lastpp--;
35760Sstevel@tonic-gate 		}
35770Sstevel@tonic-gate 		firstpp->p_szc = szc;
35780Sstevel@tonic-gate 		if (firstpp == rootpp)
35790Sstevel@tonic-gate 			break;
35800Sstevel@tonic-gate 		szc++;
35810Sstevel@tonic-gate 	}
35820Sstevel@tonic-gate 	x86_hm_exit(rootpp);
35830Sstevel@tonic-gate }
35840Sstevel@tonic-gate 
35850Sstevel@tonic-gate /*
35860Sstevel@tonic-gate  * get hw stats from hardware into page struct and reset hw stats
35870Sstevel@tonic-gate  * returns attributes of page
35880Sstevel@tonic-gate  * Flags for hat_pagesync, hat_getstat, hat_sync
35890Sstevel@tonic-gate  *
35900Sstevel@tonic-gate  * define	HAT_SYNC_ZERORM		0x01
35910Sstevel@tonic-gate  *
35920Sstevel@tonic-gate  * Additional flags for hat_pagesync
35930Sstevel@tonic-gate  *
35940Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_REF	0x02
35950Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_MOD	0x04
35960Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_RM	0x06
35970Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_SHARED	0x08
35980Sstevel@tonic-gate  */
35990Sstevel@tonic-gate uint_t
36000Sstevel@tonic-gate hat_pagesync(struct page *pp, uint_t flags)
36010Sstevel@tonic-gate {
36020Sstevel@tonic-gate 	hment_t		*hm = NULL;
36030Sstevel@tonic-gate 	htable_t	*ht;
36040Sstevel@tonic-gate 	uint_t		entry;
36050Sstevel@tonic-gate 	x86pte_t	old, save_old;
36060Sstevel@tonic-gate 	x86pte_t	new;
36070Sstevel@tonic-gate 	uchar_t		nrmbits = P_REF|P_MOD|P_RO;
36080Sstevel@tonic-gate 	extern ulong_t	po_share;
36090Sstevel@tonic-gate 	page_t		*save_pp = pp;
36100Sstevel@tonic-gate 	uint_t		pszc = 0;
36110Sstevel@tonic-gate 
36120Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(pp) || panicstr);
36130Sstevel@tonic-gate 
36140Sstevel@tonic-gate 	if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
36150Sstevel@tonic-gate 		return (pp->p_nrm & nrmbits);
36160Sstevel@tonic-gate 
36170Sstevel@tonic-gate 	if ((flags & HAT_SYNC_ZERORM) == 0) {
36180Sstevel@tonic-gate 
36190Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
36200Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
36210Sstevel@tonic-gate 
36220Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
36230Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
36240Sstevel@tonic-gate 
36250Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
36260Sstevel@tonic-gate 		    hat_page_getshare(pp) > po_share) {
36270Sstevel@tonic-gate 			if (PP_ISRO(pp))
36280Sstevel@tonic-gate 				PP_SETREF(pp);
36290Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
36300Sstevel@tonic-gate 		}
36310Sstevel@tonic-gate 	}
36320Sstevel@tonic-gate 
36335084Sjohnlev 	XPV_DISALLOW_MIGRATE();
36340Sstevel@tonic-gate next_size:
36350Sstevel@tonic-gate 	/*
36360Sstevel@tonic-gate 	 * walk thru the mapping list syncing (and clearing) ref/mod bits.
36370Sstevel@tonic-gate 	 */
36380Sstevel@tonic-gate 	x86_hm_enter(pp);
36390Sstevel@tonic-gate 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
36400Sstevel@tonic-gate 		if (ht->ht_level < pszc)
36410Sstevel@tonic-gate 			continue;
36420Sstevel@tonic-gate 		old = x86pte_get(ht, entry);
36430Sstevel@tonic-gate try_again:
36440Sstevel@tonic-gate 
36450Sstevel@tonic-gate 		ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
36460Sstevel@tonic-gate 
36470Sstevel@tonic-gate 		if (PTE_GET(old, PT_REF | PT_MOD) == 0)
36480Sstevel@tonic-gate 			continue;
36490Sstevel@tonic-gate 
36500Sstevel@tonic-gate 		save_old = old;
36510Sstevel@tonic-gate 		if ((flags & HAT_SYNC_ZERORM) != 0) {
36520Sstevel@tonic-gate 
36530Sstevel@tonic-gate 			/*
36540Sstevel@tonic-gate 			 * Need to clear ref or mod bits. Need to demap
36550Sstevel@tonic-gate 			 * to make sure any executing TLBs see cleared bits.
36560Sstevel@tonic-gate 			 */
36570Sstevel@tonic-gate 			new = old;
36580Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD);
36590Sstevel@tonic-gate 			old = hati_update_pte(ht, entry, old, new);
36600Sstevel@tonic-gate 			if (old != 0)
36610Sstevel@tonic-gate 				goto try_again;
36620Sstevel@tonic-gate 
36630Sstevel@tonic-gate 			old = save_old;
36640Sstevel@tonic-gate 		}
36650Sstevel@tonic-gate 
36660Sstevel@tonic-gate 		/*
36670Sstevel@tonic-gate 		 * Sync the PTE
36680Sstevel@tonic-gate 		 */
36693446Smrj 		if (!(flags & HAT_SYNC_ZERORM) &&
36703446Smrj 		    PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
36710Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, old, ht->ht_level);
36720Sstevel@tonic-gate 
36730Sstevel@tonic-gate 		/*
36740Sstevel@tonic-gate 		 * can stop short if we found a ref'd or mod'd page
36750Sstevel@tonic-gate 		 */
36760Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
36770Sstevel@tonic-gate 		    (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
36780Sstevel@tonic-gate 			x86_hm_exit(pp);
36793446Smrj 			goto done;
36800Sstevel@tonic-gate 		}
36810Sstevel@tonic-gate 	}
36820Sstevel@tonic-gate 	x86_hm_exit(pp);
36830Sstevel@tonic-gate 	while (pszc < pp->p_szc) {
36840Sstevel@tonic-gate 		page_t *tpp;
36850Sstevel@tonic-gate 		pszc++;
36860Sstevel@tonic-gate 		tpp = PP_GROUPLEADER(pp, pszc);
36870Sstevel@tonic-gate 		if (pp != tpp) {
36880Sstevel@tonic-gate 			pp = tpp;
36890Sstevel@tonic-gate 			goto next_size;
36900Sstevel@tonic-gate 		}
36910Sstevel@tonic-gate 	}
36923446Smrj done:
36935084Sjohnlev 	XPV_ALLOW_MIGRATE();
36940Sstevel@tonic-gate 	return (save_pp->p_nrm & nrmbits);
36950Sstevel@tonic-gate }
36960Sstevel@tonic-gate 
36970Sstevel@tonic-gate /*
36980Sstevel@tonic-gate  * returns approx number of mappings to this pp.  A return of 0 implies
36990Sstevel@tonic-gate  * there are no mappings to the page.
37000Sstevel@tonic-gate  */
37010Sstevel@tonic-gate ulong_t
37020Sstevel@tonic-gate hat_page_getshare(page_t *pp)
37030Sstevel@tonic-gate {
37040Sstevel@tonic-gate 	uint_t cnt;
37050Sstevel@tonic-gate 	cnt = hment_mapcnt(pp);
37061841Spraks #if defined(__amd64)
37071841Spraks 	if (vpm_enable && pp->p_vpmref) {
37081841Spraks 		cnt += 1;
37091841Spraks 	}
37101841Spraks #endif
37110Sstevel@tonic-gate 	return (cnt);
37120Sstevel@tonic-gate }
37130Sstevel@tonic-gate 
37140Sstevel@tonic-gate /*
37154528Spaulsan  * Return 1 the number of mappings exceeds sh_thresh. Return 0
37164528Spaulsan  * otherwise.
37174528Spaulsan  */
37184528Spaulsan int
37194528Spaulsan hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
37204528Spaulsan {
37214528Spaulsan 	return (hat_page_getshare(pp) > sh_thresh);
37224528Spaulsan }
37234528Spaulsan 
37244528Spaulsan /*
37250Sstevel@tonic-gate  * hat_softlock isn't supported anymore
37260Sstevel@tonic-gate  */
37270Sstevel@tonic-gate /*ARGSUSED*/
37280Sstevel@tonic-gate faultcode_t
37290Sstevel@tonic-gate hat_softlock(
37300Sstevel@tonic-gate 	hat_t *hat,
37310Sstevel@tonic-gate 	caddr_t addr,
37320Sstevel@tonic-gate 	size_t *len,
37330Sstevel@tonic-gate 	struct page **page_array,
37340Sstevel@tonic-gate 	uint_t flags)
37350Sstevel@tonic-gate {
37360Sstevel@tonic-gate 	return (FC_NOSUPPORT);
37370Sstevel@tonic-gate }
37380Sstevel@tonic-gate 
37390Sstevel@tonic-gate 
37400Sstevel@tonic-gate 
37410Sstevel@tonic-gate /*
37420Sstevel@tonic-gate  * Routine to expose supported HAT features to platform independent code.
37430Sstevel@tonic-gate  */
37440Sstevel@tonic-gate /*ARGSUSED*/
37450Sstevel@tonic-gate int
37460Sstevel@tonic-gate hat_supported(enum hat_features feature, void *arg)
37470Sstevel@tonic-gate {
37480Sstevel@tonic-gate 	switch (feature) {
37490Sstevel@tonic-gate 
37500Sstevel@tonic-gate 	case HAT_SHARED_PT:	/* this is really ISM */
37510Sstevel@tonic-gate 		return (1);
37520Sstevel@tonic-gate 
37530Sstevel@tonic-gate 	case HAT_DYNAMIC_ISM_UNMAP:
37540Sstevel@tonic-gate 		return (0);
37550Sstevel@tonic-gate 
37560Sstevel@tonic-gate 	case HAT_VMODSORT:
37570Sstevel@tonic-gate 		return (1);
37580Sstevel@tonic-gate 
37594528Spaulsan 	case HAT_SHARED_REGIONS:
37604528Spaulsan 		return (0);
37614528Spaulsan 
37620Sstevel@tonic-gate 	default:
37630Sstevel@tonic-gate 		panic("hat_supported() - unknown feature");
37640Sstevel@tonic-gate 	}
37650Sstevel@tonic-gate 	return (0);
37660Sstevel@tonic-gate }
37670Sstevel@tonic-gate 
37680Sstevel@tonic-gate /*
37690Sstevel@tonic-gate  * Called when a thread is exiting and has been switched to the kernel AS
37700Sstevel@tonic-gate  */
37710Sstevel@tonic-gate void
37720Sstevel@tonic-gate hat_thread_exit(kthread_t *thd)
37730Sstevel@tonic-gate {
37740Sstevel@tonic-gate 	ASSERT(thd->t_procp->p_as == &kas);
37755084Sjohnlev 	XPV_DISALLOW_MIGRATE();
37760Sstevel@tonic-gate 	hat_switch(thd->t_procp->p_as->a_hat);
37775084Sjohnlev 	XPV_ALLOW_MIGRATE();
37780Sstevel@tonic-gate }
37790Sstevel@tonic-gate 
37800Sstevel@tonic-gate /*
37810Sstevel@tonic-gate  * Setup the given brand new hat structure as the new HAT on this cpu's mmu.
37820Sstevel@tonic-gate  */
37830Sstevel@tonic-gate /*ARGSUSED*/
37840Sstevel@tonic-gate void
37850Sstevel@tonic-gate hat_setup(hat_t *hat, int flags)
37860Sstevel@tonic-gate {
37875084Sjohnlev 	XPV_DISALLOW_MIGRATE();
37880Sstevel@tonic-gate 	kpreempt_disable();
37890Sstevel@tonic-gate 
37900Sstevel@tonic-gate 	hat_switch(hat);
37910Sstevel@tonic-gate 
37920Sstevel@tonic-gate 	kpreempt_enable();
37935084Sjohnlev 	XPV_ALLOW_MIGRATE();
37940Sstevel@tonic-gate }
37950Sstevel@tonic-gate 
37960Sstevel@tonic-gate /*
37970Sstevel@tonic-gate  * Prepare for a CPU private mapping for the given address.
37980Sstevel@tonic-gate  *
37990Sstevel@tonic-gate  * The address can only be used from a single CPU and can be remapped
38000Sstevel@tonic-gate  * using hat_mempte_remap().  Return the address of the PTE.
38010Sstevel@tonic-gate  *
38020Sstevel@tonic-gate  * We do the htable_create() if necessary and increment the valid count so
38030Sstevel@tonic-gate  * the htable can't disappear.  We also hat_devload() the page table into
38040Sstevel@tonic-gate  * kernel so that the PTE is quickly accessed.
38050Sstevel@tonic-gate  */
38063446Smrj hat_mempte_t
38073446Smrj hat_mempte_setup(caddr_t addr)
38080Sstevel@tonic-gate {
38090Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
38100Sstevel@tonic-gate 	htable_t	*ht;
38110Sstevel@tonic-gate 	uint_t		entry;
38120Sstevel@tonic-gate 	x86pte_t	oldpte;
38133446Smrj 	hat_mempte_t	p;
38140Sstevel@tonic-gate 
38150Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
38160Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
38174004Sjosephb 	++curthread->t_hatdepth;
38180Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
38190Sstevel@tonic-gate 	if (ht == NULL) {
38200Sstevel@tonic-gate 		ht = htable_create(kas.a_hat, va, 0, NULL);
38210Sstevel@tonic-gate 		entry = htable_va2entry(va, ht);
38220Sstevel@tonic-gate 		ASSERT(ht->ht_level == 0);
38230Sstevel@tonic-gate 		oldpte = x86pte_get(ht, entry);
38240Sstevel@tonic-gate 	}
38250Sstevel@tonic-gate 	if (PTE_ISVALID(oldpte))
38260Sstevel@tonic-gate 		panic("hat_mempte_setup(): address already mapped"
38270Sstevel@tonic-gate 		    "ht=%p, entry=%d, pte=" FMT_PTE, ht, entry, oldpte);
38280Sstevel@tonic-gate 
38290Sstevel@tonic-gate 	/*
38300Sstevel@tonic-gate 	 * increment ht_valid_cnt so that the pagetable can't disappear
38310Sstevel@tonic-gate 	 */
38320Sstevel@tonic-gate 	HTABLE_INC(ht->ht_valid_cnt);
38330Sstevel@tonic-gate 
38340Sstevel@tonic-gate 	/*
38353446Smrj 	 * return the PTE physical address to the caller.
38360Sstevel@tonic-gate 	 */
38370Sstevel@tonic-gate 	htable_release(ht);
38383446Smrj 	p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
38394004Sjosephb 	--curthread->t_hatdepth;
38403446Smrj 	return (p);
38410Sstevel@tonic-gate }
38420Sstevel@tonic-gate 
38430Sstevel@tonic-gate /*
38440Sstevel@tonic-gate  * Release a CPU private mapping for the given address.
38450Sstevel@tonic-gate  * We decrement the htable valid count so it might be destroyed.
38460Sstevel@tonic-gate  */
38473446Smrj /*ARGSUSED1*/
38480Sstevel@tonic-gate void
38493446Smrj hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
38500Sstevel@tonic-gate {
38510Sstevel@tonic-gate 	htable_t	*ht;
38520Sstevel@tonic-gate 
38530Sstevel@tonic-gate 	/*
38543446Smrj 	 * invalidate any left over mapping and decrement the htable valid count
38550Sstevel@tonic-gate 	 */
38565084Sjohnlev #ifdef __xpv
38575084Sjohnlev 	if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
38585084Sjohnlev 	    UVMF_INVLPG | UVMF_LOCAL))
38595084Sjohnlev 		panic("HYPERVISOR_update_va_mapping() failed");
38605084Sjohnlev #else
38613446Smrj 	{
38623446Smrj 		x86pte_t *pteptr;
38633446Smrj 
38643446Smrj 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
38653446Smrj 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
38663446Smrj 		if (mmu.pae_hat)
38673446Smrj 			*pteptr = 0;
38683446Smrj 		else
38693446Smrj 			*(x86pte32_t *)pteptr = 0;
38703446Smrj 		mmu_tlbflush_entry(addr);
38713446Smrj 		x86pte_mapout();
38723446Smrj 	}
38735084Sjohnlev #endif
38743446Smrj 
38750Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
38760Sstevel@tonic-gate 	if (ht == NULL)
38770Sstevel@tonic-gate 		panic("hat_mempte_release(): invalid address");
38780Sstevel@tonic-gate 	ASSERT(ht->ht_level == 0);
38790Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
38800Sstevel@tonic-gate 	htable_release(ht);
38810Sstevel@tonic-gate }
38820Sstevel@tonic-gate 
38830Sstevel@tonic-gate /*
38840Sstevel@tonic-gate  * Apply a temporary CPU private mapping to a page. We flush the TLB only
38850Sstevel@tonic-gate  * on this CPU, so this ought to have been called with preemption disabled.
38860Sstevel@tonic-gate  */
38870Sstevel@tonic-gate void
38880Sstevel@tonic-gate hat_mempte_remap(
38893446Smrj 	pfn_t		pfn,
38903446Smrj 	caddr_t		addr,
38913446Smrj 	hat_mempte_t	pte_pa,
38923446Smrj 	uint_t		attr,
38933446Smrj 	uint_t		flags)
38940Sstevel@tonic-gate {
38950Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
38960Sstevel@tonic-gate 	x86pte_t	pte;
38970Sstevel@tonic-gate 
38980Sstevel@tonic-gate 	/*
38990Sstevel@tonic-gate 	 * Remap the given PTE to the new page's PFN. Invalidate only
39000Sstevel@tonic-gate 	 * on this CPU.
39010Sstevel@tonic-gate 	 */
39020Sstevel@tonic-gate #ifdef DEBUG
39030Sstevel@tonic-gate 	htable_t	*ht;
39040Sstevel@tonic-gate 	uint_t		entry;
39050Sstevel@tonic-gate 
39060Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
39070Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
39080Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
39090Sstevel@tonic-gate 	ASSERT(ht != NULL);
39100Sstevel@tonic-gate 	ASSERT(ht->ht_level == 0);
39110Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
39123446Smrj 	ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
39130Sstevel@tonic-gate 	htable_release(ht);
39140Sstevel@tonic-gate #endif
39155084Sjohnlev 	XPV_DISALLOW_MIGRATE();
39160Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, 0, flags);
39175084Sjohnlev #ifdef __xpv
39185084Sjohnlev 	if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
39195084Sjohnlev 		panic("HYPERVISOR_update_va_mapping() failed");
39205084Sjohnlev #else
39213446Smrj 	{
39223446Smrj 		x86pte_t *pteptr;
39233446Smrj 
39243446Smrj 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
39253446Smrj 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
39263446Smrj 		if (mmu.pae_hat)
39273446Smrj 			*(x86pte_t *)pteptr = pte;
39283446Smrj 		else
39293446Smrj 			*(x86pte32_t *)pteptr = (x86pte32_t)pte;
39303446Smrj 		mmu_tlbflush_entry(addr);
39313446Smrj 		x86pte_mapout();
39323446Smrj 	}
39335084Sjohnlev #endif
39345084Sjohnlev 	XPV_ALLOW_MIGRATE();
39350Sstevel@tonic-gate }
39360Sstevel@tonic-gate 
39370Sstevel@tonic-gate 
39380Sstevel@tonic-gate 
39390Sstevel@tonic-gate /*
39400Sstevel@tonic-gate  * Hat locking functions
39410Sstevel@tonic-gate  * XXX - these two functions are currently being used by hatstats
39420Sstevel@tonic-gate  * 	they can be removed by using a per-as mutex for hatstats.
39430Sstevel@tonic-gate  */
39440Sstevel@tonic-gate void
39450Sstevel@tonic-gate hat_enter(hat_t *hat)
39460Sstevel@tonic-gate {
39470Sstevel@tonic-gate 	mutex_enter(&hat->hat_mutex);
39480Sstevel@tonic-gate }
39490Sstevel@tonic-gate 
39500Sstevel@tonic-gate void
39510Sstevel@tonic-gate hat_exit(hat_t *hat)
39520Sstevel@tonic-gate {
39530Sstevel@tonic-gate 	mutex_exit(&hat->hat_mutex);
39540Sstevel@tonic-gate }
39550Sstevel@tonic-gate 
39560Sstevel@tonic-gate /*
39573446Smrj  * HAT part of cpu initialization.
39580Sstevel@tonic-gate  */
39590Sstevel@tonic-gate void
39600Sstevel@tonic-gate hat_cpu_online(struct cpu *cpup)
39610Sstevel@tonic-gate {
39620Sstevel@tonic-gate 	if (cpup != CPU) {
39633446Smrj 		x86pte_cpu_init(cpup);
39640Sstevel@tonic-gate 		hat_vlp_setup(cpup);
39650Sstevel@tonic-gate 	}
39660Sstevel@tonic-gate 	CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
39670Sstevel@tonic-gate }
39680Sstevel@tonic-gate 
39690Sstevel@tonic-gate /*
39703446Smrj  * HAT part of cpu deletion.
39713446Smrj  * (currently, we only call this after the cpu is safely passivated.)
39723446Smrj  */
39733446Smrj void
39743446Smrj hat_cpu_offline(struct cpu *cpup)
39753446Smrj {
39763446Smrj 	ASSERT(cpup != CPU);
39773446Smrj 
39783446Smrj 	CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
39793446Smrj 	x86pte_cpu_fini(cpup);
39803446Smrj 	hat_vlp_teardown(cpup);
39813446Smrj }
39823446Smrj 
39833446Smrj /*
39840Sstevel@tonic-gate  * Function called after all CPUs are brought online.
39850Sstevel@tonic-gate  * Used to remove low address boot mappings.
39860Sstevel@tonic-gate  */
39870Sstevel@tonic-gate void
39880Sstevel@tonic-gate clear_boot_mappings(uintptr_t low, uintptr_t high)
39890Sstevel@tonic-gate {
39900Sstevel@tonic-gate 	uintptr_t vaddr = low;
39910Sstevel@tonic-gate 	htable_t *ht = NULL;
39920Sstevel@tonic-gate 	level_t level;
39930Sstevel@tonic-gate 	uint_t entry;
39940Sstevel@tonic-gate 	x86pte_t pte;
39950Sstevel@tonic-gate 
39960Sstevel@tonic-gate 	/*
39970Sstevel@tonic-gate 	 * On 1st CPU we can unload the prom mappings, basically we blow away
39983446Smrj 	 * all virtual mappings under _userlimit.
39990Sstevel@tonic-gate 	 */
40000Sstevel@tonic-gate 	while (vaddr < high) {
40010Sstevel@tonic-gate 		pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
40020Sstevel@tonic-gate 		if (ht == NULL)
40030Sstevel@tonic-gate 			break;
40040Sstevel@tonic-gate 
40050Sstevel@tonic-gate 		level = ht->ht_level;
40060Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
40070Sstevel@tonic-gate 		ASSERT(level <= mmu.max_page_level);
40080Sstevel@tonic-gate 		ASSERT(PTE_ISPAGE(pte, level));
40090Sstevel@tonic-gate 
40100Sstevel@tonic-gate 		/*
40110Sstevel@tonic-gate 		 * Unload the mapping from the page tables.
40120Sstevel@tonic-gate 		 */
40133446Smrj 		(void) x86pte_inval(ht, entry, 0, NULL);
40140Sstevel@tonic-gate 		ASSERT(ht->ht_valid_cnt > 0);
40150Sstevel@tonic-gate 		HTABLE_DEC(ht->ht_valid_cnt);
40160Sstevel@tonic-gate 		PGCNT_DEC(ht->ht_hat, ht->ht_level);
40170Sstevel@tonic-gate 
40180Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
40190Sstevel@tonic-gate 	}
40200Sstevel@tonic-gate 	if (ht)
40210Sstevel@tonic-gate 		htable_release(ht);
40220Sstevel@tonic-gate }
40230Sstevel@tonic-gate 
40240Sstevel@tonic-gate /*
40250Sstevel@tonic-gate  * Atomically update a new translation for a single page.  If the
40260Sstevel@tonic-gate  * currently installed PTE doesn't match the value we expect to find,
40270Sstevel@tonic-gate  * it's not updated and we return the PTE we found.
40280Sstevel@tonic-gate  *
40290Sstevel@tonic-gate  * If activating nosync or NOWRITE and the page was modified we need to sync
40300Sstevel@tonic-gate  * with the page_t. Also sync with page_t if clearing ref/mod bits.
40310Sstevel@tonic-gate  */
40320Sstevel@tonic-gate static x86pte_t
40330Sstevel@tonic-gate hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
40340Sstevel@tonic-gate {
40350Sstevel@tonic-gate 	page_t		*pp;
40360Sstevel@tonic-gate 	uint_t		rm = 0;
40370Sstevel@tonic-gate 	x86pte_t	replaced;
40380Sstevel@tonic-gate 
40393446Smrj 	if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
40400Sstevel@tonic-gate 	    PTE_GET(expected, PT_MOD | PT_REF) &&
40410Sstevel@tonic-gate 	    (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
40424381Sjosephb 	    !PTE_GET(new, PT_MOD | PT_REF))) {
40430Sstevel@tonic-gate 
40443446Smrj 		ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
40450Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
40460Sstevel@tonic-gate 		ASSERT(pp != NULL);
40470Sstevel@tonic-gate 		if (PTE_GET(expected, PT_MOD))
40480Sstevel@tonic-gate 			rm |= P_MOD;
40490Sstevel@tonic-gate 		if (PTE_GET(expected, PT_REF))
40500Sstevel@tonic-gate 			rm |= P_REF;
40510Sstevel@tonic-gate 		PTE_CLR(new, PT_MOD | PT_REF);
40520Sstevel@tonic-gate 	}
40530Sstevel@tonic-gate 
40540Sstevel@tonic-gate 	replaced = x86pte_update(ht, entry, expected, new);
40550Sstevel@tonic-gate 	if (replaced != expected)
40560Sstevel@tonic-gate 		return (replaced);
40570Sstevel@tonic-gate 
40580Sstevel@tonic-gate 	if (rm) {
40590Sstevel@tonic-gate 		/*
40600Sstevel@tonic-gate 		 * sync to all constituent pages of a large page
40610Sstevel@tonic-gate 		 */
40620Sstevel@tonic-gate 		pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
40630Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
40640Sstevel@tonic-gate 		while (pgcnt-- > 0) {
40650Sstevel@tonic-gate 			/*
40660Sstevel@tonic-gate 			 * hat_page_demote() can't decrease
40670Sstevel@tonic-gate 			 * pszc below this mapping size
40680Sstevel@tonic-gate 			 * since large mapping existed after we
40690Sstevel@tonic-gate 			 * took mlist lock.
40700Sstevel@tonic-gate 			 */
40710Sstevel@tonic-gate 			ASSERT(pp->p_szc >= ht->ht_level);
40720Sstevel@tonic-gate 			hat_page_setattr(pp, rm);
40730Sstevel@tonic-gate 			++pp;
40740Sstevel@tonic-gate 		}
40750Sstevel@tonic-gate 	}
40760Sstevel@tonic-gate 
40770Sstevel@tonic-gate 	return (0);
40780Sstevel@tonic-gate }
40790Sstevel@tonic-gate 
40804528Spaulsan /* ARGSUSED */
40814528Spaulsan void
40825075Spaulsan hat_join_srd(struct hat *hat, vnode_t *evp)
40834528Spaulsan {
40844528Spaulsan }
40854528Spaulsan 
40864528Spaulsan /* ARGSUSED */
40874528Spaulsan hat_region_cookie_t
40885075Spaulsan hat_join_region(struct hat *hat,
40894528Spaulsan     caddr_t r_saddr,
40904528Spaulsan     size_t r_size,
40914528Spaulsan     void *r_obj,
40924528Spaulsan     u_offset_t r_objoff,
40934528Spaulsan     uchar_t r_perm,
40944528Spaulsan     uchar_t r_pgszc,
40954528Spaulsan     hat_rgn_cb_func_t r_cb_function,
40964528Spaulsan     uint_t flags)
40974528Spaulsan {
40984528Spaulsan 	panic("No shared region support on x86");
40994528Spaulsan 	return (HAT_INVALID_REGION_COOKIE);
41004528Spaulsan }
41014528Spaulsan 
41024528Spaulsan /* ARGSUSED */
41034528Spaulsan void
41045075Spaulsan hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
41054528Spaulsan {
41064528Spaulsan 	panic("No shared region support on x86");
41074528Spaulsan }
41084528Spaulsan 
41094528Spaulsan /* ARGSUSED */
41104528Spaulsan void
41115075Spaulsan hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
41124528Spaulsan {
41134528Spaulsan 	panic("No shared region support on x86");
41144528Spaulsan }
41154528Spaulsan 
41164528Spaulsan 
41170Sstevel@tonic-gate /*
41180Sstevel@tonic-gate  * Kernel Physical Mapping (kpm) facility
41190Sstevel@tonic-gate  *
41200Sstevel@tonic-gate  * Most of the routines needed to support segkpm are almost no-ops on the
41210Sstevel@tonic-gate  * x86 platform.  We map in the entire segment when it is created and leave
41220Sstevel@tonic-gate  * it mapped in, so there is no additional work required to set up and tear
41230Sstevel@tonic-gate  * down individual mappings.  All of these routines were created to support
41240Sstevel@tonic-gate  * SPARC platforms that have to avoid aliasing in their virtually indexed
41250Sstevel@tonic-gate  * caches.
41260Sstevel@tonic-gate  *
41270Sstevel@tonic-gate  * Most of the routines have sanity checks in them (e.g. verifying that the
41280Sstevel@tonic-gate  * passed-in page is locked).  We don't actually care about most of these
41290Sstevel@tonic-gate  * checks on x86, but we leave them in place to identify problems in the
41300Sstevel@tonic-gate  * upper levels.
41310Sstevel@tonic-gate  */
41320Sstevel@tonic-gate 
41330Sstevel@tonic-gate /*
41340Sstevel@tonic-gate  * Map in a locked page and return the vaddr.
41350Sstevel@tonic-gate  */
41360Sstevel@tonic-gate /*ARGSUSED*/
41370Sstevel@tonic-gate caddr_t
41380Sstevel@tonic-gate hat_kpm_mapin(struct page *pp, struct kpme *kpme)
41390Sstevel@tonic-gate {
41400Sstevel@tonic-gate 	caddr_t		vaddr;
41410Sstevel@tonic-gate 
41420Sstevel@tonic-gate #ifdef DEBUG
41430Sstevel@tonic-gate 	if (kpm_enable == 0) {
41440Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
41450Sstevel@tonic-gate 		return ((caddr_t)NULL);
41460Sstevel@tonic-gate 	}
41470Sstevel@tonic-gate 
41480Sstevel@tonic-gate 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
41490Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
41500Sstevel@tonic-gate 		return ((caddr_t)NULL);
41510Sstevel@tonic-gate 	}
41520Sstevel@tonic-gate #endif
41530Sstevel@tonic-gate 
41540Sstevel@tonic-gate 	vaddr = hat_kpm_page2va(pp, 1);
41550Sstevel@tonic-gate 
41560Sstevel@tonic-gate 	return (vaddr);
41570Sstevel@tonic-gate }
41580Sstevel@tonic-gate 
41590Sstevel@tonic-gate /*
41600Sstevel@tonic-gate  * Mapout a locked page.
41610Sstevel@tonic-gate  */
41620Sstevel@tonic-gate /*ARGSUSED*/
41630Sstevel@tonic-gate void
41640Sstevel@tonic-gate hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
41650Sstevel@tonic-gate {
41660Sstevel@tonic-gate #ifdef DEBUG
41670Sstevel@tonic-gate 	if (kpm_enable == 0) {
41680Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
41690Sstevel@tonic-gate 		return;
41700Sstevel@tonic-gate 	}
41710Sstevel@tonic-gate 
41720Sstevel@tonic-gate 	if (IS_KPM_ADDR(vaddr) == 0) {
41730Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
41740Sstevel@tonic-gate 		return;
41750Sstevel@tonic-gate 	}
41760Sstevel@tonic-gate 
41770Sstevel@tonic-gate 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
41780Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
41790Sstevel@tonic-gate 		return;
41800Sstevel@tonic-gate 	}
41810Sstevel@tonic-gate #endif
41820Sstevel@tonic-gate }
41830Sstevel@tonic-gate 
41840Sstevel@tonic-gate /*
41850Sstevel@tonic-gate  * Return the kpm virtual address for a specific pfn
41860Sstevel@tonic-gate  */
41870Sstevel@tonic-gate caddr_t
41880Sstevel@tonic-gate hat_kpm_pfn2va(pfn_t pfn)
41890Sstevel@tonic-gate {
41903446Smrj 	uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
41910Sstevel@tonic-gate 
41925262Srscott 	ASSERT(!pfn_is_foreign(pfn));
41930Sstevel@tonic-gate 	return ((caddr_t)vaddr);
41940Sstevel@tonic-gate }
41950Sstevel@tonic-gate 
41960Sstevel@tonic-gate /*
41970Sstevel@tonic-gate  * Return the kpm virtual address for the page at pp.
41980Sstevel@tonic-gate  */
41990Sstevel@tonic-gate /*ARGSUSED*/
42000Sstevel@tonic-gate caddr_t
42010Sstevel@tonic-gate hat_kpm_page2va(struct page *pp, int checkswap)
42020Sstevel@tonic-gate {
42030Sstevel@tonic-gate 	return (hat_kpm_pfn2va(pp->p_pagenum));
42040Sstevel@tonic-gate }
42050Sstevel@tonic-gate 
42060Sstevel@tonic-gate /*
42070Sstevel@tonic-gate  * Return the page frame number for the kpm virtual address vaddr.
42080Sstevel@tonic-gate  */
42090Sstevel@tonic-gate pfn_t
42100Sstevel@tonic-gate hat_kpm_va2pfn(caddr_t vaddr)
42110Sstevel@tonic-gate {
42120Sstevel@tonic-gate 	pfn_t		pfn;
42130Sstevel@tonic-gate 
42140Sstevel@tonic-gate 	ASSERT(IS_KPM_ADDR(vaddr));
42150Sstevel@tonic-gate 
42160Sstevel@tonic-gate 	pfn = (pfn_t)btop(vaddr - kpm_vbase);
42170Sstevel@tonic-gate 
42180Sstevel@tonic-gate 	return (pfn);
42190Sstevel@tonic-gate }
42200Sstevel@tonic-gate 
42210Sstevel@tonic-gate 
42220Sstevel@tonic-gate /*
42230Sstevel@tonic-gate  * Return the page for the kpm virtual address vaddr.
42240Sstevel@tonic-gate  */
42250Sstevel@tonic-gate page_t *
42260Sstevel@tonic-gate hat_kpm_vaddr2page(caddr_t vaddr)
42270Sstevel@tonic-gate {
42280Sstevel@tonic-gate 	pfn_t		pfn;
42290Sstevel@tonic-gate 
42300Sstevel@tonic-gate 	ASSERT(IS_KPM_ADDR(vaddr));
42310Sstevel@tonic-gate 
42320Sstevel@tonic-gate 	pfn = hat_kpm_va2pfn(vaddr);
42330Sstevel@tonic-gate 
42340Sstevel@tonic-gate 	return (page_numtopp_nolock(pfn));
42350Sstevel@tonic-gate }
42360Sstevel@tonic-gate 
42370Sstevel@tonic-gate /*
42380Sstevel@tonic-gate  * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
42390Sstevel@tonic-gate  * KPM page.  This should never happen on x86
42400Sstevel@tonic-gate  */
42410Sstevel@tonic-gate int
42420Sstevel@tonic-gate hat_kpm_fault(hat_t *hat, caddr_t vaddr)
42430Sstevel@tonic-gate {
42440Sstevel@tonic-gate 	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p", hat, vaddr);
42450Sstevel@tonic-gate 
42460Sstevel@tonic-gate 	return (0);
42470Sstevel@tonic-gate }
42480Sstevel@tonic-gate 
42490Sstevel@tonic-gate /*ARGSUSED*/
42500Sstevel@tonic-gate void
42510Sstevel@tonic-gate hat_kpm_mseghash_clear(int nentries)
42520Sstevel@tonic-gate {}
42530Sstevel@tonic-gate 
42540Sstevel@tonic-gate /*ARGSUSED*/
42550Sstevel@tonic-gate void
42560Sstevel@tonic-gate hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
42570Sstevel@tonic-gate {}
42585084Sjohnlev 
42595084Sjohnlev #ifdef __xpv
42605084Sjohnlev /*
42615084Sjohnlev  * There are specific Hypervisor calls to establish and remove mappings
42625084Sjohnlev  * to grant table references and the privcmd driver. We have to ensure
42635084Sjohnlev  * that a page table actually exists.
42645084Sjohnlev  */
42655084Sjohnlev void
42665084Sjohnlev hat_prepare_mapping(hat_t *hat, caddr_t addr)
42675084Sjohnlev {
42685084Sjohnlev 	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
42695084Sjohnlev 	(void) htable_create(hat, (uintptr_t)addr, 0, NULL);
42705084Sjohnlev }
42715084Sjohnlev 
42725084Sjohnlev void
42735084Sjohnlev hat_release_mapping(hat_t *hat, caddr_t addr)
42745084Sjohnlev {
42755084Sjohnlev 	htable_t *ht;
42765084Sjohnlev 
42775084Sjohnlev 	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
42785084Sjohnlev 	ht = htable_lookup(hat, (uintptr_t)addr, 0);
42795084Sjohnlev 	ASSERT(ht != NULL);
42805084Sjohnlev 	ASSERT(ht->ht_busy >= 2);
42815084Sjohnlev 	htable_release(ht);
42825084Sjohnlev 	htable_release(ht);
42835084Sjohnlev }
42845084Sjohnlev #endif
4285