xref: /onnv-gate/usr/src/uts/i86pc/vm/hat_i86.c (revision 9894:42b0c48b08a4)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 * VM - Hardware Address Translation management for i386 and amd64
 *
 * Implementation of the interfaces described in <common/vm/hat.h>
 *
 * Nearly all the details of how the hardware is managed should not be
 * visible outside this layer except for misc. machine specific functions
 * that work in conjunction with this code.
 *
 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
 */

#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/shm.h>
#include <sys/sysmacros.h>
#include <sys/machparam.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/controlregs.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/bootinfo.h>
#include <sys/archsystm.h>

#include <vm/seg_kmem.h>
#include <vm/hat_i86.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/page.h>
#include <vm/seg_kp.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <vm/kboot_mmu.h>
#include <vm/seg_spt.h>

#include <sys/cmn_err.h>

/*
 * Basic parameters for hat operation.
 */
struct hat_mmu_info mmu;

/*
 * The page that is the kernel's top level pagetable.
 *
 * For 32 bit PAE support on i86pc, the kernel hat will use the first 4
 * entries on this 4K page for its top level page table. The remaining
 * groups of 4 entries are used for per processor copies of user VLP
 * pagetables for running threads.  See hat_switch() and reload_pae32()
 * for details.
 *
 * vlp_page[0..3]  - level==2 PTEs for kernel HAT
 * vlp_page[4..7]  - level==2 PTEs for user thread on cpu 0
 * vlp_page[8..11] - level==2 PTEs for user thread on cpu 1
 * etc...
 */
static x86pte_t *vlp_page;
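
/*
 * For example, the four level==2 PTEs for the user thread on cpu N live
 * at vlp_page[(N + 1) * 4] through vlp_page[(N + 1) * 4 + 3];
 * reload_pae32() below computes the same slot as
 * vlp_page + (cpu_id + 1) * VLP_NUM_PTES.
 */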

/*
 * forward declaration of internal utility routines
 */
static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
	x86pte_t new);

/*
 * The kernel address space exists in all HATs. To implement this the
 * kernel reserves a fixed number of entries in the topmost level(s) of page
 * tables. The values are set up during startup and then copied to every user
 * hat created by hat_alloc(). This means that kernelbase must be:
 *
 *	  4Meg aligned for 32 bit kernels
 *	512Gig aligned for the x86_64 64 bit kernel
 *
 * The hat_kernel_range_ts describe what needs to be copied from the kernel
 * hat to each user hat.
 */
typedef struct hat_kernel_range {
	level_t		hkr_level;
	uintptr_t	hkr_start_va;
	uintptr_t	hkr_end_va;	/* zero means to end of memory */
} hat_kernel_range_t;
#define	NUM_KERNEL_RANGE 2
static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
static int num_kernel_ranges;
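
/*
 * For example, on a 64 bit metal kernel hat_init_finish() records a single
 * range { hkr_level = 3, hkr_start_va = kernelbase, hkr_end_va = 0 }, so
 * every top level entry from kernelbase to the end of memory is copied
 * into each new user hat by hat_alloc().
 */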

uint_t use_boot_reserve = 1;	/* cleared after early boot process */
uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */

/*
 * enable_1gpg: controls 1g page support for user applications.
 * By default, 1g pages are exported to user applications. enable_1gpg can
 * be set to 0 to disable that export.
 */
int	enable_1gpg = 1;

/*
 * AMD Shanghai processors provide better management of 1gb ptes in their
 * TLBs.  By default, 1g page support will be disabled for pre-Shanghai AMD
 * processors that don't have optimal tlb support for the 1g page size.
 * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
 * processors.
 */
int	chk_optimal_1gtlb = 1;


#ifdef DEBUG
uint_t	map1gcnt;
#endif


/*
 * A cpuset for all cpus. This is used for kernel address cross calls, since
 * the kernel addresses apply to all cpus.
 */
cpuset_t khat_cpuset;

/*
 * management stuff for hat structures
 */
kmutex_t	hat_list_lock;
kcondvar_t	hat_list_cv;
kmem_cache_t	*hat_cache;
kmem_cache_t	*hat_hash_cache;
kmem_cache_t	*vlp_hash_cache;

/*
 * Simple statistics
 */
struct hatstats hatstat;

/*
 * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
 * correctly.  For such hypervisors we must set PT_USER for kernel
 * entries ourselves (normally the emulation would set PT_USER for
 * kernel entries and PT_USER|PT_GLOBAL for user entries).  pt_kern is
 * thus set appropriately.  Note that dboot/kbm is OK, as only the full
 * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
 * incorrect.
 */
int pt_kern;

/*
 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
 */
extern void atomic_orb(uchar_t *addr, uchar_t val);
extern void atomic_andb(uchar_t *addr, uchar_t val);

#define	PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
#define	PP_ISMOD(pp)		PP_GETRM(pp, P_MOD)
#define	PP_ISREF(pp)		PP_GETRM(pp, P_REF)
#define	PP_ISRO(pp)		PP_GETRM(pp, P_RO)

#define	PP_SETRM(pp, rm)	atomic_orb(&(pp->p_nrm), rm)
#define	PP_SETMOD(pp)		PP_SETRM(pp, P_MOD)
#define	PP_SETREF(pp)		PP_SETRM(pp, P_REF)
#define	PP_SETRO(pp)		PP_SETRM(pp, P_RO)

#define	PP_CLRRM(pp, rm)	atomic_andb(&(pp->p_nrm), ~(rm))
#define	PP_CLRMOD(pp)   	PP_CLRRM(pp, P_MOD)
#define	PP_CLRREF(pp)   	PP_CLRRM(pp, P_REF)
#define	PP_CLRRO(pp)    	PP_CLRRM(pp, P_RO)
#define	PP_CLRALL(pp)		PP_CLRRM(pp, P_MOD | P_REF | P_RO)
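
/*
 * For example, PP_SETREF(pp) atomically ORs P_REF into pp->p_nrm and
 * PP_CLRALL(pp) atomically clears all three bits.  Byte-sized atomics are
 * used so p_nrm can be updated safely from places such as
 * hati_sync_pte_to_page() below, which pushes PTE ref/mod bits into the
 * page_t while holding only the page's mapping list lock.
 */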

/*
 * kmem cache constructor for struct hat
 */
/*ARGSUSED*/
static int
hati_constructor(void *buf, void *handle, int kmflags)
{
	hat_t	*hat = buf;

	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	bzero(hat->hat_pages_mapped,
	    sizeof (pgcnt_t) * (mmu.max_page_level + 1));
	hat->hat_ism_pgcnt = 0;
	hat->hat_stats = 0;
	hat->hat_flags = 0;
	CPUSET_ZERO(hat->hat_cpus);
	hat->hat_htable = NULL;
	hat->hat_ht_hash = NULL;
	return (0);
}

/*
 * Allocate a hat structure for as. We also create the top level
 * htable and initialize it to contain the kernel hat entries.
 */
hat_t *
hat_alloc(struct as *as)
{
	hat_t			*hat;
	htable_t		*ht;	/* top level htable */
	uint_t			use_vlp;
	uint_t			r;
	hat_kernel_range_t	*rp;
	uintptr_t		va;
	uintptr_t		eva;
	uint_t			start;
	uint_t			cnt;
	htable_t		*src;

	/*
	 * Once we start creating user process HATs we can enable
	 * the htable_steal() code.
	 */
	if (can_steal_post_boot == 0)
		can_steal_post_boot = 1;

	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
	hat->hat_as = as;
	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	ASSERT(hat->hat_flags == 0);

#if defined(__xpv)
	/*
	 * No VLP stuff on the hypervisor due to the 64-bit split top level
	 * page tables.  On 32-bit it's not needed as the hypervisor takes
	 * care of copying the top level PTEs to a below 4Gig page.
	 */
	use_vlp = 0;
#else	/* __xpv */
	/* 32 bit processes use a VLP style hat when running with PAE */
#if defined(__amd64)
	use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
#elif defined(__i386)
	use_vlp = mmu.pae_hat;
#endif
#endif	/* __xpv */
	if (use_vlp) {
		hat->hat_flags = HAT_VLP;
		bzero(hat->hat_vlp_ptes, VLP_SIZE);
	}

	/*
	 * Allocate the htable hash
	 */
	if ((hat->hat_flags & HAT_VLP)) {
		hat->hat_num_hash = mmu.vlp_hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
	} else {
		hat->hat_num_hash = mmu.hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
	}
	bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));

	/*
	 * Initialize Kernel HAT entries at the top of the top level page
	 * tables for the new hat.
	 */
	hat->hat_htable = NULL;
	hat->hat_ht_cached = NULL;
	XPV_DISALLOW_MIGRATE();
	ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
	hat->hat_htable = ht;

#if defined(__amd64)
	if (hat->hat_flags & HAT_VLP)
		goto init_done;
#endif

	for (r = 0; r < num_kernel_ranges; ++r) {
		rp = &kernel_ranges[r];
		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
		    va += cnt * LEVEL_SIZE(rp->hkr_level)) {

			if (rp->hkr_level == TOP_LEVEL(hat))
				ht = hat->hat_htable;
			else
				ht = htable_create(hat, va, rp->hkr_level,
				    NULL);

			start = htable_va2entry(va, ht);
			cnt = HTABLE_NUM_PTES(ht) - start;
			eva = va +
			    ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
			if (rp->hkr_end_va != 0 &&
			    (eva > rp->hkr_end_va || eva == 0))
				cnt = htable_va2entry(rp->hkr_end_va, ht) -
				    start;

#if defined(__i386) && !defined(__xpv)
			if (ht->ht_flags & HTABLE_VLP) {
				bcopy(&vlp_page[start],
				    &hat->hat_vlp_ptes[start],
				    cnt * sizeof (x86pte_t));
				continue;
			}
#endif
			src = htable_lookup(kas.a_hat, va, rp->hkr_level);
			ASSERT(src != NULL);
			x86pte_copy(src, ht, start, cnt);
			htable_release(src);
		}
	}

init_done:

#if defined(__xpv)
	/*
	 * Pin top level page tables after initializing them
	 */
	xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
#if defined(__amd64)
	xen_pin(hat->hat_user_ptable, mmu.max_level);
#endif
#endif
	XPV_ALLOW_MIGRATE();

	/*
	 * Put it at the start of the global list of all hats (used by stealing)
	 *
	 * kas.a_hat is not in the list but is instead used to find the
	 * first and last items in the list.
	 *
	 * - kas.a_hat->hat_next points to the start of the user hats.
	 *   The list ends where hat->hat_next == NULL
	 *
	 * - kas.a_hat->hat_prev points to the last of the user hats.
	 *   The list begins where hat->hat_prev == NULL
	 */
	mutex_enter(&hat_list_lock);
	hat->hat_prev = NULL;
	hat->hat_next = kas.a_hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat;
	else
		kas.a_hat->hat_prev = hat;
	kas.a_hat->hat_next = hat;
	mutex_exit(&hat_list_lock);

	return (hat);
}

/*
 * The process has finished executing but the as has not been cleaned up yet.
 */
/*ARGSUSED*/
void
hat_free_start(hat_t *hat)
{
	ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));

	/*
	 * If the hat is currently a stealing victim, wait for the stealing
	 * to finish.  Once we mark it as HAT_FREEING, htable_steal()
	 * won't look at its pagetables anymore.
	 */
	mutex_enter(&hat_list_lock);
	while (hat->hat_flags & HAT_VICTIM)
		cv_wait(&hat_list_cv, &hat_list_lock);
	hat->hat_flags |= HAT_FREEING;
	mutex_exit(&hat_list_lock);
}

/*
 * An address space is being destroyed, so we destroy the associated hat.
 */
void
hat_free_end(hat_t *hat)
{
	kmem_cache_t *cache;

	ASSERT(hat->hat_flags & HAT_FREEING);

	/*
	 * must not be running on the given hat
	 */
	ASSERT(CPU->cpu_current_hat != hat);

	/*
	 * Remove it from the list of HATs
	 */
	mutex_enter(&hat_list_lock);
	if (hat->hat_prev)
		hat->hat_prev->hat_next = hat->hat_next;
	else
		kas.a_hat->hat_next = hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat->hat_prev;
	else
		kas.a_hat->hat_prev = hat->hat_prev;
	mutex_exit(&hat_list_lock);
	hat->hat_next = hat->hat_prev = NULL;

#if defined(__xpv)
	/*
	 * On the hypervisor, unpin top level page table(s)
	 */
	xen_unpin(hat->hat_htable->ht_pfn);
#if defined(__amd64)
	xen_unpin(hat->hat_user_ptable);
#endif
#endif

	/*
	 * Make a pass through the htables freeing them all up.
	 */
	htable_purge_hat(hat);

	/*
	 * Decide which kmem cache the hash table came from, then free it.
	 */
	if (hat->hat_flags & HAT_VLP)
		cache = vlp_hash_cache;
	else
		cache = hat_hash_cache;
	kmem_cache_free(cache, hat->hat_ht_hash);
	hat->hat_ht_hash = NULL;

	hat->hat_flags = 0;
	kmem_cache_free(hat_cache, hat);
}

/*
 * round kernelbase down to a supported value to use for _userlimit
 *
 * userlimit must be aligned down to an entry in the top level htable.
 * The one exception is for 32 bit HAT's running PAE.
 */
uintptr_t
hat_kernelbase(uintptr_t va)
{
#if defined(__i386)
	va &= LEVEL_MASK(1);
#endif
	if (IN_VA_HOLE(va))
		panic("_userlimit %p will fall in VA hole\n", (void *)va);
	return (va);
}

/*
 * Determine the largest page size (pagetable level) usable for mappings.
 */
static void
set_max_page_level()
{
	level_t lvl;

	if (!kbm_largepage_support) {
		lvl = 0;
	} else {
		if (x86_feature & X86_1GPG) {
			lvl = 2;
			if (chk_optimal_1gtlb &&
			    cpuid_opteron_erratum(CPU, 6671130)) {
				lvl = 1;
			}
			if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
			    LEVEL_SHIFT(0))) {
				lvl = 1;
			}
		} else {
			lvl = 1;
		}
	}
	mmu.max_page_level = lvl;

	if ((lvl == 2) && (enable_1gpg == 0))
		mmu.umax_page_level = 1;
	else
		mmu.umax_page_level = lvl;
}
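
/*
 * For example, on a CPU with X86_1GPG and no erratum 6671130, lvl ends up
 * 2 (1g pages).  If the administrator has set enable_1gpg = 0, the kernel
 * itself may still use 1g pages (max_page_level == 2) but user mappings
 * are capped at level 1 large pages (umax_page_level == 1).
 */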

/*
 * Initialize hat data structures based on processor MMU information.
 */
void
mmu_init(void)
{
	uint_t max_htables;
	uint_t pa_bits;
	uint_t va_bits;
	int i;

	/*
	 * If the CPU enabled the page table global bit, use it for the
	 * kernel.  This is bit 7 in CR4 (PGE - Page Global Enable).
	 */
	if ((x86_feature & X86_PGE) != 0 && (getcr4() & CR4_PGE) != 0)
		mmu.pt_global = PT_GLOBAL;

	/*
	 * Detect NX and PAE usage.
	 */
	mmu.pae_hat = kbm_pae_support;
	if (kbm_nx_support)
		mmu.pt_nx = PT_NX;
	else
		mmu.pt_nx = 0;

	/*
	 * Use CPU info to set various MMU parameters
	 */
	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);

	if (va_bits < sizeof (void *) * NBBY) {
		mmu.hole_start = (1ul << (va_bits - 1));
		mmu.hole_end = 0ul - mmu.hole_start - 1;
	} else {
		mmu.hole_end = 0;
		mmu.hole_start = mmu.hole_end - 1;
	}
#if defined(OPTERON_ERRATUM_121)
	/*
	 * If erratum 121 has already been detected at this time, hole_start
	 * contains the value to be subtracted from mmu.hole_start.
	 */
	ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
	hole_start = mmu.hole_start - hole_start;
#else
	hole_start = mmu.hole_start;
#endif
	hole_end = mmu.hole_end;

	mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
	if (mmu.pae_hat == 0 && pa_bits > 32)
		mmu.highest_pfn = PFN_4G - 1;

	if (mmu.pae_hat) {
		mmu.pte_size = 8;	/* 8 byte PTEs */
		mmu.pte_size_shift = 3;
	} else {
		mmu.pte_size = 4;	/* 4 byte PTEs */
		mmu.pte_size_shift = 2;
	}

	if (mmu.pae_hat && (x86_feature & X86_PAE) == 0)
		panic("Processor does not support PAE");

	if ((x86_feature & X86_CX8) == 0)
		panic("Processor does not support cmpxchg8b instruction");

#if defined(__amd64)

	mmu.num_level = 4;
	mmu.max_level = 3;
	mmu.ptes_per_table = 512;
	mmu.top_level_count = 512;

	mmu.level_shift[0] = 12;
	mmu.level_shift[1] = 21;
	mmu.level_shift[2] = 30;
	mmu.level_shift[3] = 39;

#elif defined(__i386)

	if (mmu.pae_hat) {
		mmu.num_level = 3;
		mmu.max_level = 2;
		mmu.ptes_per_table = 512;
		mmu.top_level_count = 4;

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 21;
		mmu.level_shift[2] = 30;

	} else {
		mmu.num_level = 2;
		mmu.max_level = 1;
		mmu.ptes_per_table = 1024;
		mmu.top_level_count = 1024;

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 22;
	}

#endif	/* __i386 */

	for (i = 0; i < mmu.num_level; ++i) {
		mmu.level_size[i] = 1UL << mmu.level_shift[i];
		mmu.level_offset[i] = mmu.level_size[i] - 1;
		mmu.level_mask[i] = ~mmu.level_offset[i];
	}
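
	/*
	 * A rough sketch of how these tables are used: the pagetable entry
	 * index for a VA at level l is (va >> mmu.level_shift[l]) masked
	 * to the table size (see htable_va2entry(), which also accounts
	 * for the 4-entry PAE top level).  So on amd64 a single level 1
	 * entry maps level_size[1] = 2m of VA.
	 */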

	set_max_page_level();

	mmu_page_sizes = mmu.max_page_level + 1;
	mmu_exported_page_sizes = mmu.umax_page_level + 1;

	/* restrict legacy applications from using pagesizes 1g and above */
	mmu_legacy_page_sizes =
	    (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;


	for (i = 0; i <= mmu.max_page_level; ++i) {
		mmu.pte_bits[i] = PT_VALID | pt_kern;
		if (i > 0)
			mmu.pte_bits[i] |= PT_PAGESIZE;
	}

	/*
	 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
	 */
	for (i = 1; i < mmu.num_level; ++i)
		mmu.ptp_bits[i] = PT_PTPBITS;

#if defined(__i386)
	mmu.ptp_bits[2] = PT_VALID;
#endif

	/*
	 * Compute how many hash table entries to have per process for htables.
	 * We start with 1 page's worth of entries.
	 *
	 * If physical memory is small, reduce the amount needed to cover it.
	 */
	max_htables = physmax / mmu.ptes_per_table;
	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
	while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
		mmu.hash_cnt >>= 1;
	mmu.vlp_hash_cnt = mmu.hash_cnt;
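
	/*
	 * For example, with 4K pages and 8 byte pointers (64 bit) this
	 * starts at 4096 / 8 = 512 hash buckets, then halves repeatedly
	 * (down to a floor of 16) while physmax implies fewer than
	 * hash_cnt htables in total.
	 */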

#if defined(__amd64)
	/*
	 * If running in 64 bits and physical memory is large,
	 * increase the size of the cache to cover all of memory for
	 * a 64 bit process.
	 */
#define	HASH_MAX_LENGTH 4
	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
		mmu.hash_cnt <<= 1;
#endif
}


/*
 * initialize hat data structures
 */
void
hat_init()
{
#if defined(__i386)
	/*
	 * _userlimit must be aligned correctly
	 */
	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
		halt("hat_init(): Unable to continue");
	}
#endif

	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * initialize kmem caches
	 */
	htable_init();
	hment_init();

	hat_cache = kmem_cache_create("hat_t",
	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
	    NULL, 0, 0);

	hat_hash_cache = kmem_cache_create("HatHash",
	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
	    NULL, 0, 0);

	/*
	 * VLP hats can use a smaller hash table size on large memory machines
	 */
	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
		vlp_hash_cache = hat_hash_cache;
	} else {
		vlp_hash_cache = kmem_cache_create("HatVlpHash",
		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
		    NULL, 0, 0);
	}

	/*
	 * Set up the kernel's hat
	 */
	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	kas.a_hat->hat_as = &kas;
	kas.a_hat->hat_flags = 0;
	AS_LOCK_EXIT(&kas, &kas.a_lock);

	CPUSET_ZERO(khat_cpuset);
	CPUSET_ADD(khat_cpuset, CPU->cpu_id);

	/*
	 * The kernel hat's next pointer serves as the head of the hat list.
	 * The kernel hat's prev pointer tracks the last hat on the list for
	 * htable_steal() to use.
	 */
	kas.a_hat->hat_next = NULL;
	kas.a_hat->hat_prev = NULL;

	/*
	 * Allocate an htable hash bucket for the kernel
	 * XX64 - tune for 64 bit procs
	 */
	kas.a_hat->hat_num_hash = mmu.hash_cnt;
	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));

	/*
	 * zero out the top level and cached htable pointers
	 */
	kas.a_hat->hat_ht_cached = NULL;
	kas.a_hat->hat_htable = NULL;

	/*
	 * Pre-allocate hrm_hashtab before enabling the collection of
	 * refmod statistics.  Allocating on the fly would mean us
	 * running the risk of suffering recursive mutex enters or
	 * deadlocks.
	 */
	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
	    KM_SLEEP);
}

/*
 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 *
 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 */
/*ARGSUSED*/
static void
hat_vlp_setup(struct cpu *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci = cpu->cpu_hat_info;
	pfn_t pfn;

	/*
	 * allocate the level==2 page table for the bottom most
	 * 512Gig of address space (this is where 32 bit apps live)
	 */
	ASSERT(hci != NULL);
	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);

	/*
	 * Allocate a top level pagetable and copy the kernel's
	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
	 */
	hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
	hci->hci_vlp_pfn =
	    hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
	ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
	bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);

	pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
	ASSERT(pfn != PFN_INVALID);
	hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
#endif /* __amd64 && !__xpv */
}

/*ARGSUSED*/
static void
hat_vlp_teardown(cpu_t *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci;

	if ((hci = cpu->cpu_hat_info) == NULL)
		return;
	if (hci->hci_vlp_l2ptes)
		kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
	if (hci->hci_vlp_l3ptes)
		kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
#endif
}

#define	NEXT_HKR(r, l, s, e) {			\
	kernel_ranges[r].hkr_level = l;		\
	kernel_ranges[r].hkr_start_va = s;	\
	kernel_ranges[r].hkr_end_va = e;	\
	++r;					\
}

/*
 * Finish filling in the kernel hat.
 * Pre-fill all top level kernel page table entries for the kernel's
 * part of the address range.  From this point on we can't use any new
 * kernel large pages if they need PTE's at max_level.
 *
 * Create the kmap mappings.
 */
void
hat_init_finish(void)
{
	size_t		size;
	uint_t		r = 0;
	uintptr_t	va;
	hat_kernel_range_t *rp;


	/*
	 * We are now effectively running on the kernel hat.
	 * Clearing use_boot_reserve shuts off using the pre-allocated boot
	 * reserve for all HAT allocations.  From here on, the reserves are
	 * only used when avoiding recursion in kmem_alloc().
	 */
	use_boot_reserve = 0;
	htable_adjust_reserve();

	/*
	 * User HATs are initialized with copies of all kernel mappings in
	 * higher level page tables. Ensure that those entries exist.
	 */
#if defined(__amd64)

	NEXT_HKR(r, 3, kernelbase, 0);
#if defined(__xpv)
	NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
#endif

#elif defined(__i386)

#if !defined(__xpv)
	if (mmu.pae_hat) {
		va = kernelbase;
		if ((va & LEVEL_MASK(2)) != va) {
			va = P2ROUNDUP(va, LEVEL_SIZE(2));
			NEXT_HKR(r, 1, kernelbase, va);
		}
		if (va != 0)
			NEXT_HKR(r, 2, va, 0);
	} else
#endif /* __xpv */
		NEXT_HKR(r, 1, kernelbase, 0);

#endif /* __i386 */

	num_kernel_ranges = r;

	/*
	 * Create all the kernel pagetables that will have entries
	 * shared to user HATs.
	 */
	for (r = 0; r < num_kernel_ranges; ++r) {
		rp = &kernel_ranges[r];
		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
		    va += LEVEL_SIZE(rp->hkr_level)) {
			htable_t *ht;

			if (IN_HYPERVISOR_VA(va))
				continue;

			/* can/must skip if a page mapping already exists */
			if (rp->hkr_level <= mmu.max_page_level &&
			    (ht = htable_getpage(kas.a_hat, va, NULL)) !=
			    NULL) {
				htable_release(ht);
				continue;
			}

			(void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
			    NULL);
		}
	}

	/*
	 * 32 bit PAE metal kernels use only 4 of the 512 entries in the
	 * page holding the top level pagetable. We use the remainder for
	 * the "per CPU" page tables for VLP processes.
	 * Map the top level kernel pagetable into the kernel to make
	 * it easy to use bcopy to access these tables.
	 */
	if (mmu.pae_hat) {
		vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
		hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
		    kas.a_hat->hat_htable->ht_pfn,
#if !defined(__xpv)
		    PROT_WRITE |
#endif
		    PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
		    HAT_LOAD | HAT_LOAD_NOCONSIST);
	}
	hat_vlp_setup(CPU);

	/*
	 * Create kmap (cached mappings of kernel PTEs)
	 * for 32 bit we map from segmap_start .. ekernelheap
	 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
	 */
#if defined(__i386)
	size = (uintptr_t)ekernelheap - segmap_start;
#elif defined(__amd64)
	size = segmapsize;
#endif
	hat_kmap_init((uintptr_t)segmap_start, size);
}

/*
 * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
 * are 32 bit, so for safety we must use cas64() to install these.
 */
#ifdef __i386
static void
reload_pae32(hat_t *hat, cpu_t *cpu)
{
	x86pte_t *src;
	x86pte_t *dest;
	x86pte_t pte;
	int i;

	/*
	 * Load the 4 entries of the level 2 page table into this
	 * cpu's range of the vlp_page and point cr3 at them.
	 */
	ASSERT(mmu.pae_hat);
	src = hat->hat_vlp_ptes;
	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
	for (i = 0; i < VLP_NUM_PTES; ++i) {
		for (;;) {
			pte = dest[i];
			if (pte == src[i])
				break;
			if (cas64(dest + i, pte, src[i]) != src[i])
				break;
		}
	}
}
#endif

/*
 * Switch to a new active hat, maintaining bit masks to track active CPUs.
 *
 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
 * remains a 32-bit value.
 */
void
hat_switch(hat_t *hat)
{
	uint64_t	newcr3;
	cpu_t		*cpu = CPU;
	hat_t		*old = cpu->cpu_current_hat;

	/*
	 * set up this information first, so we don't miss any cross calls
	 */
	if (old != NULL) {
		if (old == hat)
			return;
		if (old != kas.a_hat)
			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
	}

	/*
	 * Add this CPU to the active set for this HAT.
	 */
	if (hat != kas.a_hat) {
		CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
	}
	cpu->cpu_current_hat = hat;

	/*
	 * now go ahead and load cr3
	 */
	if (hat->hat_flags & HAT_VLP) {
#if defined(__amd64)
		x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;

		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
		newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
#elif defined(__i386)
		reload_pae32(hat, cpu);
		newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
		    (cpu->cpu_id + 1) * VLP_SIZE;
#endif
	} else {
		newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
	}
#ifdef __xpv
	{
		struct mmuext_op t[2];
		uint_t retcnt;
		uint_t opcnt = 1;

		t[0].cmd = MMUEXT_NEW_BASEPTR;
		t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
#if defined(__amd64)
		/*
		 * There's an interesting problem here, as to what to
		 * actually specify when switching to the kernel hat.
		 * For now we'll reuse the kernel hat again.
		 */
		t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
		if (hat == kas.a_hat)
			t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
		else
			t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
		++opcnt;
#endif	/* __amd64 */
		if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmu_update() failed");
		ASSERT(retcnt == opcnt);

	}
#else
	setcr3(newcr3);
#endif
	ASSERT(cpu == CPU);
}
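
/*
 * A worked example of the i386 VLP case above: each CPU's four level 2
 * PTEs live at byte offset (cpu_id + 1) * VLP_SIZE within the kernel's
 * top level pagetable page, so MAKECR3(ht_pfn) + (cpu_id + 1) * VLP_SIZE
 * points %cr3 at that CPU's private copy (in PAE mode %cr3 only needs
 * 32 byte alignment, so it may point into the middle of a page).
 */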

/*
 * Utility to return a valid x86pte_t from protections, pfn, and level number
 */
static x86pte_t
hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
{
	x86pte_t	pte;
	uint_t		cache_attr = attr & HAT_ORDER_MASK;

	pte = MAKEPTE(pfn, level);

	if (attr & PROT_WRITE)
		PTE_SET(pte, PT_WRITABLE);

	if (attr & PROT_USER)
		PTE_SET(pte, PT_USER);

	if (!(attr & PROT_EXEC))
		PTE_SET(pte, mmu.pt_nx);

	/*
	 * Set the software bits used to track ref/mod sync's and hments.
	 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
	 */
	if (flags & HAT_LOAD_NOCONSIST)
		PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
	else if (attr & HAT_NOSYNC)
		PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);

	/*
	 * Set the caching attributes in the PTE. The combination
	 * of attributes is poorly defined, so we pay attention
	 * to them in the given order.
	 *
	 * The test for HAT_STRICTORDER is different because it's defined
	 * as "0" - which was a stupid thing to do, but is too late to change!
	 */
	if (cache_attr == HAT_STRICTORDER) {
		PTE_SET(pte, PT_NOCACHE);
	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
		/* nothing to set */;
	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
		PTE_SET(pte, PT_NOCACHE);
		if (x86_feature & X86_PAT)
			PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
		else
			PTE_SET(pte, PT_WRITETHRU);
	} else {
		panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
	}

	return (pte);
}
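
/*
 * For example, the vlp_page hat_devload() in hat_init_finish() above,
 * assuming its attributes arrive here unchanged, builds a level 0 PTE
 * with PT_WRITABLE (PROT_WRITE on metal), mmu.pt_nx since PROT_EXEC is
 * absent (a no-op when NX is unsupported), PT_NOCONSIST | PT_REF | PT_MOD
 * from HAT_LOAD_NOCONSIST, and no extra caching bits because
 * HAT_UNORDERED_OK falls into the "nothing to set" arm.
 */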

/*
 * Duplicate address translations of the parent to the child.
 * This function really isn't used anymore.
 */
/*ARGSUSED*/
int
hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
{
	ASSERT((uintptr_t)addr < kernelbase);
	ASSERT(new != kas.a_hat);
	ASSERT(old != kas.a_hat);
	return (0);
}

/*
 * Allocate any hat resources required for a process being swapped in.
 */
/*ARGSUSED*/
void
hat_swapin(hat_t *hat)
{
	/* do nothing - we let everything fault back in */
}

/*
 * Unload all translations associated with an address space of a process
 * that is being swapped out.
 */
void
hat_swapout(hat_t *hat)
{
	uintptr_t	vaddr = (uintptr_t)0;
	uintptr_t	eaddr = _userlimit;
	htable_t	*ht = NULL;
	level_t		l;

	XPV_DISALLOW_MIGRATE();
	/*
	 * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
	 * seg_spt and shared pagetables can't be swapped out.
	 * Take a look at segspt_shmswapout() - it's a big no-op.
	 *
	 * Instead we'll walk through all the address space and unload
	 * any mappings which we are sure are not shared, not locked.
	 */
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
		eaddr = (uintptr_t)hat->hat_as->a_userlimit;

	while (vaddr < eaddr) {
		(void) htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;

		ASSERT(!IN_VA_HOLE(vaddr));

		/*
		 * If the page table is shared skip its entire range.
		 */
		l = ht->ht_level;
		if (ht->ht_flags & HTABLE_SHARED_PFN) {
			vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
			htable_release(ht);
			ht = NULL;
			continue;
		}

		/*
		 * If the page table has no locked entries, unload this one.
		 */
		if (ht->ht_lock_cnt == 0)
			hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
			    HAT_UNLOAD_UNMAP);

		/*
		 * If we have a level 0 page table with locked entries,
		 * skip the entire page table, otherwise skip just one entry.
		 */
		if (ht->ht_lock_cnt > 0 && l == 0)
			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
		else
			vaddr += LEVEL_SIZE(l);
	}
	if (ht)
		htable_release(ht);

	/*
	 * We're in swapout because the system is low on memory, so
	 * go back and flush all the htables off the cached list.
	 */
	htable_purge_hat(hat);
	XPV_ALLOW_MIGRATE();
}

/*
 * returns number of bytes that have valid mappings in hat.
 */
size_t
hat_get_mapped_size(hat_t *hat)
{
	size_t total = 0;
	int l;

	for (l = 0; l <= mmu.max_page_level; l++)
		total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
	total += hat->hat_ism_pgcnt;

	return (total);
}

/*
 * enable/disable collection of stats for hat.
 */
int
hat_stats_enable(hat_t *hat)
{
	atomic_add_32(&hat->hat_stats, 1);
	return (1);
}

void
hat_stats_disable(hat_t *hat)
{
	atomic_add_32(&hat->hat_stats, -1);
}

/*
 * Utility to sync the ref/mod bits from a page table entry to the page_t.
 * We must be holding the mapping list lock when this is called.
 */
12340Sstevel@tonic-gate static void
12350Sstevel@tonic-gate hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
12360Sstevel@tonic-gate {
12370Sstevel@tonic-gate 	uint_t	rm = 0;
12380Sstevel@tonic-gate 	pgcnt_t	pgcnt;
12390Sstevel@tonic-gate 
12403446Smrj 	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
12410Sstevel@tonic-gate 		return;
12420Sstevel@tonic-gate 
12430Sstevel@tonic-gate 	if (PTE_GET(pte, PT_REF))
12440Sstevel@tonic-gate 		rm |= P_REF;
12450Sstevel@tonic-gate 
12460Sstevel@tonic-gate 	if (PTE_GET(pte, PT_MOD))
12470Sstevel@tonic-gate 		rm |= P_MOD;
12480Sstevel@tonic-gate 
12490Sstevel@tonic-gate 	if (rm == 0)
12500Sstevel@tonic-gate 		return;
12510Sstevel@tonic-gate 
12520Sstevel@tonic-gate 	/*
12530Sstevel@tonic-gate 	 * sync to all constituent pages of a large page
12540Sstevel@tonic-gate 	 */
12550Sstevel@tonic-gate 	ASSERT(x86_hm_held(pp));
12560Sstevel@tonic-gate 	pgcnt = page_get_pagecnt(level);
12570Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
12580Sstevel@tonic-gate 	for (; pgcnt > 0; --pgcnt) {
12590Sstevel@tonic-gate 		/*
12600Sstevel@tonic-gate 		 * hat_page_demote() can't decrease
12610Sstevel@tonic-gate 		 * pszc below this mapping size,
12620Sstevel@tonic-gate 		 * since this large mapping still existed
12630Sstevel@tonic-gate 		 * after we took the mlist lock.
12640Sstevel@tonic-gate 		 */
12650Sstevel@tonic-gate 		ASSERT(pp->p_szc >= level);
12660Sstevel@tonic-gate 		hat_page_setattr(pp, rm);
12670Sstevel@tonic-gate 		++pp;
12680Sstevel@tonic-gate 	}
12690Sstevel@tonic-gate }
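
/*
 * For example, assuming 4K base pages, a level 1 (2M) mapping has
 * page_get_pagecnt(1) == 512, so syncing one dirty large-page PTE sets
 * P_MOD on all 512 constituent page_t's (pp through pp + 511).
 */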
12700Sstevel@tonic-gate 
12710Sstevel@tonic-gate /*
12720Sstevel@tonic-gate  * This is the set of PTE bits for PFN, permissions and caching
12735084Sjohnlev  * that are allowed to change on a HAT_LOAD_REMAP
12740Sstevel@tonic-gate  */
12750Sstevel@tonic-gate #define	PT_REMAP_BITS							\
12760Sstevel@tonic-gate 	(PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |		\
12775084Sjohnlev 	PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
12780Sstevel@tonic-gate 
1279510Skchow #define	REMAPASSERT(EX)	if (!(EX)) panic("hati_pte_map: " #EX)
12800Sstevel@tonic-gate /*
12810Sstevel@tonic-gate  * Do the low-level work to get a mapping entered into a HAT's pagetables
12820Sstevel@tonic-gate  * and in the mapping list of the associated page_t.
 * Returns 0 on success, or -1 on a large page / page table collision.
12830Sstevel@tonic-gate  */
12843446Smrj static int
12850Sstevel@tonic-gate hati_pte_map(
12860Sstevel@tonic-gate 	htable_t	*ht,
12870Sstevel@tonic-gate 	uint_t		entry,
12880Sstevel@tonic-gate 	page_t		*pp,
12890Sstevel@tonic-gate 	x86pte_t	pte,
12900Sstevel@tonic-gate 	int		flags,
12910Sstevel@tonic-gate 	void		*pte_ptr)
12920Sstevel@tonic-gate {
12930Sstevel@tonic-gate 	hat_t		*hat = ht->ht_hat;
12940Sstevel@tonic-gate 	x86pte_t	old_pte;
12950Sstevel@tonic-gate 	level_t		l = ht->ht_level;
12960Sstevel@tonic-gate 	hment_t		*hm;
12970Sstevel@tonic-gate 	uint_t		is_consist;
12988522SJakub.Jermar@Sun.COM 	uint_t		is_locked;
12993446Smrj 	int		rv = 0;
13000Sstevel@tonic-gate 
13010Sstevel@tonic-gate 	/*
1302*9894SPavel.Tatashin@Sun.COM 	 * Is this a consistent (ie. needs mapping list lock) mapping?
13030Sstevel@tonic-gate 	 */
13040Sstevel@tonic-gate 	is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
13050Sstevel@tonic-gate 
13060Sstevel@tonic-gate 	/*
13070Sstevel@tonic-gate 	 * Track locked mapping count in the htable.  Do this first,
13080Sstevel@tonic-gate 	 * as we track locking even if there already is a mapping present.
13090Sstevel@tonic-gate 	 */
13108522SJakub.Jermar@Sun.COM 	is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
13118522SJakub.Jermar@Sun.COM 	if (is_locked)
13120Sstevel@tonic-gate 		HTABLE_LOCK_INC(ht);
13130Sstevel@tonic-gate 
13140Sstevel@tonic-gate 	/*
13150Sstevel@tonic-gate 	 * Acquire the page's mapping list lock and get an hment to use.
13160Sstevel@tonic-gate 	 * Note that hment_prepare() might return NULL.
13170Sstevel@tonic-gate 	 */
13180Sstevel@tonic-gate 	if (is_consist) {
13190Sstevel@tonic-gate 		x86_hm_enter(pp);
13200Sstevel@tonic-gate 		hm = hment_prepare(ht, entry, pp);
13210Sstevel@tonic-gate 	}
13220Sstevel@tonic-gate 
13230Sstevel@tonic-gate 	/*
13240Sstevel@tonic-gate 	 * Set the new pte, retrieving the old one at the same time.
13250Sstevel@tonic-gate 	 */
13260Sstevel@tonic-gate 	old_pte = x86pte_set(ht, entry, pte, pte_ptr);
13270Sstevel@tonic-gate 
13280Sstevel@tonic-gate 	/*
13298522SJakub.Jermar@Sun.COM 	 * Did we get a large page / page table collision?
13303446Smrj 	 */
13313446Smrj 	if (old_pte == LPAGE_ERROR) {
13328522SJakub.Jermar@Sun.COM 		if (is_locked)
13338522SJakub.Jermar@Sun.COM 			HTABLE_LOCK_DEC(ht);
13343446Smrj 		rv = -1;
13353446Smrj 		goto done;
13363446Smrj 	}
13373446Smrj 
13383446Smrj 	/*
13390Sstevel@tonic-gate 	 * If the mapping didn't change there is nothing more to do.
13400Sstevel@tonic-gate 	 */
13413446Smrj 	if (PTE_EQUIV(pte, old_pte))
13423446Smrj 		goto done;
13430Sstevel@tonic-gate 
13440Sstevel@tonic-gate 	/*
13450Sstevel@tonic-gate 	 * Install a new mapping in the page's mapping list
13460Sstevel@tonic-gate 	 */
13470Sstevel@tonic-gate 	if (!PTE_ISVALID(old_pte)) {
13480Sstevel@tonic-gate 		if (is_consist) {
13490Sstevel@tonic-gate 			hment_assign(ht, entry, pp, hm);
13500Sstevel@tonic-gate 			x86_hm_exit(pp);
13510Sstevel@tonic-gate 		} else {
13520Sstevel@tonic-gate 			ASSERT(flags & HAT_LOAD_NOCONSIST);
13530Sstevel@tonic-gate 		}
13545349Skchow #if defined(__amd64)
13555349Skchow 		if (ht->ht_flags & HTABLE_VLP) {
13565349Skchow 			cpu_t *cpu = CPU;
13575349Skchow 			x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
13585349Skchow 			VLP_COPY(hat->hat_vlp_ptes, vlpptep);
13595349Skchow 		}
13605349Skchow #endif
13610Sstevel@tonic-gate 		HTABLE_INC(ht->ht_valid_cnt);
13620Sstevel@tonic-gate 		PGCNT_INC(hat, l);
13633446Smrj 		return (rv);
13640Sstevel@tonic-gate 	}
13650Sstevel@tonic-gate 
13660Sstevel@tonic-gate 	/*
13670Sstevel@tonic-gate 	 * Remaps are more complicated:
13680Sstevel@tonic-gate 	 *  - HAT_LOAD_REMAP must be specified if changing the pfn.
13690Sstevel@tonic-gate 	 *    We also require that NOCONSIST be specified.
13700Sstevel@tonic-gate 	 *  - Otherwise only permission or caching bits may change.
13710Sstevel@tonic-gate 	 */
13720Sstevel@tonic-gate 	if (!PTE_ISPAGE(old_pte, l))
13730Sstevel@tonic-gate 		panic("non-null/page mapping pte=" FMT_PTE, old_pte);
13740Sstevel@tonic-gate 
13750Sstevel@tonic-gate 	if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1376510Skchow 		REMAPASSERT(flags & HAT_LOAD_REMAP);
1377510Skchow 		REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
13783446Smrj 		REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1379510Skchow 		REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
13800Sstevel@tonic-gate 		    pf_is_memory(PTE2PFN(pte, l)));
1381510Skchow 		REMAPASSERT(!is_consist);
13820Sstevel@tonic-gate 	}
13830Sstevel@tonic-gate 
13840Sstevel@tonic-gate 	/*
13855084Sjohnlev 	 * We only let remaps change certain bits in the PTE.
13860Sstevel@tonic-gate 	 */
13875084Sjohnlev 	if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
13885084Sjohnlev 		panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
13895084Sjohnlev 		    old_pte, pte);
13900Sstevel@tonic-gate 
13910Sstevel@tonic-gate 	/*
13920Sstevel@tonic-gate 	 * We don't create any mapping list entries on a remap, so release
13930Sstevel@tonic-gate 	 * any allocated hment after we drop the mapping list lock.
13940Sstevel@tonic-gate 	 */
13953446Smrj done:
13960Sstevel@tonic-gate 	if (is_consist) {
13970Sstevel@tonic-gate 		x86_hm_exit(pp);
13980Sstevel@tonic-gate 		if (hm != NULL)
13990Sstevel@tonic-gate 			hment_free(hm);
14000Sstevel@tonic-gate 	}
14013446Smrj 	return (rv);
14020Sstevel@tonic-gate }
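
/*
 * A sketch of the expected calling pattern (see hati_load_common() and
 * hat_memload_array() below): a -1 return means the new large page
 * collided with an existing page table, and the caller retries at a
 * smaller level:
 *
 *	while (hati_pte_map(ht, entry, pp, pte, flags, NULL) != 0)
 *		retry with --level and a rebuilt pte;
 */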
14030Sstevel@tonic-gate 
14040Sstevel@tonic-gate /*
14053446Smrj  * Internal routine to load a single page table entry. This only fails if
14063446Smrj  * we attempt to overwrite a page table link with a large page.
14070Sstevel@tonic-gate  */
14083446Smrj static int
14090Sstevel@tonic-gate hati_load_common(
14100Sstevel@tonic-gate 	hat_t		*hat,
14110Sstevel@tonic-gate 	uintptr_t	va,
14120Sstevel@tonic-gate 	page_t		*pp,
14130Sstevel@tonic-gate 	uint_t		attr,
14140Sstevel@tonic-gate 	uint_t		flags,
14150Sstevel@tonic-gate 	level_t		level,
14160Sstevel@tonic-gate 	pfn_t		pfn)
14170Sstevel@tonic-gate {
14180Sstevel@tonic-gate 	htable_t	*ht;
14190Sstevel@tonic-gate 	uint_t		entry;
14200Sstevel@tonic-gate 	x86pte_t	pte;
14213446Smrj 	int		rv = 0;
14220Sstevel@tonic-gate 
14234004Sjosephb 	/*
14244004Sjosephb 	 * The number 16 is arbitrary and is here to catch a recursion problem
14254004Sjosephb 	 * early before we blow out the kernel stack.
14264004Sjosephb 	 */
14274004Sjosephb 	++curthread->t_hatdepth;
14284004Sjosephb 	ASSERT(curthread->t_hatdepth < 16);
14294004Sjosephb 
14300Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
14310Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
14320Sstevel@tonic-gate 
14330Sstevel@tonic-gate 	if (flags & HAT_LOAD_SHARE)
14340Sstevel@tonic-gate 		hat->hat_flags |= HAT_SHARED;
14350Sstevel@tonic-gate 
14360Sstevel@tonic-gate 	/*
14370Sstevel@tonic-gate 	 * Find the page table that maps this page if it already exists.
14380Sstevel@tonic-gate 	 */
14390Sstevel@tonic-gate 	ht = htable_lookup(hat, va, level);
14400Sstevel@tonic-gate 
14410Sstevel@tonic-gate 	/*
14424004Sjosephb 	 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
14430Sstevel@tonic-gate 	 */
14444004Sjosephb 	if (pp == NULL)
14450Sstevel@tonic-gate 		flags |= HAT_LOAD_NOCONSIST;
14460Sstevel@tonic-gate 
14470Sstevel@tonic-gate 	if (ht == NULL) {
14480Sstevel@tonic-gate 		ht = htable_create(hat, va, level, NULL);
14490Sstevel@tonic-gate 		ASSERT(ht != NULL);
14500Sstevel@tonic-gate 	}
14510Sstevel@tonic-gate 	entry = htable_va2entry(va, ht);
14520Sstevel@tonic-gate 
14530Sstevel@tonic-gate 	/*
14540Sstevel@tonic-gate 	 * a bunch of paranoid error checking
14550Sstevel@tonic-gate 	 */
14560Sstevel@tonic-gate 	ASSERT(ht->ht_busy > 0);
14570Sstevel@tonic-gate 	if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
14587240Srh87107 		panic("hati_load_common: bad htable %p, va %p",
14597240Srh87107 		    (void *)ht, (void *)va);
14600Sstevel@tonic-gate 	ASSERT(ht->ht_level == level);
14610Sstevel@tonic-gate 
14620Sstevel@tonic-gate 	/*
14630Sstevel@tonic-gate 	 * construct the new PTE
14640Sstevel@tonic-gate 	 */
14650Sstevel@tonic-gate 	if (hat == kas.a_hat)
14660Sstevel@tonic-gate 		attr &= ~PROT_USER;
14670Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, level, flags);
14680Sstevel@tonic-gate 	if (hat == kas.a_hat && va >= kernelbase)
14690Sstevel@tonic-gate 		PTE_SET(pte, mmu.pt_global);
14700Sstevel@tonic-gate 
14710Sstevel@tonic-gate 	/*
14720Sstevel@tonic-gate 	 * establish the mapping
14730Sstevel@tonic-gate 	 */
14743446Smrj 	rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
14750Sstevel@tonic-gate 
14760Sstevel@tonic-gate 	/*
14770Sstevel@tonic-gate 	 * release the htable and any reserves
14780Sstevel@tonic-gate 	 */
14790Sstevel@tonic-gate 	htable_release(ht);
14804004Sjosephb 	--curthread->t_hatdepth;
14813446Smrj 	return (rv);
14820Sstevel@tonic-gate }
14830Sstevel@tonic-gate 
14840Sstevel@tonic-gate /*
14850Sstevel@tonic-gate  * special case of hat_memload to deal with some kernel addrs for performance
14860Sstevel@tonic-gate  */
14870Sstevel@tonic-gate static void
14880Sstevel@tonic-gate hat_kmap_load(
14890Sstevel@tonic-gate 	caddr_t		addr,
14900Sstevel@tonic-gate 	page_t		*pp,
14910Sstevel@tonic-gate 	uint_t		attr,
14920Sstevel@tonic-gate 	uint_t		flags)
14930Sstevel@tonic-gate {
14940Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
14950Sstevel@tonic-gate 	x86pte_t	pte;
14960Sstevel@tonic-gate 	pfn_t		pfn = page_pptonum(pp);
14970Sstevel@tonic-gate 	pgcnt_t		pg_off = mmu_btop(va - mmu.kmap_addr);
14980Sstevel@tonic-gate 	htable_t	*ht;
14990Sstevel@tonic-gate 	uint_t		entry;
15000Sstevel@tonic-gate 	void		*pte_ptr;
15010Sstevel@tonic-gate 
15020Sstevel@tonic-gate 	/*
15030Sstevel@tonic-gate 	 * construct the requested PTE
15040Sstevel@tonic-gate 	 */
15050Sstevel@tonic-gate 	attr &= ~PROT_USER;
15060Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
15070Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, 0, flags);
15080Sstevel@tonic-gate 	PTE_SET(pte, mmu.pt_global);
15090Sstevel@tonic-gate 
15100Sstevel@tonic-gate 	/*
15110Sstevel@tonic-gate 	 * Figure out the pte_ptr and htable and use common code to finish up
15120Sstevel@tonic-gate 	 */
15130Sstevel@tonic-gate 	if (mmu.pae_hat)
15140Sstevel@tonic-gate 		pte_ptr = mmu.kmap_ptes + pg_off;
15150Sstevel@tonic-gate 	else
15160Sstevel@tonic-gate 		pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
15170Sstevel@tonic-gate 	ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
15180Sstevel@tonic-gate 	    LEVEL_SHIFT(1)];
15190Sstevel@tonic-gate 	entry = htable_va2entry(va, ht);
15204004Sjosephb 	++curthread->t_hatdepth;
15214004Sjosephb 	ASSERT(curthread->t_hatdepth < 16);
15223446Smrj 	(void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
15234004Sjosephb 	--curthread->t_hatdepth;
15240Sstevel@tonic-gate }
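
/*
 * The kmap fast path works by direct indexing: pg_off selects the PTE
 * within the contiguous kmap_ptes array, and the htable is found by
 * dividing the offset from the first kmap htable's base address by the
 * level 1 span, e.g.
 *
 *	pg_off = mmu_btop(va - mmu.kmap_addr);
 *	ht = mmu.kmap_htables[
 *	    (va - mmu.kmap_htables[0]->ht_vaddr) >> LEVEL_SHIFT(1)];
 */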
15250Sstevel@tonic-gate 
15260Sstevel@tonic-gate /*
15270Sstevel@tonic-gate  * hat_memload() - load a translation to the given page struct
15280Sstevel@tonic-gate  *
15290Sstevel@tonic-gate  * Flags for hat_memload/hat_devload/hat_*attr.
15300Sstevel@tonic-gate  *
15310Sstevel@tonic-gate  * 	HAT_LOAD	Default flags to load a translation to the page.
15320Sstevel@tonic-gate  *
15330Sstevel@tonic-gate  * 	HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
15340Sstevel@tonic-gate  *			and hat_devload().
15350Sstevel@tonic-gate  *
15360Sstevel@tonic-gate  *	HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
15373446Smrj  *			sets PT_NOCONSIST
15380Sstevel@tonic-gate  *
15390Sstevel@tonic-gate  *	HAT_LOAD_SHARE	A flag to hat_memload() to indicate that the h/w
15400Sstevel@tonic-gate  *			page tables that map some user pages (not kas) are
15410Sstevel@tonic-gate  *			shared by more than one process (eg. ISM).
15420Sstevel@tonic-gate  *
15430Sstevel@tonic-gate  *	HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
15440Sstevel@tonic-gate  *
15450Sstevel@tonic-gate  *	HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
15460Sstevel@tonic-gate  *			point the caller is setting up mappings to allocate
15470Sstevel@tonic-gate  *			internal hat layer data structures.  This flag forces
15480Sstevel@tonic-gate  *			the hat layer to tap its reserves in order to prevent
15490Sstevel@tonic-gate  *			infinite recursion.
15500Sstevel@tonic-gate  *
15510Sstevel@tonic-gate  * The following is a protection attribute (like PROT_READ, etc.)
15520Sstevel@tonic-gate  *
15533446Smrj  *	HAT_NOSYNC	sets PT_NOSYNC - this mapping's ref/mod bits
15540Sstevel@tonic-gate  *			are never cleared.
15550Sstevel@tonic-gate  *
15560Sstevel@tonic-gate  * Installing new valid PTEs and creating the mapping list
15570Sstevel@tonic-gate  * entry are controlled under the same lock, which is derived
15580Sstevel@tonic-gate  * from the page_t being mapped.
15590Sstevel@tonic-gate  */
15600Sstevel@tonic-gate static uint_t supported_memload_flags =
15610Sstevel@tonic-gate 	HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
15620Sstevel@tonic-gate 	HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
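
/*
 * A minimal usage sketch (illustrative only; addr and pp are assumed to
 * come from the caller's segment driver): load a locked, writable
 * translation for one page into the current address space:
 *
 *	hat_memload(curproc->p_as->a_hat, addr, pp,
 *	    PROT_READ | PROT_WRITE, HAT_LOAD_LOCK);
 */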
15630Sstevel@tonic-gate 
15640Sstevel@tonic-gate void
15650Sstevel@tonic-gate hat_memload(
15660Sstevel@tonic-gate 	hat_t		*hat,
15670Sstevel@tonic-gate 	caddr_t		addr,
15680Sstevel@tonic-gate 	page_t		*pp,
15690Sstevel@tonic-gate 	uint_t		attr,
15700Sstevel@tonic-gate 	uint_t		flags)
15710Sstevel@tonic-gate {
15720Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
15730Sstevel@tonic-gate 	level_t		level = 0;
15740Sstevel@tonic-gate 	pfn_t		pfn = page_pptonum(pp);
15750Sstevel@tonic-gate 
15765084Sjohnlev 	XPV_DISALLOW_MIGRATE();
15770Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
15783446Smrj 	ASSERT(hat == kas.a_hat || va < _userlimit);
15790Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
15800Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
15810Sstevel@tonic-gate 	ASSERT((flags & supported_memload_flags) == flags);
15820Sstevel@tonic-gate 
15830Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
15840Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
15850Sstevel@tonic-gate 
15860Sstevel@tonic-gate 	/*
15870Sstevel@tonic-gate 	 * kernel address special case for performance.
15880Sstevel@tonic-gate 	 */
15890Sstevel@tonic-gate 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
15900Sstevel@tonic-gate 		ASSERT(hat == kas.a_hat);
15910Sstevel@tonic-gate 		hat_kmap_load(addr, pp, attr, flags);
15925084Sjohnlev 		XPV_ALLOW_MIGRATE();
15930Sstevel@tonic-gate 		return;
15940Sstevel@tonic-gate 	}
15950Sstevel@tonic-gate 
15960Sstevel@tonic-gate 	/*
15970Sstevel@tonic-gate 	 * This is used for memory with normal caching enabled, so
15980Sstevel@tonic-gate 	 * always set HAT_STORECACHING_OK.
15990Sstevel@tonic-gate 	 */
16000Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
16013446Smrj 	if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
16023446Smrj 		panic("unexpected hati_load_common() failure");
16035084Sjohnlev 	XPV_ALLOW_MIGRATE();
16040Sstevel@tonic-gate }
16050Sstevel@tonic-gate 
16064528Spaulsan /* ARGSUSED */
16074528Spaulsan void
16084528Spaulsan hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
16094528Spaulsan     uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
16104528Spaulsan {
16114528Spaulsan 	hat_memload(hat, addr, pp, attr, flags);
16124528Spaulsan }
16134528Spaulsan 
16140Sstevel@tonic-gate /*
16150Sstevel@tonic-gate  * Load the given array of page structs using large pages when possible
16160Sstevel@tonic-gate  */
16170Sstevel@tonic-gate void
16180Sstevel@tonic-gate hat_memload_array(
16190Sstevel@tonic-gate 	hat_t		*hat,
16200Sstevel@tonic-gate 	caddr_t		addr,
16210Sstevel@tonic-gate 	size_t		len,
16220Sstevel@tonic-gate 	page_t		**pages,
16230Sstevel@tonic-gate 	uint_t		attr,
16240Sstevel@tonic-gate 	uint_t		flags)
16250Sstevel@tonic-gate {
16260Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
16270Sstevel@tonic-gate 	uintptr_t	eaddr = va + len;
16280Sstevel@tonic-gate 	level_t		level;
16290Sstevel@tonic-gate 	size_t		pgsize;
16300Sstevel@tonic-gate 	pgcnt_t		pgindx = 0;
16310Sstevel@tonic-gate 	pfn_t		pfn;
16320Sstevel@tonic-gate 	pgcnt_t		i;
16330Sstevel@tonic-gate 
16345084Sjohnlev 	XPV_DISALLOW_MIGRATE();
16350Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
16363446Smrj 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
16370Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
16380Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
16390Sstevel@tonic-gate 	ASSERT((flags & supported_memload_flags) == flags);
16400Sstevel@tonic-gate 
16410Sstevel@tonic-gate 	/*
16420Sstevel@tonic-gate 	 * memload is used for memory with full caching enabled, so
16430Sstevel@tonic-gate 	 * set HAT_STORECACHING_OK.
16440Sstevel@tonic-gate 	 */
16450Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
16460Sstevel@tonic-gate 
16470Sstevel@tonic-gate 	/*
16480Sstevel@tonic-gate 	 * handle all pages using largest possible pagesize
16490Sstevel@tonic-gate 	 */
16500Sstevel@tonic-gate 	while (va < eaddr) {
16510Sstevel@tonic-gate 		/*
16520Sstevel@tonic-gate 		 * decide what level mapping to use (ie. pagesize)
16530Sstevel@tonic-gate 		 */
16540Sstevel@tonic-gate 		pfn = page_pptonum(pages[pgindx]);
16550Sstevel@tonic-gate 		for (level = mmu.max_page_level; ; --level) {
16560Sstevel@tonic-gate 			pgsize = LEVEL_SIZE(level);
16570Sstevel@tonic-gate 			if (level == 0)
16580Sstevel@tonic-gate 				break;
16593446Smrj 
16600Sstevel@tonic-gate 			if (!IS_P2ALIGNED(va, pgsize) ||
16610Sstevel@tonic-gate 			    (eaddr - va) < pgsize ||
16623446Smrj 			    !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
16630Sstevel@tonic-gate 				continue;
16640Sstevel@tonic-gate 
16650Sstevel@tonic-gate 			/*
16660Sstevel@tonic-gate 			 * To use a large mapping of this size, all the
16670Sstevel@tonic-gate 			 * pages we are passed must be sequential subpages
16680Sstevel@tonic-gate 			 * of the large page.
16690Sstevel@tonic-gate 			 * hat_page_demote() can't change p_szc because
16700Sstevel@tonic-gate 			 * all pages are locked.
16710Sstevel@tonic-gate 			 */
16720Sstevel@tonic-gate 			if (pages[pgindx]->p_szc >= level) {
16730Sstevel@tonic-gate 				for (i = 0; i < mmu_btop(pgsize); ++i) {
16740Sstevel@tonic-gate 					if (pfn + i !=
16750Sstevel@tonic-gate 					    page_pptonum(pages[pgindx + i]))
16760Sstevel@tonic-gate 						break;
16770Sstevel@tonic-gate 					ASSERT(pages[pgindx + i]->p_szc >=
16780Sstevel@tonic-gate 					    level);
16790Sstevel@tonic-gate 					ASSERT(pages[pgindx] + i ==
16800Sstevel@tonic-gate 					    pages[pgindx + i]);
16810Sstevel@tonic-gate 				}
16825349Skchow 				if (i == mmu_btop(pgsize)) {
16835349Skchow #ifdef DEBUG
16845349Skchow 					if (level == 2)
16855349Skchow 						map1gcnt++;
16865349Skchow #endif
16870Sstevel@tonic-gate 					break;
16885349Skchow 				}
16890Sstevel@tonic-gate 			}
16900Sstevel@tonic-gate 		}
16910Sstevel@tonic-gate 
16920Sstevel@tonic-gate 		/*
16933446Smrj 		 * Load this page mapping. If the load fails, try a smaller
16943446Smrj 		 * pagesize.
16950Sstevel@tonic-gate 		 */
16960Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(va));
16973446Smrj 		while (hati_load_common(hat, va, pages[pgindx], attr,
16984381Sjosephb 		    flags, level, pfn) != 0) {
16993446Smrj 			if (level == 0)
17003446Smrj 				panic("unexpected hati_load_common() failure");
17013446Smrj 			--level;
17023446Smrj 			pgsize = LEVEL_SIZE(level);
17033446Smrj 		}
17040Sstevel@tonic-gate 
17050Sstevel@tonic-gate 		/*
17060Sstevel@tonic-gate 		 * move to next page
17070Sstevel@tonic-gate 		 */
17080Sstevel@tonic-gate 		va += pgsize;
17090Sstevel@tonic-gate 		pgindx += mmu_btop(pgsize);
17100Sstevel@tonic-gate 	}
17115084Sjohnlev 	XPV_ALLOW_MIGRATE();
17120Sstevel@tonic-gate }
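
/*
 * For example, assuming 4K base pages and a 2M large page level: if va
 * and pfn_to_pa(pfn) are both 2M aligned, at least 2M of the request
 * remains, and the next 512 page_t's are sequential constituent pages
 * with p_szc >= 1, the loop above installs a single level 1 mapping
 * instead of 512 level 0 ones.
 */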
17130Sstevel@tonic-gate 
17144528Spaulsan /* ARGSUSED */
17154528Spaulsan void
17164528Spaulsan hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
17174528Spaulsan     struct page **pps, uint_t attr, uint_t flags,
17184528Spaulsan     hat_region_cookie_t rcookie)
17194528Spaulsan {
17204528Spaulsan 	hat_memload_array(hat, addr, len, pps, attr, flags);
17214528Spaulsan }
17224528Spaulsan 
17230Sstevel@tonic-gate /*
17240Sstevel@tonic-gate  * void hat_devload(hat, addr, len, pf, attr, flags)
17250Sstevel@tonic-gate  *	load/lock the given page frame number
17260Sstevel@tonic-gate  *
17270Sstevel@tonic-gate  * Advisory ordering attributes. Apply only to device mappings.
17280Sstevel@tonic-gate  *
17290Sstevel@tonic-gate  * HAT_STRICTORDER: the CPU must issue the references in order, as the
17300Sstevel@tonic-gate  *	programmer specified.  This is the default.
17310Sstevel@tonic-gate  * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
17320Sstevel@tonic-gate  *	of reordering; store or load with store or load).
17330Sstevel@tonic-gate  * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
17340Sstevel@tonic-gate  *	to consecutive locations (for example, turn two consecutive byte
17350Sstevel@tonic-gate  *	stores into one halfword store), and it may batch individual loads
17360Sstevel@tonic-gate  *	(for example, turn two consecutive byte loads into one halfword load).
17370Sstevel@tonic-gate  *	This also implies re-ordering.
17380Sstevel@tonic-gate  * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
17390Sstevel@tonic-gate  *	until another store occurs.  The default is to fetch new data
17400Sstevel@tonic-gate  *	on every load.  This also implies merging.
17410Sstevel@tonic-gate  * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
17420Sstevel@tonic-gate  *	the device (perhaps with other data) at a later time.  The default is
17430Sstevel@tonic-gate  *	to push the data right away.  This also implies load caching.
17440Sstevel@tonic-gate  *
17450Sstevel@tonic-gate  * Equivalent of hat_memload(), but can be used for device memory where
17460Sstevel@tonic-gate  * there are no page_t's and we support additional flags (write merging, etc).
17470Sstevel@tonic-gate  * Note that we can have large page mappings with this interface.
17480Sstevel@tonic-gate  */
17490Sstevel@tonic-gate int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
17500Sstevel@tonic-gate 	HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
17510Sstevel@tonic-gate 	HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
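
/*
 * A minimal usage sketch (illustrative only; pfn is assumed to be an
 * MMIO frame, so pf_is_memory() fails and NOCONSIST is forced below):
 *
 *	hat_devload(kas.a_hat, va, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_STRICTORDER, HAT_LOAD_LOCK);
 */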
17520Sstevel@tonic-gate 
17530Sstevel@tonic-gate void
17540Sstevel@tonic-gate hat_devload(
17550Sstevel@tonic-gate 	hat_t		*hat,
17560Sstevel@tonic-gate 	caddr_t		addr,
17570Sstevel@tonic-gate 	size_t		len,
17580Sstevel@tonic-gate 	pfn_t		pfn,
17590Sstevel@tonic-gate 	uint_t		attr,
17600Sstevel@tonic-gate 	int		flags)
17610Sstevel@tonic-gate {
17620Sstevel@tonic-gate 	uintptr_t	va = ALIGN2PAGE(addr);
17630Sstevel@tonic-gate 	uintptr_t	eva = va + len;
17640Sstevel@tonic-gate 	level_t		level;
17650Sstevel@tonic-gate 	size_t		pgsize;
17660Sstevel@tonic-gate 	page_t		*pp;
17670Sstevel@tonic-gate 	int		f;	/* per PTE copy of flags  - maybe modified */
17680Sstevel@tonic-gate 	uint_t		a;	/* per PTE copy of attr */
17690Sstevel@tonic-gate 
17705084Sjohnlev 	XPV_DISALLOW_MIGRATE();
17710Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
17723446Smrj 	ASSERT(hat == kas.a_hat || eva <= _userlimit);
17730Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
17740Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
17750Sstevel@tonic-gate 	ASSERT((flags & supported_devload_flags) == flags);
17760Sstevel@tonic-gate 
17770Sstevel@tonic-gate 	/*
17780Sstevel@tonic-gate 	 * handle all pages
17790Sstevel@tonic-gate 	 */
17800Sstevel@tonic-gate 	while (va < eva) {
17810Sstevel@tonic-gate 
17820Sstevel@tonic-gate 		/*
17830Sstevel@tonic-gate 		 * decide what level mapping to use (ie. pagesize)
17840Sstevel@tonic-gate 		 */
17850Sstevel@tonic-gate 		for (level = mmu.max_page_level; ; --level) {
17860Sstevel@tonic-gate 			pgsize = LEVEL_SIZE(level);
17870Sstevel@tonic-gate 			if (level == 0)
17880Sstevel@tonic-gate 				break;
17890Sstevel@tonic-gate 			if (IS_P2ALIGNED(va, pgsize) &&
17900Sstevel@tonic-gate 			    (eva - va) >= pgsize &&
17915349Skchow 			    IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
17925349Skchow #ifdef DEBUG
17935349Skchow 				if (level == 2)
17945349Skchow 					map1gcnt++;
17955349Skchow #endif
17960Sstevel@tonic-gate 				break;
17975349Skchow 			}
17980Sstevel@tonic-gate 		}
17990Sstevel@tonic-gate 
18000Sstevel@tonic-gate 		/*
18013446Smrj 		 * If this is just memory then allow caching (this happens
18020Sstevel@tonic-gate 		 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
18033446Smrj 		 * to override that. If we don't have a page_t then make sure
18040Sstevel@tonic-gate 		 * NOCONSIST is set.
18050Sstevel@tonic-gate 		 */
18060Sstevel@tonic-gate 		a = attr;
18070Sstevel@tonic-gate 		f = flags;
18085084Sjohnlev 		if (!pf_is_memory(pfn))
18095084Sjohnlev 			f |= HAT_LOAD_NOCONSIST;
18105084Sjohnlev 		else if (!(a & HAT_PLAT_NOCACHE))
18115084Sjohnlev 			a |= HAT_STORECACHING_OK;
18125084Sjohnlev 
18135084Sjohnlev 		if (f & HAT_LOAD_NOCONSIST)
18140Sstevel@tonic-gate 			pp = NULL;
18155084Sjohnlev 		else
18165084Sjohnlev 			pp = page_numtopp_nolock(pfn);
18170Sstevel@tonic-gate 
18180Sstevel@tonic-gate 		/*
18199441SPrakash.Sangappa@Sun.COM 		 * Check to make sure we are really trying to map a valid
18209441SPrakash.Sangappa@Sun.COM 		 * memory page. A caller wishing to intentionally map
18219441SPrakash.Sangappa@Sun.COM 		 * free memory pages will have passed the HAT_LOAD_NOCONSIST
18229441SPrakash.Sangappa@Sun.COM 		 * flag, in which case pp will be NULL.
18239441SPrakash.Sangappa@Sun.COM 		 */
18249441SPrakash.Sangappa@Sun.COM 		if (pp != NULL) {
18259441SPrakash.Sangappa@Sun.COM 			if (PP_ISFREE(pp)) {
18269441SPrakash.Sangappa@Sun.COM 				panic("hat_devload: loading "
18279441SPrakash.Sangappa@Sun.COM 				    "a mapping to free page %p", (void *)pp);
18289441SPrakash.Sangappa@Sun.COM 			}
18299441SPrakash.Sangappa@Sun.COM 
18309441SPrakash.Sangappa@Sun.COM 			if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
18319441SPrakash.Sangappa@Sun.COM 				panic("hat_devload: loading a mapping "
18329441SPrakash.Sangappa@Sun.COM 				    "to an unlocked page %p",
18339441SPrakash.Sangappa@Sun.COM 				    (void *)pp);
18349441SPrakash.Sangappa@Sun.COM 			}
18359441SPrakash.Sangappa@Sun.COM 		}
18369441SPrakash.Sangappa@Sun.COM 
18379441SPrakash.Sangappa@Sun.COM 		/*
18380Sstevel@tonic-gate 		 * load this page mapping
18390Sstevel@tonic-gate 		 */
18400Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(va));
18413446Smrj 		while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
18423446Smrj 			if (level == 0)
18433446Smrj 				panic("unexpected hati_load_common() failure");
18443446Smrj 			--level;
18453446Smrj 			pgsize = LEVEL_SIZE(level);
18463446Smrj 		}
18470Sstevel@tonic-gate 
18480Sstevel@tonic-gate 		/*
18490Sstevel@tonic-gate 		 * move to next page
18500Sstevel@tonic-gate 		 */
18510Sstevel@tonic-gate 		va += pgsize;
18520Sstevel@tonic-gate 		pfn += mmu_btop(pgsize);
18530Sstevel@tonic-gate 	}
18545084Sjohnlev 	XPV_ALLOW_MIGRATE();
18550Sstevel@tonic-gate }
18560Sstevel@tonic-gate 
18570Sstevel@tonic-gate /*
18580Sstevel@tonic-gate  * void hat_unlock(hat, addr, len)
18590Sstevel@tonic-gate  *	unlock the mappings to a given range of addresses
18600Sstevel@tonic-gate  *
18610Sstevel@tonic-gate  * Locks are tracked by ht_lock_cnt in the htable.
18620Sstevel@tonic-gate  */
18630Sstevel@tonic-gate void
18640Sstevel@tonic-gate hat_unlock(hat_t *hat, caddr_t addr, size_t len)
18650Sstevel@tonic-gate {
18660Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
18670Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
18680Sstevel@tonic-gate 	htable_t	*ht = NULL;
18690Sstevel@tonic-gate 
18700Sstevel@tonic-gate 	/*
18710Sstevel@tonic-gate 	 * kernel entries are always locked; we don't track lock counts
18720Sstevel@tonic-gate 	 */
18733446Smrj 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
18740Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
18750Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
18760Sstevel@tonic-gate 	if (hat == kas.a_hat)
18770Sstevel@tonic-gate 		return;
18780Sstevel@tonic-gate 	if (eaddr > _userlimit)
18790Sstevel@tonic-gate 		panic("hat_unlock() address out of range - above _userlimit");
18800Sstevel@tonic-gate 
18815084Sjohnlev 	XPV_DISALLOW_MIGRATE();
18820Sstevel@tonic-gate 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
18830Sstevel@tonic-gate 	while (vaddr < eaddr) {
18840Sstevel@tonic-gate 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
18850Sstevel@tonic-gate 		if (ht == NULL)
18860Sstevel@tonic-gate 			break;
18870Sstevel@tonic-gate 
18880Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
18890Sstevel@tonic-gate 
18900Sstevel@tonic-gate 		if (ht->ht_lock_cnt < 1)
18910Sstevel@tonic-gate 			panic("hat_unlock(): lock_cnt < 1, "
18927240Srh87107 			    "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
18930Sstevel@tonic-gate 		HTABLE_LOCK_DEC(ht);
18940Sstevel@tonic-gate 
18950Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
18960Sstevel@tonic-gate 	}
18970Sstevel@tonic-gate 	if (ht)
18980Sstevel@tonic-gate 		htable_release(ht);
18995084Sjohnlev 	XPV_ALLOW_MIGRATE();
19000Sstevel@tonic-gate }
19010Sstevel@tonic-gate 
19024528Spaulsan /* ARGSUSED */
19034528Spaulsan void
19045075Spaulsan hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
19054528Spaulsan     hat_region_cookie_t rcookie)
19064528Spaulsan {
19074528Spaulsan 	panic("No shared region support on x86");
19084528Spaulsan }
19094528Spaulsan 
19105084Sjohnlev #if !defined(__xpv)
19110Sstevel@tonic-gate /*
19120Sstevel@tonic-gate  * Cross call service routine to demap a virtual page on
19130Sstevel@tonic-gate  * the current CPU or flush all mappings in the TLB.
19140Sstevel@tonic-gate  */
19150Sstevel@tonic-gate /*ARGSUSED*/
19160Sstevel@tonic-gate static int
19170Sstevel@tonic-gate hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
19180Sstevel@tonic-gate {
19190Sstevel@tonic-gate 	hat_t	*hat = (hat_t *)a1;
19200Sstevel@tonic-gate 	caddr_t	addr = (caddr_t)a2;
19210Sstevel@tonic-gate 
19220Sstevel@tonic-gate 	/*
19230Sstevel@tonic-gate 	 * If the target hat isn't the kernel and this CPU isn't operating
19240Sstevel@tonic-gate 	 * in the target hat, we can ignore the cross call.
19250Sstevel@tonic-gate 	 */
19260Sstevel@tonic-gate 	if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
19270Sstevel@tonic-gate 		return (0);
19280Sstevel@tonic-gate 
19290Sstevel@tonic-gate 	/*
19300Sstevel@tonic-gate 	 * For a normal address, we just flush one page mapping
19310Sstevel@tonic-gate 	 */
19320Sstevel@tonic-gate 	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
19333446Smrj 		mmu_tlbflush_entry(addr);
19340Sstevel@tonic-gate 		return (0);
19350Sstevel@tonic-gate 	}
19360Sstevel@tonic-gate 
19370Sstevel@tonic-gate 	/*
19380Sstevel@tonic-gate 	 * Otherwise we reload cr3 to effect a complete TLB flush.
19390Sstevel@tonic-gate 	 *
19400Sstevel@tonic-gate 	 * A reload of cr3 on a VLP process also means we must recopy
19410Sstevel@tonic-gate 	 * the pte values from the struct hat.
19420Sstevel@tonic-gate 	 */
19430Sstevel@tonic-gate 	if (hat->hat_flags & HAT_VLP) {
19440Sstevel@tonic-gate #if defined(__amd64)
19450Sstevel@tonic-gate 		x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
19460Sstevel@tonic-gate 
19470Sstevel@tonic-gate 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
19480Sstevel@tonic-gate #elif defined(__i386)
19490Sstevel@tonic-gate 		reload_pae32(hat, CPU);
19500Sstevel@tonic-gate #endif
19510Sstevel@tonic-gate 	}
19520Sstevel@tonic-gate 	reload_cr3();
19530Sstevel@tonic-gate 	return (0);
19540Sstevel@tonic-gate }
19550Sstevel@tonic-gate 
19560Sstevel@tonic-gate /*
19574191Sjosephb  * Flush all TLB entries, including global (ie. kernel) ones.
19584191Sjosephb  */
19594191Sjosephb static void
19604191Sjosephb flush_all_tlb_entries(void)
19614191Sjosephb {
19624191Sjosephb 	ulong_t cr4 = getcr4();
19634191Sjosephb 
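	/*
	 * Toggling CR4.PGE works because global (mmu.pt_global) entries
	 * survive an ordinary cr3 reload; clearing and then restoring PGE
	 * invalidates all TLB entries, the global ones included.
	 */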
19644191Sjosephb 	if (cr4 & CR4_PGE) {
19654191Sjosephb 		setcr4(cr4 & ~(ulong_t)CR4_PGE);
19664191Sjosephb 		setcr4(cr4);
19674191Sjosephb 
19684191Sjosephb 		/*
19694191Sjosephb 		 * 32 bit PAE also needs to always reload_cr3()
19704191Sjosephb 		 */
19714191Sjosephb 		if (mmu.max_level == 2)
19724191Sjosephb 			reload_cr3();
19734191Sjosephb 	} else {
19744191Sjosephb 		reload_cr3();
19754191Sjosephb 	}
19764191Sjosephb }
19774191Sjosephb 
19784191Sjosephb #define	TLB_CPU_HALTED	(01ul)
19794191Sjosephb #define	TLB_INVAL_ALL	(02ul)
19804191Sjosephb #define	CAS_TLB_INFO(cpu, old, new)	\
19814191Sjosephb 	caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
19824191Sjosephb 
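
/*
 * A sketch of the delayed-flush protocol built on these bits (see
 * tlb_going_idle()/tlb_service() below and hat_tlb_inval()):
 *
 *	idle CPU:	ORs TLB_CPU_HALTED into its mcpu_tlb_info
 *	shooter:	CAS_TLB_INFO(cpu, HALTED, HALTED | INVAL_ALL)
 *			instead of sending a cross call
 *	waking CPU:	CASes mcpu_tlb_info back to 0 and, if
 *			TLB_INVAL_ALL was set, flushes its whole TLB
 */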
19834191Sjosephb /*
19844191Sjosephb  * Record that a CPU is going idle
19854191Sjosephb  */
19864191Sjosephb void
19874191Sjosephb tlb_going_idle(void)
19884191Sjosephb {
19894191Sjosephb 	atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
19904191Sjosephb }
19914191Sjosephb 
19924191Sjosephb /*
19934191Sjosephb  * Service a delayed TLB flush if coming out of being idle.
19944191Sjosephb  */
19954191Sjosephb void
19964191Sjosephb tlb_service(void)
19974191Sjosephb {
1998*9894SPavel.Tatashin@Sun.COM 	ulong_t flags = getflags();
19994191Sjosephb 	ulong_t tlb_info;
20004191Sjosephb 	ulong_t found;
20014191Sjosephb 
20024191Sjosephb 	/*
2003*9894SPavel.Tatashin@Sun.COM 	 * Be sure interrupts are off while doing this so that
2004*9894SPavel.Tatashin@Sun.COM 	 * higher level interrupts correctly wait for flushes to finish.
2005*9894SPavel.Tatashin@Sun.COM 	 */
2006*9894SPavel.Tatashin@Sun.COM 	if (flags & PS_IE)
2007*9894SPavel.Tatashin@Sun.COM 		flags = intr_clear();
2008*9894SPavel.Tatashin@Sun.COM 
2009*9894SPavel.Tatashin@Sun.COM 	/*
20104191Sjosephb 	 * We only have to do something if coming out of being idle.
20114191Sjosephb 	 */
20124191Sjosephb 	tlb_info = CPU->cpu_m.mcpu_tlb_info;
20134191Sjosephb 	if (tlb_info & TLB_CPU_HALTED) {
20144191Sjosephb 		ASSERT(CPU->cpu_current_hat == kas.a_hat);
20154191Sjosephb 
20164191Sjosephb 		/*
20174191Sjosephb 		 * Atomic clear and fetch of old state.
20184191Sjosephb 		 */
20194191Sjosephb 		while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
20204191Sjosephb 			ASSERT(found & TLB_CPU_HALTED);
20214191Sjosephb 			tlb_info = found;
20224191Sjosephb 			SMT_PAUSE();
20234191Sjosephb 		}
20244191Sjosephb 		if (tlb_info & TLB_INVAL_ALL)
20254191Sjosephb 			flush_all_tlb_entries();
20264191Sjosephb 	}
2027*9894SPavel.Tatashin@Sun.COM 
2028*9894SPavel.Tatashin@Sun.COM 	/*
2029*9894SPavel.Tatashin@Sun.COM 	 * Restore interrupt enable control bit.
2030*9894SPavel.Tatashin@Sun.COM 	 */
2031*9894SPavel.Tatashin@Sun.COM 	if (flags & PS_IE)
2032*9894SPavel.Tatashin@Sun.COM 		sti();
20334191Sjosephb }
20345084Sjohnlev #endif /* !__xpv */
20354191Sjosephb 
20364191Sjosephb /*
20370Sstevel@tonic-gate  * Internal routine to do cross calls to invalidate a range of pages on
20380Sstevel@tonic-gate  * all CPUs using a given hat.
20390Sstevel@tonic-gate  */
20400Sstevel@tonic-gate void
20413446Smrj hat_tlb_inval(hat_t *hat, uintptr_t va)
20420Sstevel@tonic-gate {
20430Sstevel@tonic-gate 	extern int	flushes_require_xcalls;	/* from mp_startup.c */
20440Sstevel@tonic-gate 	cpuset_t	justme;
20455084Sjohnlev 	cpuset_t	cpus_to_shootdown;
20465084Sjohnlev #ifndef __xpv
20474191Sjosephb 	cpuset_t	check_cpus;
20484191Sjosephb 	cpu_t		*cpup;
20494191Sjosephb 	int		c;
20505084Sjohnlev #endif
20510Sstevel@tonic-gate 
20520Sstevel@tonic-gate 	/*
20530Sstevel@tonic-gate 	 * If the hat is being destroyed, there are no more users, so
20540Sstevel@tonic-gate 	 * demap need not do anything.
20550Sstevel@tonic-gate 	 */
20560Sstevel@tonic-gate 	if (hat->hat_flags & HAT_FREEING)
20570Sstevel@tonic-gate 		return;
20580Sstevel@tonic-gate 
20590Sstevel@tonic-gate 	/*
20600Sstevel@tonic-gate 	 * If demapping from a shared pagetable, it's safest to demap the
20610Sstevel@tonic-gate 	 * entire set of user TLBs, since we don't know at what addresses
20620Sstevel@tonic-gate 	 * these mappings were shared.
20630Sstevel@tonic-gate 	 */
20640Sstevel@tonic-gate 	if (hat->hat_flags & HAT_SHARED) {
20650Sstevel@tonic-gate 		hat = kas.a_hat;
20660Sstevel@tonic-gate 		va = DEMAP_ALL_ADDR;
20670Sstevel@tonic-gate 	}
20680Sstevel@tonic-gate 
20690Sstevel@tonic-gate 	/*
20700Sstevel@tonic-gate 	 * if not running with multiple CPUs, don't use cross calls
20710Sstevel@tonic-gate 	 */
20720Sstevel@tonic-gate 	if (panicstr || !flushes_require_xcalls) {
20735084Sjohnlev #ifdef __xpv
20745084Sjohnlev 		if (va == DEMAP_ALL_ADDR)
20755084Sjohnlev 			xen_flush_tlb();
20765084Sjohnlev 		else
20775084Sjohnlev 			xen_flush_va((caddr_t)va);
20785084Sjohnlev #else
20790Sstevel@tonic-gate 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
20805084Sjohnlev #endif
20810Sstevel@tonic-gate 		return;
20820Sstevel@tonic-gate 	}
20830Sstevel@tonic-gate 
20840Sstevel@tonic-gate 
20850Sstevel@tonic-gate 	/*
20863446Smrj 	 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
20873446Smrj 	 * Otherwise it's just CPUs currently executing in this hat.
20880Sstevel@tonic-gate 	 */
20890Sstevel@tonic-gate 	kpreempt_disable();
20900Sstevel@tonic-gate 	CPUSET_ONLY(justme, CPU->cpu_id);
20913446Smrj 	if (hat == kas.a_hat)
20923446Smrj 		cpus_to_shootdown = khat_cpuset;
20930Sstevel@tonic-gate 	else
20943446Smrj 		cpus_to_shootdown = hat->hat_cpus;
20953446Smrj 
20965084Sjohnlev #ifndef __xpv
20974191Sjosephb 	/*
20984191Sjosephb 	 * If any CPUs in the set are idle, just request a delayed flush
20994191Sjosephb 	 * and avoid waking them up.
21004191Sjosephb 	 */
21014191Sjosephb 	check_cpus = cpus_to_shootdown;
21024191Sjosephb 	for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
21034191Sjosephb 		ulong_t tlb_info;
21044191Sjosephb 
21054191Sjosephb 		if (!CPU_IN_SET(check_cpus, c))
21064191Sjosephb 			continue;
21074191Sjosephb 		CPUSET_DEL(check_cpus, c);
21084191Sjosephb 		cpup = cpu[c];
21094191Sjosephb 		if (cpup == NULL)
21104191Sjosephb 			continue;
21114191Sjosephb 
21124191Sjosephb 		tlb_info = cpup->cpu_m.mcpu_tlb_info;
21134191Sjosephb 		while (tlb_info == TLB_CPU_HALTED) {
21144191Sjosephb 			(void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
21154381Sjosephb 			    TLB_CPU_HALTED | TLB_INVAL_ALL);
21164191Sjosephb 			SMT_PAUSE();
21174191Sjosephb 			tlb_info = cpup->cpu_m.mcpu_tlb_info;
21184191Sjosephb 		}
21194191Sjosephb 		if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
21204191Sjosephb 			HATSTAT_INC(hs_tlb_inval_delayed);
21214191Sjosephb 			CPUSET_DEL(cpus_to_shootdown, c);
21224191Sjosephb 		}
21234191Sjosephb 	}
21245084Sjohnlev #endif
21254191Sjosephb 
21263446Smrj 	if (CPUSET_ISNULL(cpus_to_shootdown) ||
21273446Smrj 	    CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
21283446Smrj 
21295084Sjohnlev #ifdef __xpv
21305084Sjohnlev 		if (va == DEMAP_ALL_ADDR)
21315084Sjohnlev 			xen_flush_tlb();
21325084Sjohnlev 		else
21335084Sjohnlev 			xen_flush_va((caddr_t)va);
21345084Sjohnlev #else
21353446Smrj 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
21365084Sjohnlev #endif
21373446Smrj 
21383446Smrj 	} else {
21393446Smrj 
21403446Smrj 		CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
21415084Sjohnlev #ifdef __xpv
21425084Sjohnlev 		if (va == DEMAP_ALL_ADDR)
21435084Sjohnlev 			xen_gflush_tlb(cpus_to_shootdown);
21445084Sjohnlev 		else
21455084Sjohnlev 			xen_gflush_va((caddr_t)va, cpus_to_shootdown);
21465084Sjohnlev #else
21479489SJoe.Bonasera@sun.com 		xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL,
21489489SJoe.Bonasera@sun.com 		    CPUSET2BV(cpus_to_shootdown), hati_demap_func);
21495084Sjohnlev #endif
21503446Smrj 
21513446Smrj 	}
21520Sstevel@tonic-gate 	kpreempt_enable();
21530Sstevel@tonic-gate }
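
/*
 * To summarize the shootdown policy above: kernel hat changes flush every
 * CPU (khat_cpuset), user hat changes flush only CPUs currently running
 * in that hat (hat_cpus), halted CPUs get a delayed TLB_INVAL_ALL instead
 * of a wakeup, and if only the current CPU remains the flush is done
 * locally without any cross call.
 */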
21540Sstevel@tonic-gate 
21550Sstevel@tonic-gate /*
21560Sstevel@tonic-gate  * Interior routine for HAT_UNLOADs from hat_unload_callback(),
21570Sstevel@tonic-gate  * hat_kmap_unload() OR from hat_steal() code.  This routine doesn't
21580Sstevel@tonic-gate  * handle releasing of the htables.
21590Sstevel@tonic-gate  */
21600Sstevel@tonic-gate void
21610Sstevel@tonic-gate hat_pte_unmap(
21620Sstevel@tonic-gate 	htable_t	*ht,
21630Sstevel@tonic-gate 	uint_t		entry,
21640Sstevel@tonic-gate 	uint_t		flags,
21650Sstevel@tonic-gate 	x86pte_t	old_pte,
21660Sstevel@tonic-gate 	void		*pte_ptr)
21670Sstevel@tonic-gate {
21680Sstevel@tonic-gate 	hat_t		*hat = ht->ht_hat;
21690Sstevel@tonic-gate 	hment_t		*hm = NULL;
21700Sstevel@tonic-gate 	page_t		*pp = NULL;
21710Sstevel@tonic-gate 	level_t		l = ht->ht_level;
21720Sstevel@tonic-gate 	pfn_t		pfn;
21730Sstevel@tonic-gate 
21740Sstevel@tonic-gate 	/*
21750Sstevel@tonic-gate 	 * We always track the locking counts, even if nothing is unmapped
21760Sstevel@tonic-gate 	 */
21770Sstevel@tonic-gate 	if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
21780Sstevel@tonic-gate 		ASSERT(ht->ht_lock_cnt > 0);
21790Sstevel@tonic-gate 		HTABLE_LOCK_DEC(ht);
21800Sstevel@tonic-gate 	}
21810Sstevel@tonic-gate 
21820Sstevel@tonic-gate 	/*
21830Sstevel@tonic-gate 	 * Figure out which page's mapping list lock to acquire using the PFN
21840Sstevel@tonic-gate 	 * passed in the "old" PTE. We then attempt to invalidate the PTE.
21850Sstevel@tonic-gate 	 * If another thread, probably a hat_pageunload, has asynchronously
21860Sstevel@tonic-gate 	 * unmapped/remapped this address we'll loop here.
21870Sstevel@tonic-gate 	 */
21880Sstevel@tonic-gate 	ASSERT(ht->ht_busy > 0);
21890Sstevel@tonic-gate 	while (PTE_ISVALID(old_pte)) {
21900Sstevel@tonic-gate 		pfn = PTE2PFN(old_pte, l);
21913446Smrj 		if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
21920Sstevel@tonic-gate 			pp = NULL;
21930Sstevel@tonic-gate 		} else {
21945084Sjohnlev #ifdef __xpv
21955084Sjohnlev 			if (pfn == PFN_INVALID)
21965084Sjohnlev 				panic("Invalid PFN, but not PT_NOCONSIST");
21975084Sjohnlev #endif
21980Sstevel@tonic-gate 			pp = page_numtopp_nolock(pfn);
219947Sjosephb 			if (pp == NULL) {
220047Sjosephb 				panic("no page_t, not NOCONSIST: old_pte="
220147Sjosephb 				    FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
220247Sjosephb 				    old_pte, (uintptr_t)ht, entry,
220347Sjosephb 				    (uintptr_t)pte_ptr);
220447Sjosephb 			}
22050Sstevel@tonic-gate 			x86_hm_enter(pp);
22060Sstevel@tonic-gate 		}
220747Sjosephb 
220847Sjosephb 		/*
220947Sjosephb 		 * If freeing the address space, check that the PTE
221047Sjosephb 		 * hasn't changed; since the mappings are no longer in use
221147Sjosephb 		 * by any thread, invalidation is unnecessary.
221247Sjosephb 		 * If not freeing, do a full invalidate.
22135084Sjohnlev 		 *
22145084Sjohnlev 		 * On the hypervisor we must always remove mappings, as a
22155084Sjohnlev 		 * writable mapping left behind could cause a page table
22165084Sjohnlev 		 * allocation to fail.
221747Sjosephb 		 */
22185084Sjohnlev #if !defined(__xpv)
221947Sjosephb 		if (hat->hat_flags & HAT_FREEING)
222047Sjosephb 			old_pte = x86pte_get(ht, entry);
222147Sjosephb 		else
22225084Sjohnlev #endif
22233446Smrj 			old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr);
22240Sstevel@tonic-gate 
22250Sstevel@tonic-gate 		/*
22260Sstevel@tonic-gate 		 * If the page hadn't changed we've unmapped it and can proceed
22270Sstevel@tonic-gate 		 */
22280Sstevel@tonic-gate 		if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
22290Sstevel@tonic-gate 			break;
22300Sstevel@tonic-gate 
22310Sstevel@tonic-gate 		/*
22320Sstevel@tonic-gate 		 * Otherwise, we'll have to retry with the current old_pte.
22330Sstevel@tonic-gate 		 * Drop the hment lock, since the pfn may have changed.
22340Sstevel@tonic-gate 		 */
22350Sstevel@tonic-gate 		if (pp != NULL) {
22360Sstevel@tonic-gate 			x86_hm_exit(pp);
22370Sstevel@tonic-gate 			pp = NULL;
22380Sstevel@tonic-gate 		} else {
22393446Smrj 			ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
22400Sstevel@tonic-gate 		}
22410Sstevel@tonic-gate 	}
22420Sstevel@tonic-gate 
22430Sstevel@tonic-gate 	/*
22440Sstevel@tonic-gate 	 * If the old mapping wasn't valid, there's nothing more to do
22450Sstevel@tonic-gate 	 */
22460Sstevel@tonic-gate 	if (!PTE_ISVALID(old_pte)) {
22470Sstevel@tonic-gate 		if (pp != NULL)
22480Sstevel@tonic-gate 			x86_hm_exit(pp);
22490Sstevel@tonic-gate 		return;
22500Sstevel@tonic-gate 	}
22510Sstevel@tonic-gate 
22520Sstevel@tonic-gate 	/*
22530Sstevel@tonic-gate 	 * Take care of syncing any MOD/REF bits and removing the hment.
22540Sstevel@tonic-gate 	 */
22550Sstevel@tonic-gate 	if (pp != NULL) {
22560Sstevel@tonic-gate 		if (!(flags & HAT_UNLOAD_NOSYNC))
22570Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, old_pte, l);
22580Sstevel@tonic-gate 		hm = hment_remove(pp, ht, entry);
22590Sstevel@tonic-gate 		x86_hm_exit(pp);
22600Sstevel@tonic-gate 		if (hm != NULL)
22610Sstevel@tonic-gate 			hment_free(hm);
22620Sstevel@tonic-gate 	}
22630Sstevel@tonic-gate 
22640Sstevel@tonic-gate 	/*
22650Sstevel@tonic-gate 	 * Handle bookkeeping in the htable and hat
22660Sstevel@tonic-gate 	 */
22670Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
22680Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
22690Sstevel@tonic-gate 	PGCNT_DEC(hat, l);
22700Sstevel@tonic-gate }
22710Sstevel@tonic-gate 
22720Sstevel@tonic-gate /*
22730Sstevel@tonic-gate  * very cheap unload implementation to special-case some kernel addresses
22740Sstevel@tonic-gate  */
22750Sstevel@tonic-gate static void
22760Sstevel@tonic-gate hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
22770Sstevel@tonic-gate {
22780Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
22790Sstevel@tonic-gate 	uintptr_t	eva = va + len;
22803446Smrj 	pgcnt_t		pg_index;
22810Sstevel@tonic-gate 	htable_t	*ht;
22820Sstevel@tonic-gate 	uint_t		entry;
22833446Smrj 	x86pte_t	*pte_ptr;
22840Sstevel@tonic-gate 	x86pte_t	old_pte;
22850Sstevel@tonic-gate 
22860Sstevel@tonic-gate 	for (; va < eva; va += MMU_PAGESIZE) {
22870Sstevel@tonic-gate 		/*
22880Sstevel@tonic-gate 		 * Get the PTE
22890Sstevel@tonic-gate 		 */
22903446Smrj 		pg_index = mmu_btop(va - mmu.kmap_addr);
22913446Smrj 		pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
22923446Smrj 		old_pte = GET_PTE(pte_ptr);
22930Sstevel@tonic-gate 
22940Sstevel@tonic-gate 		/*
22950Sstevel@tonic-gate 		 * get the htable / entry
22960Sstevel@tonic-gate 		 */
22970Sstevel@tonic-gate 		ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
22980Sstevel@tonic-gate 		    >> LEVEL_SHIFT(1)];
22990Sstevel@tonic-gate 		entry = htable_va2entry(va, ht);
23000Sstevel@tonic-gate 
23010Sstevel@tonic-gate 		/*
23020Sstevel@tonic-gate 		 * use mostly common code to unmap it.
23030Sstevel@tonic-gate 		 */
23040Sstevel@tonic-gate 		hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr);
23050Sstevel@tonic-gate 	}
23060Sstevel@tonic-gate }
23070Sstevel@tonic-gate 
23080Sstevel@tonic-gate 
23090Sstevel@tonic-gate /*
23100Sstevel@tonic-gate  * unload a range of virtual address space (no callback)
23110Sstevel@tonic-gate  */
23120Sstevel@tonic-gate void
23130Sstevel@tonic-gate hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
23140Sstevel@tonic-gate {
23150Sstevel@tonic-gate 	uintptr_t va = (uintptr_t)addr;
23163446Smrj 
23175084Sjohnlev 	XPV_DISALLOW_MIGRATE();
23183446Smrj 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
23190Sstevel@tonic-gate 
23200Sstevel@tonic-gate 	/*
23210Sstevel@tonic-gate 	 * special case for performance.
23220Sstevel@tonic-gate 	 */
23230Sstevel@tonic-gate 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
23240Sstevel@tonic-gate 		ASSERT(hat == kas.a_hat);
23250Sstevel@tonic-gate 		hat_kmap_unload(addr, len, flags);
23263446Smrj 	} else {
23273446Smrj 		hat_unload_callback(hat, addr, len, flags, NULL);
23280Sstevel@tonic-gate 	}
23295084Sjohnlev 	XPV_ALLOW_MIGRATE();
23300Sstevel@tonic-gate }
23310Sstevel@tonic-gate 
23320Sstevel@tonic-gate /*
23330Sstevel@tonic-gate  * Do the callbacks for ranges being unloaded.
23340Sstevel@tonic-gate  */
23350Sstevel@tonic-gate typedef struct range_info {
23360Sstevel@tonic-gate 	uintptr_t	rng_va;
23370Sstevel@tonic-gate 	ulong_t		rng_cnt;
23380Sstevel@tonic-gate 	level_t		rng_level;
23390Sstevel@tonic-gate } range_info_t;
23400Sstevel@tonic-gate 
23410Sstevel@tonic-gate static void
23420Sstevel@tonic-gate handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
23430Sstevel@tonic-gate {
23440Sstevel@tonic-gate 	/*
23450Sstevel@tonic-gate 	 * do callbacks to upper level VM system
23460Sstevel@tonic-gate 	 */
23470Sstevel@tonic-gate 	while (cb != NULL && cnt > 0) {
23480Sstevel@tonic-gate 		--cnt;
23490Sstevel@tonic-gate 		cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
23500Sstevel@tonic-gate 		cb->hcb_end_addr = cb->hcb_start_addr;
23510Sstevel@tonic-gate 		cb->hcb_end_addr +=
23520Sstevel@tonic-gate 		    range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
23530Sstevel@tonic-gate 		cb->hcb_function(cb);
23540Sstevel@tonic-gate 	}
23550Sstevel@tonic-gate }
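
/*
 * For example, a range entry { rng_va, rng_cnt = 3, rng_level = 1 }
 * yields one callback with hcb_start_addr == rng_va and
 * hcb_end_addr == rng_va + (3 << LEVEL_SHIFT(1)), i.e. three large pages.
 */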
23560Sstevel@tonic-gate 
23570Sstevel@tonic-gate /*
23580Sstevel@tonic-gate  * Unload a given range of addresses (has optional callback)
23590Sstevel@tonic-gate  *
23600Sstevel@tonic-gate  * Flags:
23610Sstevel@tonic-gate  * define	HAT_UNLOAD		0x00
23620Sstevel@tonic-gate  * define	HAT_UNLOAD_NOSYNC	0x02
23630Sstevel@tonic-gate  * define	HAT_UNLOAD_UNLOCK	0x04
23640Sstevel@tonic-gate  * define	HAT_UNLOAD_OTHER	0x08 - not used
23650Sstevel@tonic-gate  * define	HAT_UNLOAD_UNMAP	0x10 - same as HAT_UNLOAD
23660Sstevel@tonic-gate  */
23670Sstevel@tonic-gate #define	MAX_UNLOAD_CNT (8)
23680Sstevel@tonic-gate void
23690Sstevel@tonic-gate hat_unload_callback(
23700Sstevel@tonic-gate 	hat_t		*hat,
23710Sstevel@tonic-gate 	caddr_t		addr,
23720Sstevel@tonic-gate 	size_t		len,
23730Sstevel@tonic-gate 	uint_t		flags,
23740Sstevel@tonic-gate 	hat_callback_t	*cb)
23750Sstevel@tonic-gate {
23760Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
23770Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
23780Sstevel@tonic-gate 	htable_t	*ht = NULL;
23790Sstevel@tonic-gate 	uint_t		entry;
238047Sjosephb 	uintptr_t	contig_va = (uintptr_t)-1L;
23810Sstevel@tonic-gate 	range_info_t	r[MAX_UNLOAD_CNT];
23820Sstevel@tonic-gate 	uint_t		r_cnt = 0;
23830Sstevel@tonic-gate 	x86pte_t	old_pte;
23840Sstevel@tonic-gate 
23855084Sjohnlev 	XPV_DISALLOW_MIGRATE();
23863446Smrj 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
23870Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
23880Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
23890Sstevel@tonic-gate 
23903446Smrj 	/*
23913446Smrj 	 * Special case a single page being unloaded for speed. This happens
23923446Smrj 	 * quite frequently; for example, COW faults after a fork().
23933446Smrj 	 */
23943446Smrj 	if (cb == NULL && len == MMU_PAGESIZE) {
23953446Smrj 		ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
23963446Smrj 		if (ht != NULL) {
23973446Smrj 			if (PTE_ISVALID(old_pte))
23983446Smrj 				hat_pte_unmap(ht, entry, flags, old_pte, NULL);
23993446Smrj 			htable_release(ht);
24003446Smrj 		}
24015084Sjohnlev 		XPV_ALLOW_MIGRATE();
24023446Smrj 		return;
24033446Smrj 	}
24043446Smrj 
24050Sstevel@tonic-gate 	while (vaddr < eaddr) {
24060Sstevel@tonic-gate 		old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
24070Sstevel@tonic-gate 		if (ht == NULL)
24080Sstevel@tonic-gate 			break;
24090Sstevel@tonic-gate 
24100Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
24110Sstevel@tonic-gate 
24120Sstevel@tonic-gate 		if (vaddr < (uintptr_t)addr)
24130Sstevel@tonic-gate 			panic("hat_unload_callback(): unmap inside large page");
24140Sstevel@tonic-gate 
24150Sstevel@tonic-gate 		/*
24160Sstevel@tonic-gate 		 * We'll do the call backs for contiguous ranges
24160Sstevel@tonic-gate 		 * We'll do the callbacks for contiguous ranges
241847Sjosephb 		if (vaddr != contig_va ||
24190Sstevel@tonic-gate 		    (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
24200Sstevel@tonic-gate 			if (r_cnt == MAX_UNLOAD_CNT) {
24210Sstevel@tonic-gate 				handle_ranges(cb, r_cnt, r);
24220Sstevel@tonic-gate 				r_cnt = 0;
24230Sstevel@tonic-gate 			}
24240Sstevel@tonic-gate 			r[r_cnt].rng_va = vaddr;
24250Sstevel@tonic-gate 			r[r_cnt].rng_cnt = 0;
24260Sstevel@tonic-gate 			r[r_cnt].rng_level = ht->ht_level;
24270Sstevel@tonic-gate 			++r_cnt;
24280Sstevel@tonic-gate 		}
24290Sstevel@tonic-gate 
24300Sstevel@tonic-gate 		/*
24310Sstevel@tonic-gate 		 * Unload one mapping from the page tables.
24320Sstevel@tonic-gate 		 */
24330Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
24340Sstevel@tonic-gate 		hat_pte_unmap(ht, entry, flags, old_pte, NULL);
24350Sstevel@tonic-gate 		ASSERT(ht->ht_level <= mmu.max_page_level);
24360Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
243747Sjosephb 		contig_va = vaddr;
24380Sstevel@tonic-gate 		++r[r_cnt - 1].rng_cnt;
24390Sstevel@tonic-gate 	}
24400Sstevel@tonic-gate 	if (ht)
24410Sstevel@tonic-gate 		htable_release(ht);
24420Sstevel@tonic-gate 
24430Sstevel@tonic-gate 	/*
24440Sstevel@tonic-gate 	 * handle last range for callbacks
24450Sstevel@tonic-gate 	 */
24460Sstevel@tonic-gate 	if (r_cnt > 0)
24470Sstevel@tonic-gate 		handle_ranges(cb, r_cnt, r);
24485084Sjohnlev 	XPV_ALLOW_MIGRATE();
24490Sstevel@tonic-gate }
24500Sstevel@tonic-gate 
24510Sstevel@tonic-gate /*
24520Sstevel@tonic-gate  * synchronize mapping with software data structures
24530Sstevel@tonic-gate  *
24540Sstevel@tonic-gate  * This interface is currently only used by the working set monitor
24550Sstevel@tonic-gate  * driver.
24560Sstevel@tonic-gate  */
24570Sstevel@tonic-gate /*ARGSUSED*/
24580Sstevel@tonic-gate void
24590Sstevel@tonic-gate hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
24600Sstevel@tonic-gate {
24610Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
24620Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
24630Sstevel@tonic-gate 	htable_t	*ht = NULL;
24640Sstevel@tonic-gate 	uint_t		entry;
24650Sstevel@tonic-gate 	x86pte_t	pte;
24660Sstevel@tonic-gate 	x86pte_t	save_pte;
24670Sstevel@tonic-gate 	x86pte_t	new;
24680Sstevel@tonic-gate 	page_t		*pp;
24690Sstevel@tonic-gate 
24700Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(vaddr));
24710Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
24720Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
24733446Smrj 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
24740Sstevel@tonic-gate 
24755084Sjohnlev 	XPV_DISALLOW_MIGRATE();
24760Sstevel@tonic-gate 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
24770Sstevel@tonic-gate try_again:
24780Sstevel@tonic-gate 		pte = htable_walk(hat, &ht, &vaddr, eaddr);
24790Sstevel@tonic-gate 		if (ht == NULL)
24800Sstevel@tonic-gate 			break;
24810Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
24820Sstevel@tonic-gate 
24833446Smrj 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
24840Sstevel@tonic-gate 		    PTE_GET(pte, PT_REF | PT_MOD) == 0)
24850Sstevel@tonic-gate 			continue;
24860Sstevel@tonic-gate 
24870Sstevel@tonic-gate 		/*
24880Sstevel@tonic-gate 		 * We need to acquire the mapping list lock to protect
24890Sstevel@tonic-gate 		 * against hat_pageunload(), hat_unload(), etc.
24900Sstevel@tonic-gate 		 */
24910Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
24920Sstevel@tonic-gate 		if (pp == NULL)
24930Sstevel@tonic-gate 			break;
24940Sstevel@tonic-gate 		x86_hm_enter(pp);
24950Sstevel@tonic-gate 		save_pte = pte;
24960Sstevel@tonic-gate 		pte = x86pte_get(ht, entry);
24970Sstevel@tonic-gate 		if (pte != save_pte) {
24980Sstevel@tonic-gate 			x86_hm_exit(pp);
24990Sstevel@tonic-gate 			goto try_again;
25000Sstevel@tonic-gate 		}
25013446Smrj 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
25020Sstevel@tonic-gate 		    PTE_GET(pte, PT_REF | PT_MOD) == 0) {
25030Sstevel@tonic-gate 			x86_hm_exit(pp);
25040Sstevel@tonic-gate 			continue;
25050Sstevel@tonic-gate 		}
25060Sstevel@tonic-gate 
25070Sstevel@tonic-gate 		/*
25080Sstevel@tonic-gate 		 * Need to clear ref or mod bits. We may compete with
25090Sstevel@tonic-gate 		 * hardware updating the R/M bits and have to try again.
25100Sstevel@tonic-gate 		 */
25110Sstevel@tonic-gate 		if (flags == HAT_SYNC_ZERORM) {
25120Sstevel@tonic-gate 			new = pte;
25130Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD);
25140Sstevel@tonic-gate 			pte = hati_update_pte(ht, entry, pte, new);
25150Sstevel@tonic-gate 			if (pte != 0) {
25160Sstevel@tonic-gate 				x86_hm_exit(pp);
25170Sstevel@tonic-gate 				goto try_again;
25180Sstevel@tonic-gate 			}
25190Sstevel@tonic-gate 		} else {
25200Sstevel@tonic-gate 			/*
25210Sstevel@tonic-gate 			 * sync the PTE to the page_t
25220Sstevel@tonic-gate 			 */
25230Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
25240Sstevel@tonic-gate 		}
25250Sstevel@tonic-gate 		x86_hm_exit(pp);
25260Sstevel@tonic-gate 	}
25270Sstevel@tonic-gate 	if (ht)
25280Sstevel@tonic-gate 		htable_release(ht);
25295084Sjohnlev 	XPV_ALLOW_MIGRATE();
25300Sstevel@tonic-gate }
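
/*
 * Illustrative usage sketch (editorial addition, not in the original
 * source): a working set monitor can clear the ref/mod bits over a
 * region and later read them back via hat_pagesync().  The names
 * "wsm_addr" and "wsm_len" are hypothetical.
 *
 *	hat_sync(curproc->p_as->a_hat, wsm_addr, wsm_len,
 *	    HAT_SYNC_ZERORM);
 */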
25310Sstevel@tonic-gate 
25320Sstevel@tonic-gate /*
25330Sstevel@tonic-gate  * void	hat_map(hat, addr, len, flags)
25340Sstevel@tonic-gate  */
25350Sstevel@tonic-gate /*ARGSUSED*/
25360Sstevel@tonic-gate void
25370Sstevel@tonic-gate hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
25380Sstevel@tonic-gate {
25390Sstevel@tonic-gate 	/* does nothing */
25400Sstevel@tonic-gate }
25410Sstevel@tonic-gate 
25420Sstevel@tonic-gate /*
25430Sstevel@tonic-gate  * uint_t hat_getattr(hat, addr, *attr)
25440Sstevel@tonic-gate  *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
25450Sstevel@tonic-gate  *	mapping and *attr is valid, nonzero if there was no mapping and
25460Sstevel@tonic-gate  *	*attr is not valid.
25470Sstevel@tonic-gate  */
25480Sstevel@tonic-gate uint_t
25490Sstevel@tonic-gate hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
25500Sstevel@tonic-gate {
25510Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
25520Sstevel@tonic-gate 	htable_t	*ht = NULL;
25530Sstevel@tonic-gate 	x86pte_t	pte;
25540Sstevel@tonic-gate 
25553446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
25560Sstevel@tonic-gate 
25570Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
25580Sstevel@tonic-gate 		return ((uint_t)-1);
25590Sstevel@tonic-gate 
25603446Smrj 	ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
25610Sstevel@tonic-gate 	if (ht == NULL)
25620Sstevel@tonic-gate 		return ((uint_t)-1);
25630Sstevel@tonic-gate 
25640Sstevel@tonic-gate 	if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
25650Sstevel@tonic-gate 		htable_release(ht);
25660Sstevel@tonic-gate 		return ((uint_t)-1);
25670Sstevel@tonic-gate 	}
25680Sstevel@tonic-gate 
25690Sstevel@tonic-gate 	*attr = PROT_READ;
25700Sstevel@tonic-gate 	if (PTE_GET(pte, PT_WRITABLE))
25710Sstevel@tonic-gate 		*attr |= PROT_WRITE;
25720Sstevel@tonic-gate 	if (PTE_GET(pte, PT_USER))
25730Sstevel@tonic-gate 		*attr |= PROT_USER;
25740Sstevel@tonic-gate 	if (!PTE_GET(pte, mmu.pt_nx))
25750Sstevel@tonic-gate 		*attr |= PROT_EXEC;
25763446Smrj 	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
25770Sstevel@tonic-gate 		*attr |= HAT_NOSYNC;
25780Sstevel@tonic-gate 	htable_release(ht);
25790Sstevel@tonic-gate 	return (0);
25800Sstevel@tonic-gate }
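
/*
 * Illustrative usage sketch (editorial addition): check whether an
 * address is currently mapped writable.  "uaddr" is hypothetical.
 *
 *	uint_t attr;
 *
 *	if (hat_getattr(as->a_hat, uaddr, &attr) == 0 &&
 *	    (attr & PROT_WRITE))
 *		... mapping exists and is writable ...
 */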
25810Sstevel@tonic-gate 
25820Sstevel@tonic-gate /*
25830Sstevel@tonic-gate  * hat_updateattr() applies the given attribute change to an existing mapping
25840Sstevel@tonic-gate  */
25850Sstevel@tonic-gate #define	HAT_LOAD_ATTR		1
25860Sstevel@tonic-gate #define	HAT_SET_ATTR		2
25870Sstevel@tonic-gate #define	HAT_CLR_ATTR		3
25880Sstevel@tonic-gate 
25890Sstevel@tonic-gate static void
25900Sstevel@tonic-gate hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
25910Sstevel@tonic-gate {
25920Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
25930Sstevel@tonic-gate 	uintptr_t	eaddr = (uintptr_t)addr + len;
25940Sstevel@tonic-gate 	htable_t	*ht = NULL;
25950Sstevel@tonic-gate 	uint_t		entry;
25960Sstevel@tonic-gate 	x86pte_t	oldpte, newpte;
25970Sstevel@tonic-gate 	page_t		*pp;
25980Sstevel@tonic-gate 
25995084Sjohnlev 	XPV_DISALLOW_MIGRATE();
26000Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
26010Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
26020Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
26030Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
26040Sstevel@tonic-gate 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
26050Sstevel@tonic-gate try_again:
26060Sstevel@tonic-gate 		oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
26070Sstevel@tonic-gate 		if (ht == NULL)
26080Sstevel@tonic-gate 			break;
26093446Smrj 		if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
26100Sstevel@tonic-gate 			continue;
26110Sstevel@tonic-gate 
26120Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
26130Sstevel@tonic-gate 		if (pp == NULL)
26140Sstevel@tonic-gate 			continue;
26150Sstevel@tonic-gate 		x86_hm_enter(pp);
26160Sstevel@tonic-gate 
26170Sstevel@tonic-gate 		newpte = oldpte;
26180Sstevel@tonic-gate 		/*
26190Sstevel@tonic-gate 		 * We found a page table entry in the desired range,
26200Sstevel@tonic-gate 		 * figure out the new attributes.
26210Sstevel@tonic-gate 		 */
26220Sstevel@tonic-gate 		if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
26230Sstevel@tonic-gate 			if ((attr & PROT_WRITE) &&
26240Sstevel@tonic-gate 			    !PTE_GET(oldpte, PT_WRITABLE))
26250Sstevel@tonic-gate 				newpte |= PT_WRITABLE;
26260Sstevel@tonic-gate 
26273446Smrj 			if ((attr & HAT_NOSYNC) &&
26283446Smrj 			    PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
26290Sstevel@tonic-gate 				newpte |= PT_NOSYNC;
26300Sstevel@tonic-gate 
26310Sstevel@tonic-gate 			if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
26320Sstevel@tonic-gate 				newpte &= ~mmu.pt_nx;
26330Sstevel@tonic-gate 		}
26340Sstevel@tonic-gate 
26350Sstevel@tonic-gate 		if (what == HAT_LOAD_ATTR) {
26360Sstevel@tonic-gate 			if (!(attr & PROT_WRITE) &&
26370Sstevel@tonic-gate 			    PTE_GET(oldpte, PT_WRITABLE))
26380Sstevel@tonic-gate 				newpte &= ~PT_WRITABLE;
26390Sstevel@tonic-gate 
26403446Smrj 			if (!(attr & HAT_NOSYNC) &&
26413446Smrj 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
26423446Smrj 				newpte &= ~PT_SOFTWARE;
26430Sstevel@tonic-gate 
26440Sstevel@tonic-gate 			if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
26450Sstevel@tonic-gate 				newpte |= mmu.pt_nx;
26460Sstevel@tonic-gate 		}
26470Sstevel@tonic-gate 
26480Sstevel@tonic-gate 		if (what == HAT_CLR_ATTR) {
26490Sstevel@tonic-gate 			if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
26500Sstevel@tonic-gate 				newpte &= ~PT_WRITABLE;
26510Sstevel@tonic-gate 
26523446Smrj 			if ((attr & HAT_NOSYNC) &&
26533446Smrj 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
26543446Smrj 				newpte &= ~PT_SOFTWARE;
26550Sstevel@tonic-gate 
26560Sstevel@tonic-gate 			if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
26570Sstevel@tonic-gate 				newpte |= mmu.pt_nx;
26580Sstevel@tonic-gate 		}
26590Sstevel@tonic-gate 
26600Sstevel@tonic-gate 		/*
26613446Smrj 		 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
26623446Smrj 		 * x86pte_set() depends on this.
26633446Smrj 		 */
26643446Smrj 		if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
26653446Smrj 			newpte |= PT_REF | PT_MOD;
26663446Smrj 
26673446Smrj 		/*
26680Sstevel@tonic-gate 		 * Note that this code only handles EXEC, WRITE and
26690Sstevel@tonic-gate 		 * NOSYNC; PROT_READ and other attributes are not changed.
26700Sstevel@tonic-gate 		 */
26710Sstevel@tonic-gate 
26720Sstevel@tonic-gate 		/*
26730Sstevel@tonic-gate 		 * If new PTE really changed, update the table.
26740Sstevel@tonic-gate 		 */
26750Sstevel@tonic-gate 		if (newpte != oldpte) {
26760Sstevel@tonic-gate 			entry = htable_va2entry(vaddr, ht);
26770Sstevel@tonic-gate 			oldpte = hati_update_pte(ht, entry, oldpte, newpte);
26780Sstevel@tonic-gate 			if (oldpte != 0) {
26790Sstevel@tonic-gate 				x86_hm_exit(pp);
26800Sstevel@tonic-gate 				goto try_again;
26810Sstevel@tonic-gate 			}
26820Sstevel@tonic-gate 		}
26830Sstevel@tonic-gate 		x86_hm_exit(pp);
26840Sstevel@tonic-gate 	}
26850Sstevel@tonic-gate 	if (ht)
26860Sstevel@tonic-gate 		htable_release(ht);
26875084Sjohnlev 	XPV_ALLOW_MIGRATE();
26880Sstevel@tonic-gate }
26890Sstevel@tonic-gate 
26900Sstevel@tonic-gate /*
26910Sstevel@tonic-gate  * Various wrappers for hat_updateattr()
26920Sstevel@tonic-gate  */
26930Sstevel@tonic-gate void
26940Sstevel@tonic-gate hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
26950Sstevel@tonic-gate {
26963446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
26970Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
26980Sstevel@tonic-gate }
26990Sstevel@tonic-gate 
27000Sstevel@tonic-gate void
27010Sstevel@tonic-gate hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
27020Sstevel@tonic-gate {
27033446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
27040Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
27050Sstevel@tonic-gate }
27060Sstevel@tonic-gate 
27070Sstevel@tonic-gate void
27080Sstevel@tonic-gate hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
27090Sstevel@tonic-gate {
27103446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
27110Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
27120Sstevel@tonic-gate }
27130Sstevel@tonic-gate 
27140Sstevel@tonic-gate void
27150Sstevel@tonic-gate hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
27160Sstevel@tonic-gate {
27173446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
27180Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
27190Sstevel@tonic-gate }
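
/*
 * Illustrative usage sketch (editorial addition): write-protect one
 * page-aligned kernel page by clearing PROT_WRITE from its existing
 * mapping.  "kaddr" is hypothetical.
 *
 *	hat_clrattr(kas.a_hat, kaddr, MMU_PAGESIZE, PROT_WRITE);
 */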
27200Sstevel@tonic-gate 
27210Sstevel@tonic-gate /*
27220Sstevel@tonic-gate  * size_t hat_getpagesize(hat, addr)
27230Sstevel@tonic-gate  *	returns pagesize in bytes for <hat, addr>. returns -1 if there is
27240Sstevel@tonic-gate  *	no mapping. This is an advisory call.
27250Sstevel@tonic-gate  */
27260Sstevel@tonic-gate ssize_t
27270Sstevel@tonic-gate hat_getpagesize(hat_t *hat, caddr_t addr)
27280Sstevel@tonic-gate {
27290Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
27300Sstevel@tonic-gate 	htable_t	*ht;
27310Sstevel@tonic-gate 	size_t		pagesize;
27320Sstevel@tonic-gate 
27333446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
27340Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
27350Sstevel@tonic-gate 		return (-1);
27360Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, NULL);
27370Sstevel@tonic-gate 	if (ht == NULL)
27380Sstevel@tonic-gate 		return (-1);
27390Sstevel@tonic-gate 	pagesize = LEVEL_SIZE(ht->ht_level);
27400Sstevel@tonic-gate 	htable_release(ht);
27410Sstevel@tonic-gate 	return (pagesize);
27420Sstevel@tonic-gate }
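
/*
 * Illustrative usage sketch (editorial addition): since this is an
 * advisory call, callers must handle the no-mapping case.
 *
 *	ssize_t pgsz = hat_getpagesize(hat, addr);
 *
 *	if (pgsz == -1)
 *		... no mapping; fall back to MMU_PAGESIZE ...
 */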
27430Sstevel@tonic-gate 
27440Sstevel@tonic-gate 
27450Sstevel@tonic-gate 
27460Sstevel@tonic-gate /*
27470Sstevel@tonic-gate  * pfn_t hat_getpfnum(hat, addr)
27480Sstevel@tonic-gate  *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
27490Sstevel@tonic-gate  */
27500Sstevel@tonic-gate pfn_t
27510Sstevel@tonic-gate hat_getpfnum(hat_t *hat, caddr_t addr)
27520Sstevel@tonic-gate {
27530Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
27540Sstevel@tonic-gate 	htable_t	*ht;
27550Sstevel@tonic-gate 	uint_t		entry;
27560Sstevel@tonic-gate 	pfn_t		pfn = PFN_INVALID;
27570Sstevel@tonic-gate 
27583446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
27590Sstevel@tonic-gate 	if (khat_running == 0)
27603446Smrj 		return (PFN_INVALID);
27610Sstevel@tonic-gate 
27620Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
27630Sstevel@tonic-gate 		return (PFN_INVALID);
27640Sstevel@tonic-gate 
27655084Sjohnlev 	XPV_DISALLOW_MIGRATE();
27660Sstevel@tonic-gate 	/*
27670Sstevel@tonic-gate 	 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
27680Sstevel@tonic-gate 	 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
27690Sstevel@tonic-gate 	 * this up.
27700Sstevel@tonic-gate 	 */
27710Sstevel@tonic-gate 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
27720Sstevel@tonic-gate 		x86pte_t pte;
27733446Smrj 		pgcnt_t pg_index;
27743446Smrj 
27753446Smrj 		pg_index = mmu_btop(vaddr - mmu.kmap_addr);
27763446Smrj 		pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
27775084Sjohnlev 		if (PTE_ISVALID(pte))
27785084Sjohnlev 			/*LINTED [use of constant 0 causes a lint warning] */
27795084Sjohnlev 			pfn = PTE2PFN(pte, 0);
27805084Sjohnlev 		XPV_ALLOW_MIGRATE();
27815084Sjohnlev 		return (pfn);
27820Sstevel@tonic-gate 	}
27830Sstevel@tonic-gate 
27840Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, &entry);
27855084Sjohnlev 	if (ht == NULL) {
27865084Sjohnlev 		XPV_ALLOW_MIGRATE();
27870Sstevel@tonic-gate 		return (PFN_INVALID);
27885084Sjohnlev 	}
27890Sstevel@tonic-gate 	ASSERT(vaddr >= ht->ht_vaddr);
27900Sstevel@tonic-gate 	ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
27910Sstevel@tonic-gate 	pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
27920Sstevel@tonic-gate 	if (ht->ht_level > 0)
27930Sstevel@tonic-gate 		pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
27940Sstevel@tonic-gate 	htable_release(ht);
27955084Sjohnlev 	XPV_ALLOW_MIGRATE();
27960Sstevel@tonic-gate 	return (pfn);
27970Sstevel@tonic-gate }
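
/*
 * Illustrative usage sketch (editorial addition): the common DDI
 * pattern mentioned above -- translate a kernel virtual address to a
 * page frame number and check for failure.  "kaddr" is hypothetical.
 *
 *	pfn_t pfn = hat_getpfnum(kas.a_hat, kaddr);
 *
 *	if (pfn == PFN_INVALID)
 *		... address is not mapped ...
 */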
27980Sstevel@tonic-gate 
27990Sstevel@tonic-gate /*
28000Sstevel@tonic-gate  * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
28010Sstevel@tonic-gate  * Use hat_getpfnum(kas.a_hat, ...) instead.
28020Sstevel@tonic-gate  *
28030Sstevel@tonic-gate  * We'd like to return PFN_INVALID if the mappings have underlying page_t's
28040Sstevel@tonic-gate  * but can't right now due to the fact that some software has grown to use
28050Sstevel@tonic-gate  * this interface incorrectly. So for now when the interface is misused,
28060Sstevel@tonic-gate  * return a warning to the user that in the future it won't work in the
28070Sstevel@tonic-gate  * way they're abusing it, and carry on.
28080Sstevel@tonic-gate  *
28090Sstevel@tonic-gate  * Note that hat_getkpfnum() is never supported on amd64.
28100Sstevel@tonic-gate  */
28110Sstevel@tonic-gate #if !defined(__amd64)
28120Sstevel@tonic-gate pfn_t
28130Sstevel@tonic-gate hat_getkpfnum(caddr_t addr)
28140Sstevel@tonic-gate {
28150Sstevel@tonic-gate 	pfn_t	pfn;
28160Sstevel@tonic-gate 	int badcaller = 0;
28170Sstevel@tonic-gate 
28180Sstevel@tonic-gate 	if (khat_running == 0)
28190Sstevel@tonic-gate 		panic("hat_getkpfnum(): called too early\n");
28200Sstevel@tonic-gate 	if ((uintptr_t)addr < kernelbase)
28210Sstevel@tonic-gate 		return (PFN_INVALID);
28220Sstevel@tonic-gate 
28235084Sjohnlev 	XPV_DISALLOW_MIGRATE();
28240Sstevel@tonic-gate 	if (segkpm && IS_KPM_ADDR(addr)) {
28250Sstevel@tonic-gate 		badcaller = 1;
28260Sstevel@tonic-gate 		pfn = hat_kpm_va2pfn(addr);
28270Sstevel@tonic-gate 	} else {
28280Sstevel@tonic-gate 		pfn = hat_getpfnum(kas.a_hat, addr);
28290Sstevel@tonic-gate 		badcaller = pf_is_memory(pfn);
28300Sstevel@tonic-gate 	}
28310Sstevel@tonic-gate 
28320Sstevel@tonic-gate 	if (badcaller)
28330Sstevel@tonic-gate 		hat_getkpfnum_badcall(caller());
28345084Sjohnlev 	XPV_ALLOW_MIGRATE();
28350Sstevel@tonic-gate 	return (pfn);
28360Sstevel@tonic-gate }
28370Sstevel@tonic-gate #endif /* __amd64 */
28380Sstevel@tonic-gate 
28390Sstevel@tonic-gate /*
28400Sstevel@tonic-gate  * int hat_probe(hat, addr)
28410Sstevel@tonic-gate  *	return 0 if no valid mapping is present.  Faster version
28420Sstevel@tonic-gate  *	of hat_getattr in certain architectures.
28430Sstevel@tonic-gate  */
28440Sstevel@tonic-gate int
28450Sstevel@tonic-gate hat_probe(hat_t *hat, caddr_t addr)
28460Sstevel@tonic-gate {
28470Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
28480Sstevel@tonic-gate 	uint_t		entry;
28490Sstevel@tonic-gate 	htable_t	*ht;
28500Sstevel@tonic-gate 	pgcnt_t		pg_off;
28510Sstevel@tonic-gate 
28523446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
28530Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
28540Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
28550Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
28560Sstevel@tonic-gate 		return (0);
28570Sstevel@tonic-gate 
28580Sstevel@tonic-gate 	/*
28590Sstevel@tonic-gate 	 * Most common use of hat_probe is from segmap. We special case it
28600Sstevel@tonic-gate 	 * for performance.
28610Sstevel@tonic-gate 	 */
28620Sstevel@tonic-gate 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
28630Sstevel@tonic-gate 		pg_off = mmu_btop(vaddr - mmu.kmap_addr);
28640Sstevel@tonic-gate 		if (mmu.pae_hat)
28650Sstevel@tonic-gate 			return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
28660Sstevel@tonic-gate 		else
28670Sstevel@tonic-gate 			return (PTE_ISVALID(
28680Sstevel@tonic-gate 			    ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
28690Sstevel@tonic-gate 	}
28700Sstevel@tonic-gate 
28710Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, &entry);
28720Sstevel@tonic-gate 	htable_release(ht);
28735084Sjohnlev 	return (ht != NULL);
28740Sstevel@tonic-gate }
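
/*
 * Illustrative usage sketch (editorial addition): segmap-style check
 * that a translation exists before touching the address.  Per the
 * ASSERTs above, callers other than the kernel hat must hold the AS
 * lock.
 *
 *	if (hat_probe(as->a_hat, addr) == 0)
 *		... no valid mapping; fault one in first ...
 */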
28750Sstevel@tonic-gate 
28760Sstevel@tonic-gate /*
28774381Sjosephb  * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
28784381Sjosephb  */
28794381Sjosephb static int
28804381Sjosephb is_it_dism(hat_t *hat, caddr_t va)
28814381Sjosephb {
28824381Sjosephb 	struct seg *seg;
28834381Sjosephb 	struct shm_data *shmd;
28844381Sjosephb 	struct spt_data *sptd;
28854381Sjosephb 
28864381Sjosephb 	seg = as_findseg(hat->hat_as, va, 0);
28874381Sjosephb 	ASSERT(seg != NULL);
28884381Sjosephb 	ASSERT(seg->s_base <= va);
28894381Sjosephb 	shmd = (struct shm_data *)seg->s_data;
28904381Sjosephb 	ASSERT(shmd != NULL);
28914381Sjosephb 	sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
28924381Sjosephb 	ASSERT(sptd != NULL);
28934381Sjosephb 	if (sptd->spt_flags & SHM_PAGEABLE)
28944381Sjosephb 		return (1);
28954381Sjosephb 	return (0);
28964381Sjosephb }
28974381Sjosephb 
28984381Sjosephb /*
28994381Sjosephb  * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
29000Sstevel@tonic-gate  * except that we use the ism_hat's existing mappings to determine the pages
29014381Sjosephb  * and protections to use for this hat. If we find a full properly aligned
29024381Sjosephb  * and sized pagetable, we will attempt to share the pagetable itself.
29030Sstevel@tonic-gate  */
29040Sstevel@tonic-gate /*ARGSUSED*/
29050Sstevel@tonic-gate int
29060Sstevel@tonic-gate hat_share(
29070Sstevel@tonic-gate 	hat_t		*hat,
29080Sstevel@tonic-gate 	caddr_t		addr,
29090Sstevel@tonic-gate 	hat_t		*ism_hat,
29100Sstevel@tonic-gate 	caddr_t		src_addr,
29110Sstevel@tonic-gate 	size_t		len,	/* almost useless value, see below... */
29120Sstevel@tonic-gate 	uint_t		ismszc)
29130Sstevel@tonic-gate {
29140Sstevel@tonic-gate 	uintptr_t	vaddr_start = (uintptr_t)addr;
29150Sstevel@tonic-gate 	uintptr_t	vaddr;
29160Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr_start + len;
29170Sstevel@tonic-gate 	uintptr_t	ism_addr_start = (uintptr_t)src_addr;
29180Sstevel@tonic-gate 	uintptr_t	ism_addr = ism_addr_start;
29190Sstevel@tonic-gate 	uintptr_t	e_ism_addr = ism_addr + len;
29200Sstevel@tonic-gate 	htable_t	*ism_ht = NULL;
29210Sstevel@tonic-gate 	htable_t	*ht;
29220Sstevel@tonic-gate 	x86pte_t	pte;
29230Sstevel@tonic-gate 	page_t		*pp;
29240Sstevel@tonic-gate 	pfn_t		pfn;
29250Sstevel@tonic-gate 	level_t		l;
29260Sstevel@tonic-gate 	pgcnt_t		pgcnt;
29270Sstevel@tonic-gate 	uint_t		prot;
29284381Sjosephb 	int		is_dism;
29294381Sjosephb 	int		flags;
29300Sstevel@tonic-gate 
29310Sstevel@tonic-gate 	/*
29320Sstevel@tonic-gate 	 * We might be asked to share an empty DISM hat by as_dup()
29330Sstevel@tonic-gate 	 */
29340Sstevel@tonic-gate 	ASSERT(hat != kas.a_hat);
29353446Smrj 	ASSERT(eaddr <= _userlimit);
29360Sstevel@tonic-gate 	if (!(ism_hat->hat_flags & HAT_SHARED)) {
29370Sstevel@tonic-gate 		ASSERT(hat_get_mapped_size(ism_hat) == 0);
29380Sstevel@tonic-gate 		return (0);
29390Sstevel@tonic-gate 	}
29405084Sjohnlev 	XPV_DISALLOW_MIGRATE();
29410Sstevel@tonic-gate 
29420Sstevel@tonic-gate 	/*
29430Sstevel@tonic-gate 	 * The SPT segment driver often passes us a size larger than there are
29440Sstevel@tonic-gate 	 * valid mappings. That's because it rounds the segment size up to a
29450Sstevel@tonic-gate 	 * large pagesize, even if the actual memory mapped by ism_hat is less.
29460Sstevel@tonic-gate 	 */
29470Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr_start));
29480Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(ism_addr_start));
29490Sstevel@tonic-gate 	ASSERT(ism_hat->hat_flags & HAT_SHARED);
29504381Sjosephb 	is_dism = is_it_dism(hat, addr);
29510Sstevel@tonic-gate 	while (ism_addr < e_ism_addr) {
29520Sstevel@tonic-gate 		/*
29530Sstevel@tonic-gate 		 * use htable_walk to get the next valid ISM mapping
29540Sstevel@tonic-gate 		 */
29550Sstevel@tonic-gate 		pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
29560Sstevel@tonic-gate 		if (ism_ht == NULL)
29570Sstevel@tonic-gate 			break;
29580Sstevel@tonic-gate 
29590Sstevel@tonic-gate 		/*
29604381Sjosephb 		 * First check to see if we already share the page table.
29614381Sjosephb 		 */
29624381Sjosephb 		l = ism_ht->ht_level;
29634381Sjosephb 		vaddr = vaddr_start + (ism_addr - ism_addr_start);
29644381Sjosephb 		ht = htable_lookup(hat, vaddr, l);
29654381Sjosephb 		if (ht != NULL) {
29664381Sjosephb 			if (ht->ht_flags & HTABLE_SHARED_PFN)
29674381Sjosephb 				goto shared;
29684381Sjosephb 			htable_release(ht);
29694381Sjosephb 			goto not_shared;
29704381Sjosephb 		}
29714381Sjosephb 
29724381Sjosephb 		/*
29734381Sjosephb 		 * Can't ever share top table.
29744381Sjosephb 		 */
29754381Sjosephb 		if (l == mmu.max_level)
29764381Sjosephb 			goto not_shared;
29774381Sjosephb 
29784381Sjosephb 		/*
29794381Sjosephb 		 * Avoid level mismatches later due to DISM faults.
29804381Sjosephb 		 */
29814381Sjosephb 		if (is_dism && l > 0)
29824381Sjosephb 			goto not_shared;
29834381Sjosephb 
29844381Sjosephb 		/*
29854381Sjosephb 		 * addresses and lengths must align
29864381Sjosephb 		 * table must be fully populated
29874381Sjosephb 		 * no lower level page tables
29884381Sjosephb 		 */
29894381Sjosephb 		if (ism_addr != ism_ht->ht_vaddr ||
29904381Sjosephb 		    (vaddr & LEVEL_OFFSET(l + 1)) != 0)
29914381Sjosephb 			goto not_shared;
29924381Sjosephb 
29934381Sjosephb 		/*
29944381Sjosephb 		 * The range of address space must cover a full table.
29950Sstevel@tonic-gate 		 */
29965159Sjohnlev 		if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
29974381Sjosephb 			goto not_shared;
29984381Sjosephb 
29994381Sjosephb 		/*
30004381Sjosephb 		 * All entries in the ISM page table must be leaf PTEs.
30014381Sjosephb 		 */
30024381Sjosephb 		if (l > 0) {
30034381Sjosephb 			int e;
30044381Sjosephb 
30054381Sjosephb 			/*
30064381Sjosephb 			 * We know the 0th is from htable_walk() above.
30074381Sjosephb 			 */
30084381Sjosephb 			for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
30094381Sjosephb 				x86pte_t pte;
30104381Sjosephb 				pte = x86pte_get(ism_ht, e);
30114381Sjosephb 				if (!PTE_ISPAGE(pte, l))
30124381Sjosephb 					goto not_shared;
30134381Sjosephb 			}
30144381Sjosephb 		}
30154381Sjosephb 
30164381Sjosephb 		/*
30174381Sjosephb 		 * share the page table
30184381Sjosephb 		 */
30194381Sjosephb 		ht = htable_create(hat, vaddr, l, ism_ht);
30204381Sjosephb shared:
30214381Sjosephb 		ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
30224381Sjosephb 		ASSERT(ht->ht_shares == ism_ht);
30234381Sjosephb 		hat->hat_ism_pgcnt +=
30244381Sjosephb 		    (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
30254381Sjosephb 		    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
30264381Sjosephb 		ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
30274381Sjosephb 		htable_release(ht);
30284381Sjosephb 		ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
30294381Sjosephb 		htable_release(ism_ht);
30304381Sjosephb 		ism_ht = NULL;
30314381Sjosephb 		continue;
30324381Sjosephb 
30334381Sjosephb not_shared:
30344381Sjosephb 		/*
30354381Sjosephb 		 * Unable to share the page table. Instead we will
30364381Sjosephb 		 * create new mappings from the values in the ISM mappings.
30374381Sjosephb 		 * Figure out what level size mappings to use.
30384381Sjosephb 		 */
30390Sstevel@tonic-gate 		for (l = ism_ht->ht_level; l > 0; --l) {
30400Sstevel@tonic-gate 			if (LEVEL_SIZE(l) <= eaddr - vaddr &&
30410Sstevel@tonic-gate 			    (vaddr & LEVEL_OFFSET(l)) == 0)
30420Sstevel@tonic-gate 				break;
30430Sstevel@tonic-gate 		}
30440Sstevel@tonic-gate 
30450Sstevel@tonic-gate 		/*
30460Sstevel@tonic-gate 		 * The ISM mapping might be larger than the share area,
30474381Sjosephb 		 * so be careful to truncate it if needed.
30480Sstevel@tonic-gate 		 */
30490Sstevel@tonic-gate 		if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
30500Sstevel@tonic-gate 			pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
30510Sstevel@tonic-gate 		} else {
30520Sstevel@tonic-gate 			pgcnt = mmu_btop(eaddr - vaddr);
30530Sstevel@tonic-gate 			l = 0;
30540Sstevel@tonic-gate 		}
30550Sstevel@tonic-gate 
30560Sstevel@tonic-gate 		pfn = PTE2PFN(pte, ism_ht->ht_level);
30570Sstevel@tonic-gate 		ASSERT(pfn != PFN_INVALID);
30580Sstevel@tonic-gate 		while (pgcnt > 0) {
30590Sstevel@tonic-gate 			/*
30600Sstevel@tonic-gate 			 * Make a new pte for the PFN for this level.
30610Sstevel@tonic-gate 			 * Copy protections for the pte from the ISM pte.
30620Sstevel@tonic-gate 			 */
30630Sstevel@tonic-gate 			pp = page_numtopp_nolock(pfn);
30640Sstevel@tonic-gate 			ASSERT(pp != NULL);
30650Sstevel@tonic-gate 
30660Sstevel@tonic-gate 			prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
30670Sstevel@tonic-gate 			if (PTE_GET(pte, PT_WRITABLE))
30680Sstevel@tonic-gate 				prot |= PROT_WRITE;
30690Sstevel@tonic-gate 			if (!PTE_GET(pte, PT_NX))
30700Sstevel@tonic-gate 				prot |= PROT_EXEC;
30710Sstevel@tonic-gate 
30724381Sjosephb 			flags = HAT_LOAD;
30734381Sjosephb 			if (!is_dism)
30744381Sjosephb 				flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
30754381Sjosephb 			while (hati_load_common(hat, vaddr, pp, prot, flags,
30763446Smrj 			    l, pfn) != 0) {
30773446Smrj 				if (l == 0)
30783446Smrj 					panic("hati_load_common() failure");
30793446Smrj 				--l;
30803446Smrj 			}
30810Sstevel@tonic-gate 
30820Sstevel@tonic-gate 			vaddr += LEVEL_SIZE(l);
30830Sstevel@tonic-gate 			ism_addr += LEVEL_SIZE(l);
30840Sstevel@tonic-gate 			pfn += mmu_btop(LEVEL_SIZE(l));
30850Sstevel@tonic-gate 			pgcnt -= mmu_btop(LEVEL_SIZE(l));
30860Sstevel@tonic-gate 		}
30870Sstevel@tonic-gate 	}
30880Sstevel@tonic-gate 	if (ism_ht != NULL)
30890Sstevel@tonic-gate 		htable_release(ism_ht);
30905084Sjohnlev 	XPV_ALLOW_MIGRATE();
30910Sstevel@tonic-gate 	return (0);
30920Sstevel@tonic-gate }
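
/*
 * Editorial note (not in the original source): the sharing tests above
 * reduce to requiring a full pagetable's worth of aligned VA on both
 * sides.  For example, on amd64 a level 0 pagetable has 512 4K entries
 * and spans LEVEL_SIZE(1) = 2MB, so it can be shared only when
 *
 *	(vaddr & LEVEL_OFFSET(1)) == 0, ism_addr == ism_ht->ht_vaddr,
 *	and e_ism_addr - ism_addr >= LEVEL_SIZE(1)
 *
 * i.e. both the destination VA and the ISM source sit on a 2MB
 * boundary and the remaining ISM range covers the whole table.
 */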
30930Sstevel@tonic-gate 
30940Sstevel@tonic-gate 
30950Sstevel@tonic-gate /*
30960Sstevel@tonic-gate  * hat_unshare() is similar to hat_unload_callback(), but
30970Sstevel@tonic-gate  * we have to look for empty shared pagetables. Note that
30980Sstevel@tonic-gate  * hat_unshare() is always invoked against an entire segment.
30990Sstevel@tonic-gate  */
31000Sstevel@tonic-gate /*ARGSUSED*/
31010Sstevel@tonic-gate void
31020Sstevel@tonic-gate hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
31030Sstevel@tonic-gate {
31044654Sjosephb 	uint64_t	vaddr = (uintptr_t)addr;
31050Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
31060Sstevel@tonic-gate 	htable_t	*ht = NULL;
31070Sstevel@tonic-gate 	uint_t		need_demaps = 0;
31084381Sjosephb 	int		flags = HAT_UNLOAD_UNMAP;
31094381Sjosephb 	level_t		l;
31100Sstevel@tonic-gate 
31110Sstevel@tonic-gate 	ASSERT(hat != kas.a_hat);
31123446Smrj 	ASSERT(eaddr <= _userlimit);
31130Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
31140Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
31155084Sjohnlev 	XPV_DISALLOW_MIGRATE();
31160Sstevel@tonic-gate 
31170Sstevel@tonic-gate 	/*
31180Sstevel@tonic-gate 	 * First go through and remove any shared pagetables.
31190Sstevel@tonic-gate 	 *
31203446Smrj 	 * Note that it's ok to delay the TLB shootdown till the entire range is
31210Sstevel@tonic-gate 	 * finished, because if hat_pageunload() were to unload a shared
31223446Smrj 	 * pagetable page, its hat_tlb_inval() would do a global TLB invalidate.
31230Sstevel@tonic-gate 	 */
31244381Sjosephb 	l = mmu.max_page_level;
31254381Sjosephb 	if (l == mmu.max_level)
31264381Sjosephb 		--l;
31274381Sjosephb 	for (; l >= 0; --l) {
31284381Sjosephb 		for (vaddr = (uintptr_t)addr; vaddr < eaddr;
31294381Sjosephb 		    vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
31304381Sjosephb 			ASSERT(!IN_VA_HOLE(vaddr));
31314381Sjosephb 			/*
31324381Sjosephb 			 * find a pagetable that maps the current address
31334381Sjosephb 			 */
31344381Sjosephb 			ht = htable_lookup(hat, vaddr, l);
31354381Sjosephb 			if (ht == NULL)
31364381Sjosephb 				continue;
31370Sstevel@tonic-gate 			if (ht->ht_flags & HTABLE_SHARED_PFN) {
31380Sstevel@tonic-gate 				/*
31394381Sjosephb 				 * clear page count, set valid_cnt to 0,
31404381Sjosephb 				 * let htable_release() finish the job
31410Sstevel@tonic-gate 				 */
31424381Sjosephb 				hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
31434381Sjosephb 				    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
31440Sstevel@tonic-gate 				ht->ht_valid_cnt = 0;
31450Sstevel@tonic-gate 				need_demaps = 1;
31460Sstevel@tonic-gate 			}
31470Sstevel@tonic-gate 			htable_release(ht);
31480Sstevel@tonic-gate 		}
31490Sstevel@tonic-gate 	}
31500Sstevel@tonic-gate 
31510Sstevel@tonic-gate 	/*
31520Sstevel@tonic-gate 	 * flush the TLBs - since we're probably dealing with MANY mappings
31530Sstevel@tonic-gate 	 * we do just one CR3 reload.
31540Sstevel@tonic-gate 	 */
31550Sstevel@tonic-gate 	if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
31563446Smrj 		hat_tlb_inval(hat, DEMAP_ALL_ADDR);
31570Sstevel@tonic-gate 
31580Sstevel@tonic-gate 	/*
31590Sstevel@tonic-gate 	 * Now go back and clean up any unaligned mappings that
31600Sstevel@tonic-gate 	 * couldn't share pagetables.
31610Sstevel@tonic-gate 	 */
31624381Sjosephb 	if (!is_it_dism(hat, addr))
31634381Sjosephb 		flags |= HAT_UNLOAD_UNLOCK;
31644381Sjosephb 	hat_unload(hat, addr, len, flags);
31655084Sjohnlev 	XPV_ALLOW_MIGRATE();
31660Sstevel@tonic-gate }
31670Sstevel@tonic-gate 
31680Sstevel@tonic-gate 
31690Sstevel@tonic-gate /*
31700Sstevel@tonic-gate  * hat_reserve() does nothing
31710Sstevel@tonic-gate  */
31720Sstevel@tonic-gate /*ARGSUSED*/
31730Sstevel@tonic-gate void
31740Sstevel@tonic-gate hat_reserve(struct as *as, caddr_t addr, size_t len)
31750Sstevel@tonic-gate {
31760Sstevel@tonic-gate }
31770Sstevel@tonic-gate 
31780Sstevel@tonic-gate 
31790Sstevel@tonic-gate /*
31800Sstevel@tonic-gate  * Called when all mappings to a page should have write permission removed.
3181*9894SPavel.Tatashin@Sun.COM  * Mostly stolen from hat_pagesync().
31820Sstevel@tonic-gate  */
31830Sstevel@tonic-gate static void
31840Sstevel@tonic-gate hati_page_clrwrt(struct page *pp)
31850Sstevel@tonic-gate {
31860Sstevel@tonic-gate 	hment_t		*hm = NULL;
31870Sstevel@tonic-gate 	htable_t	*ht;
31880Sstevel@tonic-gate 	uint_t		entry;
31890Sstevel@tonic-gate 	x86pte_t	old;
31900Sstevel@tonic-gate 	x86pte_t	new;
31910Sstevel@tonic-gate 	uint_t		pszc = 0;
31920Sstevel@tonic-gate 
31935084Sjohnlev 	XPV_DISALLOW_MIGRATE();
31940Sstevel@tonic-gate next_size:
31950Sstevel@tonic-gate 	/*
31960Sstevel@tonic-gate 	 * walk thru the mapping list clearing write permission
31970Sstevel@tonic-gate 	 */
31980Sstevel@tonic-gate 	x86_hm_enter(pp);
31990Sstevel@tonic-gate 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
32000Sstevel@tonic-gate 		if (ht->ht_level < pszc)
32010Sstevel@tonic-gate 			continue;
32020Sstevel@tonic-gate 		old = x86pte_get(ht, entry);
32030Sstevel@tonic-gate 
32040Sstevel@tonic-gate 		for (;;) {
32050Sstevel@tonic-gate 			/*
32060Sstevel@tonic-gate 			 * Is this mapping of interest?
32070Sstevel@tonic-gate 			 */
32080Sstevel@tonic-gate 			if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
32090Sstevel@tonic-gate 			    PTE_GET(old, PT_WRITABLE) == 0)
32100Sstevel@tonic-gate 				break;
32110Sstevel@tonic-gate 
32120Sstevel@tonic-gate 			/*
32130Sstevel@tonic-gate 			 * Clear ref/mod writable bits. This requires cross
32140Sstevel@tonic-gate 			 * calls to ensure any executing TLBs see cleared bits.
32150Sstevel@tonic-gate 			 */
32160Sstevel@tonic-gate 			new = old;
32170Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
32180Sstevel@tonic-gate 			old = hati_update_pte(ht, entry, old, new);
32190Sstevel@tonic-gate 			if (old != 0)
32200Sstevel@tonic-gate 				continue;
32210Sstevel@tonic-gate 
32220Sstevel@tonic-gate 			break;
32230Sstevel@tonic-gate 		}
32240Sstevel@tonic-gate 	}
32250Sstevel@tonic-gate 	x86_hm_exit(pp);
32260Sstevel@tonic-gate 	while (pszc < pp->p_szc) {
32270Sstevel@tonic-gate 		page_t *tpp;
32280Sstevel@tonic-gate 		pszc++;
32290Sstevel@tonic-gate 		tpp = PP_GROUPLEADER(pp, pszc);
32300Sstevel@tonic-gate 		if (pp != tpp) {
32310Sstevel@tonic-gate 			pp = tpp;
32320Sstevel@tonic-gate 			goto next_size;
32330Sstevel@tonic-gate 		}
32340Sstevel@tonic-gate 	}
32355084Sjohnlev 	XPV_ALLOW_MIGRATE();
32360Sstevel@tonic-gate }
32370Sstevel@tonic-gate 
32380Sstevel@tonic-gate /*
32390Sstevel@tonic-gate  * void hat_page_setattr(pp, flag)
32400Sstevel@tonic-gate  * void hat_page_clrattr(pp, flag)
32410Sstevel@tonic-gate  *	used to set/clr ref/mod bits.
32420Sstevel@tonic-gate  */
32430Sstevel@tonic-gate void
32440Sstevel@tonic-gate hat_page_setattr(struct page *pp, uint_t flag)
32450Sstevel@tonic-gate {
32460Sstevel@tonic-gate 	vnode_t		*vp = pp->p_vnode;
32470Sstevel@tonic-gate 	kmutex_t	*vphm = NULL;
32480Sstevel@tonic-gate 	page_t		**listp;
32494324Sqiao 	int		noshuffle;
32504324Sqiao 
32514324Sqiao 	noshuffle = flag & P_NSH;
32524324Sqiao 	flag &= ~P_NSH;
32530Sstevel@tonic-gate 
32540Sstevel@tonic-gate 	if (PP_GETRM(pp, flag) == flag)
32550Sstevel@tonic-gate 		return;
32560Sstevel@tonic-gate 
32574324Sqiao 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
32584324Sqiao 	    !noshuffle) {
32590Sstevel@tonic-gate 		vphm = page_vnode_mutex(vp);
32600Sstevel@tonic-gate 		mutex_enter(vphm);
32610Sstevel@tonic-gate 	}
32620Sstevel@tonic-gate 
32630Sstevel@tonic-gate 	PP_SETRM(pp, flag);
32640Sstevel@tonic-gate 
32650Sstevel@tonic-gate 	if (vphm != NULL) {
32660Sstevel@tonic-gate 
32670Sstevel@tonic-gate 		/*
32680Sstevel@tonic-gate 		 * Some File Systems examine v_pages for NULL w/o
32690Sstevel@tonic-gate 		 * grabbing the vphm mutex. Must not let it become NULL when
32700Sstevel@tonic-gate 		 * pp is the only page on the list.
32710Sstevel@tonic-gate 		 */
32720Sstevel@tonic-gate 		if (pp->p_vpnext != pp) {
32730Sstevel@tonic-gate 			page_vpsub(&vp->v_pages, pp);
32740Sstevel@tonic-gate 			if (vp->v_pages != NULL)
32750Sstevel@tonic-gate 				listp = &vp->v_pages->p_vpprev->p_vpnext;
32760Sstevel@tonic-gate 			else
32770Sstevel@tonic-gate 				listp = &vp->v_pages;
32780Sstevel@tonic-gate 			page_vpadd(listp, pp);
32790Sstevel@tonic-gate 		}
32800Sstevel@tonic-gate 		mutex_exit(vphm);
32810Sstevel@tonic-gate 	}
32820Sstevel@tonic-gate }
32830Sstevel@tonic-gate 
32840Sstevel@tonic-gate void
32850Sstevel@tonic-gate hat_page_clrattr(struct page *pp, uint_t flag)
32860Sstevel@tonic-gate {
32870Sstevel@tonic-gate 	vnode_t		*vp = pp->p_vnode;
32880Sstevel@tonic-gate 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
32890Sstevel@tonic-gate 
32900Sstevel@tonic-gate 	/*
32912999Sstans 	 * Caller is expected to hold page's io lock for VMODSORT to work
32922999Sstans 	 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
32932999Sstans 	 * bit is cleared.
32942999Sstans 	 * We don't assert this, to avoid tripping some existing third
32952999Sstans 	 * party code. The dirty page is moved back to the top of the
32962999Sstans 	 * v_pages list after IO is done in pvn_write_done().
32970Sstevel@tonic-gate 	 */
32980Sstevel@tonic-gate 	PP_CLRRM(pp, flag);
32990Sstevel@tonic-gate 
33002999Sstans 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
33010Sstevel@tonic-gate 
33020Sstevel@tonic-gate 		/*
33030Sstevel@tonic-gate 		 * VMODSORT works by removing write permissions and getting
33040Sstevel@tonic-gate 		 * a fault when a page is made dirty. At this point
33050Sstevel@tonic-gate 		 * we need to remove write permission from all mappings
33060Sstevel@tonic-gate 		 * to this page.
33070Sstevel@tonic-gate 		 */
33080Sstevel@tonic-gate 		hati_page_clrwrt(pp);
33090Sstevel@tonic-gate 	}
33100Sstevel@tonic-gate }
33110Sstevel@tonic-gate 
33120Sstevel@tonic-gate /*
33130Sstevel@tonic-gate  *	If flag is specified, returns 0 if attribute is disabled
3314*9894SPavel.Tatashin@Sun.COM  *	and nonzero if enabled.  If flag specifies multiple attributes
3315*9894SPavel.Tatashin@Sun.COM  *	then returns 0 if ALL attributes are disabled.  This is an advisory
33160Sstevel@tonic-gate  *	call.
33170Sstevel@tonic-gate  */
33180Sstevel@tonic-gate uint_t
33190Sstevel@tonic-gate hat_page_getattr(struct page *pp, uint_t flag)
33200Sstevel@tonic-gate {
33210Sstevel@tonic-gate 	return (PP_GETRM(pp, flag));
33220Sstevel@tonic-gate }
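
/*
 * Illustrative usage sketch (editorial addition): advisory check of
 * the software ref/mod bits kept in the page_t.
 *
 *	if (hat_page_getattr(pp, P_MOD))
 *		... page was modified since P_MOD was last cleared ...
 */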
33230Sstevel@tonic-gate 
33240Sstevel@tonic-gate 
33250Sstevel@tonic-gate /*
33260Sstevel@tonic-gate  * common code used by hat_pageunload() and hment_steal()
33270Sstevel@tonic-gate  */
33280Sstevel@tonic-gate hment_t *
33290Sstevel@tonic-gate hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
33300Sstevel@tonic-gate {
33310Sstevel@tonic-gate 	x86pte_t old_pte;
33320Sstevel@tonic-gate 	pfn_t pfn = pp->p_pagenum;
33330Sstevel@tonic-gate 	hment_t *hm;
33340Sstevel@tonic-gate 
33350Sstevel@tonic-gate 	/*
33360Sstevel@tonic-gate 	 * We need to acquire a hold on the htable in order to
33370Sstevel@tonic-gate 	 * do the invalidate. We know the htable must exist, since
33380Sstevel@tonic-gate 	 * unmaps don't release the htable until after removing any
33390Sstevel@tonic-gate 	 * hment, and holding x86_hm_enter() keeps that from proceeding.
33400Sstevel@tonic-gate 	 */
33410Sstevel@tonic-gate 	htable_acquire(ht);
33420Sstevel@tonic-gate 
33430Sstevel@tonic-gate 	/*
33440Sstevel@tonic-gate 	 * Invalidate the PTE and remove the hment.
33450Sstevel@tonic-gate 	 */
33463446Smrj 	old_pte = x86pte_inval(ht, entry, 0, NULL);
334747Sjosephb 	if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
33483446Smrj 		panic("x86pte_inval() failure found PTE = " FMT_PTE
334947Sjosephb 		    " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
335047Sjosephb 		    old_pte, pfn, (uintptr_t)ht, entry);
335147Sjosephb 	}
33520Sstevel@tonic-gate 
33530Sstevel@tonic-gate 	/*
33540Sstevel@tonic-gate 	 * Clean up all the htable information for this mapping
33550Sstevel@tonic-gate 	 */
33560Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
33570Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
33580Sstevel@tonic-gate 	PGCNT_DEC(ht->ht_hat, ht->ht_level);
33590Sstevel@tonic-gate 
33600Sstevel@tonic-gate 	/*
33610Sstevel@tonic-gate 	 * sync ref/mod bits to the page_t
33620Sstevel@tonic-gate 	 */
33633446Smrj 	if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
33640Sstevel@tonic-gate 		hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
33650Sstevel@tonic-gate 
33660Sstevel@tonic-gate 	/*
33670Sstevel@tonic-gate 	 * Remove the mapping list entry for this page.
33680Sstevel@tonic-gate 	 */
33690Sstevel@tonic-gate 	hm = hment_remove(pp, ht, entry);
33700Sstevel@tonic-gate 
33710Sstevel@tonic-gate 	/*
33720Sstevel@tonic-gate 	 * drop the mapping list lock so that we might free the
33730Sstevel@tonic-gate 	 * hment and htable.
33740Sstevel@tonic-gate 	 */
33750Sstevel@tonic-gate 	x86_hm_exit(pp);
33760Sstevel@tonic-gate 	htable_release(ht);
33770Sstevel@tonic-gate 	return (hm);
33780Sstevel@tonic-gate }
33790Sstevel@tonic-gate 
33801841Spraks extern int	vpm_enable;
33810Sstevel@tonic-gate /*
33820Sstevel@tonic-gate  * Unload all translations to a page. If the page is a subpage of a large
33830Sstevel@tonic-gate  * page, the large page mappings are also removed.
33840Sstevel@tonic-gate  *
33850Sstevel@tonic-gate  * The forceflags are unused.
33860Sstevel@tonic-gate  */
33870Sstevel@tonic-gate 
33880Sstevel@tonic-gate /*ARGSUSED*/
33890Sstevel@tonic-gate static int
33900Sstevel@tonic-gate hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
33910Sstevel@tonic-gate {
33920Sstevel@tonic-gate 	page_t		*cur_pp = pp;
33930Sstevel@tonic-gate 	hment_t		*hm;
33940Sstevel@tonic-gate 	hment_t		*prev;
33950Sstevel@tonic-gate 	htable_t	*ht;
33960Sstevel@tonic-gate 	uint_t		entry;
33970Sstevel@tonic-gate 	level_t		level;
33980Sstevel@tonic-gate 
33995084Sjohnlev 	XPV_DISALLOW_MIGRATE();
34001841Spraks #if defined(__amd64)
34011841Spraks 	/*
34021841Spraks 	 * clear the vpm ref.
34031841Spraks 	 */
34041841Spraks 	if (vpm_enable) {
34051841Spraks 		pp->p_vpmref = 0;
34061841Spraks 	}
34071841Spraks #endif
34080Sstevel@tonic-gate 	/*
34090Sstevel@tonic-gate 	 * The loop with next_size handles pages with multiple pagesize mappings
34100Sstevel@tonic-gate 	 */
34110Sstevel@tonic-gate next_size:
34120Sstevel@tonic-gate 	for (;;) {
34130Sstevel@tonic-gate 
34140Sstevel@tonic-gate 		/*
34150Sstevel@tonic-gate 		 * Get a mapping list entry
34160Sstevel@tonic-gate 		 */
34170Sstevel@tonic-gate 		x86_hm_enter(cur_pp);
34180Sstevel@tonic-gate 		for (prev = NULL; ; prev = hm) {
34190Sstevel@tonic-gate 			hm = hment_walk(cur_pp, &ht, &entry, prev);
34200Sstevel@tonic-gate 			if (hm == NULL) {
34210Sstevel@tonic-gate 				x86_hm_exit(cur_pp);
34220Sstevel@tonic-gate 
34230Sstevel@tonic-gate 				/*
34240Sstevel@tonic-gate 				 * If not part of a larger page, we're done.
34250Sstevel@tonic-gate 				 */
34263446Smrj 				if (cur_pp->p_szc <= pg_szcd) {
34275084Sjohnlev 					XPV_ALLOW_MIGRATE();
34280Sstevel@tonic-gate 					return (0);
34293446Smrj 				}
34300Sstevel@tonic-gate 
34310Sstevel@tonic-gate 				/*
34320Sstevel@tonic-gate 				 * Else check the next larger page size.
34330Sstevel@tonic-gate 				 * hat_page_demote() may decrease p_szc
34340Sstevel@tonic-gate 				 * but that's ok; we'll just take an extra
34350Sstevel@tonic-gate 				 * trip, discover there are no larger mappings,
34360Sstevel@tonic-gate 				 * and return.
34370Sstevel@tonic-gate 				 */
34380Sstevel@tonic-gate 				++pg_szcd;
34390Sstevel@tonic-gate 				cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
34400Sstevel@tonic-gate 				goto next_size;
34410Sstevel@tonic-gate 			}
34420Sstevel@tonic-gate 
34430Sstevel@tonic-gate 			/*
34440Sstevel@tonic-gate 			 * If this mapping size matches, remove it.
34450Sstevel@tonic-gate 			 */
34460Sstevel@tonic-gate 			level = ht->ht_level;
34470Sstevel@tonic-gate 			if (level == pg_szcd)
34480Sstevel@tonic-gate 				break;
34490Sstevel@tonic-gate 		}
34500Sstevel@tonic-gate 
34510Sstevel@tonic-gate 		/*
34520Sstevel@tonic-gate 		 * Remove the mapping list entry for this page.
34530Sstevel@tonic-gate 		 * Note this does the x86_hm_exit() for us.
34540Sstevel@tonic-gate 		 */
34550Sstevel@tonic-gate 		hm = hati_page_unmap(cur_pp, ht, entry);
34560Sstevel@tonic-gate 		if (hm != NULL)
34570Sstevel@tonic-gate 			hment_free(hm);
34580Sstevel@tonic-gate 	}
34590Sstevel@tonic-gate }
34600Sstevel@tonic-gate 
34610Sstevel@tonic-gate int
34620Sstevel@tonic-gate hat_pageunload(struct page *pp, uint_t forceflag)
34630Sstevel@tonic-gate {
34640Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp));
34650Sstevel@tonic-gate 	return (hati_pageunload(pp, 0, forceflag));
34660Sstevel@tonic-gate }
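
/*
 * Illustrative usage sketch (editorial addition): the VM system
 * unloads all translations before freeing or relocating a page.  The
 * page must be locked EXCL; forceflag is unused on x86.
 *
 *	ASSERT(PAGE_EXCL(pp));
 *	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
 */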
34670Sstevel@tonic-gate 
34680Sstevel@tonic-gate /*
34690Sstevel@tonic-gate  * Unload all large mappings to pp and reduce by 1 the p_szc field of every
34700Sstevel@tonic-gate  * large page level that included pp.
34710Sstevel@tonic-gate  *
34720Sstevel@tonic-gate  * pp must be locked EXCL. Even though no other constituent pages are locked
34730Sstevel@tonic-gate  * it's legal to unload large mappings to pp because all constituent pages of
34740Sstevel@tonic-gate  * large locked mappings have to be locked SHARED.  Therefore if we have an
34750Sstevel@tonic-gate  * EXCL lock on one of the constituent pages, none of the large mappings to
34760Sstevel@tonic-gate  * pp are locked.
34770Sstevel@tonic-gate  *
34780Sstevel@tonic-gate  * Change (always decrease) p_szc field starting from the last constituent
34790Sstevel@tonic-gate  * page and ending with root constituent page so that root's pszc always shows
34800Sstevel@tonic-gate  * the area where hat_page_demote() may be active.
34810Sstevel@tonic-gate  *
34820Sstevel@tonic-gate  * This mechanism is only used for file system pages where it's not always
34830Sstevel@tonic-gate  * possible to get EXCL locks on all constituent pages to demote the size code
34840Sstevel@tonic-gate  * (as is done for anonymous or kernel large pages).
34850Sstevel@tonic-gate  */
34860Sstevel@tonic-gate void
34870Sstevel@tonic-gate hat_page_demote(page_t *pp)
34880Sstevel@tonic-gate {
34890Sstevel@tonic-gate 	uint_t		pszc;
34900Sstevel@tonic-gate 	uint_t		rszc;
34910Sstevel@tonic-gate 	uint_t		szc;
34920Sstevel@tonic-gate 	page_t		*rootpp;
34930Sstevel@tonic-gate 	page_t		*firstpp;
34940Sstevel@tonic-gate 	page_t		*lastpp;
34950Sstevel@tonic-gate 	pgcnt_t		pgcnt;
34960Sstevel@tonic-gate 
34970Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp));
34980Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
34990Sstevel@tonic-gate 	ASSERT(page_szc_lock_assert(pp));
35000Sstevel@tonic-gate 
35010Sstevel@tonic-gate 	if (pp->p_szc == 0)
35020Sstevel@tonic-gate 		return;
35030Sstevel@tonic-gate 
35040Sstevel@tonic-gate 	rootpp = PP_GROUPLEADER(pp, 1);
35050Sstevel@tonic-gate 	(void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
35060Sstevel@tonic-gate 
35070Sstevel@tonic-gate 	/*
35080Sstevel@tonic-gate 	 * All large mappings to pp are gone
35090Sstevel@tonic-gate 	 * and no new ones can be set up since pp is locked exclusively.
35100Sstevel@tonic-gate 	 *
35110Sstevel@tonic-gate 	 * Lock the root to make sure there's only one hat_page_demote()
35120Sstevel@tonic-gate 	 * outstanding within the area of this root's pszc.
35130Sstevel@tonic-gate 	 *
35140Sstevel@tonic-gate 	 * Second potential hat_page_demote() is already eliminated by upper
35150Sstevel@tonic-gate 	 * VM layer via page_szc_lock() but we don't rely on it and use our
35160Sstevel@tonic-gate 	 * own locking (so that upper layer locking can be changed without
35170Sstevel@tonic-gate 	 * assumptions that hat depends on upper layer VM to prevent multiple
35180Sstevel@tonic-gate 	 * hat_page_demote() to be issued simultaneously to the same large
35190Sstevel@tonic-gate 	 * page).
35200Sstevel@tonic-gate 	 */
35210Sstevel@tonic-gate again:
35220Sstevel@tonic-gate 	pszc = pp->p_szc;
35230Sstevel@tonic-gate 	if (pszc == 0)
35240Sstevel@tonic-gate 		return;
35250Sstevel@tonic-gate 	rootpp = PP_GROUPLEADER(pp, pszc);
35260Sstevel@tonic-gate 	x86_hm_enter(rootpp);
35270Sstevel@tonic-gate 	/*
35280Sstevel@tonic-gate 	 * If root's p_szc is different from pszc we raced with another
35290Sstevel@tonic-gate 	 * hat_page_demote().  Drop the lock and try to find the root again.
35300Sstevel@tonic-gate 	 * If root's p_szc is greater than pszc previous hat_page_demote() is
35310Sstevel@tonic-gate 	 * not done yet.  Take and release mlist lock of root's root to wait
35320Sstevel@tonic-gate 	 * for previous hat_page_demote() to complete.
35330Sstevel@tonic-gate 	 */
35340Sstevel@tonic-gate 	if ((rszc = rootpp->p_szc) != pszc) {
35350Sstevel@tonic-gate 		x86_hm_exit(rootpp);
35360Sstevel@tonic-gate 		if (rszc > pszc) {
35370Sstevel@tonic-gate 			/* p_szc of a locked non free page can't increase */
35380Sstevel@tonic-gate 			ASSERT(pp != rootpp);
35390Sstevel@tonic-gate 
35400Sstevel@tonic-gate 			rootpp = PP_GROUPLEADER(rootpp, rszc);
35410Sstevel@tonic-gate 			x86_hm_enter(rootpp);
35420Sstevel@tonic-gate 			x86_hm_exit(rootpp);
35430Sstevel@tonic-gate 		}
35440Sstevel@tonic-gate 		goto again;
35450Sstevel@tonic-gate 	}
35460Sstevel@tonic-gate 	ASSERT(pp->p_szc == pszc);
35470Sstevel@tonic-gate 
35480Sstevel@tonic-gate 	/*
35490Sstevel@tonic-gate 	 * Decrement by 1 p_szc of every constituent page of a region that
35500Sstevel@tonic-gate 	 * covered pp. For example if original szc is 3 it gets changed to 2
35510Sstevel@tonic-gate 	 * everywhere except in region 2 that covered pp. Region 2 that
35520Sstevel@tonic-gate 	 * covered pp gets demoted to 1 everywhere except in region 1 that
35530Sstevel@tonic-gate 	 * covered pp. The region 1 that covered pp is demoted to region
35540Sstevel@tonic-gate 	 * 0. It's done this way because from region 3 we removed level 3
35550Sstevel@tonic-gate 	 * mappings, from region 2 that covered pp we removed level 2 mappings
35560Sstevel@tonic-gate 	 * and from region 1 that covered pp we removed level 1 mappings.  All
35570Sstevel@tonic-gate 	 * changes are done from high pfns to low pfns so that roots
35580Sstevel@tonic-gate 	 * are changed last, allowing one to know the largest region where
35590Sstevel@tonic-gate 	 * hat_page_demote() is still active by only looking at the root page.
35600Sstevel@tonic-gate 	 *
35610Sstevel@tonic-gate 	 * This algorithm is implemented in 2 while loops. First loop changes
35620Sstevel@tonic-gate 	 * p_szc of pages to the right of pp's level 1 region and second
35630Sstevel@tonic-gate 	 * loop changes p_szc of pages of level 1 region that covers pp
35640Sstevel@tonic-gate 	 * and all pages to the left of level 1 region that covers pp.
35650Sstevel@tonic-gate 	 * In the first loop p_szc keeps dropping with every iteration
35660Sstevel@tonic-gate 	 * and in the second loop it keeps increasing with every iteration.
35670Sstevel@tonic-gate 	 *
35680Sstevel@tonic-gate 	 * First loop description: Demote pages to the right of pp outside of
35690Sstevel@tonic-gate 	 * level 1 region that covers pp.  In every iteration of the while
35700Sstevel@tonic-gate 	 * loop below find the last page of szc region and the first page of
35710Sstevel@tonic-gate 	 * (szc - 1) region that is immediately to the right of (szc - 1)
35720Sstevel@tonic-gate 	 * region that covers pp.  From last such page to first such page
35730Sstevel@tonic-gate 	 * change every page's szc to szc - 1. Decrement szc and continue
35740Sstevel@tonic-gate 	 * looping until szc is 1. If pp belongs to the last (szc - 1) region
35750Sstevel@tonic-gate 	 * of szc region skip to the next iteration.
35760Sstevel@tonic-gate 	 */
35770Sstevel@tonic-gate 	szc = pszc;
35780Sstevel@tonic-gate 	while (szc > 1) {
35790Sstevel@tonic-gate 		lastpp = PP_GROUPLEADER(pp, szc);
35800Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(szc);
35810Sstevel@tonic-gate 		lastpp += pgcnt - 1;
35820Sstevel@tonic-gate 		firstpp = PP_GROUPLEADER(pp, (szc - 1));
35830Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(szc - 1);
35840Sstevel@tonic-gate 		if (lastpp - firstpp < pgcnt) {
35850Sstevel@tonic-gate 			szc--;
35860Sstevel@tonic-gate 			continue;
35870Sstevel@tonic-gate 		}
35880Sstevel@tonic-gate 		firstpp += pgcnt;
35890Sstevel@tonic-gate 		while (lastpp != firstpp) {
35900Sstevel@tonic-gate 			ASSERT(lastpp->p_szc == pszc);
35910Sstevel@tonic-gate 			lastpp->p_szc = szc - 1;
35920Sstevel@tonic-gate 			lastpp--;
35930Sstevel@tonic-gate 		}
35940Sstevel@tonic-gate 		firstpp->p_szc = szc - 1;
35950Sstevel@tonic-gate 		szc--;
35960Sstevel@tonic-gate 	}
35970Sstevel@tonic-gate 
35980Sstevel@tonic-gate 	/*
35990Sstevel@tonic-gate 	 * Second loop description:
36000Sstevel@tonic-gate 	 * First iteration changes p_szc to 0 of every
36010Sstevel@tonic-gate 	 * page of level 1 region that covers pp.
36020Sstevel@tonic-gate 	 * Subsequent iterations find last page of szc region
36030Sstevel@tonic-gate 	 * immediately to the left of szc region that covered pp
36040Sstevel@tonic-gate 	 * and first page of (szc + 1) region that covers pp.
36050Sstevel@tonic-gate 	 * From last to first page change p_szc of every page to szc.
36060Sstevel@tonic-gate 	 * Increment szc and continue looping until szc is pszc.
36070Sstevel@tonic-gate 	 * If pp belongs to the first szc region of the (szc + 1) region,
36080Sstevel@tonic-gate 	 * skip to the next iteration.
36090Sstevel@tonic-gate 	 *
36100Sstevel@tonic-gate 	 */
36110Sstevel@tonic-gate 	szc = 0;
36120Sstevel@tonic-gate 	while (szc < pszc) {
36130Sstevel@tonic-gate 		firstpp = PP_GROUPLEADER(pp, (szc + 1));
36140Sstevel@tonic-gate 		if (szc == 0) {
36150Sstevel@tonic-gate 			pgcnt = page_get_pagecnt(1);
36160Sstevel@tonic-gate 			lastpp = firstpp + (pgcnt - 1);
36170Sstevel@tonic-gate 		} else {
36180Sstevel@tonic-gate 			lastpp = PP_GROUPLEADER(pp, szc);
36190Sstevel@tonic-gate 			if (firstpp == lastpp) {
36200Sstevel@tonic-gate 				szc++;
36210Sstevel@tonic-gate 				continue;
36220Sstevel@tonic-gate 			}
36230Sstevel@tonic-gate 			lastpp--;
36240Sstevel@tonic-gate 			pgcnt = page_get_pagecnt(szc);
36250Sstevel@tonic-gate 		}
36260Sstevel@tonic-gate 		while (lastpp != firstpp) {
36270Sstevel@tonic-gate 			ASSERT(lastpp->p_szc == pszc);
36280Sstevel@tonic-gate 			lastpp->p_szc = szc;
36290Sstevel@tonic-gate 			lastpp--;
36300Sstevel@tonic-gate 		}
36310Sstevel@tonic-gate 		firstpp->p_szc = szc;
36320Sstevel@tonic-gate 		if (firstpp == rootpp)
36330Sstevel@tonic-gate 			break;
36340Sstevel@tonic-gate 		szc++;
36350Sstevel@tonic-gate 	}
36360Sstevel@tonic-gate 	x86_hm_exit(rootpp);
36370Sstevel@tonic-gate }
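
/*
 * Editorial worked example (not in the original source): demoting a
 * page whose p_szc is 2, with 512 pages per level.  The first loop
 * runs for szc == 2 only and drops p_szc to 1 for every constituent
 * page of the szc 2 region that lies to the right of pp's szc 1
 * region.  The second loop then runs szc = 0, 1: it first drops pp's
 * own szc 1 region to 0, then drops the pages to the left of that
 * region to 1, finishing with the root page so that rootpp->p_szc is
 * always the last field to change.
 */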
36380Sstevel@tonic-gate 
36390Sstevel@tonic-gate  * Get hw stats from hardware into the page struct and reset hw stats.
36400Sstevel@tonic-gate  * Returns attributes of the page.
36410Sstevel@tonic-gate  * returns attributes of page
36420Sstevel@tonic-gate  * Flags for hat_pagesync, hat_getstat, hat_sync
36430Sstevel@tonic-gate  *
36440Sstevel@tonic-gate  * define	HAT_SYNC_ZERORM		0x01
36450Sstevel@tonic-gate  *
36460Sstevel@tonic-gate  * Additional flags for hat_pagesync
36470Sstevel@tonic-gate  *
36480Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_REF	0x02
36490Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_MOD	0x04
36500Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_RM	0x06
36510Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_SHARED	0x08
36520Sstevel@tonic-gate  */
36530Sstevel@tonic-gate uint_t
36540Sstevel@tonic-gate hat_pagesync(struct page *pp, uint_t flags)
36550Sstevel@tonic-gate {
36560Sstevel@tonic-gate 	hment_t		*hm = NULL;
36570Sstevel@tonic-gate 	htable_t	*ht;
36580Sstevel@tonic-gate 	uint_t		entry;
36590Sstevel@tonic-gate 	x86pte_t	old, save_old;
36600Sstevel@tonic-gate 	x86pte_t	new;
36610Sstevel@tonic-gate 	uchar_t		nrmbits = P_REF|P_MOD|P_RO;
36620Sstevel@tonic-gate 	extern ulong_t	po_share;
36630Sstevel@tonic-gate 	page_t		*save_pp = pp;
36640Sstevel@tonic-gate 	uint_t		pszc = 0;
36650Sstevel@tonic-gate 
36660Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(pp) || panicstr);
36670Sstevel@tonic-gate 
36680Sstevel@tonic-gate 	if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
36690Sstevel@tonic-gate 		return (pp->p_nrm & nrmbits);
36700Sstevel@tonic-gate 
36710Sstevel@tonic-gate 	if ((flags & HAT_SYNC_ZERORM) == 0) {
36720Sstevel@tonic-gate 
36730Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
36740Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
36750Sstevel@tonic-gate 
36760Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
36770Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
36780Sstevel@tonic-gate 
36790Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
36800Sstevel@tonic-gate 		    hat_page_getshare(pp) > po_share) {
36810Sstevel@tonic-gate 			if (PP_ISRO(pp))
36820Sstevel@tonic-gate 				PP_SETREF(pp);
36830Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
36840Sstevel@tonic-gate 		}
36850Sstevel@tonic-gate 	}
36860Sstevel@tonic-gate 
36875084Sjohnlev 	XPV_DISALLOW_MIGRATE();
36880Sstevel@tonic-gate next_size:
36890Sstevel@tonic-gate 	/*
36900Sstevel@tonic-gate 	 * walk thru the mapping list syncing (and clearing) ref/mod bits.
36910Sstevel@tonic-gate 	 */
36920Sstevel@tonic-gate 	x86_hm_enter(pp);
36930Sstevel@tonic-gate 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
36940Sstevel@tonic-gate 		if (ht->ht_level < pszc)
36950Sstevel@tonic-gate 			continue;
36960Sstevel@tonic-gate 		old = x86pte_get(ht, entry);
36970Sstevel@tonic-gate try_again:
36980Sstevel@tonic-gate 
36990Sstevel@tonic-gate 		ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
37000Sstevel@tonic-gate 
37010Sstevel@tonic-gate 		if (PTE_GET(old, PT_REF | PT_MOD) == 0)
37020Sstevel@tonic-gate 			continue;
37030Sstevel@tonic-gate 
37040Sstevel@tonic-gate 		save_old = old;
37050Sstevel@tonic-gate 		if ((flags & HAT_SYNC_ZERORM) != 0) {
37060Sstevel@tonic-gate 
37070Sstevel@tonic-gate 			/*
37080Sstevel@tonic-gate 			 * Need to clear ref or mod bits. Need to demap
37090Sstevel@tonic-gate 			 * to make sure any executing TLBs see cleared bits.
37100Sstevel@tonic-gate 			 */
37110Sstevel@tonic-gate 			new = old;
37120Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD);
37130Sstevel@tonic-gate 			old = hati_update_pte(ht, entry, old, new);
37140Sstevel@tonic-gate 			if (old != 0)
37150Sstevel@tonic-gate 				goto try_again;
37160Sstevel@tonic-gate 
37170Sstevel@tonic-gate 			old = save_old;
37180Sstevel@tonic-gate 		}
37190Sstevel@tonic-gate 
37200Sstevel@tonic-gate 		/*
37210Sstevel@tonic-gate 		 * Sync the PTE
37220Sstevel@tonic-gate 		 */
37233446Smrj 		if (!(flags & HAT_SYNC_ZERORM) &&
37243446Smrj 		    PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
37250Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, old, ht->ht_level);
37260Sstevel@tonic-gate 
37270Sstevel@tonic-gate 		/*
37280Sstevel@tonic-gate 		 * can stop short if we found a ref'd or mod'd page
37290Sstevel@tonic-gate 		 */
37300Sstevel@tonic-gate 		if (((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
37310Sstevel@tonic-gate 		    ((flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp))) {
37320Sstevel@tonic-gate 			x86_hm_exit(pp);
37333446Smrj 			goto done;
37340Sstevel@tonic-gate 		}
37350Sstevel@tonic-gate 	}
37360Sstevel@tonic-gate 	x86_hm_exit(pp);
37370Sstevel@tonic-gate 	while (pszc < pp->p_szc) {
37380Sstevel@tonic-gate 		page_t *tpp;
37390Sstevel@tonic-gate 		pszc++;
37400Sstevel@tonic-gate 		tpp = PP_GROUPLEADER(pp, pszc);
37410Sstevel@tonic-gate 		if (pp != tpp) {
37420Sstevel@tonic-gate 			pp = tpp;
37430Sstevel@tonic-gate 			goto next_size;
37440Sstevel@tonic-gate 		}
37450Sstevel@tonic-gate 	}
37463446Smrj done:
37475084Sjohnlev 	XPV_ALLOW_MIGRATE();
37480Sstevel@tonic-gate 	return (save_pp->p_nrm & nrmbits);
37490Sstevel@tonic-gate }
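
/*
 * Editorial example, not part of the original source: a hedged sketch of
 * a pageout-style caller using hat_pagesync() with a STOPON flag.  The
 * function name and caller context are hypothetical; hat_pagesync(), the
 * HAT_SYNC_* flags, and the P_MOD bit are from this file and <vm/hat.h>.
 */
static int
example_page_is_dirty(page_t *pp)
{
	uint_t nrm;

	ASSERT(PAGE_LOCKED(pp));

	/*
	 * Stop walking the mapping lists as soon as a modified mapping
	 * is found; ref/mod state is collapsed into the page's p_nrm.
	 */
	nrm = hat_pagesync(pp, HAT_SYNC_STOPON_MOD);
	return ((nrm & P_MOD) != 0);
}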
37500Sstevel@tonic-gate 
37510Sstevel@tonic-gate /*
37520Sstevel@tonic-gate  * Returns the approximate number of mappings to this pp.  A return of 0
37530Sstevel@tonic-gate  * implies there are no mappings to the page.
37540Sstevel@tonic-gate  */
37550Sstevel@tonic-gate ulong_t
37560Sstevel@tonic-gate hat_page_getshare(page_t *pp)
37570Sstevel@tonic-gate {
37580Sstevel@tonic-gate 	uint_t cnt;
37590Sstevel@tonic-gate 	cnt = hment_mapcnt(pp);
37601841Spraks #if defined(__amd64)
37611841Spraks 	if (vpm_enable && pp->p_vpmref) {
37621841Spraks 		cnt += 1;
37631841Spraks 	}
37641841Spraks #endif
37650Sstevel@tonic-gate 	return (cnt);
37660Sstevel@tonic-gate }
37670Sstevel@tonic-gate 
37680Sstevel@tonic-gate /*
37694528Spaulsan  * Return 1 if the number of mappings exceeds sh_thresh; return 0
37704528Spaulsan  * otherwise.
37714528Spaulsan  */
37724528Spaulsan int
37734528Spaulsan hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
37744528Spaulsan {
37754528Spaulsan 	return (hat_page_getshare(pp) > sh_thresh);
37764528Spaulsan }
37774528Spaulsan 
37784528Spaulsan /*
37790Sstevel@tonic-gate  * hat_softlock isn't supported anymore
37800Sstevel@tonic-gate  */
37810Sstevel@tonic-gate /*ARGSUSED*/
37820Sstevel@tonic-gate faultcode_t
37830Sstevel@tonic-gate hat_softlock(
37840Sstevel@tonic-gate 	hat_t *hat,
37850Sstevel@tonic-gate 	caddr_t addr,
37860Sstevel@tonic-gate 	size_t *len,
37870Sstevel@tonic-gate 	struct page **page_array,
37880Sstevel@tonic-gate 	uint_t flags)
37890Sstevel@tonic-gate {
37900Sstevel@tonic-gate 	return (FC_NOSUPPORT);
37910Sstevel@tonic-gate }
37920Sstevel@tonic-gate 
37930Sstevel@tonic-gate 
37940Sstevel@tonic-gate 
37950Sstevel@tonic-gate /*
37960Sstevel@tonic-gate  * Routine to expose supported HAT features to platform independent code.
37970Sstevel@tonic-gate  */
37980Sstevel@tonic-gate /*ARGSUSED*/
37990Sstevel@tonic-gate int
38000Sstevel@tonic-gate hat_supported(enum hat_features feature, void *arg)
38010Sstevel@tonic-gate {
38020Sstevel@tonic-gate 	switch (feature) {
38030Sstevel@tonic-gate 
38040Sstevel@tonic-gate 	case HAT_SHARED_PT:	/* this is really ISM */
38050Sstevel@tonic-gate 		return (1);
38060Sstevel@tonic-gate 
38070Sstevel@tonic-gate 	case HAT_DYNAMIC_ISM_UNMAP:
38080Sstevel@tonic-gate 		return (0);
38090Sstevel@tonic-gate 
38100Sstevel@tonic-gate 	case HAT_VMODSORT:
38110Sstevel@tonic-gate 		return (1);
38120Sstevel@tonic-gate 
38134528Spaulsan 	case HAT_SHARED_REGIONS:
38144528Spaulsan 		return (0);
38154528Spaulsan 
38160Sstevel@tonic-gate 	default:
38170Sstevel@tonic-gate 		panic("hat_supported() - unknown feature");
38180Sstevel@tonic-gate 	}
38190Sstevel@tonic-gate 	return (0);
38200Sstevel@tonic-gate }
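
/*
 * Editorial example, not from this file: platform independent code is
 * expected to probe features before relying on them, e.g. the ISM code
 * checking for shared page table support.  The wrapper is hypothetical.
 */
static int
example_can_share_pagetables(void)
{
	/* hat_supported() is ARGSUSED on x86; NULL is fine for arg */
	return (hat_supported(HAT_SHARED_PT, NULL));
}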
38210Sstevel@tonic-gate 
38220Sstevel@tonic-gate /*
38230Sstevel@tonic-gate  * Called when a thread is exiting and has been switched to the kernel AS
38240Sstevel@tonic-gate  */
38250Sstevel@tonic-gate void
38260Sstevel@tonic-gate hat_thread_exit(kthread_t *thd)
38270Sstevel@tonic-gate {
38280Sstevel@tonic-gate 	ASSERT(thd->t_procp->p_as == &kas);
38295084Sjohnlev 	XPV_DISALLOW_MIGRATE();
38300Sstevel@tonic-gate 	hat_switch(thd->t_procp->p_as->a_hat);
38315084Sjohnlev 	XPV_ALLOW_MIGRATE();
38320Sstevel@tonic-gate }
38330Sstevel@tonic-gate 
38340Sstevel@tonic-gate /*
38350Sstevel@tonic-gate  * Set up the given brand-new hat structure as the new HAT on this CPU's MMU.
38360Sstevel@tonic-gate  */
38370Sstevel@tonic-gate /*ARGSUSED*/
38380Sstevel@tonic-gate void
38390Sstevel@tonic-gate hat_setup(hat_t *hat, int flags)
38400Sstevel@tonic-gate {
38415084Sjohnlev 	XPV_DISALLOW_MIGRATE();
38420Sstevel@tonic-gate 	kpreempt_disable();
38430Sstevel@tonic-gate 
38440Sstevel@tonic-gate 	hat_switch(hat);
38450Sstevel@tonic-gate 
38460Sstevel@tonic-gate 	kpreempt_enable();
38475084Sjohnlev 	XPV_ALLOW_MIGRATE();
38480Sstevel@tonic-gate }
38490Sstevel@tonic-gate 
38500Sstevel@tonic-gate /*
38510Sstevel@tonic-gate  * Prepare for a CPU private mapping for the given address.
38520Sstevel@tonic-gate  *
38530Sstevel@tonic-gate  * The address can only be used from a single CPU and can be remapped
38540Sstevel@tonic-gate  * using hat_mempte_remap().  Return the physical address of the PTE.
38550Sstevel@tonic-gate  *
38560Sstevel@tonic-gate  * We do the htable_create() if necessary and increment the valid count so
38570Sstevel@tonic-gate  * the htable can't disappear.  We also hat_devload() the page table into
38580Sstevel@tonic-gate  * kernel so that the PTE is quickly accessed.
38590Sstevel@tonic-gate  */
38603446Smrj hat_mempte_t
38613446Smrj hat_mempte_setup(caddr_t addr)
38620Sstevel@tonic-gate {
38630Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
38640Sstevel@tonic-gate 	htable_t	*ht;
38650Sstevel@tonic-gate 	uint_t		entry;
38660Sstevel@tonic-gate 	x86pte_t	oldpte;
38673446Smrj 	hat_mempte_t	p;
38680Sstevel@tonic-gate 
38690Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
38700Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
38714004Sjosephb 	++curthread->t_hatdepth;
38725741Smrj 	XPV_DISALLOW_MIGRATE();
38730Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
38740Sstevel@tonic-gate 	if (ht == NULL) {
38750Sstevel@tonic-gate 		ht = htable_create(kas.a_hat, va, 0, NULL);
38760Sstevel@tonic-gate 		entry = htable_va2entry(va, ht);
38770Sstevel@tonic-gate 		ASSERT(ht->ht_level == 0);
38780Sstevel@tonic-gate 		oldpte = x86pte_get(ht, entry);
38790Sstevel@tonic-gate 	}
38800Sstevel@tonic-gate 	if (PTE_ISVALID(oldpte))
38810Sstevel@tonic-gate 		panic("hat_mempte_setup(): address already mapped "
38827240Srh87107 		    "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
38830Sstevel@tonic-gate 
38840Sstevel@tonic-gate 	/*
38850Sstevel@tonic-gate 	 * increment ht_valid_cnt so that the pagetable can't disappear
38860Sstevel@tonic-gate 	 */
38870Sstevel@tonic-gate 	HTABLE_INC(ht->ht_valid_cnt);
38880Sstevel@tonic-gate 
38890Sstevel@tonic-gate 	/*
38903446Smrj 	 * return the PTE physical address to the caller.
38910Sstevel@tonic-gate 	 */
38920Sstevel@tonic-gate 	htable_release(ht);
38935741Smrj 	XPV_ALLOW_MIGRATE();
38943446Smrj 	p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
38954004Sjosephb 	--curthread->t_hatdepth;
38963446Smrj 	return (p);
38970Sstevel@tonic-gate }
38980Sstevel@tonic-gate 
38990Sstevel@tonic-gate /*
39000Sstevel@tonic-gate  * Release a CPU private mapping for the given address.
39010Sstevel@tonic-gate  * We decrement the htable valid count so it might be destroyed.
39020Sstevel@tonic-gate  */
39033446Smrj /*ARGSUSED1*/
39040Sstevel@tonic-gate void
39053446Smrj hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
39060Sstevel@tonic-gate {
39070Sstevel@tonic-gate 	htable_t	*ht;
39080Sstevel@tonic-gate 
39095741Smrj 	XPV_DISALLOW_MIGRATE();
39100Sstevel@tonic-gate 	/*
39113446Smrj 	 * Invalidate any leftover mapping and decrement the htable valid count.
39120Sstevel@tonic-gate 	 */
39135084Sjohnlev #ifdef __xpv
39145084Sjohnlev 	if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
39155084Sjohnlev 	    UVMF_INVLPG | UVMF_LOCAL))
39165084Sjohnlev 		panic("HYPERVISOR_update_va_mapping() failed");
39175084Sjohnlev #else
39183446Smrj 	{
39193446Smrj 		x86pte_t *pteptr;
39203446Smrj 
39213446Smrj 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
39223446Smrj 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
39233446Smrj 		if (mmu.pae_hat)
39243446Smrj 			*pteptr = 0;
39253446Smrj 		else
39263446Smrj 			*(x86pte32_t *)pteptr = 0;
39273446Smrj 		mmu_tlbflush_entry(addr);
39283446Smrj 		x86pte_mapout();
39293446Smrj 	}
39305084Sjohnlev #endif
39313446Smrj 
39320Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
39330Sstevel@tonic-gate 	if (ht == NULL)
39340Sstevel@tonic-gate 		panic("hat_mempte_release(): invalid address");
39350Sstevel@tonic-gate 	ASSERT(ht->ht_level == 0);
39360Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
39370Sstevel@tonic-gate 	htable_release(ht);
39385741Smrj 	XPV_ALLOW_MIGRATE();
39390Sstevel@tonic-gate }
39400Sstevel@tonic-gate 
39410Sstevel@tonic-gate /*
39420Sstevel@tonic-gate  * Apply a temporary CPU private mapping to a page.  We flush the TLB only
39430Sstevel@tonic-gate  * on this CPU, so this must be called with preemption disabled.
39440Sstevel@tonic-gate  */
39450Sstevel@tonic-gate void
39460Sstevel@tonic-gate hat_mempte_remap(
39473446Smrj 	pfn_t		pfn,
39483446Smrj 	caddr_t		addr,
39493446Smrj 	hat_mempte_t	pte_pa,
39503446Smrj 	uint_t		attr,
39513446Smrj 	uint_t		flags)
39520Sstevel@tonic-gate {
39530Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
39540Sstevel@tonic-gate 	x86pte_t	pte;
39550Sstevel@tonic-gate 
39560Sstevel@tonic-gate 	/*
39570Sstevel@tonic-gate 	 * Remap the given PTE to the new page's PFN. Invalidate only
39580Sstevel@tonic-gate 	 * on this CPU.
39590Sstevel@tonic-gate 	 */
39600Sstevel@tonic-gate #ifdef DEBUG
39610Sstevel@tonic-gate 	htable_t	*ht;
39620Sstevel@tonic-gate 	uint_t		entry;
39630Sstevel@tonic-gate 
39640Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
39650Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
39660Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
39670Sstevel@tonic-gate 	ASSERT(ht != NULL);
39680Sstevel@tonic-gate 	ASSERT(ht->ht_level == 0);
39690Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
39703446Smrj 	ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
39710Sstevel@tonic-gate 	htable_release(ht);
39720Sstevel@tonic-gate #endif
39735084Sjohnlev 	XPV_DISALLOW_MIGRATE();
39740Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, 0, flags);
39755084Sjohnlev #ifdef __xpv
39765084Sjohnlev 	if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
39775084Sjohnlev 		panic("HYPERVISOR_update_va_mapping() failed");
39785084Sjohnlev #else
39793446Smrj 	{
39803446Smrj 		x86pte_t *pteptr;
39813446Smrj 
39823446Smrj 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
39833446Smrj 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
39843446Smrj 		if (mmu.pae_hat)
39853446Smrj 			*(x86pte_t *)pteptr = pte;
39863446Smrj 		else
39873446Smrj 			*(x86pte32_t *)pteptr = (x86pte32_t)pte;
39883446Smrj 		mmu_tlbflush_entry(addr);
39893446Smrj 		x86pte_mapout();
39903446Smrj 	}
39915084Sjohnlev #endif
39925084Sjohnlev 	XPV_ALLOW_MIGRATE();
39930Sstevel@tonic-gate }
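
/*
 * Editorial sketch, not part of the original source: the intended
 * lifecycle of the hat_mempte_*() interfaces above.  A CPU private VA is
 * bound to a PTE once via hat_mempte_setup(), retargeted cheaply with
 * hat_mempte_remap() (local TLB invalidate only, hence the preemption
 * guard), and torn down with hat_mempte_release().  The function and
 * parameter names are hypothetical.
 */
static void
example_mempte_copy_page(caddr_t priv_va, pfn_t pfn, char *buf)
{
	hat_mempte_t pte_pa;

	/* priv_va must be a page-aligned kernel VA outside the VA hole */
	pte_pa = hat_mempte_setup(priv_va);

	kpreempt_disable();
	hat_mempte_remap(pfn, priv_va, pte_pa,
	    PROT_READ | HAT_STORECACHING_OK, 0);
	bcopy(priv_va, buf, MMU_PAGESIZE);
	kpreempt_enable();

	hat_mempte_release(priv_va, pte_pa);
}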
39940Sstevel@tonic-gate 
39950Sstevel@tonic-gate 
39960Sstevel@tonic-gate 
39970Sstevel@tonic-gate /*
39980Sstevel@tonic-gate  * Hat locking functions
39990Sstevel@tonic-gate  * XXX - these two functions are currently only used by hatstats;
40000Sstevel@tonic-gate  * 	they could be removed by switching hatstats to a per-as mutex.
40010Sstevel@tonic-gate  */
40020Sstevel@tonic-gate void
40030Sstevel@tonic-gate hat_enter(hat_t *hat)
40040Sstevel@tonic-gate {
40050Sstevel@tonic-gate 	mutex_enter(&hat->hat_mutex);
40060Sstevel@tonic-gate }
40070Sstevel@tonic-gate 
40080Sstevel@tonic-gate void
40090Sstevel@tonic-gate hat_exit(hat_t *hat)
40100Sstevel@tonic-gate {
40110Sstevel@tonic-gate 	mutex_exit(&hat->hat_mutex);
40120Sstevel@tonic-gate }
40130Sstevel@tonic-gate 
40140Sstevel@tonic-gate /*
40153446Smrj  * HAT part of cpu initialization.
40160Sstevel@tonic-gate  */
40170Sstevel@tonic-gate void
40180Sstevel@tonic-gate hat_cpu_online(struct cpu *cpup)
40190Sstevel@tonic-gate {
40200Sstevel@tonic-gate 	if (cpup != CPU) {
40213446Smrj 		x86pte_cpu_init(cpup);
40220Sstevel@tonic-gate 		hat_vlp_setup(cpup);
40230Sstevel@tonic-gate 	}
40240Sstevel@tonic-gate 	CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
40250Sstevel@tonic-gate }
40260Sstevel@tonic-gate 
40270Sstevel@tonic-gate /*
40283446Smrj  * HAT part of cpu deletion.
40293446Smrj  * (Currently, we only call this after the cpu has been safely passivated.)
40303446Smrj  */
40313446Smrj void
40323446Smrj hat_cpu_offline(struct cpu *cpup)
40333446Smrj {
40343446Smrj 	ASSERT(cpup != CPU);
40353446Smrj 
40363446Smrj 	CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
40373446Smrj 	x86pte_cpu_fini(cpup);
40383446Smrj 	hat_vlp_teardown(cpup);
40393446Smrj }
40403446Smrj 
40413446Smrj /*
40420Sstevel@tonic-gate  * Function called after all CPUs are brought online.
40430Sstevel@tonic-gate  * Used to remove low address boot mappings.
40440Sstevel@tonic-gate  */
40450Sstevel@tonic-gate void
40460Sstevel@tonic-gate clear_boot_mappings(uintptr_t low, uintptr_t high)
40470Sstevel@tonic-gate {
40480Sstevel@tonic-gate 	uintptr_t vaddr = low;
40490Sstevel@tonic-gate 	htable_t *ht = NULL;
40500Sstevel@tonic-gate 	level_t level;
40510Sstevel@tonic-gate 	uint_t entry;
40520Sstevel@tonic-gate 	x86pte_t pte;
40530Sstevel@tonic-gate 
40540Sstevel@tonic-gate 	/*
40550Sstevel@tonic-gate 	 * On the 1st CPU we can unload the prom mappings; basically, we blow
40563446Smrj 	 * away all virtual mappings under _userlimit.
40570Sstevel@tonic-gate 	 */
40580Sstevel@tonic-gate 	while (vaddr < high) {
40590Sstevel@tonic-gate 		pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
40600Sstevel@tonic-gate 		if (ht == NULL)
40610Sstevel@tonic-gate 			break;
40620Sstevel@tonic-gate 
40630Sstevel@tonic-gate 		level = ht->ht_level;
40640Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
40650Sstevel@tonic-gate 		ASSERT(level <= mmu.max_page_level);
40660Sstevel@tonic-gate 		ASSERT(PTE_ISPAGE(pte, level));
40670Sstevel@tonic-gate 
40680Sstevel@tonic-gate 		/*
40690Sstevel@tonic-gate 		 * Unload the mapping from the page tables.
40700Sstevel@tonic-gate 		 */
40713446Smrj 		(void) x86pte_inval(ht, entry, 0, NULL);
40720Sstevel@tonic-gate 		ASSERT(ht->ht_valid_cnt > 0);
40730Sstevel@tonic-gate 		HTABLE_DEC(ht->ht_valid_cnt);
40740Sstevel@tonic-gate 		PGCNT_DEC(ht->ht_hat, ht->ht_level);
40750Sstevel@tonic-gate 
40760Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
40770Sstevel@tonic-gate 	}
40780Sstevel@tonic-gate 	if (ht)
40790Sstevel@tonic-gate 		htable_release(ht);
40800Sstevel@tonic-gate }
40810Sstevel@tonic-gate 
40820Sstevel@tonic-gate /*
40830Sstevel@tonic-gate  * Atomically update a new translation for a single page.  If the
40840Sstevel@tonic-gate  * currently installed PTE doesn't match the value we expect to find,
40850Sstevel@tonic-gate  * it's not updated and we return the PTE we found.
40860Sstevel@tonic-gate  *
40870Sstevel@tonic-gate  * If activating nosync or NOWRITE and the page was modified, we need to sync
40880Sstevel@tonic-gate  * with the page_t.  We also sync with the page_t if clearing ref/mod bits.
40890Sstevel@tonic-gate  */
40900Sstevel@tonic-gate static x86pte_t
40910Sstevel@tonic-gate hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
40920Sstevel@tonic-gate {
40930Sstevel@tonic-gate 	page_t		*pp;
40940Sstevel@tonic-gate 	uint_t		rm = 0;
40950Sstevel@tonic-gate 	x86pte_t	replaced;
40960Sstevel@tonic-gate 
40973446Smrj 	if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
40980Sstevel@tonic-gate 	    PTE_GET(expected, PT_MOD | PT_REF) &&
40990Sstevel@tonic-gate 	    (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
41004381Sjosephb 	    !PTE_GET(new, PT_MOD | PT_REF))) {
41010Sstevel@tonic-gate 
41023446Smrj 		ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
41030Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
41040Sstevel@tonic-gate 		ASSERT(pp != NULL);
41050Sstevel@tonic-gate 		if (PTE_GET(expected, PT_MOD))
41060Sstevel@tonic-gate 			rm |= P_MOD;
41070Sstevel@tonic-gate 		if (PTE_GET(expected, PT_REF))
41080Sstevel@tonic-gate 			rm |= P_REF;
41090Sstevel@tonic-gate 		PTE_CLR(new, PT_MOD | PT_REF);
41100Sstevel@tonic-gate 	}
41110Sstevel@tonic-gate 
41120Sstevel@tonic-gate 	replaced = x86pte_update(ht, entry, expected, new);
41130Sstevel@tonic-gate 	if (replaced != expected)
41140Sstevel@tonic-gate 		return (replaced);
41150Sstevel@tonic-gate 
41160Sstevel@tonic-gate 	if (rm) {
41170Sstevel@tonic-gate 		/*
41180Sstevel@tonic-gate 		 * sync to all constituent pages of a large page
41190Sstevel@tonic-gate 		 */
41200Sstevel@tonic-gate 		pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
41210Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
41220Sstevel@tonic-gate 		while (pgcnt-- > 0) {
41230Sstevel@tonic-gate 			/*
41240Sstevel@tonic-gate 			 * hat_page_demote() can't decrease
41250Sstevel@tonic-gate 			 * pszc below this mapping size
41260Sstevel@tonic-gate 			 * since large mapping existed after we
41270Sstevel@tonic-gate 			 * took mlist lock.
41280Sstevel@tonic-gate 			 */
41290Sstevel@tonic-gate 			ASSERT(pp->p_szc >= ht->ht_level);
41300Sstevel@tonic-gate 			hat_page_setattr(pp, rm);
41310Sstevel@tonic-gate 			++pp;
41320Sstevel@tonic-gate 		}
41330Sstevel@tonic-gate 	}
41340Sstevel@tonic-gate 
41350Sstevel@tonic-gate 	return (0);
41360Sstevel@tonic-gate }
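
/*
 * Editorial sketch restating the retry idiom used by hat_pagesync()
 * above; not new source.  hati_update_pte() returns 0 on success or the
 * PTE actually found when the compare-and-swap misses, so callers loop
 * with the returned value as the next 'expected'.
 */
static void
example_clear_refmod(htable_t *ht, uint_t entry)
{
	x86pte_t old, new;

	old = x86pte_get(ht, entry);
	for (;;) {
		new = old;
		PTE_CLR(new, PT_REF | PT_MOD);
		old = hati_update_pte(ht, entry, old, new);
		if (old == 0)
			break;	/* update succeeded */
		/* PTE changed underneath us; retry with what we found */
	}
}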
41370Sstevel@tonic-gate 
41384528Spaulsan /* ARGSUSED */
41394528Spaulsan void
41405075Spaulsan hat_join_srd(struct hat *hat, vnode_t *evp)
41414528Spaulsan {
41424528Spaulsan }
41434528Spaulsan 
41444528Spaulsan /* ARGSUSED */
41454528Spaulsan hat_region_cookie_t
41465075Spaulsan hat_join_region(struct hat *hat,
41474528Spaulsan     caddr_t r_saddr,
41484528Spaulsan     size_t r_size,
41494528Spaulsan     void *r_obj,
41504528Spaulsan     u_offset_t r_objoff,
41514528Spaulsan     uchar_t r_perm,
41524528Spaulsan     uchar_t r_pgszc,
41534528Spaulsan     hat_rgn_cb_func_t r_cb_function,
41544528Spaulsan     uint_t flags)
41554528Spaulsan {
41564528Spaulsan 	panic("No shared region support on x86");
41574528Spaulsan 	return (HAT_INVALID_REGION_COOKIE);
41584528Spaulsan }
41594528Spaulsan 
41604528Spaulsan /* ARGSUSED */
41614528Spaulsan void
41625075Spaulsan hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
41634528Spaulsan {
41644528Spaulsan 	panic("No shared region support on x86");
41654528Spaulsan }
41664528Spaulsan 
41674528Spaulsan /* ARGSUSED */
41684528Spaulsan void
41695075Spaulsan hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
41704528Spaulsan {
41714528Spaulsan 	panic("No shared region support on x86");
41724528Spaulsan }
41734528Spaulsan 
41744528Spaulsan 
41750Sstevel@tonic-gate /*
41760Sstevel@tonic-gate  * Kernel Physical Mapping (kpm) facility
41770Sstevel@tonic-gate  *
41780Sstevel@tonic-gate  * Most of the routines needed to support segkpm are almost no-ops on the
41790Sstevel@tonic-gate  * x86 platform.  We map in the entire segment when it is created and leave
41800Sstevel@tonic-gate  * it mapped in, so there is no additional work required to set up and tear
41810Sstevel@tonic-gate  * down individual mappings.  All of these routines were created to support
41820Sstevel@tonic-gate  * SPARC platforms that have to avoid aliasing in their virtually indexed
41830Sstevel@tonic-gate  * caches.
41840Sstevel@tonic-gate  *
41850Sstevel@tonic-gate  * Most of the routines have sanity checks in them (e.g. verifying that the
41860Sstevel@tonic-gate  * passed-in page is locked).  We don't actually care about most of these
41870Sstevel@tonic-gate  * checks on x86, but we leave them in place to identify problems in the
41880Sstevel@tonic-gate  * upper levels.
41890Sstevel@tonic-gate  */
41900Sstevel@tonic-gate 
41910Sstevel@tonic-gate /*
41920Sstevel@tonic-gate  * Map in a locked page and return the vaddr.
41930Sstevel@tonic-gate  */
41940Sstevel@tonic-gate /*ARGSUSED*/
41950Sstevel@tonic-gate caddr_t
41960Sstevel@tonic-gate hat_kpm_mapin(struct page *pp, struct kpme *kpme)
41970Sstevel@tonic-gate {
41980Sstevel@tonic-gate 	caddr_t		vaddr;
41990Sstevel@tonic-gate 
42000Sstevel@tonic-gate #ifdef DEBUG
42010Sstevel@tonic-gate 	if (kpm_enable == 0) {
42020Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
42030Sstevel@tonic-gate 		return ((caddr_t)NULL);
42040Sstevel@tonic-gate 	}
42050Sstevel@tonic-gate 
42060Sstevel@tonic-gate 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
42070Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
42080Sstevel@tonic-gate 		return ((caddr_t)NULL);
42090Sstevel@tonic-gate 	}
42100Sstevel@tonic-gate #endif
42110Sstevel@tonic-gate 
42120Sstevel@tonic-gate 	vaddr = hat_kpm_page2va(pp, 1);
42130Sstevel@tonic-gate 
42140Sstevel@tonic-gate 	return (vaddr);
42150Sstevel@tonic-gate }
42160Sstevel@tonic-gate 
42170Sstevel@tonic-gate /*
42180Sstevel@tonic-gate  * Mapout a locked page.
42190Sstevel@tonic-gate  */
42200Sstevel@tonic-gate /*ARGSUSED*/
42210Sstevel@tonic-gate void
42220Sstevel@tonic-gate hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
42230Sstevel@tonic-gate {
42240Sstevel@tonic-gate #ifdef DEBUG
42250Sstevel@tonic-gate 	if (kpm_enable == 0) {
42260Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
42270Sstevel@tonic-gate 		return;
42280Sstevel@tonic-gate 	}
42290Sstevel@tonic-gate 
42300Sstevel@tonic-gate 	if (IS_KPM_ADDR(vaddr) == 0) {
42310Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
42320Sstevel@tonic-gate 		return;
42330Sstevel@tonic-gate 	}
42340Sstevel@tonic-gate 
42350Sstevel@tonic-gate 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
42360Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
42370Sstevel@tonic-gate 		return;
42380Sstevel@tonic-gate 	}
42390Sstevel@tonic-gate #endif
42400Sstevel@tonic-gate }
42410Sstevel@tonic-gate 
42420Sstevel@tonic-gate /*
4243*9894SPavel.Tatashin@Sun.COM  * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
4244*9894SPavel.Tatashin@Sun.COM  * memory addresses that are not described by a page_t.  It can
4245*9894SPavel.Tatashin@Sun.COM  * also be used for normal pages that are not locked, but beware
4246*9894SPavel.Tatashin@Sun.COM  * this is dangerous - no locking is performed, so the identity of
4247*9894SPavel.Tatashin@Sun.COM  * the page could change.  hat_kpm_mapin_pfn is not supported when
4248*9894SPavel.Tatashin@Sun.COM  * vac_colors > 1, because the chosen va depends on the page identity,
4249*9894SPavel.Tatashin@Sun.COM  * which could change.
4250*9894SPavel.Tatashin@Sun.COM  * The caller must only pass pfns for valid physical addresses; violating
4251*9894SPavel.Tatashin@Sun.COM  * this rule will cause a panic.
4252*9894SPavel.Tatashin@Sun.COM  */
4253*9894SPavel.Tatashin@Sun.COM caddr_t
4254*9894SPavel.Tatashin@Sun.COM hat_kpm_mapin_pfn(pfn_t pfn)
4255*9894SPavel.Tatashin@Sun.COM {
4256*9894SPavel.Tatashin@Sun.COM 	uintptr_t paddr, vaddr;
4257*9894SPavel.Tatashin@Sun.COM 
4258*9894SPavel.Tatashin@Sun.COM 	if (kpm_enable == 0)
4259*9894SPavel.Tatashin@Sun.COM 		return ((caddr_t)NULL);
4260*9894SPavel.Tatashin@Sun.COM 
4261*9894SPavel.Tatashin@Sun.COM 	paddr = ptob(pfn);
4262*9894SPavel.Tatashin@Sun.COM 	vaddr = (uintptr_t)kpm_vbase + paddr;
4263*9894SPavel.Tatashin@Sun.COM 
4264*9894SPavel.Tatashin@Sun.COM 	return ((caddr_t)vaddr);
4265*9894SPavel.Tatashin@Sun.COM }
4266*9894SPavel.Tatashin@Sun.COM 
4267*9894SPavel.Tatashin@Sun.COM /*ARGSUSED*/
4268*9894SPavel.Tatashin@Sun.COM void
4269*9894SPavel.Tatashin@Sun.COM hat_kpm_mapout_pfn(pfn_t pfn)
4270*9894SPavel.Tatashin@Sun.COM {
4271*9894SPavel.Tatashin@Sun.COM 	/* empty */
4272*9894SPavel.Tatashin@Sun.COM }
4273*9894SPavel.Tatashin@Sun.COM 
4274*9894SPavel.Tatashin@Sun.COM /*
42750Sstevel@tonic-gate  * Return the kpm virtual address for a specific pfn
42760Sstevel@tonic-gate  */
42770Sstevel@tonic-gate caddr_t
42780Sstevel@tonic-gate hat_kpm_pfn2va(pfn_t pfn)
42790Sstevel@tonic-gate {
42803446Smrj 	uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
42810Sstevel@tonic-gate 
42825262Srscott 	ASSERT(!pfn_is_foreign(pfn));
42830Sstevel@tonic-gate 	return ((caddr_t)vaddr);
42840Sstevel@tonic-gate }
42850Sstevel@tonic-gate 
42860Sstevel@tonic-gate /*
42870Sstevel@tonic-gate  * Return the kpm virtual address for the page at pp.
42880Sstevel@tonic-gate  */
42890Sstevel@tonic-gate /*ARGSUSED*/
42900Sstevel@tonic-gate caddr_t
42910Sstevel@tonic-gate hat_kpm_page2va(struct page *pp, int checkswap)
42920Sstevel@tonic-gate {
42930Sstevel@tonic-gate 	return (hat_kpm_pfn2va(pp->p_pagenum));
42940Sstevel@tonic-gate }
42950Sstevel@tonic-gate 
42960Sstevel@tonic-gate /*
42970Sstevel@tonic-gate  * Return the page frame number for the kpm virtual address vaddr.
42980Sstevel@tonic-gate  */
42990Sstevel@tonic-gate pfn_t
43000Sstevel@tonic-gate hat_kpm_va2pfn(caddr_t vaddr)
43010Sstevel@tonic-gate {
43020Sstevel@tonic-gate 	pfn_t		pfn;
43030Sstevel@tonic-gate 
43040Sstevel@tonic-gate 	ASSERT(IS_KPM_ADDR(vaddr));
43050Sstevel@tonic-gate 
43060Sstevel@tonic-gate 	pfn = (pfn_t)btop(vaddr - kpm_vbase);
43070Sstevel@tonic-gate 
43080Sstevel@tonic-gate 	return (pfn);
43090Sstevel@tonic-gate }
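
/*
 * Editorial note, not part of the original source: on x86 the kpm range
 * is one linear window, so the two conversions above are exact inverses
 * for any valid pfn; for example (hypothetical sanity check):
 *
 *	ASSERT(hat_kpm_va2pfn(hat_kpm_pfn2va(pfn)) == pfn);
 */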
43100Sstevel@tonic-gate 
43110Sstevel@tonic-gate 
43120Sstevel@tonic-gate /*
43130Sstevel@tonic-gate  * Return the page for the kpm virtual address vaddr.
43140Sstevel@tonic-gate  */
43150Sstevel@tonic-gate page_t *
43160Sstevel@tonic-gate hat_kpm_vaddr2page(caddr_t vaddr)
43170Sstevel@tonic-gate {
43180Sstevel@tonic-gate 	pfn_t		pfn;
43190Sstevel@tonic-gate 
43200Sstevel@tonic-gate 	ASSERT(IS_KPM_ADDR(vaddr));
43210Sstevel@tonic-gate 
43220Sstevel@tonic-gate 	pfn = hat_kpm_va2pfn(vaddr);
43230Sstevel@tonic-gate 
43240Sstevel@tonic-gate 	return (page_numtopp_nolock(pfn));
43250Sstevel@tonic-gate }
43260Sstevel@tonic-gate 
43270Sstevel@tonic-gate /*
43280Sstevel@tonic-gate  * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
43290Sstevel@tonic-gate  * KPM page.  This should never happen on x86.
43300Sstevel@tonic-gate  */
43310Sstevel@tonic-gate int
43320Sstevel@tonic-gate hat_kpm_fault(hat_t *hat, caddr_t vaddr)
43330Sstevel@tonic-gate {
43347240Srh87107 	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p",
43357240Srh87107 	    (void *)hat, (void *)vaddr);
43360Sstevel@tonic-gate 
43370Sstevel@tonic-gate 	return (0);
43380Sstevel@tonic-gate }
43390Sstevel@tonic-gate 
43400Sstevel@tonic-gate /*ARGSUSED*/
43410Sstevel@tonic-gate void
43420Sstevel@tonic-gate hat_kpm_mseghash_clear(int nentries)
43430Sstevel@tonic-gate {}
43440Sstevel@tonic-gate 
43450Sstevel@tonic-gate /*ARGSUSED*/
43460Sstevel@tonic-gate void
43470Sstevel@tonic-gate hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
43480Sstevel@tonic-gate {}
43495084Sjohnlev 
43505084Sjohnlev #ifdef __xpv
43515084Sjohnlev /*
43525084Sjohnlev  * There are specific Hypervisor calls to establish and remove mappings
43535084Sjohnlev  * to grant table references and the privcmd driver. We have to ensure
43545084Sjohnlev  * that a page table actually exists.
43555084Sjohnlev  */
43565084Sjohnlev void
43577756SMark.Johnson@Sun.COM hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
43585084Sjohnlev {
43597756SMark.Johnson@Sun.COM 	maddr_t base_ma;
43607756SMark.Johnson@Sun.COM 	htable_t *ht;
43617756SMark.Johnson@Sun.COM 	uint_t entry;
43627756SMark.Johnson@Sun.COM 
43635084Sjohnlev 	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
43645741Smrj 	XPV_DISALLOW_MIGRATE();
43657756SMark.Johnson@Sun.COM 	ht = htable_create(hat, (uintptr_t)addr, 0, NULL);
43667756SMark.Johnson@Sun.COM 
43677756SMark.Johnson@Sun.COM 	/*
43687756SMark.Johnson@Sun.COM 	 * if an address for pte_ma is passed in, return the MA of the pte
43697756SMark.Johnson@Sun.COM 	 * for this specific address.  This address is only valid as long
43707756SMark.Johnson@Sun.COM 	 * as the htable stays locked.
43717756SMark.Johnson@Sun.COM 	 */
43727756SMark.Johnson@Sun.COM 	if (pte_ma != NULL) {
43737756SMark.Johnson@Sun.COM 		entry = htable_va2entry((uintptr_t)addr, ht);
43747756SMark.Johnson@Sun.COM 		base_ma = pa_to_ma(ptob(ht->ht_pfn));
43757756SMark.Johnson@Sun.COM 		*pte_ma = base_ma + (entry << mmu.pte_size_shift);
43767756SMark.Johnson@Sun.COM 	}
43775741Smrj 	XPV_ALLOW_MIGRATE();
43785084Sjohnlev }
43795084Sjohnlev 
43805084Sjohnlev void
43815084Sjohnlev hat_release_mapping(hat_t *hat, caddr_t addr)
43825084Sjohnlev {
43835084Sjohnlev 	htable_t *ht;
43845084Sjohnlev 
43855084Sjohnlev 	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
43865741Smrj 	XPV_DISALLOW_MIGRATE();
43875084Sjohnlev 	ht = htable_lookup(hat, (uintptr_t)addr, 0);
43885084Sjohnlev 	ASSERT(ht != NULL);
43895084Sjohnlev 	ASSERT(ht->ht_busy >= 2);
43905084Sjohnlev 	htable_release(ht);
43915084Sjohnlev 	htable_release(ht);
43925741Smrj 	XPV_ALLOW_MIGRATE();
43935741Smrj }
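
/*
 * Editorial sketch, hypothetical caller not from this file: how a grant
 * table mapping might use the pair above.  GNTMAP_contains_pte tells the
 * hypervisor that host_addr is the machine address of the PTE slot
 * itself, which is what hat_prepare_mapping() hands back.  The constant
 * and field names are from the Xen public headers; 'gref' and 'domid'
 * are assumed inputs.
 *
 *	uint64_t pte_ma;
 *	gnttab_map_grant_ref_t op;
 *
 *	hat_prepare_mapping(kas.a_hat, addr, &pte_ma);
 *	op.host_addr = pte_ma;
 *	op.flags = GNTMAP_host_map | GNTMAP_contains_pte;
 *	op.ref = gref;
 *	op.dom = domid;
 *	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) != 0)
 *		... handle the failure ...
 *	...
 *	hat_release_mapping(kas.a_hat, addr);
 */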
43945084Sjohnlev #endif
4395