xref: /onnv-gate/usr/src/uts/i86pc/vm/hat_i86.c (revision 12532:e0c8045b31e0)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */


/*
 * VM - Hardware Address Translation management for i386 and amd64
 *
 * Implementation of the interfaces described in <common/vm/hat.h>
 *
 * Nearly all the details of how the hardware is managed should not be
 * visible outside this layer except for misc. machine specific functions
 * that work in conjunction with this code.
 *
 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
 */

#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/shm.h>
#include <sys/sysmacros.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/controlregs.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/bootinfo.h>
#include <sys/archsystm.h>

#include <vm/seg_kmem.h>
#include <vm/hat_i86.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/page.h>
#include <vm/seg_kp.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <vm/kboot_mmu.h>
#include <vm/seg_spt.h>

#include <sys/cmn_err.h>

/*
 * Basic parameters for hat operation.
 */
struct hat_mmu_info mmu;

/*
 * The page that is the kernel's top level pagetable.
 *
 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
 * on this 4K page for its top level page table. The remaining groups of
 * 4 entries are used for per processor copies of user VLP pagetables for
 * running threads.  See hat_switch() and reload_pae32() for details.
 *
 * vlp_page[0..3]  - level==2 PTEs for kernel HAT
 * vlp_page[4..7]  - level==2 PTEs for user thread on cpu 0
 * vlp_page[8..11] - level==2 PTEs for user thread on cpu 1
 * etc...
 */
static x86pte_t *vlp_page;

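/*
 * Illustrative sketch (not part of the original source): given the layout
 * above, the 4 VLP slots belonging to a given cpu on a 32 bit PAE kernel
 * are found with simple pointer arithmetic, mirroring what reload_pae32()
 * computes below:
 *
 *	x86pte_t *slot = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
 *
 * Slot 0 (entries 0..3) always holds the kernel HAT's level==2 PTEs.
 */
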
/*
 * forward declaration of internal utility routines
 */
static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
	x86pte_t new);

/*
 * The kernel address space exists in all HATs. To implement this the
 * kernel reserves a fixed number of entries in the topmost level(s) of page
 * tables. The values are set up during startup and then copied to every user
 * hat created by hat_alloc(). This means that kernelbase must be:
 *
 *	  4Meg aligned for 32 bit kernels
 *	512Gig aligned for x86_64 64 bit kernel
 *
 * The hat_kernel_range_ts describe what needs to be copied from kernel hat
 * to each user hat.
 */
typedef struct hat_kernel_range {
	level_t		hkr_level;
	uintptr_t	hkr_start_va;
	uintptr_t	hkr_end_va;	/* zero means to end of memory */
} hat_kernel_range_t;
#define	NUM_KERNEL_RANGE 2
static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
static int num_kernel_ranges;

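/*
 * Illustrative example (exposition only, see hat_init_finish()): on a
 * 64 bit metal kernel a single range is typically recorded,
 *
 *	kernel_ranges[0] = { 3, kernelbase, 0 };
 *
 * meaning all level==3 entries from kernelbase to the end of memory are
 * copied into every new user hat by hat_alloc().
 */
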
uint_t use_boot_reserve = 1;	/* cleared after early boot process */
uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */

/*
 * enable_1gpg: controls 1g page support for user applications.
 * By default, 1g pages are exported to user applications. enable_1gpg can
 * be set to 0 to not export.
 */
int	enable_1gpg = 1;

/*
 * AMD shanghai processors provide better management of 1gb ptes in their
 * TLBs.  By default, 1g page support will be disabled for pre-shanghai AMD
 * processors that don't have optimal tlb support for the 1g page size.
 * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
 * processors.
 */
int	chk_optimal_1gtlb = 1;


#ifdef DEBUG
uint_t	map1gcnt;
#endif


/*
 * A cpuset for all cpus. This is used for kernel address cross calls, since
 * the kernel addresses apply to all cpus.
 */
cpuset_t khat_cpuset;

/*
 * management stuff for hat structures
 */
kmutex_t	hat_list_lock;
kcondvar_t	hat_list_cv;
kmem_cache_t	*hat_cache;
kmem_cache_t	*hat_hash_cache;
kmem_cache_t	*vlp_hash_cache;

/*
 * Simple statistics
 */
struct hatstats hatstat;

1770Sstevel@tonic-gate /*
1785316Sjohnlev  * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
1795316Sjohnlev  * correctly.  For such hypervisors we must set PT_USER for kernel
1805316Sjohnlev  * entries ourselves (normally the emulation would set PT_USER for
1815316Sjohnlev  * kernel entries and PT_USER|PT_GLOBAL for user entries).  pt_kern is
1825316Sjohnlev  * thus set appropriately.  Note that dboot/kbm is OK, as only the full
1835316Sjohnlev  * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
1845316Sjohnlev  * incorrect.
1855316Sjohnlev  */
1865316Sjohnlev int pt_kern;
1875316Sjohnlev 
1885316Sjohnlev /*
1890Sstevel@tonic-gate  * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
1900Sstevel@tonic-gate  */
1910Sstevel@tonic-gate extern void atomic_orb(uchar_t *addr, uchar_t val);
1920Sstevel@tonic-gate extern void atomic_andb(uchar_t *addr, uchar_t val);
1930Sstevel@tonic-gate 
19412004Sjiang.liu@intel.com #ifndef __xpv
19512004Sjiang.liu@intel.com extern pfn_t memseg_get_start(struct memseg *);
19612004Sjiang.liu@intel.com #endif
19712004Sjiang.liu@intel.com 
#define	PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
#define	PP_ISMOD(pp)		PP_GETRM(pp, P_MOD)
#define	PP_ISREF(pp)		PP_GETRM(pp, P_REF)
#define	PP_ISRO(pp)		PP_GETRM(pp, P_RO)

#define	PP_SETRM(pp, rm)	atomic_orb(&(pp->p_nrm), rm)
#define	PP_SETMOD(pp)		PP_SETRM(pp, P_MOD)
#define	PP_SETREF(pp)		PP_SETRM(pp, P_REF)
#define	PP_SETRO(pp)		PP_SETRM(pp, P_RO)

#define	PP_CLRRM(pp, rm)	atomic_andb(&(pp->p_nrm), ~(rm))
#define	PP_CLRMOD(pp)   	PP_CLRRM(pp, P_MOD)
#define	PP_CLRREF(pp)   	PP_CLRRM(pp, P_REF)
#define	PP_CLRRO(pp)    	PP_CLRRM(pp, P_RO)
#define	PP_CLRALL(pp)		PP_CLRRM(pp, P_MOD | P_REF | P_RO)

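/*
 * Usage sketch (illustrative only): these macros maintain the software
 * copies of the ref/mod/ro bits in a page_t, e.g.
 *
 *	if (PTE_GET(pte, PT_MOD))
 *		PP_SETMOD(pp);
 *
 * atomic_orb()/atomic_andb() make the byte sized updates safe against
 * concurrent callers without needing a lock.
 */
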
/*
 * kmem cache constructor for struct hat
 */
/*ARGSUSED*/
static int
hati_constructor(void *buf, void *handle, int kmflags)
{
	hat_t	*hat = buf;

	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	bzero(hat->hat_pages_mapped,
	    sizeof (pgcnt_t) * (mmu.max_page_level + 1));
	hat->hat_ism_pgcnt = 0;
	hat->hat_stats = 0;
	hat->hat_flags = 0;
	CPUSET_ZERO(hat->hat_cpus);
	hat->hat_htable = NULL;
	hat->hat_ht_hash = NULL;
	return (0);
}

/*
 * Allocate a hat structure for as. We also create the top level
 * htable and initialize it to contain the kernel hat entries.
 */
hat_t *
hat_alloc(struct as *as)
{
	hat_t			*hat;
	htable_t		*ht;	/* top level htable */
	uint_t			use_vlp;
	uint_t			r;
	hat_kernel_range_t	*rp;
	uintptr_t		va;
	uintptr_t		eva;
	uint_t			start;
	uint_t			cnt;
	htable_t		*src;

	/*
	 * Once we start creating user process HATs we can enable
	 * the htable_steal() code.
	 */
	if (can_steal_post_boot == 0)
		can_steal_post_boot = 1;

	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
	hat->hat_as = as;
	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	ASSERT(hat->hat_flags == 0);

#if defined(__xpv)
	/*
	 * No VLP stuff on the hypervisor due to the 64-bit split top level
	 * page tables.  On 32-bit it's not needed as the hypervisor takes
	 * care of copying the top level PTEs to a below 4Gig page.
	 */
	use_vlp = 0;
#else	/* __xpv */
	/* 32 bit processes use a VLP style hat when running with PAE */
#if defined(__amd64)
	use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
#elif defined(__i386)
	use_vlp = mmu.pae_hat;
#endif
#endif	/* __xpv */
	if (use_vlp) {
		hat->hat_flags = HAT_VLP;
		bzero(hat->hat_vlp_ptes, VLP_SIZE);
	}

	/*
	 * Allocate the htable hash
	 */
	if ((hat->hat_flags & HAT_VLP)) {
		hat->hat_num_hash = mmu.vlp_hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
	} else {
		hat->hat_num_hash = mmu.hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
	}
	bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));

	/*
	 * Initialize Kernel HAT entries at the top of the top level page
	 * tables for the new hat.
	 */
	hat->hat_htable = NULL;
	hat->hat_ht_cached = NULL;
	XPV_DISALLOW_MIGRATE();
	ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
	hat->hat_htable = ht;

#if defined(__amd64)
	if (hat->hat_flags & HAT_VLP)
		goto init_done;
#endif

	for (r = 0; r < num_kernel_ranges; ++r) {
		rp = &kernel_ranges[r];
		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
		    va += cnt * LEVEL_SIZE(rp->hkr_level)) {

			if (rp->hkr_level == TOP_LEVEL(hat))
				ht = hat->hat_htable;
			else
				ht = htable_create(hat, va, rp->hkr_level,
				    NULL);

			start = htable_va2entry(va, ht);
			cnt = HTABLE_NUM_PTES(ht) - start;
			eva = va +
			    ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
			if (rp->hkr_end_va != 0 &&
			    (eva > rp->hkr_end_va || eva == 0))
				cnt = htable_va2entry(rp->hkr_end_va, ht) -
				    start;

#if defined(__i386) && !defined(__xpv)
			if (ht->ht_flags & HTABLE_VLP) {
				bcopy(&vlp_page[start],
				    &hat->hat_vlp_ptes[start],
				    cnt * sizeof (x86pte_t));
				continue;
			}
#endif
			src = htable_lookup(kas.a_hat, va, rp->hkr_level);
			ASSERT(src != NULL);
			x86pte_copy(src, ht, start, cnt);
			htable_release(src);
		}
	}

init_done:

#if defined(__xpv)
	/*
	 * Pin top level page tables after initializing them
	 */
	xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
#if defined(__amd64)
	xen_pin(hat->hat_user_ptable, mmu.max_level);
#endif
#endif
	XPV_ALLOW_MIGRATE();

	/*
	 * Put it at the start of the global list of all hats (used by stealing)
	 *
	 * kas.a_hat is not in the list but is instead used to find the
	 * first and last items in the list.
	 *
	 * - kas.a_hat->hat_next points to the start of the user hats.
	 *   The list ends where hat->hat_next == NULL
	 *
	 * - kas.a_hat->hat_prev points to the last of the user hats.
	 *   The list begins where hat->hat_prev == NULL
	 */
	mutex_enter(&hat_list_lock);
	hat->hat_prev = NULL;
	hat->hat_next = kas.a_hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat;
	else
		kas.a_hat->hat_prev = hat;
	kas.a_hat->hat_next = hat;
	mutex_exit(&hat_list_lock);

	return (hat);
}

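/*
 * Illustrative sketch (not in the original source): with kas.a_hat acting
 * as the sentinel described above, a forward walk over every user hat --
 * the pattern htable_steal() depends on -- looks like:
 *
 *	mutex_enter(&hat_list_lock);
 *	for (hat = kas.a_hat->hat_next; hat != NULL; hat = hat->hat_next)
 *		...
 *	mutex_exit(&hat_list_lock);
 */
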
/*
 * The process has finished executing but the address space (as) has not
 * been cleaned up yet.
 */
/*ARGSUSED*/
void
hat_free_start(hat_t *hat)
{
	ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));

	/*
	 * If the hat is currently a stealing victim, wait for the stealing
	 * to finish.  Once we mark it as HAT_FREEING, htable_steal()
	 * won't look at its pagetables anymore.
	 */
	mutex_enter(&hat_list_lock);
	while (hat->hat_flags & HAT_VICTIM)
		cv_wait(&hat_list_cv, &hat_list_lock);
	hat->hat_flags |= HAT_FREEING;
	mutex_exit(&hat_list_lock);
}

/*
 * An address space is being destroyed, so we destroy the associated hat.
 */
void
hat_free_end(hat_t *hat)
{
	kmem_cache_t *cache;

	ASSERT(hat->hat_flags & HAT_FREEING);

	/*
	 * must not be running on the given hat
	 */
	ASSERT(CPU->cpu_current_hat != hat);

	/*
	 * Remove it from the list of HATs
	 */
	mutex_enter(&hat_list_lock);
	if (hat->hat_prev)
		hat->hat_prev->hat_next = hat->hat_next;
	else
		kas.a_hat->hat_next = hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat->hat_prev;
	else
		kas.a_hat->hat_prev = hat->hat_prev;
	mutex_exit(&hat_list_lock);
	hat->hat_next = hat->hat_prev = NULL;

#if defined(__xpv)
	/*
	 * On the hypervisor, unpin top level page table(s)
	 */
	xen_unpin(hat->hat_htable->ht_pfn);
#if defined(__amd64)
	xen_unpin(hat->hat_user_ptable);
#endif
#endif

	/*
	 * Make a pass through the htables freeing them all up.
	 */
	htable_purge_hat(hat);

	/*
	 * Decide which kmem cache the hash table came from, then free it.
	 */
	if (hat->hat_flags & HAT_VLP)
		cache = vlp_hash_cache;
	else
		cache = hat_hash_cache;
	kmem_cache_free(cache, hat->hat_ht_hash);
	hat->hat_ht_hash = NULL;

	hat->hat_flags = 0;
	kmem_cache_free(hat_cache, hat);
}

/*
 * round kernelbase down to a supported value to use for _userlimit
 *
 * userlimit must be aligned down to an entry in the top level htable.
 * The one exception is for 32 bit HAT's running PAE.
 */
uintptr_t
hat_kernelbase(uintptr_t va)
{
#if defined(__i386)
	va &= LEVEL_MASK(1);
#endif
	if (IN_VA_HOLE(va))
		panic("_userlimit %p will fall in VA hole\n", (void *)va);
	return (va);
}

/*
 * Determine the largest page size (pagetable level) the HAT will use,
 * based on boot-time large page support, 1g page capability and the
 * tunables declared above.
 */
static void
set_max_page_level()
{
	level_t lvl;

	if (!kbm_largepage_support) {
		lvl = 0;
	} else {
		if (x86_feature & X86_1GPG) {
			lvl = 2;
			if (chk_optimal_1gtlb &&
			    cpuid_opteron_erratum(CPU, 6671130)) {
				lvl = 1;
			}
			if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
			    LEVEL_SHIFT(0))) {
				lvl = 1;
			}
		} else {
			lvl = 1;
		}
	}
	mmu.max_page_level = lvl;

	if ((lvl == 2) && (enable_1gpg == 0))
		mmu.umax_page_level = 1;
	else
		mmu.umax_page_level = lvl;
}

/*
 * Initialize hat data structures based on processor MMU information.
 */
void
mmu_init(void)
{
	uint_t max_htables;
	uint_t pa_bits;
	uint_t va_bits;
	int i;

	/*
	 * If CPU enabled the page table global bit, use it for the kernel
	 * This is bit 7 in CR4 (PGE - Page Global Enable).
	 */
	if ((x86_feature & X86_PGE) != 0 && (getcr4() & CR4_PGE) != 0)
		mmu.pt_global = PT_GLOBAL;

	/*
	 * Detect NX and PAE usage.
	 */
	mmu.pae_hat = kbm_pae_support;
	if (kbm_nx_support)
		mmu.pt_nx = PT_NX;
	else
		mmu.pt_nx = 0;

	/*
	 * Use CPU info to set various MMU parameters
	 */
	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);

	if (va_bits < sizeof (void *) * NBBY) {
		mmu.hole_start = (1ul << (va_bits - 1));
		mmu.hole_end = 0ul - mmu.hole_start - 1;
	} else {
		mmu.hole_end = 0;
		mmu.hole_start = mmu.hole_end - 1;
	}
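
	/*
	 * Worked example (illustrative, not in the original source): with
	 * va_bits == 48, as on typical amd64 hardware, this computes the
	 * canonical address hole:
	 *
	 *	mmu.hole_start = 1ul << 47            = 0x0000800000000000
	 *	mmu.hole_end   = 0ul - hole_start - 1 = 0xffff7fffffffffff
	 */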
#if defined(OPTERON_ERRATUM_121)
	/*
	 * If erratum 121 has already been detected at this time, hole_start
	 * contains the value to be subtracted from mmu.hole_start.
	 */
	ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
	hole_start = mmu.hole_start - hole_start;
#else
	hole_start = mmu.hole_start;
#endif
	hole_end = mmu.hole_end;

	mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
	if (mmu.pae_hat == 0 && pa_bits > 32)
		mmu.highest_pfn = PFN_4G - 1;

	if (mmu.pae_hat) {
		mmu.pte_size = 8;	/* 8 byte PTEs */
		mmu.pte_size_shift = 3;
	} else {
		mmu.pte_size = 4;	/* 4 byte PTEs */
		mmu.pte_size_shift = 2;
	}

	if (mmu.pae_hat && (x86_feature & X86_PAE) == 0)
		panic("Processor does not support PAE");

	if ((x86_feature & X86_CX8) == 0)
		panic("Processor does not support cmpxchg8b instruction");

#if defined(__amd64)

	mmu.num_level = 4;
	mmu.max_level = 3;
	mmu.ptes_per_table = 512;
	mmu.top_level_count = 512;

	mmu.level_shift[0] = 12;
	mmu.level_shift[1] = 21;
	mmu.level_shift[2] = 30;
	mmu.level_shift[3] = 39;

#elif defined(__i386)

	if (mmu.pae_hat) {
		mmu.num_level = 3;
		mmu.max_level = 2;
		mmu.ptes_per_table = 512;
		mmu.top_level_count = 4;

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 21;
		mmu.level_shift[2] = 30;

	} else {
		mmu.num_level = 2;
		mmu.max_level = 1;
		mmu.ptes_per_table = 1024;
		mmu.top_level_count = 1024;

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 22;
	}

#endif	/* __i386 */

	for (i = 0; i < mmu.num_level; ++i) {
		mmu.level_size[i] = 1UL << mmu.level_shift[i];
		mmu.level_offset[i] = mmu.level_size[i] - 1;
		mmu.level_mask[i] = ~mmu.level_offset[i];
	}
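
	/*
	 * Illustrative note (not in the original source): with the amd64
	 * shifts above this yields the familiar mapping sizes
	 *
	 *	level 0: 1 << 12 = 4K		level 1: 1 << 21 = 2M
	 *	level 2: 1 << 30 = 1G		level 3: 1 << 39 = 512G
	 */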

	set_max_page_level();

	mmu_page_sizes = mmu.max_page_level + 1;
	mmu_exported_page_sizes = mmu.umax_page_level + 1;

	/* restrict legacy applications from using pagesizes 1g and above */
	mmu_legacy_page_sizes =
	    (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;


	for (i = 0; i <= mmu.max_page_level; ++i) {
		mmu.pte_bits[i] = PT_VALID | pt_kern;
		if (i > 0)
			mmu.pte_bits[i] |= PT_PAGESIZE;
	}

	/*
	 * NOTE Legacy 32 bit PAE mode only has the PT_VALID bit at top level.
	 */
	for (i = 1; i < mmu.num_level; ++i)
		mmu.ptp_bits[i] = PT_PTPBITS;

#if defined(__i386)
	mmu.ptp_bits[2] = PT_VALID;
#endif

	/*
	 * Compute how many hash table entries to have per process for htables.
	 * We start with 1 page's worth of entries.
	 *
	 * If physical memory is small, reduce the amount needed to cover it.
	 */
	max_htables = physmax / mmu.ptes_per_table;
	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
	while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
		mmu.hash_cnt >>= 1;
	mmu.vlp_hash_cnt = mmu.hash_cnt;
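
	/*
	 * Worked example (illustrative, assuming a 64 bit kernel): with
	 * MMU_PAGESIZE of 4096 and sizeof (htable_t *) of 8, the starting
	 * point is 4096 / 8 = 512 hash buckets per process.
	 */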

#if defined(__amd64)
	/*
	 * If running in 64 bits and physical memory is large,
	 * increase the size of the cache to cover all of memory for
	 * a 64 bit process.
	 */
#define	HASH_MAX_LENGTH 4
	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
		mmu.hash_cnt <<= 1;
#endif
}


/*
 * initialize hat data structures
 */
void
hat_init()
{
#if defined(__i386)
	/*
	 * _userlimit must be aligned correctly
	 */
	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
		halt("hat_init(): Unable to continue");
	}
#endif

	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * initialize kmem caches
	 */
	htable_init();
	hment_init();

	hat_cache = kmem_cache_create("hat_t",
	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
	    NULL, 0, 0);

	hat_hash_cache = kmem_cache_create("HatHash",
	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
	    NULL, 0, 0);

	/*
	 * VLP hats can use a smaller hash table size on large memory machines
	 */
	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
		vlp_hash_cache = hat_hash_cache;
	} else {
		vlp_hash_cache = kmem_cache_create("HatVlpHash",
		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
		    NULL, 0, 0);
	}

	/*
	 * Set up the kernel's hat
	 */
	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	kas.a_hat->hat_as = &kas;
	kas.a_hat->hat_flags = 0;
	AS_LOCK_EXIT(&kas, &kas.a_lock);

	CPUSET_ZERO(khat_cpuset);
	CPUSET_ADD(khat_cpuset, CPU->cpu_id);

	/*
	 * The kernel hat's next pointer serves as the head of the hat list.
	 * The kernel hat's prev pointer tracks the last hat on the list for
	 * htable_steal() to use.
	 */
	kas.a_hat->hat_next = NULL;
	kas.a_hat->hat_prev = NULL;

	/*
	 * Allocate an htable hash bucket for the kernel
	 * XX64 - tune for 64 bit procs
	 */
	kas.a_hat->hat_num_hash = mmu.hash_cnt;
	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));

	/*
	 * zero out the top level and cached htable pointers
	 */
	kas.a_hat->hat_ht_cached = NULL;
	kas.a_hat->hat_htable = NULL;

	/*
	 * Pre-allocate hrm_hashtab before enabling the collection of
	 * refmod statistics.  Allocating on the fly would mean running
	 * the risk of suffering recursive mutex enters or deadlocks.
	 */
	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
	    KM_SLEEP);
}

/*
 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 *
 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 */
/*ARGSUSED*/
static void
hat_vlp_setup(struct cpu *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci = cpu->cpu_hat_info;
	pfn_t pfn;

	/*
	 * allocate the level==2 page table for the bottom most
	 * 512Gig of address space (this is where 32 bit apps live)
	 */
	ASSERT(hci != NULL);
	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);

	/*
	 * Allocate a top level pagetable and copy the kernel's
	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
	 */
	hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
	hci->hci_vlp_pfn =
	    hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
	ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
	bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);

	pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
	ASSERT(pfn != PFN_INVALID);
	hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
#endif /* __amd64 && !__xpv */
}

8040Sstevel@tonic-gate 
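/*
 * Illustrative note (assumption, not in the original source): entry 0 of
 * the level==3 table spans virtual addresses 0 .. (1 << LEVEL_SHIFT(3)) - 1,
 * i.e. the bottom 512Gig, which is why the single hci_vlp_l2ptes table is
 * enough for any 32 bit address space.
 */
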
/*ARGSUSED*/
static void
hat_vlp_teardown(cpu_t *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci;

	if ((hci = cpu->cpu_hat_info) == NULL)
		return;
	if (hci->hci_vlp_l2ptes)
		kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
	if (hci->hci_vlp_l3ptes)
		kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
#endif
}

#define	NEXT_HKR(r, l, s, e) {			\
	kernel_ranges[r].hkr_level = l;		\
	kernel_ranges[r].hkr_start_va = s;	\
	kernel_ranges[r].hkr_end_va = e;	\
	++r;					\
}

/*
 * Finish filling in the kernel hat.
 * Pre-fill all top level kernel page table entries for the kernel's
 * part of the address range.  From this point on we can't use any new
 * kernel large pages if they need PTE's at max_level.
 *
 * Also create the kmap mappings.
 */
void
hat_init_finish(void)
{
	size_t		size;
	uint_t		r = 0;
	uintptr_t	va;
	hat_kernel_range_t *rp;

	/*
	 * We are now effectively running on the kernel hat.
	 * Clearing use_boot_reserve shuts off using the pre-allocated boot
	 * reserve for all HAT allocations.  From here on, the reserves are
	 * only used when avoiding recursion in kmem_alloc().
	 */
	use_boot_reserve = 0;
	htable_adjust_reserve();

	/*
	 * User HATs are initialized with copies of all kernel mappings in
	 * higher level page tables. Ensure that those entries exist.
	 */
#if defined(__amd64)

	NEXT_HKR(r, 3, kernelbase, 0);
#if defined(__xpv)
	NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
#endif

#elif defined(__i386)

#if !defined(__xpv)
	if (mmu.pae_hat) {
		va = kernelbase;
		if ((va & LEVEL_MASK(2)) != va) {
			va = P2ROUNDUP(va, LEVEL_SIZE(2));
			NEXT_HKR(r, 1, kernelbase, va);
		}
		if (va != 0)
			NEXT_HKR(r, 2, va, 0);
	} else
#endif /* __xpv */
		NEXT_HKR(r, 1, kernelbase, 0);

#endif /* __i386 */

	num_kernel_ranges = r;

	/*
	 * Create all the kernel pagetables that will have entries
	 * shared to user HATs.
	 */
	for (r = 0; r < num_kernel_ranges; ++r) {
		rp = &kernel_ranges[r];
		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
		    va += LEVEL_SIZE(rp->hkr_level)) {
			htable_t *ht;

			if (IN_HYPERVISOR_VA(va))
				continue;

			/* can/must skip if a page mapping already exists */
			if (rp->hkr_level <= mmu.max_page_level &&
			    (ht = htable_getpage(kas.a_hat, va, NULL)) !=
			    NULL) {
				htable_release(ht);
				continue;
			}

			(void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
			    NULL);
		}
	}

	/*
	 * 32 bit PAE metal kernels use only 4 of the 512 entries in the
	 * page holding the top level pagetable. We use the remainder for
	 * the "per CPU" page tables for VLP processes.
	 * Map the top level kernel pagetable into the kernel to make
	 * it easy to use bcopy to access these tables.
	 */
	if (mmu.pae_hat) {
		vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
		hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
		    kas.a_hat->hat_htable->ht_pfn,
#if !defined(__xpv)
		    PROT_WRITE |
#endif
		    PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
		    HAT_LOAD | HAT_LOAD_NOCONSIST);
	}
	hat_vlp_setup(CPU);

	/*
	 * Create kmap (cached mappings of kernel PTEs)
	 * for 32 bit we map from segmap_start .. ekernelheap
	 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
	 */
#if defined(__i386)
	size = (uintptr_t)ekernelheap - segmap_start;
#elif defined(__amd64)
	size = segmapsize;
#endif
	hat_kmap_init((uintptr_t)segmap_start, size);
}

/*
 * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
 * are 32 bit, so for safety we must use cas64() to install these.
 */
#ifdef __i386
static void
reload_pae32(hat_t *hat, cpu_t *cpu)
{
	x86pte_t *src;
	x86pte_t *dest;
	x86pte_t pte;
	int i;

	/*
	 * Load the 4 entries of the level 2 page table into this
	 * cpu's range of the vlp_page and point cr3 at them.
	 */
	ASSERT(mmu.pae_hat);
	src = hat->hat_vlp_ptes;
	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
	for (i = 0; i < VLP_NUM_PTES; ++i) {
		for (;;) {
			pte = dest[i];
			if (pte == src[i])
				break;
			if (cas64(dest + i, pte, src[i]) != src[i])
				break;
		}
	}
}
#endif

/*
 * Switch to a new active hat, maintaining bit masks to track active CPUs.
 *
 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
 * remains a 32-bit value.
 */
void
hat_switch(hat_t *hat)
{
	uint64_t	newcr3;
	cpu_t		*cpu = CPU;
	hat_t		*old = cpu->cpu_current_hat;

	/*
	 * set up this information first, so we don't miss any cross calls
	 */
	if (old != NULL) {
		if (old == hat)
			return;
		if (old != kas.a_hat)
			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
	}

	/*
	 * Add this CPU to the active set for this HAT.
	 */
	if (hat != kas.a_hat) {
		CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
	}
	cpu->cpu_current_hat = hat;

	/*
	 * now go ahead and load cr3
	 */
	if (hat->hat_flags & HAT_VLP) {
#if defined(__amd64)
		x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;

		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
		newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
#elif defined(__i386)
		reload_pae32(hat, cpu);
		newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
		    (cpu->cpu_id + 1) * VLP_SIZE;
#endif
	} else {
		newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
	}
#ifdef __xpv
	{
		struct mmuext_op t[2];
		uint_t retcnt;
		uint_t opcnt = 1;

		t[0].cmd = MMUEXT_NEW_BASEPTR;
		t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
#if defined(__amd64)
		/*
		 * There's an interesting problem here, as to what to
		 * actually specify when switching to the kernel hat.
		 * For now we'll reuse the kernel hat again.
		 */
		t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
		if (hat == kas.a_hat)
			t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
		else
			t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
		++opcnt;
#endif	/* __amd64 */
		if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmu_update() failed");
		ASSERT(retcnt == opcnt);

	}
#else
	setcr3(newcr3);
#endif
	ASSERT(cpu == CPU);
}

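/*
 * Illustrative note (assumption, not in the original source): in the 32 bit
 * PAE case above, %cr3 does not point at a private page per process.  It
 * points into the shared vlp_page at this cpu's group of 4 entries, e.g.
 * for cpu 2
 *
 *	newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) + 3 * VLP_SIZE;
 *
 * which is why reload_pae32() copies the process's PTEs there first.
 */
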
/*
 * Utility to return a valid x86pte_t from protections, pfn, and level number
 */
static x86pte_t
hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
{
	x86pte_t	pte;
	uint_t		cache_attr = attr & HAT_ORDER_MASK;

	pte = MAKEPTE(pfn, level);

	if (attr & PROT_WRITE)
		PTE_SET(pte, PT_WRITABLE);

	if (attr & PROT_USER)
		PTE_SET(pte, PT_USER);

	if (!(attr & PROT_EXEC))
		PTE_SET(pte, mmu.pt_nx);

	/*
	 * Set the software bits used to track ref/mod sync's and hments.
	 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
	 */
	if (flags & HAT_LOAD_NOCONSIST)
		PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
	else if (attr & HAT_NOSYNC)
		PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);

	/*
	 * Set the caching attributes in the PTE. The combination
	 * of attributes is poorly defined, so we pay attention
	 * to them in the given order.
	 *
	 * The test for HAT_STRICTORDER is different because it's defined
	 * as "0" - which was a stupid thing to do, but it is too late to
	 * change!
	 */
	if (cache_attr == HAT_STRICTORDER) {
		PTE_SET(pte, PT_NOCACHE);
	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
		/* nothing to set */;
	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
		PTE_SET(pte, PT_NOCACHE);
		if (x86_feature & X86_PAT)
			PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
		else
			PTE_SET(pte, PT_WRITETHRU);
	} else {
		panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
	}

	return (pte);
}

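/*
 * Usage sketch (illustrative only): a writable, executable, cacheable
 * kernel mapping at level 0 could be constructed with
 *
 *	pte = hati_mkpte(pfn, PROT_READ | PROT_WRITE | PROT_EXEC |
 *	    HAT_STORECACHING_OK, 0, 0);
 *
 * yielding MAKEPTE(pfn, 0) with PT_WRITABLE set and PT_NX/PT_USER clear.
 */
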
/*
 * Duplicate address translations of the parent to the child.
 * This function really isn't used anymore.
 */
/*ARGSUSED*/
int
hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
{
	ASSERT((uintptr_t)addr < kernelbase);
	ASSERT(new != kas.a_hat);
	ASSERT(old != kas.a_hat);
	return (0);
}

/*
 * Allocate any hat resources required for a process being swapped in.
 */
/*ARGSUSED*/
void
hat_swapin(hat_t *hat)
{
	/* do nothing - we let everything fault back in */
}

/*
 * Unload all translations associated with an address space of a process
 * that is being swapped out.
 */
void
hat_swapout(hat_t *hat)
{
	uintptr_t	vaddr = (uintptr_t)0;
	uintptr_t	eaddr = _userlimit;
	htable_t	*ht = NULL;
	level_t		l;

	XPV_DISALLOW_MIGRATE();
	/*
	 * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
	 * seg_spt and shared pagetables can't be swapped out.
	 * Take a look at segspt_shmswapout() - it's a big no-op.
	 *
	 * Instead we'll walk through all the address space and unload
	 * any mappings which we are sure are not shared, not locked.
	 */
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
		eaddr = (uintptr_t)hat->hat_as->a_userlimit;

	while (vaddr < eaddr) {
		(void) htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;

		ASSERT(!IN_VA_HOLE(vaddr));

		/*
		 * If the page table is shared skip its entire range.
		 */
		l = ht->ht_level;
		if (ht->ht_flags & HTABLE_SHARED_PFN) {
			vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
			htable_release(ht);
			ht = NULL;
			continue;
		}

		/*
		 * If the page table has no locked entries, unload this one.
		 */
		if (ht->ht_lock_cnt == 0)
			hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
			    HAT_UNLOAD_UNMAP);

		/*
		 * If we have a level 0 page table with locked entries,
		 * skip the entire page table, otherwise skip just one entry.
		 */
		if (ht->ht_lock_cnt > 0 && l == 0)
			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
		else
			vaddr += LEVEL_SIZE(l);
	}
	if (ht)
		htable_release(ht);

	/*
	 * We're in swapout because the system is low on memory, so
	 * go back and flush all the htables off the cached list.
	 */
	htable_purge_hat(hat);
	XPV_ALLOW_MIGRATE();
}

/*
 * returns number of bytes that have valid mappings in hat.
 */
size_t
hat_get_mapped_size(hat_t *hat)
{
	size_t total = 0;
	int l;

	for (l = 0; l <= mmu.max_page_level; l++)
		total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
	total += hat->hat_ism_pgcnt;

	return (total);
}

/*
 * enable/disable collection of stats for hat.
 */
int
hat_stats_enable(hat_t *hat)
{
	atomic_add_32(&hat->hat_stats, 1);
	return (1);
}

void
hat_stats_disable(hat_t *hat)
{
	atomic_add_32(&hat->hat_stats, -1);
}

12370Sstevel@tonic-gate /*
12380Sstevel@tonic-gate  * Utility to sync the ref/mod bits from a page table entry to the page_t
12390Sstevel@tonic-gate  * We must be holding the mapping list lock when this is called.
12400Sstevel@tonic-gate  */
12410Sstevel@tonic-gate static void
12420Sstevel@tonic-gate hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
12430Sstevel@tonic-gate {
12440Sstevel@tonic-gate 	uint_t	rm = 0;
12450Sstevel@tonic-gate 	pgcnt_t	pgcnt;
12460Sstevel@tonic-gate 
12473446Smrj 	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
12480Sstevel@tonic-gate 		return;
12490Sstevel@tonic-gate 
12500Sstevel@tonic-gate 	if (PTE_GET(pte, PT_REF))
12510Sstevel@tonic-gate 		rm |= P_REF;
12520Sstevel@tonic-gate 
12530Sstevel@tonic-gate 	if (PTE_GET(pte, PT_MOD))
12540Sstevel@tonic-gate 		rm |= P_MOD;
12550Sstevel@tonic-gate 
12560Sstevel@tonic-gate 	if (rm == 0)
12570Sstevel@tonic-gate 		return;
12580Sstevel@tonic-gate 
12590Sstevel@tonic-gate 	/*
12600Sstevel@tonic-gate 	 * sync to all constituent pages of a large page
12610Sstevel@tonic-gate 	 */
12620Sstevel@tonic-gate 	ASSERT(x86_hm_held(pp));
12630Sstevel@tonic-gate 	pgcnt = page_get_pagecnt(level);
12640Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
12650Sstevel@tonic-gate 	for (; pgcnt > 0; --pgcnt) {
12660Sstevel@tonic-gate 		/*
12670Sstevel@tonic-gate 		 * hat_page_demote() can't decrease
12680Sstevel@tonic-gate 		 * pszc below this mapping size
12690Sstevel@tonic-gate 		 * since this large mapping still existed
12700Sstevel@tonic-gate 		 * when we took the mlist lock.
12710Sstevel@tonic-gate 		 */
12720Sstevel@tonic-gate 		ASSERT(pp->p_szc >= level);
12730Sstevel@tonic-gate 		hat_page_setattr(pp, rm);
12740Sstevel@tonic-gate 		++pp;
12750Sstevel@tonic-gate 	}
12760Sstevel@tonic-gate }
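
/*
 * Worked sketch (not from the original source): for a level 1 (2M,
 * PAE) mapping with both PT_REF and PT_MOD set, rm becomes
 * P_REF | P_MOD and page_get_pagecnt(1) is 512, so the loop above
 * applies hat_page_setattr(pp, P_REF | P_MOD) to all 512 constituent
 * 4K page_t's of the large page.
 */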
12770Sstevel@tonic-gate 
12780Sstevel@tonic-gate /*
12790Sstevel@tonic-gate  * This is the set of PTE bits for PFN, permissions and caching
12805084Sjohnlev  * that are allowed to change on a HAT_LOAD_REMAP.
12810Sstevel@tonic-gate  */
12820Sstevel@tonic-gate #define	PT_REMAP_BITS							\
12830Sstevel@tonic-gate 	(PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |		\
12845084Sjohnlev 	PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
12850Sstevel@tonic-gate 
1286510Skchow #define	REMAPASSERT(EX)	if (!(EX)) panic("hati_pte_map: " #EX)
12870Sstevel@tonic-gate /*
12880Sstevel@tonic-gate  * Do the low-level work to get a mapping entered into a HAT's pagetables
12890Sstevel@tonic-gate  * and in the mapping list of the associated page_t.
12900Sstevel@tonic-gate  */
12913446Smrj static int
12920Sstevel@tonic-gate hati_pte_map(
12930Sstevel@tonic-gate 	htable_t	*ht,
12940Sstevel@tonic-gate 	uint_t		entry,
12950Sstevel@tonic-gate 	page_t		*pp,
12960Sstevel@tonic-gate 	x86pte_t	pte,
12970Sstevel@tonic-gate 	int		flags,
12980Sstevel@tonic-gate 	void		*pte_ptr)
12990Sstevel@tonic-gate {
13000Sstevel@tonic-gate 	hat_t		*hat = ht->ht_hat;
13010Sstevel@tonic-gate 	x86pte_t	old_pte;
13020Sstevel@tonic-gate 	level_t		l = ht->ht_level;
13030Sstevel@tonic-gate 	hment_t		*hm;
13040Sstevel@tonic-gate 	uint_t		is_consist;
13058522SJakub.Jermar@Sun.COM 	uint_t		is_locked;
13063446Smrj 	int		rv = 0;
13070Sstevel@tonic-gate 
13080Sstevel@tonic-gate 	/*
13099903SPavel.Tatashin@Sun.COM 	 * Is this a consistent (ie. need mapping list lock) mapping?
13100Sstevel@tonic-gate 	 */
13110Sstevel@tonic-gate 	is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
13120Sstevel@tonic-gate 
13130Sstevel@tonic-gate 	/*
13140Sstevel@tonic-gate 	 * Track locked mapping count in the htable.  Do this first,
13150Sstevel@tonic-gate 	 * as we track locking even if there already is a mapping present.
13160Sstevel@tonic-gate 	 */
13178522SJakub.Jermar@Sun.COM 	is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
13188522SJakub.Jermar@Sun.COM 	if (is_locked)
13190Sstevel@tonic-gate 		HTABLE_LOCK_INC(ht);
13200Sstevel@tonic-gate 
13210Sstevel@tonic-gate 	/*
13220Sstevel@tonic-gate 	 * Acquire the page's mapping list lock and get an hment to use.
13230Sstevel@tonic-gate 	 * Note that hment_prepare() might return NULL.
13240Sstevel@tonic-gate 	 */
13250Sstevel@tonic-gate 	if (is_consist) {
13260Sstevel@tonic-gate 		x86_hm_enter(pp);
13270Sstevel@tonic-gate 		hm = hment_prepare(ht, entry, pp);
13280Sstevel@tonic-gate 	}
13290Sstevel@tonic-gate 
13300Sstevel@tonic-gate 	/*
13310Sstevel@tonic-gate 	 * Set the new pte, retrieving the old one at the same time.
13320Sstevel@tonic-gate 	 */
13330Sstevel@tonic-gate 	old_pte = x86pte_set(ht, entry, pte, pte_ptr);
13340Sstevel@tonic-gate 
13350Sstevel@tonic-gate 	/*
13368522SJakub.Jermar@Sun.COM 	 * Did we get a large page / page table collision?
13373446Smrj 	 */
13383446Smrj 	if (old_pte == LPAGE_ERROR) {
13398522SJakub.Jermar@Sun.COM 		if (is_locked)
13408522SJakub.Jermar@Sun.COM 			HTABLE_LOCK_DEC(ht);
13413446Smrj 		rv = -1;
13423446Smrj 		goto done;
13433446Smrj 	}
13443446Smrj 
13453446Smrj 	/*
13460Sstevel@tonic-gate 	 * If the mapping didn't change there is nothing more to do.
13470Sstevel@tonic-gate 	 */
13483446Smrj 	if (PTE_EQUIV(pte, old_pte))
13493446Smrj 		goto done;
13500Sstevel@tonic-gate 
13510Sstevel@tonic-gate 	/*
13520Sstevel@tonic-gate 	 * Install a new mapping in the page's mapping list
13530Sstevel@tonic-gate 	 */
13540Sstevel@tonic-gate 	if (!PTE_ISVALID(old_pte)) {
13550Sstevel@tonic-gate 		if (is_consist) {
13560Sstevel@tonic-gate 			hment_assign(ht, entry, pp, hm);
13570Sstevel@tonic-gate 			x86_hm_exit(pp);
13580Sstevel@tonic-gate 		} else {
13590Sstevel@tonic-gate 			ASSERT(flags & HAT_LOAD_NOCONSIST);
13600Sstevel@tonic-gate 		}
13615349Skchow #if defined(__amd64)
13625349Skchow 		if (ht->ht_flags & HTABLE_VLP) {
13635349Skchow 			cpu_t *cpu = CPU;
13645349Skchow 			x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
13655349Skchow 			VLP_COPY(hat->hat_vlp_ptes, vlpptep);
13665349Skchow 		}
13675349Skchow #endif
13680Sstevel@tonic-gate 		HTABLE_INC(ht->ht_valid_cnt);
13690Sstevel@tonic-gate 		PGCNT_INC(hat, l);
13703446Smrj 		return (rv);
13710Sstevel@tonic-gate 	}
13720Sstevel@tonic-gate 
13730Sstevel@tonic-gate 	/*
13740Sstevel@tonic-gate 	 * Remap's are more complicated:
13750Sstevel@tonic-gate 	 *  - HAT_LOAD_REMAP must be specified if changing the pfn.
13760Sstevel@tonic-gate 	 *    We also require that NOCONSIST be specified.
13770Sstevel@tonic-gate 	 *  - Otherwise only permission or caching bits may change.
13780Sstevel@tonic-gate 	 */
13790Sstevel@tonic-gate 	if (!PTE_ISPAGE(old_pte, l))
13800Sstevel@tonic-gate 		panic("non-null/page mapping pte=" FMT_PTE, old_pte);
13810Sstevel@tonic-gate 
13820Sstevel@tonic-gate 	if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1383510Skchow 		REMAPASSERT(flags & HAT_LOAD_REMAP);
1384510Skchow 		REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
13853446Smrj 		REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1386510Skchow 		REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
13870Sstevel@tonic-gate 		    pf_is_memory(PTE2PFN(pte, l)));
1388510Skchow 		REMAPASSERT(!is_consist);
13890Sstevel@tonic-gate 	}
13900Sstevel@tonic-gate 
13910Sstevel@tonic-gate 	/*
13925084Sjohnlev 	 * We only let remaps change certain bits in the PTE.
13930Sstevel@tonic-gate 	 */
13945084Sjohnlev 	if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
13955084Sjohnlev 		panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
13965084Sjohnlev 		    old_pte, pte);
13970Sstevel@tonic-gate 
13980Sstevel@tonic-gate 	/*
13990Sstevel@tonic-gate 	 * We don't create any mapping list entries on a remap, so release
14000Sstevel@tonic-gate 	 * any allocated hment after we drop the mapping list lock.
14010Sstevel@tonic-gate 	 */
14023446Smrj done:
14030Sstevel@tonic-gate 	if (is_consist) {
14040Sstevel@tonic-gate 		x86_hm_exit(pp);
14050Sstevel@tonic-gate 		if (hm != NULL)
14060Sstevel@tonic-gate 			hment_free(hm);
14070Sstevel@tonic-gate 	}
14083446Smrj 	return (rv);
14090Sstevel@tonic-gate }
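
/*
 * Usage sketch (hypothetical, not from the original source): callers
 * treat a nonzero return as a large page / page table collision and
 * retry at a smaller pagesize, along the lines of:
 *
 *	while (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0) {
 *		if (level == 0)
 *			panic("unexpected failure");
 *		--level;
 *	}
 *
 * hati_load_common() below returns this value so that
 * hat_memload_array() and hat_devload() can do exactly that.
 */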
14100Sstevel@tonic-gate 
14110Sstevel@tonic-gate /*
14123446Smrj  * Internal routine to load a single page table entry. This only fails if
14133446Smrj  * we attempt to overwrite a page table link with a large page.
14140Sstevel@tonic-gate  */
14153446Smrj static int
14160Sstevel@tonic-gate hati_load_common(
14170Sstevel@tonic-gate 	hat_t		*hat,
14180Sstevel@tonic-gate 	uintptr_t	va,
14190Sstevel@tonic-gate 	page_t		*pp,
14200Sstevel@tonic-gate 	uint_t		attr,
14210Sstevel@tonic-gate 	uint_t		flags,
14220Sstevel@tonic-gate 	level_t		level,
14230Sstevel@tonic-gate 	pfn_t		pfn)
14240Sstevel@tonic-gate {
14250Sstevel@tonic-gate 	htable_t	*ht;
14260Sstevel@tonic-gate 	uint_t		entry;
14270Sstevel@tonic-gate 	x86pte_t	pte;
14283446Smrj 	int		rv = 0;
14290Sstevel@tonic-gate 
14304004Sjosephb 	/*
14314004Sjosephb 	 * The number 16 is arbitrary; it is here to catch a recursion
14324004Sjosephb 	 * problem early, before we blow out the kernel stack.
14334004Sjosephb 	 */
14344004Sjosephb 	++curthread->t_hatdepth;
14354004Sjosephb 	ASSERT(curthread->t_hatdepth < 16);
14364004Sjosephb 
14370Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
14380Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
14390Sstevel@tonic-gate 
14400Sstevel@tonic-gate 	if (flags & HAT_LOAD_SHARE)
14410Sstevel@tonic-gate 		hat->hat_flags |= HAT_SHARED;
14420Sstevel@tonic-gate 
14430Sstevel@tonic-gate 	/*
14440Sstevel@tonic-gate 	 * Find the page table that maps this page if it already exists.
14450Sstevel@tonic-gate 	 */
14460Sstevel@tonic-gate 	ht = htable_lookup(hat, va, level);
14470Sstevel@tonic-gate 
14480Sstevel@tonic-gate 	/*
14494004Sjosephb 	 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
14500Sstevel@tonic-gate 	 */
14514004Sjosephb 	if (pp == NULL)
14520Sstevel@tonic-gate 		flags |= HAT_LOAD_NOCONSIST;
14530Sstevel@tonic-gate 
14540Sstevel@tonic-gate 	if (ht == NULL) {
14550Sstevel@tonic-gate 		ht = htable_create(hat, va, level, NULL);
14560Sstevel@tonic-gate 		ASSERT(ht != NULL);
14570Sstevel@tonic-gate 	}
14580Sstevel@tonic-gate 	entry = htable_va2entry(va, ht);
14590Sstevel@tonic-gate 
14600Sstevel@tonic-gate 	/*
14610Sstevel@tonic-gate 	 * a bunch of paranoid error checking
14620Sstevel@tonic-gate 	 */
14630Sstevel@tonic-gate 	ASSERT(ht->ht_busy > 0);
14640Sstevel@tonic-gate 	if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
14657240Srh87107 		panic("hati_load_common: bad htable %p, va %p",
14667240Srh87107 		    (void *)ht, (void *)va);
14670Sstevel@tonic-gate 	ASSERT(ht->ht_level == level);
14680Sstevel@tonic-gate 
14690Sstevel@tonic-gate 	/*
14700Sstevel@tonic-gate 	 * construct the new PTE
14710Sstevel@tonic-gate 	 */
14720Sstevel@tonic-gate 	if (hat == kas.a_hat)
14730Sstevel@tonic-gate 		attr &= ~PROT_USER;
14740Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, level, flags);
14750Sstevel@tonic-gate 	if (hat == kas.a_hat && va >= kernelbase)
14760Sstevel@tonic-gate 		PTE_SET(pte, mmu.pt_global);
14770Sstevel@tonic-gate 
14780Sstevel@tonic-gate 	/*
14790Sstevel@tonic-gate 	 * establish the mapping
14800Sstevel@tonic-gate 	 */
14813446Smrj 	rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
14820Sstevel@tonic-gate 
14830Sstevel@tonic-gate 	/*
14840Sstevel@tonic-gate 	 * release the htable and any reserves
14850Sstevel@tonic-gate 	 */
14860Sstevel@tonic-gate 	htable_release(ht);
14874004Sjosephb 	--curthread->t_hatdepth;
14883446Smrj 	return (rv);
14890Sstevel@tonic-gate }
14900Sstevel@tonic-gate 
14910Sstevel@tonic-gate /*
14920Sstevel@tonic-gate  * special case of hat_memload to deal with some kernel addrs for performance
14930Sstevel@tonic-gate  */
14940Sstevel@tonic-gate static void
14950Sstevel@tonic-gate hat_kmap_load(
14960Sstevel@tonic-gate 	caddr_t		addr,
14970Sstevel@tonic-gate 	page_t		*pp,
14980Sstevel@tonic-gate 	uint_t		attr,
14990Sstevel@tonic-gate 	uint_t		flags)
15000Sstevel@tonic-gate {
15010Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
15020Sstevel@tonic-gate 	x86pte_t	pte;
15030Sstevel@tonic-gate 	pfn_t		pfn = page_pptonum(pp);
15040Sstevel@tonic-gate 	pgcnt_t		pg_off = mmu_btop(va - mmu.kmap_addr);
15050Sstevel@tonic-gate 	htable_t	*ht;
15060Sstevel@tonic-gate 	uint_t		entry;
15070Sstevel@tonic-gate 	void		*pte_ptr;
15080Sstevel@tonic-gate 
15090Sstevel@tonic-gate 	/*
15100Sstevel@tonic-gate 	 * construct the requested PTE
15110Sstevel@tonic-gate 	 */
15120Sstevel@tonic-gate 	attr &= ~PROT_USER;
15130Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
15140Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, 0, flags);
15150Sstevel@tonic-gate 	PTE_SET(pte, mmu.pt_global);
15160Sstevel@tonic-gate 
15170Sstevel@tonic-gate 	/*
15180Sstevel@tonic-gate 	 * Figure out the pte_ptr and htable and use common code to finish up
15190Sstevel@tonic-gate 	 */
15200Sstevel@tonic-gate 	if (mmu.pae_hat)
15210Sstevel@tonic-gate 		pte_ptr = mmu.kmap_ptes + pg_off;
15220Sstevel@tonic-gate 	else
15230Sstevel@tonic-gate 		pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
15240Sstevel@tonic-gate 	ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
15250Sstevel@tonic-gate 	    LEVEL_SHIFT(1)];
15260Sstevel@tonic-gate 	entry = htable_va2entry(va, ht);
15274004Sjosephb 	++curthread->t_hatdepth;
15284004Sjosephb 	ASSERT(curthread->t_hatdepth < 16);
15293446Smrj 	(void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
15304004Sjosephb 	--curthread->t_hatdepth;
15310Sstevel@tonic-gate }
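
/*
 * Worked sketch (not from the original source; the addresses are
 * hypothetical): if mmu.kmap_addr were 0xffffff0000000000 and va
 * were mmu.kmap_addr + 0x5000, then pg_off == mmu_btop(0x5000) == 5
 * and pte_ptr addresses the 6th kmap PTE; the mmu.pae_hat test only
 * changes the stride of that pointer arithmetic (8-byte x86pte_t
 * vs. 4-byte x86pte32_t entries).
 */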
15320Sstevel@tonic-gate 
15330Sstevel@tonic-gate /*
15340Sstevel@tonic-gate  * hat_memload() - load a translation to the given page struct
15350Sstevel@tonic-gate  *
15360Sstevel@tonic-gate  * Flags for hat_memload/hat_devload/hat_*attr.
15370Sstevel@tonic-gate  *
15380Sstevel@tonic-gate  * 	HAT_LOAD	Default flags to load a translation to the page.
15390Sstevel@tonic-gate  *
15400Sstevel@tonic-gate  * 	HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
15410Sstevel@tonic-gate  *			and hat_devload().
15420Sstevel@tonic-gate  *
15430Sstevel@tonic-gate  *	HAT_LOAD_NOCONSIST Do not add the mapping to the page_t mapping
15443446Smrj  *			list; sets PT_NOCONSIST.
15450Sstevel@tonic-gate  *
15460Sstevel@tonic-gate  *	HAT_LOAD_SHARE	A flag to hat_memload() to indicate h/w page tables
15470Sstevel@tonic-gate  *			that map some user pages (not kas) are shared by more
15480Sstevel@tonic-gate  *			than one process (eg. ISM).
15490Sstevel@tonic-gate  *
15500Sstevel@tonic-gate  *	HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
15510Sstevel@tonic-gate  *
15520Sstevel@tonic-gate  *	HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
15530Sstevel@tonic-gate  *			point, it's setting up mapping to allocate internal
15540Sstevel@tonic-gate  *			hat layer data structures.  This flag forces hat layer
15550Sstevel@tonic-gate  *			to tap its reserves in order to prevent infinite
15560Sstevel@tonic-gate  *			recursion.
15570Sstevel@tonic-gate  *
15580Sstevel@tonic-gate  * The following is a protection attribute (like PROT_READ, etc.)
15590Sstevel@tonic-gate  *
15603446Smrj  *	HAT_NOSYNC	set PT_NOSYNC - this mapping's ref/mod bits
15610Sstevel@tonic-gate  *			are never cleared.
15620Sstevel@tonic-gate  *
15630Sstevel@tonic-gate  * Installing new valid PTE's and creation of the mapping list
15640Sstevel@tonic-gate  * entry are controlled under the same lock. It's derived from the
15650Sstevel@tonic-gate  * page_t being mapped.
15660Sstevel@tonic-gate  */
15670Sstevel@tonic-gate static uint_t supported_memload_flags =
15680Sstevel@tonic-gate 	HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
15690Sstevel@tonic-gate 	HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
15700Sstevel@tonic-gate 
15710Sstevel@tonic-gate void
15720Sstevel@tonic-gate hat_memload(
15730Sstevel@tonic-gate 	hat_t		*hat,
15740Sstevel@tonic-gate 	caddr_t		addr,
15750Sstevel@tonic-gate 	page_t		*pp,
15760Sstevel@tonic-gate 	uint_t		attr,
15770Sstevel@tonic-gate 	uint_t		flags)
15780Sstevel@tonic-gate {
15790Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
15800Sstevel@tonic-gate 	level_t		level = 0;
15810Sstevel@tonic-gate 	pfn_t		pfn = page_pptonum(pp);
15820Sstevel@tonic-gate 
15835084Sjohnlev 	XPV_DISALLOW_MIGRATE();
15840Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
15853446Smrj 	ASSERT(hat == kas.a_hat || va < _userlimit);
15860Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
15870Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
15880Sstevel@tonic-gate 	ASSERT((flags & supported_memload_flags) == flags);
15890Sstevel@tonic-gate 
15900Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
15910Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
15920Sstevel@tonic-gate 
15930Sstevel@tonic-gate 	/*
15940Sstevel@tonic-gate 	 * kernel address special case for performance.
15950Sstevel@tonic-gate 	 */
15960Sstevel@tonic-gate 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
15970Sstevel@tonic-gate 		ASSERT(hat == kas.a_hat);
15980Sstevel@tonic-gate 		hat_kmap_load(addr, pp, attr, flags);
15995084Sjohnlev 		XPV_ALLOW_MIGRATE();
16000Sstevel@tonic-gate 		return;
16010Sstevel@tonic-gate 	}
16020Sstevel@tonic-gate 
16030Sstevel@tonic-gate 	/*
16040Sstevel@tonic-gate 	 * This is used for memory with normal caching enabled, so
16050Sstevel@tonic-gate 	 * always set HAT_STORECACHING_OK.
16060Sstevel@tonic-gate 	 */
16070Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
16083446Smrj 	if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
16093446Smrj 		panic("unexpected hati_load_common() failure");
16105084Sjohnlev 	XPV_ALLOW_MIGRATE();
16110Sstevel@tonic-gate }
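
/*
 * Usage sketch (hypothetical, not from the original source): a
 * caller might map one locked, writable page into an address space
 * with
 *
 *	hat_memload(as->a_hat, addr, pp, PROT_READ | PROT_WRITE,
 *	    HAT_LOAD_LOCK);
 *
 * and balance the lock later with hat_unlock(as->a_hat, addr,
 * MMU_PAGESIZE).
 */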
16120Sstevel@tonic-gate 
16134528Spaulsan /* ARGSUSED */
16144528Spaulsan void
16154528Spaulsan hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
16164528Spaulsan     uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
16174528Spaulsan {
16184528Spaulsan 	hat_memload(hat, addr, pp, attr, flags);
16194528Spaulsan }
16204528Spaulsan 
16210Sstevel@tonic-gate /*
16220Sstevel@tonic-gate  * Load the given array of page structs using large pages when possible
16230Sstevel@tonic-gate  */
16240Sstevel@tonic-gate void
16250Sstevel@tonic-gate hat_memload_array(
16260Sstevel@tonic-gate 	hat_t		*hat,
16270Sstevel@tonic-gate 	caddr_t		addr,
16280Sstevel@tonic-gate 	size_t		len,
16290Sstevel@tonic-gate 	page_t		**pages,
16300Sstevel@tonic-gate 	uint_t		attr,
16310Sstevel@tonic-gate 	uint_t		flags)
16320Sstevel@tonic-gate {
16330Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
16340Sstevel@tonic-gate 	uintptr_t	eaddr = va + len;
16350Sstevel@tonic-gate 	level_t		level;
16360Sstevel@tonic-gate 	size_t		pgsize;
16370Sstevel@tonic-gate 	pgcnt_t		pgindx = 0;
16380Sstevel@tonic-gate 	pfn_t		pfn;
16390Sstevel@tonic-gate 	pgcnt_t		i;
16400Sstevel@tonic-gate 
16415084Sjohnlev 	XPV_DISALLOW_MIGRATE();
16420Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
16433446Smrj 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
16440Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
16450Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
16460Sstevel@tonic-gate 	ASSERT((flags & supported_memload_flags) == flags);
16470Sstevel@tonic-gate 
16480Sstevel@tonic-gate 	/*
16490Sstevel@tonic-gate 	 * memload is used for memory with full caching enabled, so
16500Sstevel@tonic-gate 	 * set HAT_STORECACHING_OK.
16510Sstevel@tonic-gate 	 */
16520Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
16530Sstevel@tonic-gate 
16540Sstevel@tonic-gate 	/*
16550Sstevel@tonic-gate 	 * handle all pages using largest possible pagesize
16560Sstevel@tonic-gate 	 */
16570Sstevel@tonic-gate 	while (va < eaddr) {
16580Sstevel@tonic-gate 		/*
16590Sstevel@tonic-gate 		 * decide what level mapping to use (ie. pagesize)
16600Sstevel@tonic-gate 		 */
16610Sstevel@tonic-gate 		pfn = page_pptonum(pages[pgindx]);
16620Sstevel@tonic-gate 		for (level = mmu.max_page_level; ; --level) {
16630Sstevel@tonic-gate 			pgsize = LEVEL_SIZE(level);
16640Sstevel@tonic-gate 			if (level == 0)
16650Sstevel@tonic-gate 				break;
16663446Smrj 
16670Sstevel@tonic-gate 			if (!IS_P2ALIGNED(va, pgsize) ||
16680Sstevel@tonic-gate 			    (eaddr - va) < pgsize ||
16693446Smrj 			    !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
16700Sstevel@tonic-gate 				continue;
16710Sstevel@tonic-gate 
16720Sstevel@tonic-gate 			/*
16730Sstevel@tonic-gate 			 * To use a large mapping of this size, all the
16740Sstevel@tonic-gate 			 * pages we are passed must be sequential subpages
16750Sstevel@tonic-gate 			 * of the large page.
16760Sstevel@tonic-gate 			 * hat_page_demote() can't change p_szc because
16770Sstevel@tonic-gate 			 * all pages are locked.
16780Sstevel@tonic-gate 			 */
16790Sstevel@tonic-gate 			if (pages[pgindx]->p_szc >= level) {
16800Sstevel@tonic-gate 				for (i = 0; i < mmu_btop(pgsize); ++i) {
16810Sstevel@tonic-gate 					if (pfn + i !=
16820Sstevel@tonic-gate 					    page_pptonum(pages[pgindx + i]))
16830Sstevel@tonic-gate 						break;
16840Sstevel@tonic-gate 					ASSERT(pages[pgindx + i]->p_szc >=
16850Sstevel@tonic-gate 					    level);
16860Sstevel@tonic-gate 					ASSERT(pages[pgindx] + i ==
16870Sstevel@tonic-gate 					    pages[pgindx + i]);
16880Sstevel@tonic-gate 				}
16895349Skchow 				if (i == mmu_btop(pgsize)) {
16905349Skchow #ifdef DEBUG
16915349Skchow 					if (level == 2)
16925349Skchow 						map1gcnt++;
16935349Skchow #endif
16940Sstevel@tonic-gate 					break;
16955349Skchow 				}
16960Sstevel@tonic-gate 			}
16970Sstevel@tonic-gate 		}
16980Sstevel@tonic-gate 
16990Sstevel@tonic-gate 		/*
17003446Smrj 		 * Load this page mapping. If the load fails, try a smaller
17013446Smrj 		 * pagesize.
17020Sstevel@tonic-gate 		 */
17030Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(va));
17043446Smrj 		while (hati_load_common(hat, va, pages[pgindx], attr,
17054381Sjosephb 		    flags, level, pfn) != 0) {
17063446Smrj 			if (level == 0)
17073446Smrj 				panic("unexpected hati_load_common() failure");
17083446Smrj 			--level;
17093446Smrj 			pgsize = LEVEL_SIZE(level);
17103446Smrj 		}
17110Sstevel@tonic-gate 
17120Sstevel@tonic-gate 		/*
17130Sstevel@tonic-gate 		 * move to next page
17140Sstevel@tonic-gate 		 */
17150Sstevel@tonic-gate 		va += pgsize;
17160Sstevel@tonic-gate 		pgindx += mmu_btop(pgsize);
17170Sstevel@tonic-gate 	}
17185084Sjohnlev 	XPV_ALLOW_MIGRATE();
17190Sstevel@tonic-gate }
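
/*
 * Worked sketch (not from the original source): on amd64 a 2M
 * (level 1) mapping is attempted only when all three tests above
 * pass:
 *
 *	IS_P2ALIGNED(va, 0x200000)		virtually 2M aligned
 *	(eaddr - va) >= 0x200000		at least 2M remaining
 *	IS_P2ALIGNED(pfn_to_pa(pfn), 0x200000)	physically 2M aligned
 *
 * and the 512 page_t pointers must then prove to be sequential
 * constituent pages of one large page.
 */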
17200Sstevel@tonic-gate 
17214528Spaulsan /* ARGSUSED */
17224528Spaulsan void
17234528Spaulsan hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
17244528Spaulsan     struct page **pps, uint_t attr, uint_t flags,
17254528Spaulsan     hat_region_cookie_t rcookie)
17264528Spaulsan {
17274528Spaulsan 	hat_memload_array(hat, addr, len, pps, attr, flags);
17284528Spaulsan }
17294528Spaulsan 
17300Sstevel@tonic-gate /*
17310Sstevel@tonic-gate  * void hat_devload(hat, addr, len, pf, attr, flags)
17320Sstevel@tonic-gate  *	load/lock the given page frame number
17330Sstevel@tonic-gate  *
17340Sstevel@tonic-gate  * Advisory ordering attributes. Apply only to device mappings.
17350Sstevel@tonic-gate  *
17360Sstevel@tonic-gate  * HAT_STRICTORDER: the CPU must issue the references in order, as the
17370Sstevel@tonic-gate  *	programmer specified.  This is the default.
17380Sstevel@tonic-gate  * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
17390Sstevel@tonic-gate  *	of reordering; store or load with store or load).
17400Sstevel@tonic-gate  * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
17410Sstevel@tonic-gate  *	to consecutive locations (for example, turn two consecutive byte
17420Sstevel@tonic-gate  *	stores into one halfword store), and it may batch individual loads
17430Sstevel@tonic-gate  *	(for example, turn two consecutive byte loads into one halfword load).
17440Sstevel@tonic-gate  *	This also implies re-ordering.
17450Sstevel@tonic-gate  * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
17460Sstevel@tonic-gate  *	until another store occurs.  The default is to fetch new data
17470Sstevel@tonic-gate  *	on every load.  This also implies merging.
17480Sstevel@tonic-gate  * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
17490Sstevel@tonic-gate  *	the device (perhaps with other data) at a later time.  The default is
17500Sstevel@tonic-gate  *	to push the data right away.  This also implies load caching.
17510Sstevel@tonic-gate  *
17520Sstevel@tonic-gate  * Equivalent of hat_memload(), but can be used for device memory where
17530Sstevel@tonic-gate  * there are no page_t's and we support additional flags (write merging, etc).
17540Sstevel@tonic-gate  * Note that we can have large page mappings with this interface.
17550Sstevel@tonic-gate  */
17560Sstevel@tonic-gate int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
17570Sstevel@tonic-gate 	HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
17580Sstevel@tonic-gate 	HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
17590Sstevel@tonic-gate 
17600Sstevel@tonic-gate void
17610Sstevel@tonic-gate hat_devload(
17620Sstevel@tonic-gate 	hat_t		*hat,
17630Sstevel@tonic-gate 	caddr_t		addr,
17640Sstevel@tonic-gate 	size_t		len,
17650Sstevel@tonic-gate 	pfn_t		pfn,
17660Sstevel@tonic-gate 	uint_t		attr,
17670Sstevel@tonic-gate 	int		flags)
17680Sstevel@tonic-gate {
17690Sstevel@tonic-gate 	uintptr_t	va = ALIGN2PAGE(addr);
17700Sstevel@tonic-gate 	uintptr_t	eva = va + len;
17710Sstevel@tonic-gate 	level_t		level;
17720Sstevel@tonic-gate 	size_t		pgsize;
17730Sstevel@tonic-gate 	page_t		*pp;
17740Sstevel@tonic-gate 	int		f;	/* per PTE copy of flags  - maybe modified */
17750Sstevel@tonic-gate 	uint_t		a;	/* per PTE copy of attr */
17760Sstevel@tonic-gate 
17775084Sjohnlev 	XPV_DISALLOW_MIGRATE();
17780Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
17793446Smrj 	ASSERT(hat == kas.a_hat || eva <= _userlimit);
17800Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
17810Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
17820Sstevel@tonic-gate 	ASSERT((flags & supported_devload_flags) == flags);
17830Sstevel@tonic-gate 
17840Sstevel@tonic-gate 	/*
17850Sstevel@tonic-gate 	 * handle all pages
17860Sstevel@tonic-gate 	 */
17870Sstevel@tonic-gate 	while (va < eva) {
17880Sstevel@tonic-gate 
17890Sstevel@tonic-gate 		/*
17900Sstevel@tonic-gate 		 * decide what level mapping to use (ie. pagesize)
17910Sstevel@tonic-gate 		 */
17920Sstevel@tonic-gate 		for (level = mmu.max_page_level; ; --level) {
17930Sstevel@tonic-gate 			pgsize = LEVEL_SIZE(level);
17940Sstevel@tonic-gate 			if (level == 0)
17950Sstevel@tonic-gate 				break;
17960Sstevel@tonic-gate 			if (IS_P2ALIGNED(va, pgsize) &&
17970Sstevel@tonic-gate 			    (eva - va) >= pgsize &&
17985349Skchow 			    IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
17995349Skchow #ifdef DEBUG
18005349Skchow 				if (level == 2)
18015349Skchow 					map1gcnt++;
18025349Skchow #endif
18030Sstevel@tonic-gate 				break;
18045349Skchow 			}
18050Sstevel@tonic-gate 		}
18060Sstevel@tonic-gate 
18070Sstevel@tonic-gate 		/*
18083446Smrj 		 * If this is just memory then allow caching (this happens
18090Sstevel@tonic-gate 		 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
18103446Smrj 		 * to override that. If we don't have a page_t then make sure
18110Sstevel@tonic-gate 		 * NOCONSIST is set.
18120Sstevel@tonic-gate 		 */
18130Sstevel@tonic-gate 		a = attr;
18140Sstevel@tonic-gate 		f = flags;
18155084Sjohnlev 		if (!pf_is_memory(pfn))
18165084Sjohnlev 			f |= HAT_LOAD_NOCONSIST;
18175084Sjohnlev 		else if (!(a & HAT_PLAT_NOCACHE))
18185084Sjohnlev 			a |= HAT_STORECACHING_OK;
18195084Sjohnlev 
18205084Sjohnlev 		if (f & HAT_LOAD_NOCONSIST)
18210Sstevel@tonic-gate 			pp = NULL;
18225084Sjohnlev 		else
18235084Sjohnlev 			pp = page_numtopp_nolock(pfn);
18240Sstevel@tonic-gate 
18250Sstevel@tonic-gate 		/*
18269441SPrakash.Sangappa@Sun.COM 		 * Check to make sure we are really trying to map a valid
18279441SPrakash.Sangappa@Sun.COM 		 * memory page. A caller wishing to intentionally map
18289441SPrakash.Sangappa@Sun.COM 		 * free memory pages will have passed the HAT_LOAD_NOCONSIST
18299441SPrakash.Sangappa@Sun.COM 		 * flag, in which case pp will be NULL.
18309441SPrakash.Sangappa@Sun.COM 		 */
18319441SPrakash.Sangappa@Sun.COM 		if (pp != NULL) {
18329441SPrakash.Sangappa@Sun.COM 			if (PP_ISFREE(pp)) {
18339441SPrakash.Sangappa@Sun.COM 				panic("hat_devload: loading "
18349441SPrakash.Sangappa@Sun.COM 				    "a mapping to free page %p", (void *)pp);
18359441SPrakash.Sangappa@Sun.COM 			}
18369441SPrakash.Sangappa@Sun.COM 
18379441SPrakash.Sangappa@Sun.COM 			if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
18389441SPrakash.Sangappa@Sun.COM 				panic("hat_devload: loading a mapping "
18399441SPrakash.Sangappa@Sun.COM 				    "to an unlocked page %p",
18409441SPrakash.Sangappa@Sun.COM 				    (void *)pp);
18419441SPrakash.Sangappa@Sun.COM 			}
18429441SPrakash.Sangappa@Sun.COM 		}
18439441SPrakash.Sangappa@Sun.COM 
18449441SPrakash.Sangappa@Sun.COM 		/*
18450Sstevel@tonic-gate 		 * load this page mapping
18460Sstevel@tonic-gate 		 */
18470Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(va));
18483446Smrj 		while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
18493446Smrj 			if (level == 0)
18503446Smrj 				panic("unexpected hati_load_common() failure");
18513446Smrj 			--level;
18523446Smrj 			pgsize = LEVEL_SIZE(level);
18533446Smrj 		}
18540Sstevel@tonic-gate 
18550Sstevel@tonic-gate 		/*
18560Sstevel@tonic-gate 		 * move to next page
18570Sstevel@tonic-gate 		 */
18580Sstevel@tonic-gate 		va += pgsize;
18590Sstevel@tonic-gate 		pfn += mmu_btop(pgsize);
18600Sstevel@tonic-gate 	}
18615084Sjohnlev 	XPV_ALLOW_MIGRATE();
18620Sstevel@tonic-gate }
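
/*
 * Usage sketch (hypothetical, not from the original source; reg_pa
 * and va are made-up names): mapping one page of device registers,
 * uncached, strictly ordered, and with no page_t backing:
 *
 *	hat_devload(kas.a_hat, va, MMU_PAGESIZE, mmu_btop(reg_pa),
 *	    PROT_READ | PROT_WRITE | HAT_PLAT_NOCACHE,
 *	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST | HAT_STRICTORDER);
 */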
18630Sstevel@tonic-gate 
18640Sstevel@tonic-gate /*
18650Sstevel@tonic-gate  * void hat_unlock(hat, addr, len)
18660Sstevel@tonic-gate  *	unlock the mappings to a given range of addresses
18670Sstevel@tonic-gate  *
18680Sstevel@tonic-gate  * Locks are tracked by ht_lock_cnt in the htable.
18690Sstevel@tonic-gate  */
18700Sstevel@tonic-gate void
18710Sstevel@tonic-gate hat_unlock(hat_t *hat, caddr_t addr, size_t len)
18720Sstevel@tonic-gate {
18730Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
18740Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
18750Sstevel@tonic-gate 	htable_t	*ht = NULL;
18760Sstevel@tonic-gate 
18770Sstevel@tonic-gate 	/*
18780Sstevel@tonic-gate 	 * kernel entries are always locked, we don't track lock counts
18790Sstevel@tonic-gate 	 */
18803446Smrj 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
18810Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
18820Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
18830Sstevel@tonic-gate 	if (hat == kas.a_hat)
18840Sstevel@tonic-gate 		return;
18850Sstevel@tonic-gate 	if (eaddr > _userlimit)
18860Sstevel@tonic-gate 		panic("hat_unlock() address out of range - above _userlimit");
18870Sstevel@tonic-gate 
18885084Sjohnlev 	XPV_DISALLOW_MIGRATE();
18890Sstevel@tonic-gate 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
18900Sstevel@tonic-gate 	while (vaddr < eaddr) {
18910Sstevel@tonic-gate 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
18920Sstevel@tonic-gate 		if (ht == NULL)
18930Sstevel@tonic-gate 			break;
18940Sstevel@tonic-gate 
18950Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
18960Sstevel@tonic-gate 
18970Sstevel@tonic-gate 		if (ht->ht_lock_cnt < 1)
18980Sstevel@tonic-gate 			panic("hat_unlock(): lock_cnt < 1, "
18997240Srh87107 			    "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
19000Sstevel@tonic-gate 		HTABLE_LOCK_DEC(ht);
19010Sstevel@tonic-gate 
19020Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
19030Sstevel@tonic-gate 	}
19040Sstevel@tonic-gate 	if (ht)
19050Sstevel@tonic-gate 		htable_release(ht);
19065084Sjohnlev 	XPV_ALLOW_MIGRATE();
19070Sstevel@tonic-gate }
19080Sstevel@tonic-gate 
19094528Spaulsan /* ARGSUSED */
19104528Spaulsan void
19115075Spaulsan hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
19124528Spaulsan     hat_region_cookie_t rcookie)
19134528Spaulsan {
19144528Spaulsan 	panic("No shared region support on x86");
19154528Spaulsan }
19164528Spaulsan 
19175084Sjohnlev #if !defined(__xpv)
19180Sstevel@tonic-gate /*
19190Sstevel@tonic-gate  * Cross call service routine to demap a virtual page on
19200Sstevel@tonic-gate  * the current CPU or flush all mappings in the TLB.
19210Sstevel@tonic-gate  */
19220Sstevel@tonic-gate /*ARGSUSED*/
19230Sstevel@tonic-gate static int
19240Sstevel@tonic-gate hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
19250Sstevel@tonic-gate {
19260Sstevel@tonic-gate 	hat_t	*hat = (hat_t *)a1;
19270Sstevel@tonic-gate 	caddr_t	addr = (caddr_t)a2;
19280Sstevel@tonic-gate 
19290Sstevel@tonic-gate 	/*
19300Sstevel@tonic-gate 	 * If the target hat isn't the kernel and this CPU isn't operating
19310Sstevel@tonic-gate 	 * in the target hat, we can ignore the cross call.
19320Sstevel@tonic-gate 	 */
19330Sstevel@tonic-gate 	if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
19340Sstevel@tonic-gate 		return (0);
19350Sstevel@tonic-gate 
19360Sstevel@tonic-gate 	/*
19370Sstevel@tonic-gate 	 * For a normal address, we just flush one page mapping
19380Sstevel@tonic-gate 	 */
19390Sstevel@tonic-gate 	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
19403446Smrj 		mmu_tlbflush_entry(addr);
19410Sstevel@tonic-gate 		return (0);
19420Sstevel@tonic-gate 	}
19430Sstevel@tonic-gate 
19440Sstevel@tonic-gate 	/*
19450Sstevel@tonic-gate 	 * Otherwise we reload cr3 to effect a complete TLB flush.
19460Sstevel@tonic-gate 	 *
19470Sstevel@tonic-gate 	 * A reload of cr3 on a VLP process also means we must recopy
19480Sstevel@tonic-gate 	 * the pte values from the struct hat.
19490Sstevel@tonic-gate 	 */
19500Sstevel@tonic-gate 	if (hat->hat_flags & HAT_VLP) {
19510Sstevel@tonic-gate #if defined(__amd64)
19520Sstevel@tonic-gate 		x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
19530Sstevel@tonic-gate 
19540Sstevel@tonic-gate 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
19550Sstevel@tonic-gate #elif defined(__i386)
19560Sstevel@tonic-gate 		reload_pae32(hat, CPU);
19570Sstevel@tonic-gate #endif
19580Sstevel@tonic-gate 	}
19590Sstevel@tonic-gate 	reload_cr3();
19600Sstevel@tonic-gate 	return (0);
19610Sstevel@tonic-gate }
19620Sstevel@tonic-gate 
19630Sstevel@tonic-gate /*
19644191Sjosephb  * Flush all TLB entries, including global (ie. kernel) ones.
19654191Sjosephb  */
19664191Sjosephb static void
19674191Sjosephb flush_all_tlb_entries(void)
19684191Sjosephb {
19694191Sjosephb 	ulong_t cr4 = getcr4();
19704191Sjosephb 
19714191Sjosephb 	if (cr4 & CR4_PGE) {
19724191Sjosephb 		setcr4(cr4 & ~(ulong_t)CR4_PGE);
19734191Sjosephb 		setcr4(cr4);
19744191Sjosephb 
19754191Sjosephb 		/*
19764191Sjosephb 		 * 32 bit PAE also always needs a reload_cr3()
19774191Sjosephb 		 */
19784191Sjosephb 		if (mmu.max_level == 2)
19794191Sjosephb 			reload_cr3();
19804191Sjosephb 	} else {
19814191Sjosephb 		reload_cr3();
19824191Sjosephb 	}
19834191Sjosephb }
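
/*
 * Note (not from the original source): toggling CR4.PGE off and on
 * is the architected way to invalidate global (PT_GLOBAL) TLB
 * entries, which a plain cr3 reload leaves intact; hence the pair of
 * setcr4() calls above whenever PGE is enabled.
 */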
19844191Sjosephb 
19854191Sjosephb #define	TLB_CPU_HALTED	(01ul)
19864191Sjosephb #define	TLB_INVAL_ALL	(02ul)
19874191Sjosephb #define	CAS_TLB_INFO(cpu, old, new)	\
19884191Sjosephb 	caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
19894191Sjosephb 
19904191Sjosephb /*
19914191Sjosephb  * Record that a CPU is going idle
19924191Sjosephb  */
19934191Sjosephb void
19944191Sjosephb tlb_going_idle(void)
19954191Sjosephb {
19964191Sjosephb 	atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
19974191Sjosephb }
19984191Sjosephb 
19994191Sjosephb /*
20004191Sjosephb  * Service a delayed TLB flush if coming out of being idle.
20019903SPavel.Tatashin@Sun.COM  * It is called from the cpu idle notification with interrupts disabled.
20024191Sjosephb  */
20034191Sjosephb void
20044191Sjosephb tlb_service(void)
20054191Sjosephb {
20064191Sjosephb 	ulong_t tlb_info;
20074191Sjosephb 	ulong_t found;
20084191Sjosephb 
20094191Sjosephb 	/*
20104191Sjosephb 	 * We only have to do something if coming out of being idle.
20114191Sjosephb 	 */
20124191Sjosephb 	tlb_info = CPU->cpu_m.mcpu_tlb_info;
20134191Sjosephb 	if (tlb_info & TLB_CPU_HALTED) {
20144191Sjosephb 		ASSERT(CPU->cpu_current_hat == kas.a_hat);
20154191Sjosephb 
20164191Sjosephb 		/*
20174191Sjosephb 		 * Atomic clear and fetch of old state.
20184191Sjosephb 		 */
20194191Sjosephb 		while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
20204191Sjosephb 			ASSERT(found & TLB_CPU_HALTED);
20214191Sjosephb 			tlb_info = found;
20224191Sjosephb 			SMT_PAUSE();
20234191Sjosephb 		}
20244191Sjosephb 		if (tlb_info & TLB_INVAL_ALL)
20254191Sjosephb 			flush_all_tlb_entries();
20264191Sjosephb 	}
20274191Sjosephb }
20285084Sjohnlev #endif /* !__xpv */
20294191Sjosephb 
20304191Sjosephb /*
20310Sstevel@tonic-gate  * Internal routine to do cross calls to invalidate a range of pages on
20320Sstevel@tonic-gate  * all CPUs using a given hat.
20330Sstevel@tonic-gate  */
20340Sstevel@tonic-gate void
20353446Smrj hat_tlb_inval(hat_t *hat, uintptr_t va)
20360Sstevel@tonic-gate {
20370Sstevel@tonic-gate 	extern int	flushes_require_xcalls;	/* from mp_startup.c */
20380Sstevel@tonic-gate 	cpuset_t	justme;
20395084Sjohnlev 	cpuset_t	cpus_to_shootdown;
20405084Sjohnlev #ifndef __xpv
20414191Sjosephb 	cpuset_t	check_cpus;
20424191Sjosephb 	cpu_t		*cpup;
20434191Sjosephb 	int		c;
20445084Sjohnlev #endif
20450Sstevel@tonic-gate 
20460Sstevel@tonic-gate 	/*
20470Sstevel@tonic-gate 	 * If the hat is being destroyed, there are no more users, so
20480Sstevel@tonic-gate 	 * demap need not do anything.
20490Sstevel@tonic-gate 	 */
20500Sstevel@tonic-gate 	if (hat->hat_flags & HAT_FREEING)
20510Sstevel@tonic-gate 		return;
20520Sstevel@tonic-gate 
20530Sstevel@tonic-gate 	/*
20540Sstevel@tonic-gate 	 * If demapping from a shared pagetable, we had best demap the
20550Sstevel@tonic-gate 	 * entire set of user TLB entries, since we don't know at what
20560Sstevel@tonic-gate 	 * addresses these were shared.
20570Sstevel@tonic-gate 	 */
20580Sstevel@tonic-gate 	if (hat->hat_flags & HAT_SHARED) {
20590Sstevel@tonic-gate 		hat = kas.a_hat;
20600Sstevel@tonic-gate 		va = DEMAP_ALL_ADDR;
20610Sstevel@tonic-gate 	}
20620Sstevel@tonic-gate 
20630Sstevel@tonic-gate 	/*
20640Sstevel@tonic-gate 	 * if not running with multiple CPUs, don't use cross calls
20650Sstevel@tonic-gate 	 */
20660Sstevel@tonic-gate 	if (panicstr || !flushes_require_xcalls) {
20675084Sjohnlev #ifdef __xpv
20685084Sjohnlev 		if (va == DEMAP_ALL_ADDR)
20695084Sjohnlev 			xen_flush_tlb();
20705084Sjohnlev 		else
20715084Sjohnlev 			xen_flush_va((caddr_t)va);
20725084Sjohnlev #else
20730Sstevel@tonic-gate 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
20745084Sjohnlev #endif
20750Sstevel@tonic-gate 		return;
20760Sstevel@tonic-gate 	}
20770Sstevel@tonic-gate 
20780Sstevel@tonic-gate 
20790Sstevel@tonic-gate 	/*
20803446Smrj 	 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
20813446Smrj 	 * Otherwise it's just CPUs currently executing in this hat.
20820Sstevel@tonic-gate 	 */
20830Sstevel@tonic-gate 	kpreempt_disable();
20840Sstevel@tonic-gate 	CPUSET_ONLY(justme, CPU->cpu_id);
20853446Smrj 	if (hat == kas.a_hat)
20863446Smrj 		cpus_to_shootdown = khat_cpuset;
20870Sstevel@tonic-gate 	else
20883446Smrj 		cpus_to_shootdown = hat->hat_cpus;
20893446Smrj 
20905084Sjohnlev #ifndef __xpv
20914191Sjosephb 	/*
20924191Sjosephb 	 * If any CPUs in the set are idle, just request a delayed flush
20934191Sjosephb 	 * and avoid waking them up.
20944191Sjosephb 	 */
20954191Sjosephb 	check_cpus = cpus_to_shootdown;
20964191Sjosephb 	for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
20974191Sjosephb 		ulong_t tlb_info;
20984191Sjosephb 
20994191Sjosephb 		if (!CPU_IN_SET(check_cpus, c))
21004191Sjosephb 			continue;
21014191Sjosephb 		CPUSET_DEL(check_cpus, c);
21024191Sjosephb 		cpup = cpu[c];
21034191Sjosephb 		if (cpup == NULL)
21044191Sjosephb 			continue;
21054191Sjosephb 
21064191Sjosephb 		tlb_info = cpup->cpu_m.mcpu_tlb_info;
21074191Sjosephb 		while (tlb_info == TLB_CPU_HALTED) {
21084191Sjosephb 			(void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
21094381Sjosephb 			    TLB_CPU_HALTED | TLB_INVAL_ALL);
21104191Sjosephb 			SMT_PAUSE();
21114191Sjosephb 			tlb_info = cpup->cpu_m.mcpu_tlb_info;
21124191Sjosephb 		}
21134191Sjosephb 		if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
21144191Sjosephb 			HATSTAT_INC(hs_tlb_inval_delayed);
21154191Sjosephb 			CPUSET_DEL(cpus_to_shootdown, c);
21164191Sjosephb 		}
21174191Sjosephb 	}
21185084Sjohnlev #endif
21194191Sjosephb 
21203446Smrj 	if (CPUSET_ISNULL(cpus_to_shootdown) ||
21213446Smrj 	    CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
21223446Smrj 
21235084Sjohnlev #ifdef __xpv
21245084Sjohnlev 		if (va == DEMAP_ALL_ADDR)
21255084Sjohnlev 			xen_flush_tlb();
21265084Sjohnlev 		else
21275084Sjohnlev 			xen_flush_va((caddr_t)va);
21285084Sjohnlev #else
21293446Smrj 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
21305084Sjohnlev #endif
21313446Smrj 
21323446Smrj 	} else {
21333446Smrj 
21343446Smrj 		CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
21355084Sjohnlev #ifdef __xpv
21365084Sjohnlev 		if (va == DEMAP_ALL_ADDR)
21375084Sjohnlev 			xen_gflush_tlb(cpus_to_shootdown);
21385084Sjohnlev 		else
21395084Sjohnlev 			xen_gflush_va((caddr_t)va, cpus_to_shootdown);
21405084Sjohnlev #else
21419489SJoe.Bonasera@sun.com 		xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL,
21429489SJoe.Bonasera@sun.com 		    CPUSET2BV(cpus_to_shootdown), hati_demap_func);
21435084Sjohnlev #endif
21443446Smrj 
21453446Smrj 	}
21460Sstevel@tonic-gate 	kpreempt_enable();
21470Sstevel@tonic-gate }
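
/*
 * Worked sketch of the idle-CPU handshake above (not from the
 * original source): suppose CPU 3 is halted, so its mcpu_tlb_info is
 * TLB_CPU_HALTED. The CAS above turns that into
 * TLB_CPU_HALTED | TLB_INVAL_ALL and CPU 3 is dropped from
 * cpus_to_shootdown, so it takes no cross call. When CPU 3 later
 * wakes, tlb_service() atomically clears the word and, having seen
 * TLB_INVAL_ALL, runs flush_all_tlb_entries() before resuming work.
 */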
21480Sstevel@tonic-gate 
21490Sstevel@tonic-gate /*
21500Sstevel@tonic-gate  * Interior routine for HAT_UNLOADs from hat_unload_callback(),
21510Sstevel@tonic-gate  * hat_kmap_unload() OR from hat_steal() code.  This routine doesn't
21520Sstevel@tonic-gate  * handle releasing of the htables.
21530Sstevel@tonic-gate  */
21540Sstevel@tonic-gate void
21550Sstevel@tonic-gate hat_pte_unmap(
21560Sstevel@tonic-gate 	htable_t	*ht,
21570Sstevel@tonic-gate 	uint_t		entry,
21580Sstevel@tonic-gate 	uint_t		flags,
21590Sstevel@tonic-gate 	x86pte_t	old_pte,
21600Sstevel@tonic-gate 	void		*pte_ptr)
21610Sstevel@tonic-gate {
21620Sstevel@tonic-gate 	hat_t		*hat = ht->ht_hat;
21630Sstevel@tonic-gate 	hment_t		*hm = NULL;
21640Sstevel@tonic-gate 	page_t		*pp = NULL;
21650Sstevel@tonic-gate 	level_t		l = ht->ht_level;
21660Sstevel@tonic-gate 	pfn_t		pfn;
21670Sstevel@tonic-gate 
21680Sstevel@tonic-gate 	/*
21690Sstevel@tonic-gate 	 * We always track the locking counts, even if nothing is unmapped
21700Sstevel@tonic-gate 	 */
21710Sstevel@tonic-gate 	if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
21720Sstevel@tonic-gate 		ASSERT(ht->ht_lock_cnt > 0);
21730Sstevel@tonic-gate 		HTABLE_LOCK_DEC(ht);
21740Sstevel@tonic-gate 	}
21750Sstevel@tonic-gate 
21760Sstevel@tonic-gate 	/*
21770Sstevel@tonic-gate 	 * Figure out which page's mapping list lock to acquire using the PFN
21780Sstevel@tonic-gate 	 * passed in the "old" PTE. We then attempt to invalidate the PTE.
21790Sstevel@tonic-gate 	 * If another thread, probably a hat_pageunload, has asynchronously
21800Sstevel@tonic-gate 	 * unmapped/remapped this address we'll loop here.
21810Sstevel@tonic-gate 	 */
21820Sstevel@tonic-gate 	ASSERT(ht->ht_busy > 0);
21830Sstevel@tonic-gate 	while (PTE_ISVALID(old_pte)) {
21840Sstevel@tonic-gate 		pfn = PTE2PFN(old_pte, l);
21853446Smrj 		if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
21860Sstevel@tonic-gate 			pp = NULL;
21870Sstevel@tonic-gate 		} else {
21885084Sjohnlev #ifdef __xpv
21895084Sjohnlev 			if (pfn == PFN_INVALID)
21905084Sjohnlev 				panic("Invalid PFN, but not PT_NOCONSIST");
21915084Sjohnlev #endif
21920Sstevel@tonic-gate 			pp = page_numtopp_nolock(pfn);
219347Sjosephb 			if (pp == NULL) {
219447Sjosephb 				panic("no page_t, not NOCONSIST: old_pte="
219547Sjosephb 				    FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
219647Sjosephb 				    old_pte, (uintptr_t)ht, entry,
219747Sjosephb 				    (uintptr_t)pte_ptr);
219847Sjosephb 			}
21990Sstevel@tonic-gate 			x86_hm_enter(pp);
22000Sstevel@tonic-gate 		}
220147Sjosephb 
2202*12532Sjoe.bonasera@oracle.com 		old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr);
22030Sstevel@tonic-gate 
22040Sstevel@tonic-gate 		/*
22050Sstevel@tonic-gate 		 * If the page hadn't changed we've unmapped it and can proceed
22060Sstevel@tonic-gate 		 */
22070Sstevel@tonic-gate 		if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
22080Sstevel@tonic-gate 			break;
22090Sstevel@tonic-gate 
22100Sstevel@tonic-gate 		/*
22110Sstevel@tonic-gate 		 * Otherwise, we'll have to retry with the current old_pte.
22120Sstevel@tonic-gate 		 * Drop the hment lock, since the pfn may have changed.
22130Sstevel@tonic-gate 		 */
22140Sstevel@tonic-gate 		if (pp != NULL) {
22150Sstevel@tonic-gate 			x86_hm_exit(pp);
22160Sstevel@tonic-gate 			pp = NULL;
22170Sstevel@tonic-gate 		} else {
22183446Smrj 			ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
22190Sstevel@tonic-gate 		}
22200Sstevel@tonic-gate 	}
22210Sstevel@tonic-gate 
22220Sstevel@tonic-gate 	/*
22230Sstevel@tonic-gate 	 * If the old mapping wasn't valid, there's nothing more to do
22240Sstevel@tonic-gate 	 */
22250Sstevel@tonic-gate 	if (!PTE_ISVALID(old_pte)) {
22260Sstevel@tonic-gate 		if (pp != NULL)
22270Sstevel@tonic-gate 			x86_hm_exit(pp);
22280Sstevel@tonic-gate 		return;
22290Sstevel@tonic-gate 	}
22300Sstevel@tonic-gate 
22310Sstevel@tonic-gate 	/*
22320Sstevel@tonic-gate 	 * Take care of syncing any MOD/REF bits and removing the hment.
22330Sstevel@tonic-gate 	 */
22340Sstevel@tonic-gate 	if (pp != NULL) {
22350Sstevel@tonic-gate 		if (!(flags & HAT_UNLOAD_NOSYNC))
22360Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, old_pte, l);
22370Sstevel@tonic-gate 		hm = hment_remove(pp, ht, entry);
22380Sstevel@tonic-gate 		x86_hm_exit(pp);
22390Sstevel@tonic-gate 		if (hm != NULL)
22400Sstevel@tonic-gate 			hment_free(hm);
22410Sstevel@tonic-gate 	}
22420Sstevel@tonic-gate 
22430Sstevel@tonic-gate 	/*
22440Sstevel@tonic-gate 	 * Handle bookkeeping in the htable and hat.
22450Sstevel@tonic-gate 	 */
22460Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
22470Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
22480Sstevel@tonic-gate 	PGCNT_DEC(hat, l);
22490Sstevel@tonic-gate }
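
/*
 * Worked sketch of the retry in hat_pte_unmap() (not from the
 * original source): if a concurrent hat_pageunload() changes the PTE
 * between our read and the x86pte_inval(), the returned old_pte
 * names a different pfn. We then drop the now possibly wrong mapping
 * list lock and loop, re-resolving the page_t from the fresh old_pte
 * before attempting the invalidate again.
 */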
22500Sstevel@tonic-gate 
22510Sstevel@tonic-gate /*
22520Sstevel@tonic-gate  * very cheap unload implementation to special case some kernel addresses
22530Sstevel@tonic-gate  */
22540Sstevel@tonic-gate static void
22550Sstevel@tonic-gate hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
22560Sstevel@tonic-gate {
22570Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
22580Sstevel@tonic-gate 	uintptr_t	eva = va + len;
22593446Smrj 	pgcnt_t		pg_index;
22600Sstevel@tonic-gate 	htable_t	*ht;
22610Sstevel@tonic-gate 	uint_t		entry;
22623446Smrj 	x86pte_t	*pte_ptr;
22630Sstevel@tonic-gate 	x86pte_t	old_pte;
22640Sstevel@tonic-gate 
22650Sstevel@tonic-gate 	for (; va < eva; va += MMU_PAGESIZE) {
22660Sstevel@tonic-gate 		/*
22670Sstevel@tonic-gate 		 * Get the PTE
22680Sstevel@tonic-gate 		 */
22693446Smrj 		pg_index = mmu_btop(va - mmu.kmap_addr);
22703446Smrj 		pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
22713446Smrj 		old_pte = GET_PTE(pte_ptr);
22720Sstevel@tonic-gate 
22730Sstevel@tonic-gate 		/*
22740Sstevel@tonic-gate 		 * get the htable / entry
22750Sstevel@tonic-gate 		 */
22760Sstevel@tonic-gate 		ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
22770Sstevel@tonic-gate 		    >> LEVEL_SHIFT(1)];
22780Sstevel@tonic-gate 		entry = htable_va2entry(va, ht);
22790Sstevel@tonic-gate 
22800Sstevel@tonic-gate 		/*
22810Sstevel@tonic-gate 		 * use mostly common code to unmap it.
22820Sstevel@tonic-gate 		 */
22830Sstevel@tonic-gate 		hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr);
22840Sstevel@tonic-gate 	}
22850Sstevel@tonic-gate }
22860Sstevel@tonic-gate 
22870Sstevel@tonic-gate 
22880Sstevel@tonic-gate /*
22890Sstevel@tonic-gate  * unload a range of virtual address space (no callback)
22900Sstevel@tonic-gate  */
22910Sstevel@tonic-gate void
22920Sstevel@tonic-gate hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
22930Sstevel@tonic-gate {
22940Sstevel@tonic-gate 	uintptr_t va = (uintptr_t)addr;
22953446Smrj 
22965084Sjohnlev 	XPV_DISALLOW_MIGRATE();
22973446Smrj 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
22980Sstevel@tonic-gate 
22990Sstevel@tonic-gate 	/*
23000Sstevel@tonic-gate 	 * special case for performance.
23010Sstevel@tonic-gate 	 */
23020Sstevel@tonic-gate 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
23030Sstevel@tonic-gate 		ASSERT(hat == kas.a_hat);
23040Sstevel@tonic-gate 		hat_kmap_unload(addr, len, flags);
23053446Smrj 	} else {
23063446Smrj 		hat_unload_callback(hat, addr, len, flags, NULL);
23070Sstevel@tonic-gate 	}
23085084Sjohnlev 	XPV_ALLOW_MIGRATE();
23090Sstevel@tonic-gate }
23100Sstevel@tonic-gate 
23110Sstevel@tonic-gate /*
23120Sstevel@tonic-gate  * Do the callbacks for ranges being unloaded.
23130Sstevel@tonic-gate  */
23140Sstevel@tonic-gate typedef struct range_info {
23150Sstevel@tonic-gate 	uintptr_t	rng_va;
23160Sstevel@tonic-gate 	ulong_t		rng_cnt;
23170Sstevel@tonic-gate 	level_t		rng_level;
23180Sstevel@tonic-gate } range_info_t;
23190Sstevel@tonic-gate 
23200Sstevel@tonic-gate static void
23210Sstevel@tonic-gate handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
23220Sstevel@tonic-gate {
23230Sstevel@tonic-gate 	/*
23240Sstevel@tonic-gate 	 * do callbacks to upper level VM system
23250Sstevel@tonic-gate 	 */
23260Sstevel@tonic-gate 	while (cb != NULL && cnt > 0) {
23270Sstevel@tonic-gate 		--cnt;
23280Sstevel@tonic-gate 		cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
23290Sstevel@tonic-gate 		cb->hcb_end_addr = cb->hcb_start_addr;
23300Sstevel@tonic-gate 		cb->hcb_end_addr +=
23310Sstevel@tonic-gate 		    range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
23320Sstevel@tonic-gate 		cb->hcb_function(cb);
23330Sstevel@tonic-gate 	}
23340Sstevel@tonic-gate }
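
/*
 * Worked sketch (not from the original source): unloading three
 * contiguous 4K pages coalesces into a single range with rng_cnt == 3
 * and rng_level == 0, so the callback sees
 *
 *	hcb_start_addr == rng_va
 *	hcb_end_addr   == rng_va + (3 << LEVEL_SHIFT(0))
 *
 * i.e. one callback covering 0x3000 bytes instead of three separate
 * ones.
 */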
23350Sstevel@tonic-gate 
23360Sstevel@tonic-gate /*
23370Sstevel@tonic-gate  * Unload a given range of addresses (has optional callback)
23380Sstevel@tonic-gate  *
23390Sstevel@tonic-gate  * Flags:
23400Sstevel@tonic-gate  * define	HAT_UNLOAD		0x00
23410Sstevel@tonic-gate  * define	HAT_UNLOAD_NOSYNC	0x02
23420Sstevel@tonic-gate  * define	HAT_UNLOAD_UNLOCK	0x04
23430Sstevel@tonic-gate  * define	HAT_UNLOAD_OTHER	0x08 - not used
23440Sstevel@tonic-gate  * define	HAT_UNLOAD_UNMAP	0x10 - same as HAT_UNLOAD
23450Sstevel@tonic-gate  */
23460Sstevel@tonic-gate #define	MAX_UNLOAD_CNT (8)
23470Sstevel@tonic-gate void
23480Sstevel@tonic-gate hat_unload_callback(
23490Sstevel@tonic-gate 	hat_t		*hat,
23500Sstevel@tonic-gate 	caddr_t		addr,
23510Sstevel@tonic-gate 	size_t		len,
23520Sstevel@tonic-gate 	uint_t		flags,
23530Sstevel@tonic-gate 	hat_callback_t	*cb)
23540Sstevel@tonic-gate {
23550Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
23560Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
23570Sstevel@tonic-gate 	htable_t	*ht = NULL;
23580Sstevel@tonic-gate 	uint_t		entry;
235947Sjosephb 	uintptr_t	contig_va = (uintptr_t)-1L;
23600Sstevel@tonic-gate 	range_info_t	r[MAX_UNLOAD_CNT];
23610Sstevel@tonic-gate 	uint_t		r_cnt = 0;
23620Sstevel@tonic-gate 	x86pte_t	old_pte;
23630Sstevel@tonic-gate 
23645084Sjohnlev 	XPV_DISALLOW_MIGRATE();
23653446Smrj 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
23660Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
23670Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
23680Sstevel@tonic-gate 
23693446Smrj 	/*
23703446Smrj 	 * Special case a single page being unloaded for speed. This happens
23713446Smrj 	 * quite frequently; COW faults after a fork(), for example.
23723446Smrj 	 */
23733446Smrj 	if (cb == NULL && len == MMU_PAGESIZE) {
23743446Smrj 		ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
23753446Smrj 		if (ht != NULL) {
23763446Smrj 			if (PTE_ISVALID(old_pte))
23773446Smrj 				hat_pte_unmap(ht, entry, flags, old_pte, NULL);
23783446Smrj 			htable_release(ht);
23793446Smrj 		}
23805084Sjohnlev 		XPV_ALLOW_MIGRATE();
23813446Smrj 		return;
23823446Smrj 	}
23833446Smrj 
23840Sstevel@tonic-gate 	while (vaddr < eaddr) {
23850Sstevel@tonic-gate 		old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
23860Sstevel@tonic-gate 		if (ht == NULL)
23870Sstevel@tonic-gate 			break;
23880Sstevel@tonic-gate 
23890Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
23900Sstevel@tonic-gate 
23910Sstevel@tonic-gate 		if (vaddr < (uintptr_t)addr)
23920Sstevel@tonic-gate 			panic("hat_unload_callback(): unmap inside large page");
23930Sstevel@tonic-gate 
23940Sstevel@tonic-gate 		/*
23950Sstevel@tonic-gate 		 * We'll do the callbacks for contiguous ranges.
23960Sstevel@tonic-gate 		 */
239747Sjosephb 		if (vaddr != contig_va ||
23980Sstevel@tonic-gate 		    (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
23990Sstevel@tonic-gate 			if (r_cnt == MAX_UNLOAD_CNT) {
24000Sstevel@tonic-gate 				handle_ranges(cb, r_cnt, r);
24010Sstevel@tonic-gate 				r_cnt = 0;
24020Sstevel@tonic-gate 			}
24030Sstevel@tonic-gate 			r[r_cnt].rng_va = vaddr;
24040Sstevel@tonic-gate 			r[r_cnt].rng_cnt = 0;
24050Sstevel@tonic-gate 			r[r_cnt].rng_level = ht->ht_level;
24060Sstevel@tonic-gate 			++r_cnt;
24070Sstevel@tonic-gate 		}
24080Sstevel@tonic-gate 
24090Sstevel@tonic-gate 		/*
24100Sstevel@tonic-gate 		 * Unload one mapping from the page tables.
24110Sstevel@tonic-gate 		 */
24120Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
24130Sstevel@tonic-gate 		hat_pte_unmap(ht, entry, flags, old_pte, NULL);
24140Sstevel@tonic-gate 		ASSERT(ht->ht_level <= mmu.max_page_level);
24150Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
241647Sjosephb 		contig_va = vaddr;
24170Sstevel@tonic-gate 		++r[r_cnt - 1].rng_cnt;
24180Sstevel@tonic-gate 	}
24190Sstevel@tonic-gate 	if (ht)
24200Sstevel@tonic-gate 		htable_release(ht);
24210Sstevel@tonic-gate 
24220Sstevel@tonic-gate 	/*
24230Sstevel@tonic-gate 	 * handle last range for callbacks
24240Sstevel@tonic-gate 	 */
24250Sstevel@tonic-gate 	if (r_cnt > 0)
24260Sstevel@tonic-gate 		handle_ranges(cb, r_cnt, r);
24275084Sjohnlev 	XPV_ALLOW_MIGRATE();
24280Sstevel@tonic-gate }
24290Sstevel@tonic-gate 
24300Sstevel@tonic-gate /*
243111079SDave.Plauger@Sun.COM  * Invalidate a virtual address translation on a slave CPU during
243211079SDave.Plauger@Sun.COM  * panic() dumps.
243310843SDave.Plauger@Sun.COM  */
243410843SDave.Plauger@Sun.COM void
243510843SDave.Plauger@Sun.COM hat_flush_range(hat_t *hat, caddr_t va, size_t size)
243610843SDave.Plauger@Sun.COM {
243710843SDave.Plauger@Sun.COM 	ssize_t sz;
243810843SDave.Plauger@Sun.COM 	caddr_t endva = va + size;
243910843SDave.Plauger@Sun.COM 
244010843SDave.Plauger@Sun.COM 	while (va < endva) {
244110843SDave.Plauger@Sun.COM 		sz = hat_getpagesize(hat, va);
244211079SDave.Plauger@Sun.COM 		if (sz < 0) {
244310843SDave.Plauger@Sun.COM #ifdef __xpv
244410843SDave.Plauger@Sun.COM 			xen_flush_tlb();
244510843SDave.Plauger@Sun.COM #else
244611079SDave.Plauger@Sun.COM 			flush_all_tlb_entries();
244710843SDave.Plauger@Sun.COM #endif
244810843SDave.Plauger@Sun.COM 			break;
244911079SDave.Plauger@Sun.COM 		}
245011079SDave.Plauger@Sun.COM #ifdef __xpv
245111079SDave.Plauger@Sun.COM 		xen_flush_va(va);
245211079SDave.Plauger@Sun.COM #else
245311079SDave.Plauger@Sun.COM 		mmu_tlbflush_entry(va);
245411079SDave.Plauger@Sun.COM #endif
245510843SDave.Plauger@Sun.COM 		va += sz;
245610843SDave.Plauger@Sun.COM 	}
245710843SDave.Plauger@Sun.COM }
245810843SDave.Plauger@Sun.COM 
245910843SDave.Plauger@Sun.COM /*
24600Sstevel@tonic-gate  * synchronize mapping with software data structures
24610Sstevel@tonic-gate  *
24620Sstevel@tonic-gate  * This interface is currently only used by the working set monitor
24630Sstevel@tonic-gate  * driver.
24640Sstevel@tonic-gate  */
24650Sstevel@tonic-gate /*ARGSUSED*/
24660Sstevel@tonic-gate void
24670Sstevel@tonic-gate hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
24680Sstevel@tonic-gate {
24690Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
24700Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
24710Sstevel@tonic-gate 	htable_t	*ht = NULL;
24720Sstevel@tonic-gate 	uint_t		entry;
24730Sstevel@tonic-gate 	x86pte_t	pte;
24740Sstevel@tonic-gate 	x86pte_t	save_pte;
24750Sstevel@tonic-gate 	x86pte_t	new;
24760Sstevel@tonic-gate 	page_t		*pp;
24770Sstevel@tonic-gate 
24780Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(vaddr));
24790Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
24800Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
24813446Smrj 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
24820Sstevel@tonic-gate 
24835084Sjohnlev 	XPV_DISALLOW_MIGRATE();
24840Sstevel@tonic-gate 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
24850Sstevel@tonic-gate try_again:
24860Sstevel@tonic-gate 		pte = htable_walk(hat, &ht, &vaddr, eaddr);
24870Sstevel@tonic-gate 		if (ht == NULL)
24880Sstevel@tonic-gate 			break;
24890Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
24900Sstevel@tonic-gate 
24913446Smrj 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
24920Sstevel@tonic-gate 		    PTE_GET(pte, PT_REF | PT_MOD) == 0)
24930Sstevel@tonic-gate 			continue;
24940Sstevel@tonic-gate 
24950Sstevel@tonic-gate 		/*
24960Sstevel@tonic-gate 		 * We need to acquire the mapping list lock to protect
24970Sstevel@tonic-gate 		 * against hat_pageunload(), hat_unload(), etc.
24980Sstevel@tonic-gate 		 */
24990Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
25000Sstevel@tonic-gate 		if (pp == NULL)
25010Sstevel@tonic-gate 			break;
25020Sstevel@tonic-gate 		x86_hm_enter(pp);
25030Sstevel@tonic-gate 		save_pte = pte;
25040Sstevel@tonic-gate 		pte = x86pte_get(ht, entry);
25050Sstevel@tonic-gate 		if (pte != save_pte) {
25060Sstevel@tonic-gate 			x86_hm_exit(pp);
25070Sstevel@tonic-gate 			goto try_again;
25080Sstevel@tonic-gate 		}
25093446Smrj 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
25100Sstevel@tonic-gate 		    PTE_GET(pte, PT_REF | PT_MOD) == 0) {
25110Sstevel@tonic-gate 			x86_hm_exit(pp);
25120Sstevel@tonic-gate 			continue;
25130Sstevel@tonic-gate 		}
25140Sstevel@tonic-gate 
25150Sstevel@tonic-gate 		/*
25160Sstevel@tonic-gate 		 * Need to clear ref or mod bits. We may compete with
25170Sstevel@tonic-gate 		 * hardware updating the R/M bits and have to try again.
25180Sstevel@tonic-gate 		 */
25190Sstevel@tonic-gate 		if (flags == HAT_SYNC_ZERORM) {
25200Sstevel@tonic-gate 			new = pte;
25210Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD);
25220Sstevel@tonic-gate 			pte = hati_update_pte(ht, entry, pte, new);
25230Sstevel@tonic-gate 			if (pte != 0) {
25240Sstevel@tonic-gate 				x86_hm_exit(pp);
25250Sstevel@tonic-gate 				goto try_again;
25260Sstevel@tonic-gate 			}
25270Sstevel@tonic-gate 		} else {
25280Sstevel@tonic-gate 			/*
25290Sstevel@tonic-gate 			 * sync the PTE to the page_t
25300Sstevel@tonic-gate 			 */
25310Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
25320Sstevel@tonic-gate 		}
25330Sstevel@tonic-gate 		x86_hm_exit(pp);
25340Sstevel@tonic-gate 	}
25350Sstevel@tonic-gate 	if (ht)
25360Sstevel@tonic-gate 		htable_release(ht);
25375084Sjohnlev 	XPV_ALLOW_MIGRATE();
25380Sstevel@tonic-gate }
25390Sstevel@tonic-gate 
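/*
 * The try_again pattern above is the standard idiom in this file for
 * racing with hardware R/M bit updates: re-read the PTE under the
 * mapping list lock and retry whenever hati_update_pte() returns
 * nonzero (meaning the PTE changed underneath us).  A stripped-down
 * sketch of the idiom, with the locking and walking omitted:
 *
 *	do {
 *		old = x86pte_get(ht, entry);
 *		new = old;
 *		PTE_CLR(new, PT_REF | PT_MOD);
 *	} while (hati_update_pte(ht, entry, old, new) != 0);
 */
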
25400Sstevel@tonic-gate /*
25410Sstevel@tonic-gate  * void	hat_map(hat, addr, len, flags)
25420Sstevel@tonic-gate  */
25430Sstevel@tonic-gate /*ARGSUSED*/
25440Sstevel@tonic-gate void
25450Sstevel@tonic-gate hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
25460Sstevel@tonic-gate {
25470Sstevel@tonic-gate 	/* does nothing */
25480Sstevel@tonic-gate }
25490Sstevel@tonic-gate 
25500Sstevel@tonic-gate /*
25510Sstevel@tonic-gate  * uint_t hat_getattr(hat, addr, *attr)
25520Sstevel@tonic-gate  *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
25530Sstevel@tonic-gate  *	mapping and *attr is valid, nonzero if there was no mapping and
25540Sstevel@tonic-gate  *	*attr is not valid.
25550Sstevel@tonic-gate  */
25560Sstevel@tonic-gate uint_t
25570Sstevel@tonic-gate hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
25580Sstevel@tonic-gate {
25590Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
25600Sstevel@tonic-gate 	htable_t	*ht = NULL;
25610Sstevel@tonic-gate 	x86pte_t	pte;
25620Sstevel@tonic-gate 
25633446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
25640Sstevel@tonic-gate 
25650Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
25660Sstevel@tonic-gate 		return ((uint_t)-1);
25670Sstevel@tonic-gate 
25683446Smrj 	ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
25690Sstevel@tonic-gate 	if (ht == NULL)
25700Sstevel@tonic-gate 		return ((uint_t)-1);
25710Sstevel@tonic-gate 
25720Sstevel@tonic-gate 	if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
25730Sstevel@tonic-gate 		htable_release(ht);
25740Sstevel@tonic-gate 		return ((uint_t)-1);
25750Sstevel@tonic-gate 	}
25760Sstevel@tonic-gate 
25770Sstevel@tonic-gate 	*attr = PROT_READ;
25780Sstevel@tonic-gate 	if (PTE_GET(pte, PT_WRITABLE))
25790Sstevel@tonic-gate 		*attr |= PROT_WRITE;
25800Sstevel@tonic-gate 	if (PTE_GET(pte, PT_USER))
25810Sstevel@tonic-gate 		*attr |= PROT_USER;
25820Sstevel@tonic-gate 	if (!PTE_GET(pte, mmu.pt_nx))
25830Sstevel@tonic-gate 		*attr |= PROT_EXEC;
25843446Smrj 	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
25850Sstevel@tonic-gate 		*attr |= HAT_NOSYNC;
25860Sstevel@tonic-gate 	htable_release(ht);
25870Sstevel@tonic-gate 	return (0);
25880Sstevel@tonic-gate }
25890Sstevel@tonic-gate 
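/*
 * Usage sketch (hypothetical caller): note the inverted return
 * convention, 0 means a mapping was found and *attr is valid.
 *
 *	uint_t attr;
 *
 *	if (hat_getattr(hat, addr, &attr) == 0 &&
 *	    (attr & PROT_WRITE))
 *		... the address is mapped writable ...
 */
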
25900Sstevel@tonic-gate /*
25910Sstevel@tonic-gate  * hat_updateattr() applies the given attribute change to an existing mapping
25920Sstevel@tonic-gate  */
25930Sstevel@tonic-gate #define	HAT_LOAD_ATTR		1
25940Sstevel@tonic-gate #define	HAT_SET_ATTR		2
25950Sstevel@tonic-gate #define	HAT_CLR_ATTR		3
25960Sstevel@tonic-gate 
25970Sstevel@tonic-gate static void
25980Sstevel@tonic-gate hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
25990Sstevel@tonic-gate {
26000Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
26010Sstevel@tonic-gate 	uintptr_t	eaddr = (uintptr_t)addr + len;
26020Sstevel@tonic-gate 	htable_t	*ht = NULL;
26030Sstevel@tonic-gate 	uint_t		entry;
26040Sstevel@tonic-gate 	x86pte_t	oldpte, newpte;
26050Sstevel@tonic-gate 	page_t		*pp;
26060Sstevel@tonic-gate 
26075084Sjohnlev 	XPV_DISALLOW_MIGRATE();
26080Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
26090Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
26100Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
26110Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
26120Sstevel@tonic-gate 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
26130Sstevel@tonic-gate try_again:
26140Sstevel@tonic-gate 		oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
26150Sstevel@tonic-gate 		if (ht == NULL)
26160Sstevel@tonic-gate 			break;
26173446Smrj 		if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
26180Sstevel@tonic-gate 			continue;
26190Sstevel@tonic-gate 
26200Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
26210Sstevel@tonic-gate 		if (pp == NULL)
26220Sstevel@tonic-gate 			continue;
26230Sstevel@tonic-gate 		x86_hm_enter(pp);
26240Sstevel@tonic-gate 
26250Sstevel@tonic-gate 		newpte = oldpte;
26260Sstevel@tonic-gate 		/*
26270Sstevel@tonic-gate 		 * We found a page table entry in the desired range,
26280Sstevel@tonic-gate 		 * figure out the new attributes.
26290Sstevel@tonic-gate 		 */
26300Sstevel@tonic-gate 		if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
26310Sstevel@tonic-gate 			if ((attr & PROT_WRITE) &&
26320Sstevel@tonic-gate 			    !PTE_GET(oldpte, PT_WRITABLE))
26330Sstevel@tonic-gate 				newpte |= PT_WRITABLE;
26340Sstevel@tonic-gate 
26353446Smrj 			if ((attr & HAT_NOSYNC) &&
26363446Smrj 			    PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
26370Sstevel@tonic-gate 				newpte |= PT_NOSYNC;
26380Sstevel@tonic-gate 
26390Sstevel@tonic-gate 			if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
26400Sstevel@tonic-gate 				newpte &= ~mmu.pt_nx;
26410Sstevel@tonic-gate 		}
26420Sstevel@tonic-gate 
26430Sstevel@tonic-gate 		if (what == HAT_LOAD_ATTR) {
26440Sstevel@tonic-gate 			if (!(attr & PROT_WRITE) &&
26450Sstevel@tonic-gate 			    PTE_GET(oldpte, PT_WRITABLE))
26460Sstevel@tonic-gate 				newpte &= ~PT_WRITABLE;
26470Sstevel@tonic-gate 
26483446Smrj 			if (!(attr & HAT_NOSYNC) &&
26493446Smrj 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
26503446Smrj 				newpte &= ~PT_SOFTWARE;
26510Sstevel@tonic-gate 
26520Sstevel@tonic-gate 			if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
26530Sstevel@tonic-gate 				newpte |= mmu.pt_nx;
26540Sstevel@tonic-gate 		}
26550Sstevel@tonic-gate 
26560Sstevel@tonic-gate 		if (what == HAT_CLR_ATTR) {
26570Sstevel@tonic-gate 			if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
26580Sstevel@tonic-gate 				newpte &= ~PT_WRITABLE;
26590Sstevel@tonic-gate 
26603446Smrj 			if ((attr & HAT_NOSYNC) &&
26613446Smrj 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
26623446Smrj 				newpte &= ~PT_SOFTWARE;
26630Sstevel@tonic-gate 
26640Sstevel@tonic-gate 			if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
26650Sstevel@tonic-gate 				newpte |= mmu.pt_nx;
26660Sstevel@tonic-gate 		}
26670Sstevel@tonic-gate 
26680Sstevel@tonic-gate 		/*
26693446Smrj 		 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
26703446Smrj 		 * x86pte_set() depends on this.
26713446Smrj 		 */
26723446Smrj 		if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
26733446Smrj 			newpte |= PT_REF | PT_MOD;
26743446Smrj 
26753446Smrj 		/*
26760Sstevel@tonic-gate 		 * what about PROT_READ or others? this code only handles:
26770Sstevel@tonic-gate 		 * EXEC, WRITE, NOSYNC
26780Sstevel@tonic-gate 		 */
26790Sstevel@tonic-gate 
26800Sstevel@tonic-gate 		/*
26810Sstevel@tonic-gate 		 * If new PTE really changed, update the table.
26820Sstevel@tonic-gate 		 */
26830Sstevel@tonic-gate 		if (newpte != oldpte) {
26840Sstevel@tonic-gate 			entry = htable_va2entry(vaddr, ht);
26850Sstevel@tonic-gate 			oldpte = hati_update_pte(ht, entry, oldpte, newpte);
26860Sstevel@tonic-gate 			if (oldpte != 0) {
26870Sstevel@tonic-gate 				x86_hm_exit(pp);
26880Sstevel@tonic-gate 				goto try_again;
26890Sstevel@tonic-gate 			}
26900Sstevel@tonic-gate 		}
26910Sstevel@tonic-gate 		x86_hm_exit(pp);
26920Sstevel@tonic-gate 	}
26930Sstevel@tonic-gate 	if (ht)
26940Sstevel@tonic-gate 		htable_release(ht);
26955084Sjohnlev 	XPV_ALLOW_MIGRATE();
26960Sstevel@tonic-gate }
26970Sstevel@tonic-gate 
26980Sstevel@tonic-gate /*
26990Sstevel@tonic-gate  * Various wrappers for hat_updateattr()
27000Sstevel@tonic-gate  */
27010Sstevel@tonic-gate void
27020Sstevel@tonic-gate hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
27030Sstevel@tonic-gate {
27043446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
27050Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
27060Sstevel@tonic-gate }
27070Sstevel@tonic-gate 
27080Sstevel@tonic-gate void
27090Sstevel@tonic-gate hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
27100Sstevel@tonic-gate {
27113446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
27120Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
27130Sstevel@tonic-gate }
27140Sstevel@tonic-gate 
27150Sstevel@tonic-gate void
27160Sstevel@tonic-gate hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
27170Sstevel@tonic-gate {
27183446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
27190Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
27200Sstevel@tonic-gate }
27210Sstevel@tonic-gate 
27220Sstevel@tonic-gate void
27230Sstevel@tonic-gate hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
27240Sstevel@tonic-gate {
27253446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
27260Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
27270Sstevel@tonic-gate }
27280Sstevel@tonic-gate 
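/*
 * Summarizing the wrappers above (derived from this code, not an
 * external contract):
 *
 *	hat_setattr(hat, a, l, attr)	HAT_SET_ATTR	add attributes
 *	hat_clrattr(hat, a, l, attr)	HAT_CLR_ATTR	remove attributes
 *	hat_chgattr(hat, a, l, attr)	HAT_LOAD_ATTR	replace attributes
 *	hat_chgprot(hat, a, l, vprot)	HAT_LOAD_ATTR	replace, protection
 *						bits only (vprot & HAT_PROT_MASK)
 *
 * Only PROT_WRITE, PROT_EXEC and HAT_NOSYNC are honored; see the
 * comment inside hat_updateattr().
 */
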
27290Sstevel@tonic-gate /*
27300Sstevel@tonic-gate  * size_t hat_getpagesize(hat, addr)
27310Sstevel@tonic-gate  *	returns pagesize in bytes for <hat, addr>. returns -1 if there is
27320Sstevel@tonic-gate  *	no mapping. This is an advisory call.
27330Sstevel@tonic-gate  */
27340Sstevel@tonic-gate ssize_t
27350Sstevel@tonic-gate hat_getpagesize(hat_t *hat, caddr_t addr)
27360Sstevel@tonic-gate {
27370Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
27380Sstevel@tonic-gate 	htable_t	*ht;
27390Sstevel@tonic-gate 	size_t		pagesize;
27400Sstevel@tonic-gate 
27413446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
27420Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
27430Sstevel@tonic-gate 		return (-1);
27440Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, NULL);
27450Sstevel@tonic-gate 	if (ht == NULL)
27460Sstevel@tonic-gate 		return (-1);
27470Sstevel@tonic-gate 	pagesize = LEVEL_SIZE(ht->ht_level);
27480Sstevel@tonic-gate 	htable_release(ht);
27490Sstevel@tonic-gate 	return (pagesize);
27500Sstevel@tonic-gate }
27510Sstevel@tonic-gate 
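/*
 * Since the return value is the level size of the mapping covering
 * addr, callers typically use it as the stride of a walk.  A minimal
 * sketch, assuming va and endva are page aligned and that unmapped
 * pages should simply be skipped:
 *
 *	ssize_t sz;
 *
 *	for (; va < endva; va += sz) {
 *		if ((sz = hat_getpagesize(hat, va)) < 0)
 *			sz = MMU_PAGESIZE;	(no mapping, skip one page)
 *		...
 *	}
 *
 * hat_flush_range() earlier in this file uses a variant of this walk.
 */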
27520Sstevel@tonic-gate 
27530Sstevel@tonic-gate 
27540Sstevel@tonic-gate /*
27550Sstevel@tonic-gate  * pfn_t hat_getpfnum(hat, addr)
27560Sstevel@tonic-gate  *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
27570Sstevel@tonic-gate  */
27580Sstevel@tonic-gate pfn_t
27590Sstevel@tonic-gate hat_getpfnum(hat_t *hat, caddr_t addr)
27600Sstevel@tonic-gate {
27610Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
27620Sstevel@tonic-gate 	htable_t	*ht;
27630Sstevel@tonic-gate 	uint_t		entry;
27640Sstevel@tonic-gate 	pfn_t		pfn = PFN_INVALID;
27650Sstevel@tonic-gate 
27663446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
27670Sstevel@tonic-gate 	if (khat_running == 0)
27683446Smrj 		return (PFN_INVALID);
27690Sstevel@tonic-gate 
27700Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
27710Sstevel@tonic-gate 		return (PFN_INVALID);
27720Sstevel@tonic-gate 
27735084Sjohnlev 	XPV_DISALLOW_MIGRATE();
27740Sstevel@tonic-gate 	/*
27750Sstevel@tonic-gate 	 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
27760Sstevel@tonic-gate 	 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
27770Sstevel@tonic-gate 	 * this up.
27780Sstevel@tonic-gate 	 */
27790Sstevel@tonic-gate 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
27800Sstevel@tonic-gate 		x86pte_t pte;
27813446Smrj 		pgcnt_t pg_index;
27823446Smrj 
27833446Smrj 		pg_index = mmu_btop(vaddr - mmu.kmap_addr);
27843446Smrj 		pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
27855084Sjohnlev 		if (PTE_ISVALID(pte))
27865084Sjohnlev 			/*LINTED [use of constant 0 causes a lint warning] */
27875084Sjohnlev 			pfn = PTE2PFN(pte, 0);
27885084Sjohnlev 		XPV_ALLOW_MIGRATE();
27895084Sjohnlev 		return (pfn);
27900Sstevel@tonic-gate 	}
27910Sstevel@tonic-gate 
27920Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, &entry);
27935084Sjohnlev 	if (ht == NULL) {
27945084Sjohnlev 		XPV_ALLOW_MIGRATE();
27950Sstevel@tonic-gate 		return (PFN_INVALID);
27965084Sjohnlev 	}
27970Sstevel@tonic-gate 	ASSERT(vaddr >= ht->ht_vaddr);
27980Sstevel@tonic-gate 	ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
27990Sstevel@tonic-gate 	pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
28000Sstevel@tonic-gate 	if (ht->ht_level > 0)
28010Sstevel@tonic-gate 		pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
28020Sstevel@tonic-gate 	htable_release(ht);
28035084Sjohnlev 	XPV_ALLOW_MIGRATE();
28040Sstevel@tonic-gate 	return (pfn);
28050Sstevel@tonic-gate }
28060Sstevel@tonic-gate 
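/*
 * Usage sketch for the common DDI case mentioned above, where kva is
 * a hypothetical kernel virtual address:
 *
 *	pfn_t pfn = hat_getpfnum(kas.a_hat, (caddr_t)kva);
 *
 *	if (pfn == PFN_INVALID)
 *		... no valid mapping at kva ...
 *
 * For user addresses, pass the process's hat instead of kas.a_hat.
 */
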
28070Sstevel@tonic-gate /*
28080Sstevel@tonic-gate  * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
28090Sstevel@tonic-gate  * Use hat_getpfnum(kas.a_hat, ...) instead.
28100Sstevel@tonic-gate  *
28110Sstevel@tonic-gate  * We'd like to return PFN_INVALID if the mappings have underlying page_t's
28120Sstevel@tonic-gate  * but can't right now due to the fact that some software has grown to use
28130Sstevel@tonic-gate  * this interface incorrectly. So for now when the interface is misused,
28140Sstevel@tonic-gate  * return a warning to the user that in the future it won't work in the
28150Sstevel@tonic-gate  * way they're abusing it, and carry on.
28160Sstevel@tonic-gate  *
28170Sstevel@tonic-gate  * Note that hat_getkpfnum() is never supported on amd64.
28180Sstevel@tonic-gate  */
28190Sstevel@tonic-gate #if !defined(__amd64)
28200Sstevel@tonic-gate pfn_t
28210Sstevel@tonic-gate hat_getkpfnum(caddr_t addr)
28220Sstevel@tonic-gate {
28230Sstevel@tonic-gate 	pfn_t	pfn;
28240Sstevel@tonic-gate 	int badcaller = 0;
28250Sstevel@tonic-gate 
28260Sstevel@tonic-gate 	if (khat_running == 0)
28270Sstevel@tonic-gate 		panic("hat_getkpfnum(): called too early\n");
28280Sstevel@tonic-gate 	if ((uintptr_t)addr < kernelbase)
28290Sstevel@tonic-gate 		return (PFN_INVALID);
28300Sstevel@tonic-gate 
28315084Sjohnlev 	XPV_DISALLOW_MIGRATE();
28320Sstevel@tonic-gate 	if (segkpm && IS_KPM_ADDR(addr)) {
28330Sstevel@tonic-gate 		badcaller = 1;
28340Sstevel@tonic-gate 		pfn = hat_kpm_va2pfn(addr);
28350Sstevel@tonic-gate 	} else {
28360Sstevel@tonic-gate 		pfn = hat_getpfnum(kas.a_hat, addr);
28370Sstevel@tonic-gate 		badcaller = pf_is_memory(pfn);
28380Sstevel@tonic-gate 	}
28390Sstevel@tonic-gate 
28400Sstevel@tonic-gate 	if (badcaller)
28410Sstevel@tonic-gate 		hat_getkpfnum_badcall(caller());
28425084Sjohnlev 	XPV_ALLOW_MIGRATE();
28430Sstevel@tonic-gate 	return (pfn);
28440Sstevel@tonic-gate }
28450Sstevel@tonic-gate #endif /* __amd64 */
28460Sstevel@tonic-gate 
28470Sstevel@tonic-gate /*
28480Sstevel@tonic-gate  * int hat_probe(hat, addr)
28490Sstevel@tonic-gate  *	return 0 if no valid mapping is present.  Faster version
28500Sstevel@tonic-gate  *	of hat_getattr on certain architectures.
28510Sstevel@tonic-gate  */
28520Sstevel@tonic-gate int
28530Sstevel@tonic-gate hat_probe(hat_t *hat, caddr_t addr)
28540Sstevel@tonic-gate {
28550Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
28560Sstevel@tonic-gate 	uint_t		entry;
28570Sstevel@tonic-gate 	htable_t	*ht;
28580Sstevel@tonic-gate 	pgcnt_t		pg_off;
28590Sstevel@tonic-gate 
28603446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
28610Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
28620Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
28630Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
28640Sstevel@tonic-gate 		return (0);
28650Sstevel@tonic-gate 
28660Sstevel@tonic-gate 	/*
28670Sstevel@tonic-gate 	 * Most common use of hat_probe is from segmap. We special case it
28680Sstevel@tonic-gate 	 * for performance.
28690Sstevel@tonic-gate 	 */
28700Sstevel@tonic-gate 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
28710Sstevel@tonic-gate 		pg_off = mmu_btop(vaddr - mmu.kmap_addr);
28720Sstevel@tonic-gate 		if (mmu.pae_hat)
28730Sstevel@tonic-gate 			return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
28740Sstevel@tonic-gate 		else
28750Sstevel@tonic-gate 			return (PTE_ISVALID(
28760Sstevel@tonic-gate 			    ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
28770Sstevel@tonic-gate 	}
28780Sstevel@tonic-gate 
28790Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, &entry);
28800Sstevel@tonic-gate 	htable_release(ht);
28815084Sjohnlev 	return (ht != NULL);
28820Sstevel@tonic-gate }
28830Sstevel@tonic-gate 
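/*
 * Usage sketch (hypothetical, in the segmap style noted above); the
 * return here is a plain boolean, unlike hat_getattr():
 *
 *	if (hat_probe(kas.a_hat, addr))
 *		... a valid mapping is present, attributes unknown ...
 */
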
28840Sstevel@tonic-gate /*
28854381Sjosephb  * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
28864381Sjosephb  */
28874381Sjosephb static int
28884381Sjosephb is_it_dism(hat_t *hat, caddr_t va)
28894381Sjosephb {
28904381Sjosephb 	struct seg *seg;
28914381Sjosephb 	struct shm_data *shmd;
28924381Sjosephb 	struct spt_data *sptd;
28934381Sjosephb 
28944381Sjosephb 	seg = as_findseg(hat->hat_as, va, 0);
28954381Sjosephb 	ASSERT(seg != NULL);
28964381Sjosephb 	ASSERT(seg->s_base <= va);
28974381Sjosephb 	shmd = (struct shm_data *)seg->s_data;
28984381Sjosephb 	ASSERT(shmd != NULL);
28994381Sjosephb 	sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
29004381Sjosephb 	ASSERT(sptd != NULL);
29014381Sjosephb 	if (sptd->spt_flags & SHM_PAGEABLE)
29024381Sjosephb 		return (1);
29034381Sjosephb 	return (0);
29044381Sjosephb }
29054381Sjosephb 
29064381Sjosephb /*
29074381Sjosephb  * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
29080Sstevel@tonic-gate  * except that we use the ism_hat's existing mappings to determine the pages
29094381Sjosephb  * and protections to use for this hat. If we find a full properly aligned
29104381Sjosephb  * and sized pagetable, we will attempt to share the pagetable itself.
29110Sstevel@tonic-gate  */
29120Sstevel@tonic-gate /*ARGSUSED*/
29130Sstevel@tonic-gate int
29140Sstevel@tonic-gate hat_share(
29150Sstevel@tonic-gate 	hat_t		*hat,
29160Sstevel@tonic-gate 	caddr_t		addr,
29170Sstevel@tonic-gate 	hat_t		*ism_hat,
29180Sstevel@tonic-gate 	caddr_t		src_addr,
29190Sstevel@tonic-gate 	size_t		len,	/* almost useless value, see below.. */
29200Sstevel@tonic-gate 	uint_t		ismszc)
29210Sstevel@tonic-gate {
29220Sstevel@tonic-gate 	uintptr_t	vaddr_start = (uintptr_t)addr;
29230Sstevel@tonic-gate 	uintptr_t	vaddr;
29240Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr_start + len;
29250Sstevel@tonic-gate 	uintptr_t	ism_addr_start = (uintptr_t)src_addr;
29260Sstevel@tonic-gate 	uintptr_t	ism_addr = ism_addr_start;
29270Sstevel@tonic-gate 	uintptr_t	e_ism_addr = ism_addr + len;
29280Sstevel@tonic-gate 	htable_t	*ism_ht = NULL;
29290Sstevel@tonic-gate 	htable_t	*ht;
29300Sstevel@tonic-gate 	x86pte_t	pte;
29310Sstevel@tonic-gate 	page_t		*pp;
29320Sstevel@tonic-gate 	pfn_t		pfn;
29330Sstevel@tonic-gate 	level_t		l;
29340Sstevel@tonic-gate 	pgcnt_t		pgcnt;
29350Sstevel@tonic-gate 	uint_t		prot;
29364381Sjosephb 	int		is_dism;
29374381Sjosephb 	int		flags;
29380Sstevel@tonic-gate 
29390Sstevel@tonic-gate 	/*
29400Sstevel@tonic-gate 	 * We might be asked to share an empty DISM hat by as_dup()
29410Sstevel@tonic-gate 	 */
29420Sstevel@tonic-gate 	ASSERT(hat != kas.a_hat);
29433446Smrj 	ASSERT(eaddr <= _userlimit);
29440Sstevel@tonic-gate 	if (!(ism_hat->hat_flags & HAT_SHARED)) {
29450Sstevel@tonic-gate 		ASSERT(hat_get_mapped_size(ism_hat) == 0);
29460Sstevel@tonic-gate 		return (0);
29470Sstevel@tonic-gate 	}
29485084Sjohnlev 	XPV_DISALLOW_MIGRATE();
29490Sstevel@tonic-gate 
29500Sstevel@tonic-gate 	/*
29510Sstevel@tonic-gate 	 * The SPT segment driver often passes us a size larger than there are
29520Sstevel@tonic-gate 	 * valid mappings. That's because it rounds the segment size up to a
29530Sstevel@tonic-gate 	 * large pagesize, even if the actual memory mapped by ism_hat is less.
29540Sstevel@tonic-gate 	 */
29550Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr_start));
29560Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(ism_addr_start));
29570Sstevel@tonic-gate 	ASSERT(ism_hat->hat_flags & HAT_SHARED);
29584381Sjosephb 	is_dism = is_it_dism(hat, addr);
29590Sstevel@tonic-gate 	while (ism_addr < e_ism_addr) {
29600Sstevel@tonic-gate 		/*
29610Sstevel@tonic-gate 		 * use htable_walk to get the next valid ISM mapping
29620Sstevel@tonic-gate 		 */
29630Sstevel@tonic-gate 		pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
29640Sstevel@tonic-gate 		if (ism_ht == NULL)
29650Sstevel@tonic-gate 			break;
29660Sstevel@tonic-gate 
29670Sstevel@tonic-gate 		/*
29684381Sjosephb 		 * First check to see if we already share the page table.
29694381Sjosephb 		 */
29704381Sjosephb 		l = ism_ht->ht_level;
29714381Sjosephb 		vaddr = vaddr_start + (ism_addr - ism_addr_start);
29724381Sjosephb 		ht = htable_lookup(hat, vaddr, l);
29734381Sjosephb 		if (ht != NULL) {
29744381Sjosephb 			if (ht->ht_flags & HTABLE_SHARED_PFN)
29754381Sjosephb 				goto shared;
29764381Sjosephb 			htable_release(ht);
29774381Sjosephb 			goto not_shared;
29784381Sjosephb 		}
29794381Sjosephb 
29804381Sjosephb 		/*
29814381Sjosephb 		 * Can't ever share top table.
29824381Sjosephb 		 */
29834381Sjosephb 		if (l == mmu.max_level)
29844381Sjosephb 			goto not_shared;
29854381Sjosephb 
29864381Sjosephb 		/*
29874381Sjosephb 		 * Avoid level mismatches later due to DISM faults.
29884381Sjosephb 		 */
29894381Sjosephb 		if (is_dism && l > 0)
29904381Sjosephb 			goto not_shared;
29914381Sjosephb 
29924381Sjosephb 		/*
29934381Sjosephb 		 * addresses and lengths must align
29944381Sjosephb 		 * table must be fully populated
29954381Sjosephb 		 * no lower level page tables
29964381Sjosephb 		 */
29974381Sjosephb 		if (ism_addr != ism_ht->ht_vaddr ||
29984381Sjosephb 		    (vaddr & LEVEL_OFFSET(l + 1)) != 0)
29994381Sjosephb 			goto not_shared;
30004381Sjosephb 
30014381Sjosephb 		/*
30024381Sjosephb 		 * The range of address space must cover a full table.
30030Sstevel@tonic-gate 		 */
30045159Sjohnlev 		if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
30054381Sjosephb 			goto not_shared;
30064381Sjosephb 
30074381Sjosephb 		/*
30084381Sjosephb 		 * All entries in the ISM page table must be leaf PTEs.
30094381Sjosephb 		 */
30104381Sjosephb 		if (l > 0) {
30114381Sjosephb 			int e;
30124381Sjosephb 
30134381Sjosephb 			/*
30144381Sjosephb 			 * We know the 0th is from htable_walk() above.
30154381Sjosephb 			 */
30164381Sjosephb 			for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
30174381Sjosephb 				x86pte_t pte;
30184381Sjosephb 				pte = x86pte_get(ism_ht, e);
30194381Sjosephb 				if (!PTE_ISPAGE(pte, l))
30204381Sjosephb 					goto not_shared;
30214381Sjosephb 			}
30224381Sjosephb 		}
30234381Sjosephb 
30244381Sjosephb 		/*
30254381Sjosephb 		 * share the page table
30264381Sjosephb 		 */
30274381Sjosephb 		ht = htable_create(hat, vaddr, l, ism_ht);
30284381Sjosephb shared:
30294381Sjosephb 		ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
30304381Sjosephb 		ASSERT(ht->ht_shares == ism_ht);
30314381Sjosephb 		hat->hat_ism_pgcnt +=
30324381Sjosephb 		    (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
30334381Sjosephb 		    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
30344381Sjosephb 		ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
30354381Sjosephb 		htable_release(ht);
30364381Sjosephb 		ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
30374381Sjosephb 		htable_release(ism_ht);
30384381Sjosephb 		ism_ht = NULL;
30394381Sjosephb 		continue;
30404381Sjosephb 
30414381Sjosephb not_shared:
30424381Sjosephb 		/*
30434381Sjosephb 		 * Unable to share the page table. Instead we will
30444381Sjosephb 		 * create new mappings from the values in the ISM mappings.
30454381Sjosephb 		 * Figure out what level size mappings to use;
30464381Sjosephb 		 * Figure out what level size mappings to use:
30470Sstevel@tonic-gate 		for (l = ism_ht->ht_level; l > 0; --l) {
30480Sstevel@tonic-gate 			if (LEVEL_SIZE(l) <= eaddr - vaddr &&
30490Sstevel@tonic-gate 			    (vaddr & LEVEL_OFFSET(l)) == 0)
30500Sstevel@tonic-gate 				break;
30510Sstevel@tonic-gate 		}
30520Sstevel@tonic-gate 
30530Sstevel@tonic-gate 		/*
30540Sstevel@tonic-gate 		 * The ISM mapping might be larger than the share area;
30554381Sjosephb 		 * be careful to truncate it if needed.
30560Sstevel@tonic-gate 		 */
30570Sstevel@tonic-gate 		if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
30580Sstevel@tonic-gate 			pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
30590Sstevel@tonic-gate 		} else {
30600Sstevel@tonic-gate 			pgcnt = mmu_btop(eaddr - vaddr);
30610Sstevel@tonic-gate 			l = 0;
30620Sstevel@tonic-gate 		}
30630Sstevel@tonic-gate 
30640Sstevel@tonic-gate 		pfn = PTE2PFN(pte, ism_ht->ht_level);
30650Sstevel@tonic-gate 		ASSERT(pfn != PFN_INVALID);
30660Sstevel@tonic-gate 		while (pgcnt > 0) {
30670Sstevel@tonic-gate 			/*
30680Sstevel@tonic-gate 			 * Make a new pte for the PFN for this level.
30690Sstevel@tonic-gate 			 * Copy protections for the pte from the ISM pte.
30700Sstevel@tonic-gate 			 */
30710Sstevel@tonic-gate 			pp = page_numtopp_nolock(pfn);
30720Sstevel@tonic-gate 			ASSERT(pp != NULL);
30730Sstevel@tonic-gate 
30740Sstevel@tonic-gate 			prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
30750Sstevel@tonic-gate 			if (PTE_GET(pte, PT_WRITABLE))
30760Sstevel@tonic-gate 				prot |= PROT_WRITE;
30770Sstevel@tonic-gate 			if (!PTE_GET(pte, PT_NX))
30780Sstevel@tonic-gate 				prot |= PROT_EXEC;
30790Sstevel@tonic-gate 
30804381Sjosephb 			flags = HAT_LOAD;
30814381Sjosephb 			if (!is_dism)
30824381Sjosephb 				flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
30834381Sjosephb 			while (hati_load_common(hat, vaddr, pp, prot, flags,
30843446Smrj 			    l, pfn) != 0) {
30853446Smrj 				if (l == 0)
30863446Smrj 					panic("hati_load_common() failure");
30873446Smrj 				--l;
30883446Smrj 			}
30890Sstevel@tonic-gate 
30900Sstevel@tonic-gate 			vaddr += LEVEL_SIZE(l);
30910Sstevel@tonic-gate 			ism_addr += LEVEL_SIZE(l);
30920Sstevel@tonic-gate 			pfn += mmu_btop(LEVEL_SIZE(l));
30930Sstevel@tonic-gate 			pgcnt -= mmu_btop(LEVEL_SIZE(l));
30940Sstevel@tonic-gate 		}
30950Sstevel@tonic-gate 	}
30960Sstevel@tonic-gate 	if (ism_ht != NULL)
30970Sstevel@tonic-gate 		htable_release(ism_ht);
30985084Sjohnlev 	XPV_ALLOW_MIGRATE();
30990Sstevel@tonic-gate 	return (0);
31000Sstevel@tonic-gate }
31010Sstevel@tonic-gate 
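/*
 * A concrete example of the sharing tests above, using amd64 numbers
 * (4K base pages, 512 PTEs per pagetable): a level 0 pagetable maps
 * 512 * 4K = 2M, so hat_share() can share it only when vaddr is 2M
 * aligned ((vaddr & LEVEL_OFFSET(1)) == 0), ism_addr sits at the
 * start of the ISM pagetable, and at least LEVEL_SIZE(1) == 2M of the
 * segment remains.  Anything smaller or misaligned takes the
 * not_shared: path and is mapped PTE by PTE instead.
 */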
31020Sstevel@tonic-gate 
31030Sstevel@tonic-gate /*
31040Sstevel@tonic-gate  * hat_unshare() is similar to hat_unload_callback(), but
31050Sstevel@tonic-gate  * we have to look for empty shared pagetables. Note that
31060Sstevel@tonic-gate  * hat_unshare() is always invoked against an entire segment.
31070Sstevel@tonic-gate  */
31080Sstevel@tonic-gate /*ARGSUSED*/
31090Sstevel@tonic-gate void
31100Sstevel@tonic-gate hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
31110Sstevel@tonic-gate {
31124654Sjosephb 	uint64_t	vaddr = (uintptr_t)addr;
31130Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
31140Sstevel@tonic-gate 	htable_t	*ht = NULL;
31150Sstevel@tonic-gate 	uint_t		need_demaps = 0;
31164381Sjosephb 	int		flags = HAT_UNLOAD_UNMAP;
31174381Sjosephb 	level_t		l;
31180Sstevel@tonic-gate 
31190Sstevel@tonic-gate 	ASSERT(hat != kas.a_hat);
31203446Smrj 	ASSERT(eaddr <= _userlimit);
31210Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
31220Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
31235084Sjohnlev 	XPV_DISALLOW_MIGRATE();
31240Sstevel@tonic-gate 
31250Sstevel@tonic-gate 	/*
31260Sstevel@tonic-gate 	 * First go through and remove any shared pagetables.
31270Sstevel@tonic-gate 	 *
31283446Smrj 	 * Note that it's ok to delay the TLB shootdown till the entire range is
31290Sstevel@tonic-gate 	 * finished, because if hat_pageunload() were to unload a shared
31303446Smrj 	 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate.
31310Sstevel@tonic-gate 	 */
31324381Sjosephb 	l = mmu.max_page_level;
31334381Sjosephb 	if (l == mmu.max_level)
31344381Sjosephb 		--l;
31354381Sjosephb 	for (; l >= 0; --l) {
31364381Sjosephb 		for (vaddr = (uintptr_t)addr; vaddr < eaddr;
31374381Sjosephb 		    vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
31384381Sjosephb 			ASSERT(!IN_VA_HOLE(vaddr));
31394381Sjosephb 			/*
31404381Sjosephb 			 * find a pagetable that maps the current address
31414381Sjosephb 			 */
31424381Sjosephb 			ht = htable_lookup(hat, vaddr, l);
31434381Sjosephb 			if (ht == NULL)
31444381Sjosephb 				continue;
31450Sstevel@tonic-gate 			if (ht->ht_flags & HTABLE_SHARED_PFN) {
31460Sstevel@tonic-gate 				/*
31474381Sjosephb 				 * clear page count, set valid_cnt to 0,
31484381Sjosephb 				 * let htable_release() finish the job
31490Sstevel@tonic-gate 				 */
31504381Sjosephb 				hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
31514381Sjosephb 				    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
31520Sstevel@tonic-gate 				ht->ht_valid_cnt = 0;
31530Sstevel@tonic-gate 				need_demaps = 1;
31540Sstevel@tonic-gate 			}
31550Sstevel@tonic-gate 			htable_release(ht);
31560Sstevel@tonic-gate 		}
31570Sstevel@tonic-gate 	}
31580Sstevel@tonic-gate 
31590Sstevel@tonic-gate 	/*
31600Sstevel@tonic-gate 	 * flush the TLBs - since we're probably dealing with MANY mappings
31610Sstevel@tonic-gate 	 * we do just one CR3 reload.
31620Sstevel@tonic-gate 	 */
31630Sstevel@tonic-gate 	if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
31643446Smrj 		hat_tlb_inval(hat, DEMAP_ALL_ADDR);
31650Sstevel@tonic-gate 
31660Sstevel@tonic-gate 	/*
31670Sstevel@tonic-gate 	 * Now go back and clean up any unaligned mappings that
31680Sstevel@tonic-gate 	 * couldn't share pagetables.
31690Sstevel@tonic-gate 	 */
31704381Sjosephb 	if (!is_it_dism(hat, addr))
31714381Sjosephb 		flags |= HAT_UNLOAD_UNLOCK;
31724381Sjosephb 	hat_unload(hat, addr, len, flags);
31735084Sjohnlev 	XPV_ALLOW_MIGRATE();
31740Sstevel@tonic-gate }
31750Sstevel@tonic-gate 
31760Sstevel@tonic-gate 
31770Sstevel@tonic-gate /*
31780Sstevel@tonic-gate  * hat_reserve() does nothing
31790Sstevel@tonic-gate  */
31800Sstevel@tonic-gate /*ARGSUSED*/
31810Sstevel@tonic-gate void
31820Sstevel@tonic-gate hat_reserve(struct as *as, caddr_t addr, size_t len)
31830Sstevel@tonic-gate {
31840Sstevel@tonic-gate }
31850Sstevel@tonic-gate 
31860Sstevel@tonic-gate 
31870Sstevel@tonic-gate /*
31880Sstevel@tonic-gate  * Called when all mappings to a page should have write permission removed.
31899903SPavel.Tatashin@Sun.COM  * Mostly stolen from hat_pagesync()
31900Sstevel@tonic-gate  */
31910Sstevel@tonic-gate static void
31920Sstevel@tonic-gate hati_page_clrwrt(struct page *pp)
31930Sstevel@tonic-gate {
31940Sstevel@tonic-gate 	hment_t		*hm = NULL;
31950Sstevel@tonic-gate 	htable_t	*ht;
31960Sstevel@tonic-gate 	uint_t		entry;
31970Sstevel@tonic-gate 	x86pte_t	old;
31980Sstevel@tonic-gate 	x86pte_t	new;
31990Sstevel@tonic-gate 	uint_t		pszc = 0;
32000Sstevel@tonic-gate 
32015084Sjohnlev 	XPV_DISALLOW_MIGRATE();
32020Sstevel@tonic-gate next_size:
32030Sstevel@tonic-gate 	/*
32040Sstevel@tonic-gate 	 * walk thru the mapping list clearing write permission
32050Sstevel@tonic-gate 	 */
32060Sstevel@tonic-gate 	x86_hm_enter(pp);
32070Sstevel@tonic-gate 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
32080Sstevel@tonic-gate 		if (ht->ht_level < pszc)
32090Sstevel@tonic-gate 			continue;
32100Sstevel@tonic-gate 		old = x86pte_get(ht, entry);
32110Sstevel@tonic-gate 
32120Sstevel@tonic-gate 		for (;;) {
32130Sstevel@tonic-gate 			/*
32140Sstevel@tonic-gate 			 * Is this mapping of interest?
32150Sstevel@tonic-gate 			 */
32160Sstevel@tonic-gate 			if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
32170Sstevel@tonic-gate 			    PTE_GET(old, PT_WRITABLE) == 0)
32180Sstevel@tonic-gate 				break;
32190Sstevel@tonic-gate 
32200Sstevel@tonic-gate 			/*
32210Sstevel@tonic-gate 			 * Clear ref/mod writable bits. This requires cross
32220Sstevel@tonic-gate 			 * calls to ensure any executing TLBs see cleared bits.
32230Sstevel@tonic-gate 			 */
32240Sstevel@tonic-gate 			new = old;
32250Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
32260Sstevel@tonic-gate 			old = hati_update_pte(ht, entry, old, new);
32270Sstevel@tonic-gate 			if (old != 0)
32280Sstevel@tonic-gate 				continue;
32290Sstevel@tonic-gate 
32300Sstevel@tonic-gate 			break;
32310Sstevel@tonic-gate 		}
32320Sstevel@tonic-gate 	}
32330Sstevel@tonic-gate 	x86_hm_exit(pp);
32340Sstevel@tonic-gate 	while (pszc < pp->p_szc) {
32350Sstevel@tonic-gate 		page_t *tpp;
32360Sstevel@tonic-gate 		pszc++;
32370Sstevel@tonic-gate 		tpp = PP_GROUPLEADER(pp, pszc);
32380Sstevel@tonic-gate 		if (pp != tpp) {
32390Sstevel@tonic-gate 			pp = tpp;
32400Sstevel@tonic-gate 			goto next_size;
32410Sstevel@tonic-gate 		}
32420Sstevel@tonic-gate 	}
32435084Sjohnlev 	XPV_ALLOW_MIGRATE();
32440Sstevel@tonic-gate }
32450Sstevel@tonic-gate 
32460Sstevel@tonic-gate /*
32470Sstevel@tonic-gate  * void hat_page_setattr(pp, flag)
32480Sstevel@tonic-gate  * void hat_page_clrattr(pp, flag)
32490Sstevel@tonic-gate  *	used to set/clr ref/mod bits.
32500Sstevel@tonic-gate  */
32510Sstevel@tonic-gate void
32520Sstevel@tonic-gate hat_page_setattr(struct page *pp, uint_t flag)
32530Sstevel@tonic-gate {
32540Sstevel@tonic-gate 	vnode_t		*vp = pp->p_vnode;
32550Sstevel@tonic-gate 	kmutex_t	*vphm = NULL;
32560Sstevel@tonic-gate 	page_t		**listp;
32574324Sqiao 	int		noshuffle;
32584324Sqiao 
32594324Sqiao 	noshuffle = flag & P_NSH;
32604324Sqiao 	flag &= ~P_NSH;
32610Sstevel@tonic-gate 
32620Sstevel@tonic-gate 	if (PP_GETRM(pp, flag) == flag)
32630Sstevel@tonic-gate 		return;
32640Sstevel@tonic-gate 
32654324Sqiao 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
32664324Sqiao 	    !noshuffle) {
32670Sstevel@tonic-gate 		vphm = page_vnode_mutex(vp);
32680Sstevel@tonic-gate 		mutex_enter(vphm);
32690Sstevel@tonic-gate 	}
32700Sstevel@tonic-gate 
32710Sstevel@tonic-gate 	PP_SETRM(pp, flag);
32720Sstevel@tonic-gate 
32730Sstevel@tonic-gate 	if (vphm != NULL) {
32740Sstevel@tonic-gate 
32750Sstevel@tonic-gate 		/*
32760Sstevel@tonic-gate 		 * Some File Systems examine v_pages for NULL w/o
32770Sstevel@tonic-gate 		 * grabbing the vphm mutex. Must not let it become NULL when
32780Sstevel@tonic-gate 		 * pp is the only page on the list.
32790Sstevel@tonic-gate 		 */
32800Sstevel@tonic-gate 		if (pp->p_vpnext != pp) {
32810Sstevel@tonic-gate 			page_vpsub(&vp->v_pages, pp);
32820Sstevel@tonic-gate 			if (vp->v_pages != NULL)
32830Sstevel@tonic-gate 				listp = &vp->v_pages->p_vpprev->p_vpnext;
32840Sstevel@tonic-gate 			else
32850Sstevel@tonic-gate 				listp = &vp->v_pages;
32860Sstevel@tonic-gate 			page_vpadd(listp, pp);
32870Sstevel@tonic-gate 		}
32880Sstevel@tonic-gate 		mutex_exit(vphm);
32890Sstevel@tonic-gate 	}
32900Sstevel@tonic-gate }
32910Sstevel@tonic-gate 
32920Sstevel@tonic-gate void
32930Sstevel@tonic-gate hat_page_clrattr(struct page *pp, uint_t flag)
32940Sstevel@tonic-gate {
32950Sstevel@tonic-gate 	vnode_t		*vp = pp->p_vnode;
32960Sstevel@tonic-gate 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
32970Sstevel@tonic-gate 
32980Sstevel@tonic-gate 	/*
32992999Sstans 	 * Caller is expected to hold page's io lock for VMODSORT to work
33002999Sstans 	 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
33012999Sstans 	 * bit is cleared.
33022999Sstans 	 * We don't assert this, to avoid tripping some existing third party
33032999Sstans 	 * code. The dirty page is moved back to top of the v_page list
33042999Sstans 	 * after IO is done in pvn_write_done().
33050Sstevel@tonic-gate 	 */
33060Sstevel@tonic-gate 	PP_CLRRM(pp, flag);
33070Sstevel@tonic-gate 
33082999Sstans 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
33090Sstevel@tonic-gate 
33100Sstevel@tonic-gate 		/*
33110Sstevel@tonic-gate 		 * VMODSORT works by removing write permissions and getting
33120Sstevel@tonic-gate 		 * a fault when a page is made dirty. At this point
33130Sstevel@tonic-gate 		 * we need to remove write permission from all mappings
33140Sstevel@tonic-gate 		 * to this page.
33150Sstevel@tonic-gate 		 */
33160Sstevel@tonic-gate 		hati_page_clrwrt(pp);
33170Sstevel@tonic-gate 	}
33180Sstevel@tonic-gate }
33190Sstevel@tonic-gate 
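/*
 * Usage sketch (hypothetical file system writeback path): clear the
 * mod bit after I/O completes, with the page's io lock held as
 * described above.  For VMODSORT vnodes this also write-protects all
 * mappings via hati_page_clrwrt(), so the next store refaults and
 * re-dirties pp:
 *
 *	hat_page_clrattr(pp, P_MOD);
 */
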
33200Sstevel@tonic-gate /*
33210Sstevel@tonic-gate  *	If flag is specified, returns 0 if attribute is disabled
33229903SPavel.Tatashin@Sun.COM  *	and nonzero if enabled.  If flag specifies multiple attributes
33239903SPavel.Tatashin@Sun.COM  *	then returns 0 if ALL attributes are disabled.  This is an advisory
33240Sstevel@tonic-gate  *	call.
33250Sstevel@tonic-gate  */
33260Sstevel@tonic-gate uint_t
33270Sstevel@tonic-gate hat_page_getattr(struct page *pp, uint_t flag)
33280Sstevel@tonic-gate {
33290Sstevel@tonic-gate 	return (PP_GETRM(pp, flag));
33300Sstevel@tonic-gate }
33310Sstevel@tonic-gate 
33320Sstevel@tonic-gate 
33330Sstevel@tonic-gate /*
33340Sstevel@tonic-gate  * common code used by hat_pageunload() and hment_steal()
33350Sstevel@tonic-gate  */
33360Sstevel@tonic-gate hment_t *
33370Sstevel@tonic-gate hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
33380Sstevel@tonic-gate {
33390Sstevel@tonic-gate 	x86pte_t old_pte;
33400Sstevel@tonic-gate 	pfn_t pfn = pp->p_pagenum;
33410Sstevel@tonic-gate 	hment_t *hm;
33420Sstevel@tonic-gate 
33430Sstevel@tonic-gate 	/*
33440Sstevel@tonic-gate 	 * We need to acquire a hold on the htable in order to
33450Sstevel@tonic-gate 	 * do the invalidate. We know the htable must exist, since
33460Sstevel@tonic-gate 	 * unmaps don't release the htable until after removing any
33470Sstevel@tonic-gate 	 * hment. Having x86_hm_enter() keeps that from proceeding.
33480Sstevel@tonic-gate 	 */
33490Sstevel@tonic-gate 	htable_acquire(ht);
33500Sstevel@tonic-gate 
33510Sstevel@tonic-gate 	/*
33520Sstevel@tonic-gate 	 * Invalidate the PTE and remove the hment.
33530Sstevel@tonic-gate 	 */
33543446Smrj 	old_pte = x86pte_inval(ht, entry, 0, NULL);
335547Sjosephb 	if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
33563446Smrj 		panic("x86pte_inval() failure found PTE = " FMT_PTE
335747Sjosephb 		    " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
335847Sjosephb 		    old_pte, pfn, (uintptr_t)ht, entry);
335947Sjosephb 	}
33600Sstevel@tonic-gate 
33610Sstevel@tonic-gate 	/*
33620Sstevel@tonic-gate 	 * Clean up all the htable information for this mapping
33630Sstevel@tonic-gate 	 */
33640Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
33650Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
33660Sstevel@tonic-gate 	PGCNT_DEC(ht->ht_hat, ht->ht_level);
33670Sstevel@tonic-gate 
33680Sstevel@tonic-gate 	/*
33690Sstevel@tonic-gate 	 * sync ref/mod bits to the page_t
33700Sstevel@tonic-gate 	 */
33713446Smrj 	if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
33720Sstevel@tonic-gate 		hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
33730Sstevel@tonic-gate 
33740Sstevel@tonic-gate 	/*
33750Sstevel@tonic-gate 	 * Remove the mapping list entry for this page.
33760Sstevel@tonic-gate 	 */
33770Sstevel@tonic-gate 	hm = hment_remove(pp, ht, entry);
33780Sstevel@tonic-gate 
33790Sstevel@tonic-gate 	/*
33800Sstevel@tonic-gate 	 * drop the mapping list lock so that we might free the
33810Sstevel@tonic-gate 	 * hment and htable.
33820Sstevel@tonic-gate 	 */
33830Sstevel@tonic-gate 	x86_hm_exit(pp);
33840Sstevel@tonic-gate 	htable_release(ht);
33850Sstevel@tonic-gate 	return (hm);
33860Sstevel@tonic-gate }
33870Sstevel@tonic-gate 
33881841Spraks extern int	vpm_enable;
33890Sstevel@tonic-gate /*
33900Sstevel@tonic-gate  * Unload all translations to a page. If the page is a subpage of a large
33910Sstevel@tonic-gate  * page, the large page mappings are also removed.
33920Sstevel@tonic-gate  *
33930Sstevel@tonic-gate  * The forceflags are unused.
33940Sstevel@tonic-gate  */
33950Sstevel@tonic-gate 
33960Sstevel@tonic-gate /*ARGSUSED*/
33970Sstevel@tonic-gate static int
33980Sstevel@tonic-gate hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
33990Sstevel@tonic-gate {
34000Sstevel@tonic-gate 	page_t		*cur_pp = pp;
34010Sstevel@tonic-gate 	hment_t		*hm;
34020Sstevel@tonic-gate 	hment_t		*prev;
34030Sstevel@tonic-gate 	htable_t	*ht;
34040Sstevel@tonic-gate 	uint_t		entry;
34050Sstevel@tonic-gate 	level_t		level;
34060Sstevel@tonic-gate 
34075084Sjohnlev 	XPV_DISALLOW_MIGRATE();
3408*12532Sjoe.bonasera@oracle.com 
3409*12532Sjoe.bonasera@oracle.com 	/*
3410*12532Sjoe.bonasera@oracle.com 	 * prevent recursion due to kmem_free()
3411*12532Sjoe.bonasera@oracle.com 	 */
3412*12532Sjoe.bonasera@oracle.com 	++curthread->t_hatdepth;
3413*12532Sjoe.bonasera@oracle.com 	ASSERT(curthread->t_hatdepth < 16);
3414*12532Sjoe.bonasera@oracle.com 
34151841Spraks #if defined(__amd64)
34161841Spraks 	/*
34171841Spraks 	 * clear the vpm ref.
34181841Spraks 	 */
34191841Spraks 	if (vpm_enable) {
34201841Spraks 		pp->p_vpmref = 0;
34211841Spraks 	}
34221841Spraks #endif
34230Sstevel@tonic-gate 	/*
34240Sstevel@tonic-gate 	 * The loop with next_size handles pages with multiple pagesize mappings
34250Sstevel@tonic-gate 	 */
34260Sstevel@tonic-gate next_size:
34270Sstevel@tonic-gate 	for (;;) {
34280Sstevel@tonic-gate 
34290Sstevel@tonic-gate 		/*
34300Sstevel@tonic-gate 		 * Get a mapping list entry
34310Sstevel@tonic-gate 		 */
34320Sstevel@tonic-gate 		x86_hm_enter(cur_pp);
34330Sstevel@tonic-gate 		for (prev = NULL; ; prev = hm) {
34340Sstevel@tonic-gate 			hm = hment_walk(cur_pp, &ht, &entry, prev);
34350Sstevel@tonic-gate 			if (hm == NULL) {
34360Sstevel@tonic-gate 				x86_hm_exit(cur_pp);
34370Sstevel@tonic-gate 
34380Sstevel@tonic-gate 				/*
34390Sstevel@tonic-gate 				 * If not part of a larger page, we're done.
34400Sstevel@tonic-gate 				 */
34413446Smrj 				if (cur_pp->p_szc <= pg_szcd) {
3442*12532Sjoe.bonasera@oracle.com 					ASSERT(curthread->t_hatdepth > 0);
3443*12532Sjoe.bonasera@oracle.com 					--curthread->t_hatdepth;
34445084Sjohnlev 					XPV_ALLOW_MIGRATE();
34450Sstevel@tonic-gate 					return (0);
34463446Smrj 				}
34470Sstevel@tonic-gate 
34480Sstevel@tonic-gate 				/*
34490Sstevel@tonic-gate 				 * Else check the next larger page size.
34500Sstevel@tonic-gate 				 * hat_page_demote() may decrease p_szc
34510Sstevel@tonic-gate 				 * but that's ok, we'll just take an extra
34520Sstevel@tonic-gate 				 * trip to discover there are no larger mappings
34530Sstevel@tonic-gate 				 * and return.
34540Sstevel@tonic-gate 				 */
34550Sstevel@tonic-gate 				++pg_szcd;
34560Sstevel@tonic-gate 				cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
34570Sstevel@tonic-gate 				goto next_size;
34580Sstevel@tonic-gate 			}
34590Sstevel@tonic-gate 
34600Sstevel@tonic-gate 			/*
34610Sstevel@tonic-gate 			 * If this mapping size matches, remove it.
34620Sstevel@tonic-gate 			 */
34630Sstevel@tonic-gate 			level = ht->ht_level;
34640Sstevel@tonic-gate 			if (level == pg_szcd)
34650Sstevel@tonic-gate 				break;
34660Sstevel@tonic-gate 		}
34670Sstevel@tonic-gate 
34680Sstevel@tonic-gate 		/*
34690Sstevel@tonic-gate 		 * Remove the mapping list entry for this page.
34700Sstevel@tonic-gate 		 * Note this does the x86_hm_exit() for us.
34710Sstevel@tonic-gate 		 */
34720Sstevel@tonic-gate 		hm = hati_page_unmap(cur_pp, ht, entry);
34730Sstevel@tonic-gate 		if (hm != NULL)
34740Sstevel@tonic-gate 			hment_free(hm);
34750Sstevel@tonic-gate 	}
34760Sstevel@tonic-gate }
34770Sstevel@tonic-gate 
34780Sstevel@tonic-gate int
34790Sstevel@tonic-gate hat_pageunload(struct page *pp, uint_t forceflag)
34800Sstevel@tonic-gate {
34810Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp));
34820Sstevel@tonic-gate 	return (hati_pageunload(pp, 0, forceflag));
34830Sstevel@tonic-gate }
34840Sstevel@tonic-gate 
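/*
 * Usage sketch (hypothetical caller): the page must be locked EXCL,
 * and the forceflag, as noted above, is ignored:
 *
 *	ASSERT(PAGE_EXCL(pp));
 *	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
 */
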
34850Sstevel@tonic-gate /*
34860Sstevel@tonic-gate  * Unload all large mappings to pp and reduce by 1 p_szc field of every large
34870Sstevel@tonic-gate  * page level that included pp.
34880Sstevel@tonic-gate  *
34890Sstevel@tonic-gate  * pp must be locked EXCL. Even though no other constituent pages are locked
34900Sstevel@tonic-gate  * it's legal to unload large mappings to pp because all constituent pages of
34910Sstevel@tonic-gate  * large locked mappings have to be locked SHARED.  Therefore if we have an EXCL
34920Sstevel@tonic-gate  * lock on one of the constituent pages, none of the large mappings to pp are
34930Sstevel@tonic-gate  * locked.
34940Sstevel@tonic-gate  *
34950Sstevel@tonic-gate  * Change (always decrease) p_szc field starting from the last constituent
34960Sstevel@tonic-gate  * page and ending with root constituent page so that root's pszc always shows
34970Sstevel@tonic-gate  * the area where hat_page_demote() may be active.
34980Sstevel@tonic-gate  *
34990Sstevel@tonic-gate  * This mechanism is only used for file system pages where it's not always
35000Sstevel@tonic-gate  * possible to get EXCL locks on all constituent pages to demote the size code
35010Sstevel@tonic-gate  * (as is done for anonymous or kernel large pages).
35020Sstevel@tonic-gate  */
35030Sstevel@tonic-gate void
35040Sstevel@tonic-gate hat_page_demote(page_t *pp)
35050Sstevel@tonic-gate {
35060Sstevel@tonic-gate 	uint_t		pszc;
35070Sstevel@tonic-gate 	uint_t		rszc;
35080Sstevel@tonic-gate 	uint_t		szc;
35090Sstevel@tonic-gate 	page_t		*rootpp;
35100Sstevel@tonic-gate 	page_t		*firstpp;
35110Sstevel@tonic-gate 	page_t		*lastpp;
35120Sstevel@tonic-gate 	pgcnt_t		pgcnt;
35130Sstevel@tonic-gate 
35140Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp));
35150Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
35160Sstevel@tonic-gate 	ASSERT(page_szc_lock_assert(pp));
35170Sstevel@tonic-gate 
35180Sstevel@tonic-gate 	if (pp->p_szc == 0)
35190Sstevel@tonic-gate 		return;
35200Sstevel@tonic-gate 
35210Sstevel@tonic-gate 	rootpp = PP_GROUPLEADER(pp, 1);
35220Sstevel@tonic-gate 	(void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
35230Sstevel@tonic-gate 
35240Sstevel@tonic-gate 	/*
35250Sstevel@tonic-gate 	 * all large mappings to pp are gone
35260Sstevel@tonic-gate 	 * and no new ones can be set up since pp is locked exclusively.
35270Sstevel@tonic-gate 	 *
35280Sstevel@tonic-gate 	 * Lock the root to make sure there's only one hat_page_demote()
35290Sstevel@tonic-gate 	 * outstanding within the area of this root's pszc.
35300Sstevel@tonic-gate 	 *
35310Sstevel@tonic-gate 	 * A second potential hat_page_demote() is already eliminated by the upper
35320Sstevel@tonic-gate 	 * VM layer via page_szc_lock() but we don't rely on it and use our
35330Sstevel@tonic-gate 	 * own locking (so that upper layer locking can be changed without
35340Sstevel@tonic-gate 	 * assumptions that hat depends on upper layer VM to prevent multiple
35350Sstevel@tonic-gate 	 * hat_page_demote() to be issued simultaneously to the same large
35360Sstevel@tonic-gate 	 * page).
35370Sstevel@tonic-gate 	 */
35380Sstevel@tonic-gate again:
35390Sstevel@tonic-gate 	pszc = pp->p_szc;
35400Sstevel@tonic-gate 	if (pszc == 0)
35410Sstevel@tonic-gate 		return;
35420Sstevel@tonic-gate 	rootpp = PP_GROUPLEADER(pp, pszc);
35430Sstevel@tonic-gate 	x86_hm_enter(rootpp);
35440Sstevel@tonic-gate 	/*
35450Sstevel@tonic-gate 	 * If root's p_szc is different from pszc, we raced with another
35460Sstevel@tonic-gate 	 * hat_page_demote().  Drop the lock and try to find the root again.
35470Sstevel@tonic-gate 	 * If root's p_szc is greater than pszc, a previous hat_page_demote() is
35480Sstevel@tonic-gate 	 * not done yet.  Take and release mlist lock of root's root to wait
35490Sstevel@tonic-gate 	 * for previous hat_page_demote() to complete.
35500Sstevel@tonic-gate 	 */
35510Sstevel@tonic-gate 	if ((rszc = rootpp->p_szc) != pszc) {
35520Sstevel@tonic-gate 		x86_hm_exit(rootpp);
35530Sstevel@tonic-gate 		if (rszc > pszc) {
35540Sstevel@tonic-gate 			/* p_szc of a locked non free page can't increase */
35550Sstevel@tonic-gate 			ASSERT(pp != rootpp);
35560Sstevel@tonic-gate 
35570Sstevel@tonic-gate 			rootpp = PP_GROUPLEADER(rootpp, rszc);
35580Sstevel@tonic-gate 			x86_hm_enter(rootpp);
35590Sstevel@tonic-gate 			x86_hm_exit(rootpp);
35600Sstevel@tonic-gate 		}
35610Sstevel@tonic-gate 		goto again;
35620Sstevel@tonic-gate 	}
35630Sstevel@tonic-gate 	ASSERT(pp->p_szc == pszc);
35640Sstevel@tonic-gate 
35650Sstevel@tonic-gate 	/*
35660Sstevel@tonic-gate 	 * Decrement by 1 p_szc of every constituent page of a region that
35670Sstevel@tonic-gate 	 * covered pp. For example if original szc is 3 it gets changed to 2
35680Sstevel@tonic-gate 	 * everywhere except in region 2 that covered pp. Region 2 that
35690Sstevel@tonic-gate 	 * covered pp gets demoted to 1 everywhere except in region 1 that
35700Sstevel@tonic-gate 	 * covered pp. The region 1 that covered pp is demoted to region
35710Sstevel@tonic-gate 	 * 0. It's done this way because from region 3 we removed level 3
35720Sstevel@tonic-gate 	 * mappings, from region 2 that covered pp we removed level 2 mappings
35730Sstevel@tonic-gate 	 * and from region 1 that covered pp we removed level 1 mappings.  All
35740Sstevel@tonic-gate 	 * changes are done from high pfn's to low pfn's so that roots
35750Sstevel@tonic-gate 	 * are changed last allowing one to know the largest region where
35760Sstevel@tonic-gate 	 * hat_page_demote() is still active by only looking at the root page.
35770Sstevel@tonic-gate 	 *
35780Sstevel@tonic-gate 	 * This algorithm is implemented in 2 while loops. First loop changes
35790Sstevel@tonic-gate 	 * p_szc of pages to the right of pp's level 1 region and second
35800Sstevel@tonic-gate 	 * loop changes p_szc of pages of level 1 region that covers pp
35810Sstevel@tonic-gate 	 * and all pages to the left of level 1 region that covers pp.
35820Sstevel@tonic-gate 	 * In the first loop p_szc keeps dropping with every iteration
35830Sstevel@tonic-gate 	 * and in the second loop it keeps increasing with every iteration.
35840Sstevel@tonic-gate 	 *
35850Sstevel@tonic-gate 	 * First loop description: Demote pages to the right of pp outside of
35860Sstevel@tonic-gate 	 * level 1 region that covers pp.  In every iteration of the while
35870Sstevel@tonic-gate 	 * loop below find the last page of szc region and the first page of
35880Sstevel@tonic-gate 	 * (szc - 1) region that is immediately to the right of (szc - 1)
35890Sstevel@tonic-gate 	 * region that covers pp.  From last such page to first such page
35900Sstevel@tonic-gate 	 * change every page's szc to szc - 1. Decrement szc and continue
35910Sstevel@tonic-gate 	 * looping until szc is 1. If pp belongs to the last (szc - 1) region
35920Sstevel@tonic-gate 	 * of the szc region, skip to the next iteration.
35930Sstevel@tonic-gate 	 */
35940Sstevel@tonic-gate 	szc = pszc;
35950Sstevel@tonic-gate 	while (szc > 1) {
35960Sstevel@tonic-gate 		lastpp = PP_GROUPLEADER(pp, szc);
35970Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(szc);
35980Sstevel@tonic-gate 		lastpp += pgcnt - 1;
35990Sstevel@tonic-gate 		firstpp = PP_GROUPLEADER(pp, (szc - 1));
36000Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(szc - 1);
36010Sstevel@tonic-gate 		if (lastpp - firstpp < pgcnt) {
36020Sstevel@tonic-gate 			szc--;
36030Sstevel@tonic-gate 			continue;
36040Sstevel@tonic-gate 		}
36050Sstevel@tonic-gate 		firstpp += pgcnt;
36060Sstevel@tonic-gate 		while (lastpp != firstpp) {
36070Sstevel@tonic-gate 			ASSERT(lastpp->p_szc == pszc);
36080Sstevel@tonic-gate 			lastpp->p_szc = szc - 1;
36090Sstevel@tonic-gate 			lastpp--;
36100Sstevel@tonic-gate 		}
36110Sstevel@tonic-gate 		firstpp->p_szc = szc - 1;
36120Sstevel@tonic-gate 		szc--;
36130Sstevel@tonic-gate 	}
36140Sstevel@tonic-gate 
36150Sstevel@tonic-gate 	/*
36160Sstevel@tonic-gate 	 * Second loop description:
36170Sstevel@tonic-gate 	 * First iteration changes p_szc to 0 of every
36180Sstevel@tonic-gate 	 * page of level 1 region that covers pp.
36190Sstevel@tonic-gate 	 * Subsequent iterations find last page of szc region
36200Sstevel@tonic-gate 	 * immediately to the left of szc region that covered pp
36210Sstevel@tonic-gate 	 * and first page of (szc + 1) region that covers pp.
36220Sstevel@tonic-gate 	 * From last to first page change p_szc of every page to szc.
36230Sstevel@tonic-gate 	 * Increment szc and continue looping until szc is pszc.
36240Sstevel@tonic-gate 	 * If pp belongs to the first szc region of the (szc + 1) region,
36250Sstevel@tonic-gate 	 * skip to the next iteration.
36260Sstevel@tonic-gate 	 *
36270Sstevel@tonic-gate 	 */
36280Sstevel@tonic-gate 	szc = 0;
36290Sstevel@tonic-gate 	while (szc < pszc) {
36300Sstevel@tonic-gate 		firstpp = PP_GROUPLEADER(pp, (szc + 1));
36310Sstevel@tonic-gate 		if (szc == 0) {
36320Sstevel@tonic-gate 			pgcnt = page_get_pagecnt(1);
36330Sstevel@tonic-gate 			lastpp = firstpp + (pgcnt - 1);
36340Sstevel@tonic-gate 		} else {
36350Sstevel@tonic-gate 			lastpp = PP_GROUPLEADER(pp, szc);
36360Sstevel@tonic-gate 			if (firstpp == lastpp) {
36370Sstevel@tonic-gate 				szc++;
36380Sstevel@tonic-gate 				continue;
36390Sstevel@tonic-gate 			}
36400Sstevel@tonic-gate 			lastpp--;
36410Sstevel@tonic-gate 			pgcnt = page_get_pagecnt(szc);
36420Sstevel@tonic-gate 		}
36430Sstevel@tonic-gate 		while (lastpp != firstpp) {
36440Sstevel@tonic-gate 			ASSERT(lastpp->p_szc == pszc);
36450Sstevel@tonic-gate 			lastpp->p_szc = szc;
36460Sstevel@tonic-gate 			lastpp--;
36470Sstevel@tonic-gate 		}
36480Sstevel@tonic-gate 		firstpp->p_szc = szc;
36490Sstevel@tonic-gate 		if (firstpp == rootpp)
36500Sstevel@tonic-gate 			break;
36510Sstevel@tonic-gate 		szc++;
36520Sstevel@tonic-gate 	}
36530Sstevel@tonic-gate 	x86_hm_exit(rootpp);
36540Sstevel@tonic-gate }
36550Sstevel@tonic-gate 
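/*
 * Worked example of the two loops above, using amd64 numbers where
 * page_get_pagecnt(1) == 512: demoting a pszc == 1 page means the
 * first loop does nothing (szc > 1 is immediately false), and the
 * first iteration of the second loop walks from the last of the 512
 * constituent pages down to the root setting p_szc = 0, so the root's
 * p_szc is the last to drop, as the algorithm requires.
 */
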
36560Sstevel@tonic-gate /*
36570Sstevel@tonic-gate  * get hw stats from hardware into page struct and reset hw stats
36580Sstevel@tonic-gate  * returns attributes of page
36590Sstevel@tonic-gate  * Flags for hat_pagesync, hat_getstat, hat_sync
36600Sstevel@tonic-gate  *
36610Sstevel@tonic-gate  * define	HAT_SYNC_ZERORM		0x01
36620Sstevel@tonic-gate  *
36630Sstevel@tonic-gate  * Additional flags for hat_pagesync
36640Sstevel@tonic-gate  *
36650Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_REF	0x02
36660Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_MOD	0x04
36670Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_RM	0x06
36680Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_SHARED	0x08
36690Sstevel@tonic-gate  */
36700Sstevel@tonic-gate uint_t
36710Sstevel@tonic-gate hat_pagesync(struct page *pp, uint_t flags)
36720Sstevel@tonic-gate {
36730Sstevel@tonic-gate 	hment_t		*hm = NULL;
36740Sstevel@tonic-gate 	htable_t	*ht;
36750Sstevel@tonic-gate 	uint_t		entry;
36760Sstevel@tonic-gate 	x86pte_t	old, save_old;
36770Sstevel@tonic-gate 	x86pte_t	new;
36780Sstevel@tonic-gate 	uchar_t		nrmbits = P_REF|P_MOD|P_RO;
36790Sstevel@tonic-gate 	extern ulong_t	po_share;
36800Sstevel@tonic-gate 	page_t		*save_pp = pp;
36810Sstevel@tonic-gate 	uint_t		pszc = 0;
36820Sstevel@tonic-gate 
36830Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(pp) || panicstr);
36840Sstevel@tonic-gate 
36850Sstevel@tonic-gate 	if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
36860Sstevel@tonic-gate 		return (pp->p_nrm & nrmbits);
36870Sstevel@tonic-gate 
36880Sstevel@tonic-gate 	if ((flags & HAT_SYNC_ZERORM) == 0) {
36890Sstevel@tonic-gate 
36900Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
36910Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
36920Sstevel@tonic-gate 
36930Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
36940Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
36950Sstevel@tonic-gate 
36960Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
36970Sstevel@tonic-gate 		    hat_page_getshare(pp) > po_share) {
36980Sstevel@tonic-gate 			if (PP_ISRO(pp))
36990Sstevel@tonic-gate 				PP_SETREF(pp);
37000Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
37010Sstevel@tonic-gate 		}
37020Sstevel@tonic-gate 	}
37030Sstevel@tonic-gate 
37045084Sjohnlev 	XPV_DISALLOW_MIGRATE();
37050Sstevel@tonic-gate next_size:
37060Sstevel@tonic-gate 	/*
37070Sstevel@tonic-gate 	 * walk thru the mapping list syncing (and clearing) ref/mod bits.
37080Sstevel@tonic-gate 	 */
37090Sstevel@tonic-gate 	x86_hm_enter(pp);
37100Sstevel@tonic-gate 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
37110Sstevel@tonic-gate 		if (ht->ht_level < pszc)
37120Sstevel@tonic-gate 			continue;
37130Sstevel@tonic-gate 		old = x86pte_get(ht, entry);
37140Sstevel@tonic-gate try_again:
37150Sstevel@tonic-gate 
37160Sstevel@tonic-gate 		ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
37170Sstevel@tonic-gate 
37180Sstevel@tonic-gate 		if (PTE_GET(old, PT_REF | PT_MOD) == 0)
37190Sstevel@tonic-gate 			continue;
37200Sstevel@tonic-gate 
37210Sstevel@tonic-gate 		save_old = old;
37220Sstevel@tonic-gate 		if ((flags & HAT_SYNC_ZERORM) != 0) {
37230Sstevel@tonic-gate 
37240Sstevel@tonic-gate 			/*
37250Sstevel@tonic-gate 			 * Need to clear ref or mod bits. Need to demap
37260Sstevel@tonic-gate 			 * to make sure any executing TLBs see cleared bits.
37270Sstevel@tonic-gate 			 */
37280Sstevel@tonic-gate 			new = old;
37290Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD);
37300Sstevel@tonic-gate 			old = hati_update_pte(ht, entry, old, new);
37310Sstevel@tonic-gate 			if (old != 0)
37320Sstevel@tonic-gate 				goto try_again;
37330Sstevel@tonic-gate 
37340Sstevel@tonic-gate 			old = save_old;
37350Sstevel@tonic-gate 		}
37360Sstevel@tonic-gate 
37370Sstevel@tonic-gate 		/*
37380Sstevel@tonic-gate 		 * Sync the PTE
37390Sstevel@tonic-gate 		 */
37403446Smrj 		if (!(flags & HAT_SYNC_ZERORM) &&
37413446Smrj 		    PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
37420Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, old, ht->ht_level);
37430Sstevel@tonic-gate 
37440Sstevel@tonic-gate 		/*
37450Sstevel@tonic-gate 		 * can stop short if we found a ref'd or mod'd page
37460Sstevel@tonic-gate 		 */
37470Sstevel@tonic-gate 		if (((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
37480Sstevel@tonic-gate 		    ((flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp))) {
37490Sstevel@tonic-gate 			x86_hm_exit(pp);
37503446Smrj 			goto done;
37510Sstevel@tonic-gate 		}
37520Sstevel@tonic-gate 	}
37530Sstevel@tonic-gate 	x86_hm_exit(pp);
37540Sstevel@tonic-gate 	while (pszc < pp->p_szc) {
37550Sstevel@tonic-gate 		page_t *tpp;
37560Sstevel@tonic-gate 		pszc++;
37570Sstevel@tonic-gate 		tpp = PP_GROUPLEADER(pp, pszc);
37580Sstevel@tonic-gate 		if (pp != tpp) {
37590Sstevel@tonic-gate 			pp = tpp;
37600Sstevel@tonic-gate 			goto next_size;
37610Sstevel@tonic-gate 		}
37620Sstevel@tonic-gate 	}
37633446Smrj done:
37645084Sjohnlev 	XPV_ALLOW_MIGRATE();
37650Sstevel@tonic-gate 	return (save_pp->p_nrm & nrmbits);
37660Sstevel@tonic-gate }
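
/*
 * Illustrative sketch only (not part of this file): a hypothetical
 * caller asking "is this locked page dirty?", letting hat_pagesync()
 * stop the mapping-list walk as soon as a modified mapping is seen.
 * The name example_page_is_dirty() is made up for illustration.
 */
#if 0
static int
example_page_is_dirty(page_t *pp)
{
	ASSERT(PAGE_LOCKED(pp));
	return ((hat_pagesync(pp, HAT_SYNC_STOPON_MOD) & P_MOD) != 0);
}
#endif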
37670Sstevel@tonic-gate 
37680Sstevel@tonic-gate /*
37690Sstevel@tonic-gate  * Returns the approximate number of mappings to this pp.  A return of 0
37700Sstevel@tonic-gate  * implies there are no mappings to the page.
37710Sstevel@tonic-gate  */
37720Sstevel@tonic-gate ulong_t
37730Sstevel@tonic-gate hat_page_getshare(page_t *pp)
37740Sstevel@tonic-gate {
37750Sstevel@tonic-gate 	uint_t cnt;
37760Sstevel@tonic-gate 	cnt = hment_mapcnt(pp);
37771841Spraks #if defined(__amd64)
37781841Spraks 	if (vpm_enable && pp->p_vpmref) {
37791841Spraks 		cnt += 1;
37801841Spraks 	}
37811841Spraks #endif
37820Sstevel@tonic-gate 	return (cnt);
37830Sstevel@tonic-gate }
37840Sstevel@tonic-gate 
37850Sstevel@tonic-gate /*
37864528Spaulsan  * Return 1 the number of mappings exceeds sh_thresh. Return 0
37874528Spaulsan  * otherwise.
37884528Spaulsan  */
37894528Spaulsan int
37904528Spaulsan hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
37914528Spaulsan {
37924528Spaulsan 	return (hat_page_getshare(pp) > sh_thresh);
37934528Spaulsan }
37944528Spaulsan 
37954528Spaulsan /*
37960Sstevel@tonic-gate  * hat_softlock isn't supported anymore
37970Sstevel@tonic-gate  */
37980Sstevel@tonic-gate /*ARGSUSED*/
37990Sstevel@tonic-gate faultcode_t
38000Sstevel@tonic-gate hat_softlock(
38010Sstevel@tonic-gate 	hat_t *hat,
38020Sstevel@tonic-gate 	caddr_t addr,
38030Sstevel@tonic-gate 	size_t *len,
38040Sstevel@tonic-gate 	struct page **page_array,
38050Sstevel@tonic-gate 	uint_t flags)
38060Sstevel@tonic-gate {
38070Sstevel@tonic-gate 	return (FC_NOSUPPORT);
38080Sstevel@tonic-gate }
38090Sstevel@tonic-gate 
38100Sstevel@tonic-gate 
38110Sstevel@tonic-gate 
38120Sstevel@tonic-gate /*
38130Sstevel@tonic-gate  * Routine to expose supported HAT features to platform independent code.
38140Sstevel@tonic-gate  */
38150Sstevel@tonic-gate /*ARGSUSED*/
38160Sstevel@tonic-gate int
38170Sstevel@tonic-gate hat_supported(enum hat_features feature, void *arg)
38180Sstevel@tonic-gate {
38190Sstevel@tonic-gate 	switch (feature) {
38200Sstevel@tonic-gate 
38210Sstevel@tonic-gate 	case HAT_SHARED_PT:	/* this is really ISM */
38220Sstevel@tonic-gate 		return (1);
38230Sstevel@tonic-gate 
38240Sstevel@tonic-gate 	case HAT_DYNAMIC_ISM_UNMAP:
38250Sstevel@tonic-gate 		return (0);
38260Sstevel@tonic-gate 
38270Sstevel@tonic-gate 	case HAT_VMODSORT:
38280Sstevel@tonic-gate 		return (1);
38290Sstevel@tonic-gate 
38304528Spaulsan 	case HAT_SHARED_REGIONS:
38314528Spaulsan 		return (0);
38324528Spaulsan 
38330Sstevel@tonic-gate 	default:
38340Sstevel@tonic-gate 		panic("hat_supported() - unknown feature");
38350Sstevel@tonic-gate 	}
38360Sstevel@tonic-gate 	return (0);
38370Sstevel@tonic-gate }
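
/*
 * Illustrative sketch only: platform independent code guards optional
 * behavior on these queries.  For instance, a hypothetical caller
 * might only take the ISM path when shared pagetables are available:
 */
#if 0
	if (hat_supported(HAT_SHARED_PT, NULL)) {
		/* safe to share pagetables for ISM segments */
	}
#endif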
38380Sstevel@tonic-gate 
38390Sstevel@tonic-gate /*
38400Sstevel@tonic-gate  * Called when a thread is exiting and has been switched to the kernel AS
38410Sstevel@tonic-gate  */
38420Sstevel@tonic-gate void
38430Sstevel@tonic-gate hat_thread_exit(kthread_t *thd)
38440Sstevel@tonic-gate {
38450Sstevel@tonic-gate 	ASSERT(thd->t_procp->p_as == &kas);
38465084Sjohnlev 	XPV_DISALLOW_MIGRATE();
38470Sstevel@tonic-gate 	hat_switch(thd->t_procp->p_as->a_hat);
38485084Sjohnlev 	XPV_ALLOW_MIGRATE();
38490Sstevel@tonic-gate }
38500Sstevel@tonic-gate 
38510Sstevel@tonic-gate /*
38520Sstevel@tonic-gate  * Set up the given brand-new hat structure as the new HAT on this cpu's mmu.
38530Sstevel@tonic-gate  */
38540Sstevel@tonic-gate /*ARGSUSED*/
38550Sstevel@tonic-gate void
38560Sstevel@tonic-gate hat_setup(hat_t *hat, int flags)
38570Sstevel@tonic-gate {
38585084Sjohnlev 	XPV_DISALLOW_MIGRATE();
38590Sstevel@tonic-gate 	kpreempt_disable();
38600Sstevel@tonic-gate 
38610Sstevel@tonic-gate 	hat_switch(hat);
38620Sstevel@tonic-gate 
38630Sstevel@tonic-gate 	kpreempt_enable();
38645084Sjohnlev 	XPV_ALLOW_MIGRATE();
38650Sstevel@tonic-gate }
38660Sstevel@tonic-gate 
38670Sstevel@tonic-gate /*
38680Sstevel@tonic-gate  * Prepare for a CPU private mapping for the given address.
38690Sstevel@tonic-gate  *
38700Sstevel@tonic-gate  * The address can only be used from a single CPU and can be remapped
38710Sstevel@tonic-gate  * using hat_mempte_remap().  Return the physical address of the PTE.
38720Sstevel@tonic-gate  *
38730Sstevel@tonic-gate  * We do the htable_create() if necessary and increment the valid count so
38740Sstevel@tonic-gate  * the htable can't disappear.
38760Sstevel@tonic-gate  */
38773446Smrj hat_mempte_t
38783446Smrj hat_mempte_setup(caddr_t addr)
38790Sstevel@tonic-gate {
38800Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
38810Sstevel@tonic-gate 	htable_t	*ht;
38820Sstevel@tonic-gate 	uint_t		entry;
38830Sstevel@tonic-gate 	x86pte_t	oldpte;
38843446Smrj 	hat_mempte_t	p;
38850Sstevel@tonic-gate 
38860Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
38870Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
38884004Sjosephb 	++curthread->t_hatdepth;
38895741Smrj 	XPV_DISALLOW_MIGRATE();
38900Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
38910Sstevel@tonic-gate 	if (ht == NULL) {
38920Sstevel@tonic-gate 		ht = htable_create(kas.a_hat, va, 0, NULL);
38930Sstevel@tonic-gate 		entry = htable_va2entry(va, ht);
38940Sstevel@tonic-gate 		ASSERT(ht->ht_level == 0);
38950Sstevel@tonic-gate 		oldpte = x86pte_get(ht, entry);
38960Sstevel@tonic-gate 	}
38970Sstevel@tonic-gate 	if (PTE_ISVALID(oldpte))
38980Sstevel@tonic-gate 		panic("hat_mempte_setup(): address already mapped "
38997240Srh87107 		    "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
39000Sstevel@tonic-gate 
39010Sstevel@tonic-gate 	/*
39020Sstevel@tonic-gate 	 * increment ht_valid_cnt so that the pagetable can't disappear
39030Sstevel@tonic-gate 	 */
39040Sstevel@tonic-gate 	HTABLE_INC(ht->ht_valid_cnt);
39050Sstevel@tonic-gate 
39060Sstevel@tonic-gate 	/*
39073446Smrj 	 * return the PTE physical address to the caller.
39080Sstevel@tonic-gate 	 */
39090Sstevel@tonic-gate 	htable_release(ht);
39105741Smrj 	XPV_ALLOW_MIGRATE();
39113446Smrj 	p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
39124004Sjosephb 	--curthread->t_hatdepth;
39133446Smrj 	return (p);
39140Sstevel@tonic-gate }
39150Sstevel@tonic-gate 
39160Sstevel@tonic-gate /*
39170Sstevel@tonic-gate  * Release a CPU private mapping for the given address.
39180Sstevel@tonic-gate  * We decrement the htable valid count so it might be destroyed.
39190Sstevel@tonic-gate  */
39203446Smrj /*ARGSUSED1*/
39210Sstevel@tonic-gate void
39223446Smrj hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
39230Sstevel@tonic-gate {
39240Sstevel@tonic-gate 	htable_t	*ht;
39250Sstevel@tonic-gate 
39265741Smrj 	XPV_DISALLOW_MIGRATE();
39270Sstevel@tonic-gate 	/*
39283446Smrj 	 * invalidate any leftover mapping and decrement the htable valid count
39290Sstevel@tonic-gate 	 */
39305084Sjohnlev #ifdef __xpv
39315084Sjohnlev 	if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
39325084Sjohnlev 	    UVMF_INVLPG | UVMF_LOCAL))
39335084Sjohnlev 		panic("HYPERVISOR_update_va_mapping() failed");
39345084Sjohnlev #else
39353446Smrj 	{
39363446Smrj 		x86pte_t *pteptr;
39373446Smrj 
39383446Smrj 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
39393446Smrj 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
39403446Smrj 		if (mmu.pae_hat)
39413446Smrj 			*pteptr = 0;
39423446Smrj 		else
39433446Smrj 			*(x86pte32_t *)pteptr = 0;
39443446Smrj 		mmu_tlbflush_entry(addr);
39453446Smrj 		x86pte_mapout();
39463446Smrj 	}
39475084Sjohnlev #endif
39483446Smrj 
39490Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
39500Sstevel@tonic-gate 	if (ht == NULL)
39510Sstevel@tonic-gate 		panic("hat_mempte_release(): invalid address");
39520Sstevel@tonic-gate 	ASSERT(ht->ht_level == 0);
39530Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
39540Sstevel@tonic-gate 	htable_release(ht);
39555741Smrj 	XPV_ALLOW_MIGRATE();
39560Sstevel@tonic-gate }
39570Sstevel@tonic-gate 
39580Sstevel@tonic-gate /*
39590Sstevel@tonic-gate  * Apply a temporary CPU private mapping to a page. We flush the TLB only
39600Sstevel@tonic-gate  * on this CPU, so this ought to have been called with preemption disabled.
39610Sstevel@tonic-gate  */
39620Sstevel@tonic-gate void
39630Sstevel@tonic-gate hat_mempte_remap(
39643446Smrj 	pfn_t		pfn,
39653446Smrj 	caddr_t		addr,
39663446Smrj 	hat_mempte_t	pte_pa,
39673446Smrj 	uint_t		attr,
39683446Smrj 	uint_t		flags)
39690Sstevel@tonic-gate {
39700Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
39710Sstevel@tonic-gate 	x86pte_t	pte;
39720Sstevel@tonic-gate 
39730Sstevel@tonic-gate 	/*
39740Sstevel@tonic-gate 	 * Remap the given PTE to the new page's PFN. Invalidate only
39750Sstevel@tonic-gate 	 * on this CPU.
39760Sstevel@tonic-gate 	 */
39770Sstevel@tonic-gate #ifdef DEBUG
39780Sstevel@tonic-gate 	htable_t	*ht;
39790Sstevel@tonic-gate 	uint_t		entry;
39800Sstevel@tonic-gate 
39810Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
39820Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
39830Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
39840Sstevel@tonic-gate 	ASSERT(ht != NULL);
39850Sstevel@tonic-gate 	ASSERT(ht->ht_level == 0);
39860Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
39873446Smrj 	ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
39880Sstevel@tonic-gate 	htable_release(ht);
39890Sstevel@tonic-gate #endif
39905084Sjohnlev 	XPV_DISALLOW_MIGRATE();
39910Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, 0, flags);
39925084Sjohnlev #ifdef __xpv
39935084Sjohnlev 	if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
39945084Sjohnlev 		panic("HYPERVISOR_update_va_mapping() failed");
39955084Sjohnlev #else
39963446Smrj 	{
39973446Smrj 		x86pte_t *pteptr;
39983446Smrj 
39993446Smrj 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
40003446Smrj 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
40013446Smrj 		if (mmu.pae_hat)
40023446Smrj 			*(x86pte_t *)pteptr = pte;
40033446Smrj 		else
40043446Smrj 			*(x86pte32_t *)pteptr = (x86pte32_t)pte;
40053446Smrj 		mmu_tlbflush_entry(addr);
40063446Smrj 		x86pte_mapout();
40073446Smrj 	}
40085084Sjohnlev #endif
40095084Sjohnlev 	XPV_ALLOW_MIGRATE();
40100Sstevel@tonic-gate }
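
/*
 * Illustrative sketch only: the expected lifecycle of a CPU private
 * mapping using the three routines above.  "example_va", "pfn1" and
 * "pfn2" are made-up names; the attribute and flag values follow the
 * usual kernel conventions for private page access.
 */
#if 0
	hat_mempte_t pte_pa;

	pte_pa = hat_mempte_setup(example_va);	/* reserve the PTE */
	kpreempt_disable();		/* remap only flushes this CPU */
	hat_mempte_remap(pfn1, example_va, pte_pa,
	    PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
	/* ... access the first page through example_va ... */
	hat_mempte_remap(pfn2, example_va, pte_pa,
	    PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
	/* ... access the second page ... */
	kpreempt_enable();
	hat_mempte_release(example_va, pte_pa);
#endif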
40110Sstevel@tonic-gate 
40120Sstevel@tonic-gate 
40130Sstevel@tonic-gate 
40140Sstevel@tonic-gate /*
40150Sstevel@tonic-gate  * Hat locking functions
40160Sstevel@tonic-gate  * XXX - these two functions are currently being used by hatstats;
40170Sstevel@tonic-gate  * 	they could be removed by using a per-as mutex for hatstats.
40180Sstevel@tonic-gate  */
40190Sstevel@tonic-gate void
40200Sstevel@tonic-gate hat_enter(hat_t *hat)
40210Sstevel@tonic-gate {
40220Sstevel@tonic-gate 	mutex_enter(&hat->hat_mutex);
40230Sstevel@tonic-gate }
40240Sstevel@tonic-gate 
40250Sstevel@tonic-gate void
40260Sstevel@tonic-gate hat_exit(hat_t *hat)
40270Sstevel@tonic-gate {
40280Sstevel@tonic-gate 	mutex_exit(&hat->hat_mutex);
40290Sstevel@tonic-gate }
40300Sstevel@tonic-gate 
40310Sstevel@tonic-gate /*
40323446Smrj  * HAT part of cpu initialization.
40330Sstevel@tonic-gate  */
40340Sstevel@tonic-gate void
40350Sstevel@tonic-gate hat_cpu_online(struct cpu *cpup)
40360Sstevel@tonic-gate {
40370Sstevel@tonic-gate 	if (cpup != CPU) {
40383446Smrj 		x86pte_cpu_init(cpup);
40390Sstevel@tonic-gate 		hat_vlp_setup(cpup);
40400Sstevel@tonic-gate 	}
40410Sstevel@tonic-gate 	CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
40420Sstevel@tonic-gate }
40430Sstevel@tonic-gate 
40440Sstevel@tonic-gate /*
40453446Smrj  * HAT part of cpu deletion.
40463446Smrj  * (currently, we only call this after the cpu is safely passivated.)
40473446Smrj  */
40483446Smrj void
40493446Smrj hat_cpu_offline(struct cpu *cpup)
40503446Smrj {
40513446Smrj 	ASSERT(cpup != CPU);
40523446Smrj 
40533446Smrj 	CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
405412004Sjiang.liu@intel.com 	hat_vlp_teardown(cpup);
40553446Smrj 	x86pte_cpu_fini(cpup);
40563446Smrj }
40573446Smrj 
40583446Smrj /*
40590Sstevel@tonic-gate  * Function called after all CPUs are brought online.
40600Sstevel@tonic-gate  * Used to remove low address boot mappings.
40610Sstevel@tonic-gate  */
40620Sstevel@tonic-gate void
40630Sstevel@tonic-gate clear_boot_mappings(uintptr_t low, uintptr_t high)
40640Sstevel@tonic-gate {
40650Sstevel@tonic-gate 	uintptr_t vaddr = low;
40660Sstevel@tonic-gate 	htable_t *ht = NULL;
40670Sstevel@tonic-gate 	level_t level;
40680Sstevel@tonic-gate 	uint_t entry;
40690Sstevel@tonic-gate 	x86pte_t pte;
40700Sstevel@tonic-gate 
40710Sstevel@tonic-gate 	/*
40720Sstevel@tonic-gate 	 * On the 1st CPU we can unload the prom mappings; basically we blow
40733446Smrj 	 * away all virtual mappings under _userlimit.
40740Sstevel@tonic-gate 	 */
40750Sstevel@tonic-gate 	while (vaddr < high) {
40760Sstevel@tonic-gate 		pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
40770Sstevel@tonic-gate 		if (ht == NULL)
40780Sstevel@tonic-gate 			break;
40790Sstevel@tonic-gate 
40800Sstevel@tonic-gate 		level = ht->ht_level;
40810Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
40820Sstevel@tonic-gate 		ASSERT(level <= mmu.max_page_level);
40830Sstevel@tonic-gate 		ASSERT(PTE_ISPAGE(pte, level));
40840Sstevel@tonic-gate 
40850Sstevel@tonic-gate 		/*
40860Sstevel@tonic-gate 		 * Unload the mapping from the page tables.
40870Sstevel@tonic-gate 		 */
40883446Smrj 		(void) x86pte_inval(ht, entry, 0, NULL);
40890Sstevel@tonic-gate 		ASSERT(ht->ht_valid_cnt > 0);
40900Sstevel@tonic-gate 		HTABLE_DEC(ht->ht_valid_cnt);
40910Sstevel@tonic-gate 		PGCNT_DEC(ht->ht_hat, ht->ht_level);
40920Sstevel@tonic-gate 
40930Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
40940Sstevel@tonic-gate 	}
40950Sstevel@tonic-gate 	if (ht)
40960Sstevel@tonic-gate 		htable_release(ht);
40970Sstevel@tonic-gate }
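
/*
 * Illustrative sketch only: the expected use is a single call once the
 * last CPU is online, removing everything below the user limit:
 */
#if 0
	clear_boot_mappings(0, _userlimit);
#endif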
40980Sstevel@tonic-gate 
40990Sstevel@tonic-gate /*
41000Sstevel@tonic-gate  * Atomically install a new translation for a single page.  If the
41010Sstevel@tonic-gate  * currently installed PTE doesn't match the value we expect to find,
41020Sstevel@tonic-gate  * it's not updated and we return the PTE we found.
41030Sstevel@tonic-gate  *
41040Sstevel@tonic-gate  * If activating nosync or NOWRITE and the page was modified we need to sync
41050Sstevel@tonic-gate  * with the page_t. Also sync with page_t if clearing ref/mod bits.
41060Sstevel@tonic-gate  */
41070Sstevel@tonic-gate static x86pte_t
41080Sstevel@tonic-gate hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
41090Sstevel@tonic-gate {
41100Sstevel@tonic-gate 	page_t		*pp;
41110Sstevel@tonic-gate 	uint_t		rm = 0;
41120Sstevel@tonic-gate 	x86pte_t	replaced;
41130Sstevel@tonic-gate 
41143446Smrj 	if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
41150Sstevel@tonic-gate 	    PTE_GET(expected, PT_MOD | PT_REF) &&
41160Sstevel@tonic-gate 	    (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
41174381Sjosephb 	    !PTE_GET(new, PT_MOD | PT_REF))) {
41180Sstevel@tonic-gate 
41193446Smrj 		ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
41200Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
41210Sstevel@tonic-gate 		ASSERT(pp != NULL);
41220Sstevel@tonic-gate 		if (PTE_GET(expected, PT_MOD))
41230Sstevel@tonic-gate 			rm |= P_MOD;
41240Sstevel@tonic-gate 		if (PTE_GET(expected, PT_REF))
41250Sstevel@tonic-gate 			rm |= P_REF;
41260Sstevel@tonic-gate 		PTE_CLR(new, PT_MOD | PT_REF);
41270Sstevel@tonic-gate 	}
41280Sstevel@tonic-gate 
41290Sstevel@tonic-gate 	replaced = x86pte_update(ht, entry, expected, new);
41300Sstevel@tonic-gate 	if (replaced != expected)
41310Sstevel@tonic-gate 		return (replaced);
41320Sstevel@tonic-gate 
41330Sstevel@tonic-gate 	if (rm) {
41340Sstevel@tonic-gate 		/*
41350Sstevel@tonic-gate 		 * sync to all constituent pages of a large page
41360Sstevel@tonic-gate 		 */
41370Sstevel@tonic-gate 		pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
41380Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
41390Sstevel@tonic-gate 		while (pgcnt-- > 0) {
41400Sstevel@tonic-gate 			/*
41410Sstevel@tonic-gate 			 * hat_page_demote() can't decrease
41420Sstevel@tonic-gate 			 * pszc below this mapping size
41430Sstevel@tonic-gate 			 * since large mapping existed after we
41440Sstevel@tonic-gate 			 * took mlist lock.
41450Sstevel@tonic-gate 			 */
41460Sstevel@tonic-gate 			ASSERT(pp->p_szc >= ht->ht_level);
41470Sstevel@tonic-gate 			hat_page_setattr(pp, rm);
41480Sstevel@tonic-gate 			++pp;
41490Sstevel@tonic-gate 		}
41500Sstevel@tonic-gate 	}
41510Sstevel@tonic-gate 
41520Sstevel@tonic-gate 	return (0);
41530Sstevel@tonic-gate }
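
/*
 * Illustrative sketch only: callers such as hat_pagesync() above drive
 * a compare-and-swap retry loop off the return value; on failure the
 * PTE actually found becomes the next expected value.  "ht" and
 * "entry" stand for a pagetable/entry pair already in hand.
 */
#if 0
	x86pte_t old, new;

	old = x86pte_get(ht, entry);
	for (;;) {
		new = old;
		PTE_CLR(new, PT_REF | PT_MOD);
		old = hati_update_pte(ht, entry, old, new);
		if (old == 0)
			break;	/* 0 means the expected PTE matched */
		/* lost a race; retry against the PTE we found */
	}
#endif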
41540Sstevel@tonic-gate 
41554528Spaulsan /* ARGSUSED */
41564528Spaulsan void
41575075Spaulsan hat_join_srd(struct hat *hat, vnode_t *evp)
41584528Spaulsan {
41594528Spaulsan }
41604528Spaulsan 
41614528Spaulsan /* ARGSUSED */
41624528Spaulsan hat_region_cookie_t
41635075Spaulsan hat_join_region(struct hat *hat,
41644528Spaulsan     caddr_t r_saddr,
41654528Spaulsan     size_t r_size,
41664528Spaulsan     void *r_obj,
41674528Spaulsan     u_offset_t r_objoff,
41684528Spaulsan     uchar_t r_perm,
41694528Spaulsan     uchar_t r_pgszc,
41704528Spaulsan     hat_rgn_cb_func_t r_cb_function,
41714528Spaulsan     uint_t flags)
41724528Spaulsan {
41734528Spaulsan 	panic("No shared region support on x86");
41744528Spaulsan 	return (HAT_INVALID_REGION_COOKIE);
41754528Spaulsan }
41764528Spaulsan 
41774528Spaulsan /* ARGSUSED */
41784528Spaulsan void
41795075Spaulsan hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
41804528Spaulsan {
41814528Spaulsan 	panic("No shared region support on x86");
41824528Spaulsan }
41834528Spaulsan 
41844528Spaulsan /* ARGSUSED */
41854528Spaulsan void
41865075Spaulsan hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
41874528Spaulsan {
41884528Spaulsan 	panic("No shared region support on x86");
41894528Spaulsan }
41904528Spaulsan 
41914528Spaulsan 
41920Sstevel@tonic-gate /*
41930Sstevel@tonic-gate  * Kernel Physical Mapping (kpm) facility
41940Sstevel@tonic-gate  *
41950Sstevel@tonic-gate  * Most of the routines needed to support segkpm are almost no-ops on the
41960Sstevel@tonic-gate  * x86 platform.  We map in the entire segment when it is created and leave
41970Sstevel@tonic-gate  * it mapped in, so there is no additional work required to set up and tear
41980Sstevel@tonic-gate  * down individual mappings.  All of these routines were created to support
41990Sstevel@tonic-gate  * SPARC platforms that have to avoid aliasing in their virtually indexed
42000Sstevel@tonic-gate  * caches.
42010Sstevel@tonic-gate  *
42020Sstevel@tonic-gate  * Most of the routines have sanity checks in them (e.g. verifying that the
42030Sstevel@tonic-gate  * passed-in page is locked).  We don't actually care about most of these
42040Sstevel@tonic-gate  * checks on x86, but we leave them in place to identify problems in the
42050Sstevel@tonic-gate  * upper levels.
42060Sstevel@tonic-gate  */
42070Sstevel@tonic-gate 
42080Sstevel@tonic-gate /*
42090Sstevel@tonic-gate  * Map in a locked page and return the vaddr.
42100Sstevel@tonic-gate  */
42110Sstevel@tonic-gate /*ARGSUSED*/
42120Sstevel@tonic-gate caddr_t
42130Sstevel@tonic-gate hat_kpm_mapin(struct page *pp, struct kpme *kpme)
42140Sstevel@tonic-gate {
42150Sstevel@tonic-gate 	caddr_t		vaddr;
42160Sstevel@tonic-gate 
42170Sstevel@tonic-gate #ifdef DEBUG
42180Sstevel@tonic-gate 	if (kpm_enable == 0) {
42190Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
42200Sstevel@tonic-gate 		return ((caddr_t)NULL);
42210Sstevel@tonic-gate 	}
42220Sstevel@tonic-gate 
42230Sstevel@tonic-gate 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
42240Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
42250Sstevel@tonic-gate 		return ((caddr_t)NULL);
42260Sstevel@tonic-gate 	}
42270Sstevel@tonic-gate #endif
42280Sstevel@tonic-gate 
42290Sstevel@tonic-gate 	vaddr = hat_kpm_page2va(pp, 1);
42300Sstevel@tonic-gate 
42310Sstevel@tonic-gate 	return (vaddr);
42320Sstevel@tonic-gate }
42330Sstevel@tonic-gate 
42340Sstevel@tonic-gate /*
42350Sstevel@tonic-gate  * Mapout a locked page.
42360Sstevel@tonic-gate  */
42370Sstevel@tonic-gate /*ARGSUSED*/
42380Sstevel@tonic-gate void
42390Sstevel@tonic-gate hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
42400Sstevel@tonic-gate {
42410Sstevel@tonic-gate #ifdef DEBUG
42420Sstevel@tonic-gate 	if (kpm_enable == 0) {
42430Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
42440Sstevel@tonic-gate 		return;
42450Sstevel@tonic-gate 	}
42460Sstevel@tonic-gate 
42470Sstevel@tonic-gate 	if (IS_KPM_ADDR(vaddr) == 0) {
42480Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
42490Sstevel@tonic-gate 		return;
42500Sstevel@tonic-gate 	}
42510Sstevel@tonic-gate 
42520Sstevel@tonic-gate 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
42530Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
42540Sstevel@tonic-gate 		return;
42550Sstevel@tonic-gate 	}
42560Sstevel@tonic-gate #endif
42570Sstevel@tonic-gate }
42580Sstevel@tonic-gate 
42590Sstevel@tonic-gate /*
42609894SPavel.Tatashin@Sun.COM  * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
42619894SPavel.Tatashin@Sun.COM  * memory addresses that are not described by a page_t.  It can
42629894SPavel.Tatashin@Sun.COM  * also be used for normal pages that are not locked, but beware
42639894SPavel.Tatashin@Sun.COM  * this is dangerous - no locking is performed, so the identity of
42649894SPavel.Tatashin@Sun.COM  * the page could change.  hat_kpm_mapin_pfn is not supported when
42659894SPavel.Tatashin@Sun.COM  * vac_colors > 1, because the chosen va depends on the page identity,
42669894SPavel.Tatashin@Sun.COM  * which could change.
42679894SPavel.Tatashin@Sun.COM  * The caller must only pass pfns for valid physical addresses; violating
42689894SPavel.Tatashin@Sun.COM  * this rule will cause a panic.
42699894SPavel.Tatashin@Sun.COM  */
42709894SPavel.Tatashin@Sun.COM caddr_t
42719894SPavel.Tatashin@Sun.COM hat_kpm_mapin_pfn(pfn_t pfn)
42729894SPavel.Tatashin@Sun.COM {
42739894SPavel.Tatashin@Sun.COM 	uintptr_t vaddr;
42749894SPavel.Tatashin@Sun.COM 
42759894SPavel.Tatashin@Sun.COM 	if (kpm_enable == 0)
42769894SPavel.Tatashin@Sun.COM 		return ((caddr_t)NULL);
42779894SPavel.Tatashin@Sun.COM 
42789894SPavel.Tatashin@Sun.COM 	vaddr = (uintptr_t)kpm_vbase + ptob(pfn);
42809894SPavel.Tatashin@Sun.COM 
42819894SPavel.Tatashin@Sun.COM 	return ((caddr_t)vaddr);
42829894SPavel.Tatashin@Sun.COM }
42839894SPavel.Tatashin@Sun.COM 
42849894SPavel.Tatashin@Sun.COM /*ARGSUSED*/
42859894SPavel.Tatashin@Sun.COM void
42869894SPavel.Tatashin@Sun.COM hat_kpm_mapout_pfn(pfn_t pfn)
42879894SPavel.Tatashin@Sun.COM {
42889894SPavel.Tatashin@Sun.COM 	/* empty */
42899894SPavel.Tatashin@Sun.COM }
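
/*
 * Illustrative sketch only: reading one word of physical memory
 * through the always-mapped kpm segment.  example_peek_pa() is a
 * made-up name and assumes the pfn is valid per the rule above.
 */
#if 0
static uint64_t
example_peek_pa(uint64_t pa)
{
	caddr_t va = hat_kpm_mapin_pfn(btop(pa));
	uint64_t val;

	val = *(uint64_t *)(va + (pa & MMU_PAGEOFFSET));
	hat_kpm_mapout_pfn(btop(pa));
	return (val);
}
#endif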
42909894SPavel.Tatashin@Sun.COM 
42919894SPavel.Tatashin@Sun.COM /*
42920Sstevel@tonic-gate  * Return the kpm virtual address for a specific pfn
42930Sstevel@tonic-gate  */
42940Sstevel@tonic-gate caddr_t
42950Sstevel@tonic-gate hat_kpm_pfn2va(pfn_t pfn)
42960Sstevel@tonic-gate {
42973446Smrj 	uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
42980Sstevel@tonic-gate 
42995262Srscott 	ASSERT(!pfn_is_foreign(pfn));
43000Sstevel@tonic-gate 	return ((caddr_t)vaddr);
43010Sstevel@tonic-gate }
43020Sstevel@tonic-gate 
43030Sstevel@tonic-gate /*
43040Sstevel@tonic-gate  * Return the kpm virtual address for the page at pp.
43050Sstevel@tonic-gate  */
43060Sstevel@tonic-gate /*ARGSUSED*/
43070Sstevel@tonic-gate caddr_t
43080Sstevel@tonic-gate hat_kpm_page2va(struct page *pp, int checkswap)
43090Sstevel@tonic-gate {
43100Sstevel@tonic-gate 	return (hat_kpm_pfn2va(pp->p_pagenum));
43110Sstevel@tonic-gate }
43120Sstevel@tonic-gate 
43130Sstevel@tonic-gate /*
43140Sstevel@tonic-gate  * Return the page frame number for the kpm virtual address vaddr.
43150Sstevel@tonic-gate  */
43160Sstevel@tonic-gate pfn_t
43170Sstevel@tonic-gate hat_kpm_va2pfn(caddr_t vaddr)
43180Sstevel@tonic-gate {
43190Sstevel@tonic-gate 	pfn_t		pfn;
43200Sstevel@tonic-gate 
43210Sstevel@tonic-gate 	ASSERT(IS_KPM_ADDR(vaddr));
43220Sstevel@tonic-gate 
43230Sstevel@tonic-gate 	pfn = (pfn_t)btop(vaddr - kpm_vbase);
43240Sstevel@tonic-gate 
43250Sstevel@tonic-gate 	return (pfn);
43260Sstevel@tonic-gate }
43270Sstevel@tonic-gate 
43280Sstevel@tonic-gate 
43290Sstevel@tonic-gate /*
43300Sstevel@tonic-gate  * Return the page for the kpm virtual address vaddr.
43310Sstevel@tonic-gate  */
43320Sstevel@tonic-gate page_t *
43330Sstevel@tonic-gate hat_kpm_vaddr2page(caddr_t vaddr)
43340Sstevel@tonic-gate {
43350Sstevel@tonic-gate 	pfn_t		pfn;
43360Sstevel@tonic-gate 
43370Sstevel@tonic-gate 	ASSERT(IS_KPM_ADDR(vaddr));
43380Sstevel@tonic-gate 
43390Sstevel@tonic-gate 	pfn = hat_kpm_va2pfn(vaddr);
43400Sstevel@tonic-gate 
43410Sstevel@tonic-gate 	return (page_numtopp_nolock(pfn));
43420Sstevel@tonic-gate }
43430Sstevel@tonic-gate 
43440Sstevel@tonic-gate /*
43450Sstevel@tonic-gate  * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
43460Sstevel@tonic-gate  * KPM page.  This should never happen on x86.
43470Sstevel@tonic-gate  */
43480Sstevel@tonic-gate int
43490Sstevel@tonic-gate hat_kpm_fault(hat_t *hat, caddr_t vaddr)
43500Sstevel@tonic-gate {
43517240Srh87107 	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p",
43527240Srh87107 	    (void *)hat, (void *)vaddr);
43530Sstevel@tonic-gate 
43540Sstevel@tonic-gate 	return (0);
43550Sstevel@tonic-gate }
43560Sstevel@tonic-gate 
43570Sstevel@tonic-gate /*ARGSUSED*/
43580Sstevel@tonic-gate void
43590Sstevel@tonic-gate hat_kpm_mseghash_clear(int nentries)
43600Sstevel@tonic-gate {}
43610Sstevel@tonic-gate 
43620Sstevel@tonic-gate /*ARGSUSED*/
43630Sstevel@tonic-gate void
43640Sstevel@tonic-gate hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
43650Sstevel@tonic-gate {}
43665084Sjohnlev 
436712004Sjiang.liu@intel.com #ifndef	__xpv
436812004Sjiang.liu@intel.com void
436912004Sjiang.liu@intel.com hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
437012004Sjiang.liu@intel.com 	offset_t kpm_pages_off)
437112004Sjiang.liu@intel.com {
437212004Sjiang.liu@intel.com 	_NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off));
437312004Sjiang.liu@intel.com 	pfn_t base, end;
437412004Sjiang.liu@intel.com 
437512004Sjiang.liu@intel.com 	/*
437612004Sjiang.liu@intel.com 	 * kphysm_add_memory_dynamic() does not set nkpmpgs
437712004Sjiang.liu@intel.com 	 * when page_t memory is externally allocated.  That
437812004Sjiang.liu@intel.com 	 * code must properly calculate nkpmpgs in all cases
437912004Sjiang.liu@intel.com 	 * if nkpmpgs needs to be used at some point.
438012004Sjiang.liu@intel.com 	 */
438112004Sjiang.liu@intel.com 
438212004Sjiang.liu@intel.com 	/*
438312004Sjiang.liu@intel.com 	 * The meta (page_t) pages for dynamically added memory are allocated
438412004Sjiang.liu@intel.com 	 * either from the incoming memory itself or from existing memory.
438512004Sjiang.liu@intel.com 	 * In the former case the base of the incoming pages will be different
438612004Sjiang.liu@intel.com 	 * than the base of the dynamic segment so call memseg_get_start() to
438712004Sjiang.liu@intel.com 	 * get the actual base of the incoming memory for each case.
438812004Sjiang.liu@intel.com 	 */
438912004Sjiang.liu@intel.com 
439012004Sjiang.liu@intel.com 	base = memseg_get_start(msp);
439112004Sjiang.liu@intel.com 	end = msp->pages_end;
439212004Sjiang.liu@intel.com 
439312004Sjiang.liu@intel.com 	hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
439412004Sjiang.liu@intel.com 	    mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
439512004Sjiang.liu@intel.com 	    HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
439612004Sjiang.liu@intel.com }
439712004Sjiang.liu@intel.com 
439812004Sjiang.liu@intel.com void
439912004Sjiang.liu@intel.com hat_kpm_addmem_mseg_insert(struct memseg *msp)
440012004Sjiang.liu@intel.com {
440112004Sjiang.liu@intel.com 	_NOTE(ARGUNUSED(msp));
440212004Sjiang.liu@intel.com }
440312004Sjiang.liu@intel.com 
440412004Sjiang.liu@intel.com void
440512004Sjiang.liu@intel.com hat_kpm_addmem_memsegs_update(struct memseg *msp)
440612004Sjiang.liu@intel.com {
440712004Sjiang.liu@intel.com 	_NOTE(ARGUNUSED(msp));
440812004Sjiang.liu@intel.com }
440912004Sjiang.liu@intel.com 
441012004Sjiang.liu@intel.com /*
441112004Sjiang.liu@intel.com  * Return end of metadata for an already setup memseg.
441212004Sjiang.liu@intel.com  * X86 platforms don't need per-page meta data to support kpm.
441312004Sjiang.liu@intel.com  */
441412004Sjiang.liu@intel.com caddr_t
441512004Sjiang.liu@intel.com hat_kpm_mseg_reuse(struct memseg *msp)
441612004Sjiang.liu@intel.com {
441712004Sjiang.liu@intel.com 	return ((caddr_t)msp->epages);
441812004Sjiang.liu@intel.com }
441912004Sjiang.liu@intel.com 
442012004Sjiang.liu@intel.com void
442112004Sjiang.liu@intel.com hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
442212004Sjiang.liu@intel.com {
442312004Sjiang.liu@intel.com 	_NOTE(ARGUNUSED(msp, mspp));
442412004Sjiang.liu@intel.com 	ASSERT(0);
442512004Sjiang.liu@intel.com }
442612004Sjiang.liu@intel.com 
442712004Sjiang.liu@intel.com void
442812004Sjiang.liu@intel.com hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
442912004Sjiang.liu@intel.com 	struct memseg *lo, struct memseg *mid, struct memseg *hi)
443012004Sjiang.liu@intel.com {
443112004Sjiang.liu@intel.com 	_NOTE(ARGUNUSED(msp, mspp, lo, mid, hi));
443212004Sjiang.liu@intel.com 	ASSERT(0);
443312004Sjiang.liu@intel.com }
443412004Sjiang.liu@intel.com 
443512004Sjiang.liu@intel.com /*
443612004Sjiang.liu@intel.com  * Walk the memsegs chain, applying func to each memseg span.
443712004Sjiang.liu@intel.com  */
443812004Sjiang.liu@intel.com void
443912004Sjiang.liu@intel.com hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
444012004Sjiang.liu@intel.com {
444112004Sjiang.liu@intel.com 	pfn_t	pbase, pend;
444212004Sjiang.liu@intel.com 	void	*base;
444312004Sjiang.liu@intel.com 	size_t	size;
444412004Sjiang.liu@intel.com 	struct memseg *msp;
444512004Sjiang.liu@intel.com 
444612004Sjiang.liu@intel.com 	for (msp = memsegs; msp; msp = msp->next) {
444712004Sjiang.liu@intel.com 		pbase = msp->pages_base;
444812004Sjiang.liu@intel.com 		pend = msp->pages_end;
444912004Sjiang.liu@intel.com 		base = ptob(pbase) + kpm_vbase;
445012004Sjiang.liu@intel.com 		size = ptob(pend - pbase);
445112004Sjiang.liu@intel.com 		func(arg, base, size);
445212004Sjiang.liu@intel.com 	}
445312004Sjiang.liu@intel.com }
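
/*
 * Illustrative sketch only: a hypothetical callback that tallies the
 * total physical memory the kpm segment covers.
 */
#if 0
static void
example_count_span(void *arg, void *base, size_t size)
{
	_NOTE(ARGUNUSED(base));
	*(size_t *)arg += size;
}

/* caller: size_t total = 0; hat_kpm_walk(example_count_span, &total); */
#endif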
445412004Sjiang.liu@intel.com 
445512004Sjiang.liu@intel.com #else	/* __xpv */
445612004Sjiang.liu@intel.com 
44575084Sjohnlev /*
44585084Sjohnlev  * There are specific Hypervisor calls to establish and remove mappings
44595084Sjohnlev  * to grant table references and the privcmd driver. We have to ensure
44605084Sjohnlev  * that a page table actually exists.
44615084Sjohnlev  */
44625084Sjohnlev void
44637756SMark.Johnson@Sun.COM hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
44645084Sjohnlev {
44657756SMark.Johnson@Sun.COM 	maddr_t base_ma;
44667756SMark.Johnson@Sun.COM 	htable_t *ht;
44677756SMark.Johnson@Sun.COM 	uint_t entry;
44687756SMark.Johnson@Sun.COM 
44695084Sjohnlev 	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
44705741Smrj 	XPV_DISALLOW_MIGRATE();
44717756SMark.Johnson@Sun.COM 	ht = htable_create(hat, (uintptr_t)addr, 0, NULL);
44727756SMark.Johnson@Sun.COM 
44737756SMark.Johnson@Sun.COM 	/*
44747756SMark.Johnson@Sun.COM 	 * if an address for pte_ma is passed in, return the MA of the pte
44757756SMark.Johnson@Sun.COM 	 * for this specific address.  This address is only valid as long
44767756SMark.Johnson@Sun.COM 	 * as the htable stays locked.
44777756SMark.Johnson@Sun.COM 	 */
44787756SMark.Johnson@Sun.COM 	if (pte_ma != NULL) {
44797756SMark.Johnson@Sun.COM 		entry = htable_va2entry((uintptr_t)addr, ht);
44807756SMark.Johnson@Sun.COM 		base_ma = pa_to_ma(ptob(ht->ht_pfn));
44817756SMark.Johnson@Sun.COM 		*pte_ma = base_ma + (entry << mmu.pte_size_shift);
44827756SMark.Johnson@Sun.COM 	}
44835741Smrj 	XPV_ALLOW_MIGRATE();
44845084Sjohnlev }
44855084Sjohnlev 
44865084Sjohnlev void
44875084Sjohnlev hat_release_mapping(hat_t *hat, caddr_t addr)
44885084Sjohnlev {
44895084Sjohnlev 	htable_t *ht;
44905084Sjohnlev 
44915084Sjohnlev 	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
44925741Smrj 	XPV_DISALLOW_MIGRATE();
44935084Sjohnlev 	ht = htable_lookup(hat, (uintptr_t)addr, 0);
44945084Sjohnlev 	ASSERT(ht != NULL);
44955084Sjohnlev 	ASSERT(ht->ht_busy >= 2);
44965084Sjohnlev 	htable_release(ht);
44975084Sjohnlev 	htable_release(ht);
44985741Smrj 	XPV_ALLOW_MIGRATE();
449912004Sjiang.liu@intel.com }
450012004Sjiang.liu@intel.com #endif	/* __xpv */
4501