xref: /onnv-gate/usr/src/uts/i86pc/vm/hat_i86.c (revision 4654:2b2bb89445af)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51747Sjosephb  * Common Development and Distribution License (the "License").
61747Sjosephb  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
223446Smrj  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
270Sstevel@tonic-gate 
280Sstevel@tonic-gate /*
290Sstevel@tonic-gate  * VM - Hardware Address Translation management for i386 and amd64
300Sstevel@tonic-gate  *
310Sstevel@tonic-gate  * Implementation of the interfaces described in <common/vm/hat.h>
320Sstevel@tonic-gate  *
330Sstevel@tonic-gate  * Nearly all the details of how the hardware is managed should not be
340Sstevel@tonic-gate  * visible outside this layer except for misc. machine specific functions
350Sstevel@tonic-gate  * that work in conjunction with this code.
360Sstevel@tonic-gate  *
370Sstevel@tonic-gate  * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
380Sstevel@tonic-gate  */
390Sstevel@tonic-gate 
400Sstevel@tonic-gate #include <sys/machparam.h>
410Sstevel@tonic-gate #include <sys/machsystm.h>
420Sstevel@tonic-gate #include <sys/mman.h>
430Sstevel@tonic-gate #include <sys/types.h>
440Sstevel@tonic-gate #include <sys/systm.h>
450Sstevel@tonic-gate #include <sys/cpuvar.h>
460Sstevel@tonic-gate #include <sys/thread.h>
470Sstevel@tonic-gate #include <sys/proc.h>
480Sstevel@tonic-gate #include <sys/cpu.h>
490Sstevel@tonic-gate #include <sys/kmem.h>
500Sstevel@tonic-gate #include <sys/disp.h>
510Sstevel@tonic-gate #include <sys/shm.h>
520Sstevel@tonic-gate #include <sys/sysmacros.h>
530Sstevel@tonic-gate #include <sys/machparam.h>
540Sstevel@tonic-gate #include <sys/vmem.h>
550Sstevel@tonic-gate #include <sys/vmsystm.h>
560Sstevel@tonic-gate #include <sys/promif.h>
570Sstevel@tonic-gate #include <sys/var.h>
580Sstevel@tonic-gate #include <sys/x86_archext.h>
590Sstevel@tonic-gate #include <sys/atomic.h>
600Sstevel@tonic-gate #include <sys/bitmap.h>
613446Smrj #include <sys/controlregs.h>
623446Smrj #include <sys/bootconf.h>
633446Smrj #include <sys/bootsvcs.h>
643446Smrj #include <sys/bootinfo.h>
654191Sjosephb #include <sys/archsystm.h>
660Sstevel@tonic-gate 
670Sstevel@tonic-gate #include <vm/seg_kmem.h>
680Sstevel@tonic-gate #include <vm/hat_i86.h>
690Sstevel@tonic-gate #include <vm/as.h>
700Sstevel@tonic-gate #include <vm/seg.h>
710Sstevel@tonic-gate #include <vm/page.h>
720Sstevel@tonic-gate #include <vm/seg_kp.h>
730Sstevel@tonic-gate #include <vm/seg_kpm.h>
740Sstevel@tonic-gate #include <vm/vm_dep.h>
753446Smrj #include <vm/kboot_mmu.h>
764381Sjosephb #include <vm/seg_spt.h>
770Sstevel@tonic-gate 
780Sstevel@tonic-gate #include <sys/cmn_err.h>
790Sstevel@tonic-gate 
800Sstevel@tonic-gate /*
810Sstevel@tonic-gate  * Basic parameters for hat operation.
820Sstevel@tonic-gate  */
830Sstevel@tonic-gate struct hat_mmu_info mmu;
840Sstevel@tonic-gate 
850Sstevel@tonic-gate /*
860Sstevel@tonic-gate  * The page that is the kernel's top level pagetable.
870Sstevel@tonic-gate  *
880Sstevel@tonic-gate  * For 32 bit VLP support, the kernel hat will use the 1st 4 entries
890Sstevel@tonic-gate  * on this 4K page for its top level page table. The remaining groups of
900Sstevel@tonic-gate  * 4 entries are used for per processor copies of user VLP pagetables for
910Sstevel@tonic-gate  * running threads.  See hat_switch() and reload_pae32() for details.
920Sstevel@tonic-gate  *
930Sstevel@tonic-gate  * vlp_page[0] - 0th level==2 PTE for kernel HAT (will be zero)
940Sstevel@tonic-gate  * vlp_page[1] - 1st level==2 PTE for kernel HAT (will be zero)
950Sstevel@tonic-gate  * vlp_page[2] - 2nd level==2 PTE for kernel HAT (zero for small memory)
960Sstevel@tonic-gate  * vlp_page[3] - 3rd level==2 PTE for kernel
970Sstevel@tonic-gate  *
980Sstevel@tonic-gate  * vlp_page[4] - 0th level==2 PTE for user thread on cpu 0
990Sstevel@tonic-gate  * vlp_page[5] - 1st level==2 PTE for user thread on cpu 0
1000Sstevel@tonic-gate  * vlp_page[6] - 2nd level==2 PTE for user thread on cpu 0
1010Sstevel@tonic-gate  * vlp_page[7] - probably copy of kernel PTE
1020Sstevel@tonic-gate  *
1030Sstevel@tonic-gate  * vlp_page[8]  - 0th level==2 PTE for user thread on cpu 1
1040Sstevel@tonic-gate  * vlp_page[9]  - 1st level==2 PTE for user thread on cpu 1
1050Sstevel@tonic-gate  * vlp_page[10] - 2nd level==2 PTE for user thread on cpu 1
1060Sstevel@tonic-gate  * vlp_page[11] - probably copy of kernel PTE
1070Sstevel@tonic-gate  * ...
1080Sstevel@tonic-gate  *
1090Sstevel@tonic-gate  * when / where the kernel PTE's are (entry 2 or 3 or none) depends
1100Sstevel@tonic-gate  * on kernelbase.
1110Sstevel@tonic-gate  */
1120Sstevel@tonic-gate static x86pte_t *vlp_page;
1130Sstevel@tonic-gate 
1140Sstevel@tonic-gate /*
1150Sstevel@tonic-gate  * forward declaration of internal utility routines
1160Sstevel@tonic-gate  */
1170Sstevel@tonic-gate static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
1180Sstevel@tonic-gate 	x86pte_t new);
1190Sstevel@tonic-gate 
1200Sstevel@tonic-gate /*
1210Sstevel@tonic-gate  * The kernel address space exists in all HATs. To implement this the
1220Sstevel@tonic-gate  * kernel reserves a fixed number of entries in every topmost level page
1230Sstevel@tonic-gate  * table. The values are setup in hat_init() and then copied to every hat
1240Sstevel@tonic-gate  * created by hat_alloc(). This means that kernelbase must be:
1250Sstevel@tonic-gate  *
1260Sstevel@tonic-gate  *	  4Meg aligned for 32 bit kernels
1270Sstevel@tonic-gate  *	512Gig aligned for x86_64 64 bit kernel
1280Sstevel@tonic-gate  *
1290Sstevel@tonic-gate  * The PAE 32 bit hat is handled as a special case. Otherwise requiring 1Gig
1300Sstevel@tonic-gate  * alignment would use too much VA for the kernel.
1310Sstevel@tonic-gate  *
1320Sstevel@tonic-gate  */
1330Sstevel@tonic-gate static uint_t	khat_start;	/* index of 1st entry in kernel's top ptable */
1340Sstevel@tonic-gate static uint_t	khat_entries;	/* number of entries in kernel's top ptable */
1350Sstevel@tonic-gate 
1360Sstevel@tonic-gate #if defined(__i386)
1370Sstevel@tonic-gate 
1380Sstevel@tonic-gate static htable_t	*khat_pae32_htable = NULL;
1390Sstevel@tonic-gate static uint_t	khat_pae32_start;
1400Sstevel@tonic-gate static uint_t	khat_pae32_entries;
1410Sstevel@tonic-gate 
1420Sstevel@tonic-gate #endif
1430Sstevel@tonic-gate 
1440Sstevel@tonic-gate uint_t use_boot_reserve = 1;	/* cleared after early boot process */
1450Sstevel@tonic-gate uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */
1460Sstevel@tonic-gate 
1470Sstevel@tonic-gate /*
1480Sstevel@tonic-gate  * A cpuset for all cpus. This is used for kernel address cross calls, since
1490Sstevel@tonic-gate  * the kernel addresses apply to all cpus.
1500Sstevel@tonic-gate  */
1510Sstevel@tonic-gate cpuset_t khat_cpuset;
1520Sstevel@tonic-gate 
1530Sstevel@tonic-gate /*
1540Sstevel@tonic-gate  * management stuff for hat structures
1550Sstevel@tonic-gate  */
1560Sstevel@tonic-gate kmutex_t	hat_list_lock;
1570Sstevel@tonic-gate kcondvar_t	hat_list_cv;
1580Sstevel@tonic-gate kmem_cache_t	*hat_cache;
1590Sstevel@tonic-gate kmem_cache_t	*hat_hash_cache;
1600Sstevel@tonic-gate kmem_cache_t	*vlp_hash_cache;
1610Sstevel@tonic-gate 
1620Sstevel@tonic-gate /*
1630Sstevel@tonic-gate  * Simple statistics
1640Sstevel@tonic-gate  */
1650Sstevel@tonic-gate struct hatstats hatstat;
1660Sstevel@tonic-gate 
1670Sstevel@tonic-gate /*
1680Sstevel@tonic-gate  * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
1690Sstevel@tonic-gate  */
1700Sstevel@tonic-gate extern void atomic_orb(uchar_t *addr, uchar_t val);
1710Sstevel@tonic-gate extern void atomic_andb(uchar_t *addr, uchar_t val);
1720Sstevel@tonic-gate 
1730Sstevel@tonic-gate #define	PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
1740Sstevel@tonic-gate #define	PP_ISMOD(pp)		PP_GETRM(pp, P_MOD)
1750Sstevel@tonic-gate #define	PP_ISREF(pp)		PP_GETRM(pp, P_REF)
1760Sstevel@tonic-gate #define	PP_ISRO(pp)		PP_GETRM(pp, P_RO)
1770Sstevel@tonic-gate 
1780Sstevel@tonic-gate #define	PP_SETRM(pp, rm)	atomic_orb(&(pp->p_nrm), rm)
1790Sstevel@tonic-gate #define	PP_SETMOD(pp)		PP_SETRM(pp, P_MOD)
1800Sstevel@tonic-gate #define	PP_SETREF(pp)		PP_SETRM(pp, P_REF)
1810Sstevel@tonic-gate #define	PP_SETRO(pp)		PP_SETRM(pp, P_RO)
1820Sstevel@tonic-gate 
1830Sstevel@tonic-gate #define	PP_CLRRM(pp, rm)	atomic_andb(&(pp->p_nrm), ~(rm))
1840Sstevel@tonic-gate #define	PP_CLRMOD(pp)   	PP_CLRRM(pp, P_MOD)
1850Sstevel@tonic-gate #define	PP_CLRREF(pp)   	PP_CLRRM(pp, P_REF)
1860Sstevel@tonic-gate #define	PP_CLRRO(pp)    	PP_CLRRM(pp, P_RO)
1870Sstevel@tonic-gate #define	PP_CLRALL(pp)		PP_CLRRM(pp, P_MOD | P_REF | P_RO)
1880Sstevel@tonic-gate 
1890Sstevel@tonic-gate /*
1900Sstevel@tonic-gate  * kmem cache constructor for struct hat
1910Sstevel@tonic-gate  */
1920Sstevel@tonic-gate /*ARGSUSED*/
1930Sstevel@tonic-gate static int
1940Sstevel@tonic-gate hati_constructor(void *buf, void *handle, int kmflags)
1950Sstevel@tonic-gate {
1960Sstevel@tonic-gate 	hat_t	*hat = buf;
1970Sstevel@tonic-gate 
1980Sstevel@tonic-gate 	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
1990Sstevel@tonic-gate 	bzero(hat->hat_pages_mapped,
2000Sstevel@tonic-gate 	    sizeof (pgcnt_t) * (mmu.max_page_level + 1));
2014381Sjosephb 	hat->hat_ism_pgcnt = 0;
2020Sstevel@tonic-gate 	hat->hat_stats = 0;
2030Sstevel@tonic-gate 	hat->hat_flags = 0;
2040Sstevel@tonic-gate 	CPUSET_ZERO(hat->hat_cpus);
2050Sstevel@tonic-gate 	hat->hat_htable = NULL;
2060Sstevel@tonic-gate 	hat->hat_ht_hash = NULL;
2070Sstevel@tonic-gate 	return (0);
2080Sstevel@tonic-gate }
2090Sstevel@tonic-gate 
2100Sstevel@tonic-gate /*
2110Sstevel@tonic-gate  * Allocate a hat structure for as. We also create the top level
2120Sstevel@tonic-gate  * htable and initialize it to contain the kernel hat entries.
2130Sstevel@tonic-gate  */
2140Sstevel@tonic-gate hat_t *
2150Sstevel@tonic-gate hat_alloc(struct as *as)
2160Sstevel@tonic-gate {
2170Sstevel@tonic-gate 	hat_t		*hat;
2180Sstevel@tonic-gate 	htable_t	*ht;	/* top level htable */
2190Sstevel@tonic-gate 	uint_t		use_vlp;
2200Sstevel@tonic-gate 
2210Sstevel@tonic-gate 	/*
2220Sstevel@tonic-gate 	 * Once we start creating user process HATs we can enable
2230Sstevel@tonic-gate 	 * the htable_steal() code.
2240Sstevel@tonic-gate 	 */
2250Sstevel@tonic-gate 	if (can_steal_post_boot == 0)
2260Sstevel@tonic-gate 		can_steal_post_boot = 1;
2270Sstevel@tonic-gate 
2280Sstevel@tonic-gate 	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
2290Sstevel@tonic-gate 	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
2300Sstevel@tonic-gate 	hat->hat_as = as;
2310Sstevel@tonic-gate 	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
2320Sstevel@tonic-gate 	ASSERT(hat->hat_flags == 0);
2330Sstevel@tonic-gate 
2340Sstevel@tonic-gate 	/*
2350Sstevel@tonic-gate 	 * a 32 bit process uses a VLP style hat when using PAE
2360Sstevel@tonic-gate 	 */
2370Sstevel@tonic-gate #if defined(__amd64)
2380Sstevel@tonic-gate 	use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
2390Sstevel@tonic-gate #elif defined(__i386)
2400Sstevel@tonic-gate 	use_vlp = mmu.pae_hat;
2410Sstevel@tonic-gate #endif
2420Sstevel@tonic-gate 	if (use_vlp) {
2430Sstevel@tonic-gate 		hat->hat_flags = HAT_VLP;
2440Sstevel@tonic-gate 		bzero(hat->hat_vlp_ptes, VLP_SIZE);
2450Sstevel@tonic-gate 	}
2460Sstevel@tonic-gate 
2470Sstevel@tonic-gate 	/*
2480Sstevel@tonic-gate 	 * Allocate the htable hash
2490Sstevel@tonic-gate 	 */
2500Sstevel@tonic-gate 	if ((hat->hat_flags & HAT_VLP)) {
2510Sstevel@tonic-gate 		hat->hat_num_hash = mmu.vlp_hash_cnt;
2520Sstevel@tonic-gate 		hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
2530Sstevel@tonic-gate 	} else {
2540Sstevel@tonic-gate 		hat->hat_num_hash = mmu.hash_cnt;
2550Sstevel@tonic-gate 		hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
2560Sstevel@tonic-gate 	}
2570Sstevel@tonic-gate 	bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
2580Sstevel@tonic-gate 
2590Sstevel@tonic-gate 	/*
2600Sstevel@tonic-gate 	 * Initialize Kernel HAT entries at the top of the top level page
2610Sstevel@tonic-gate 	 * table for the new hat.
2620Sstevel@tonic-gate 	 *
2630Sstevel@tonic-gate 	 * Note that we don't call htable_release() for the top level, that
2640Sstevel@tonic-gate 	 * happens when the hat is destroyed in hat_free_end()
2650Sstevel@tonic-gate 	 */
2660Sstevel@tonic-gate 	hat->hat_htable = NULL;
2670Sstevel@tonic-gate 	hat->hat_ht_cached = NULL;
2680Sstevel@tonic-gate 	ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
2693446Smrj 
2700Sstevel@tonic-gate 	if (!(hat->hat_flags & HAT_VLP))
2710Sstevel@tonic-gate 		x86pte_copy(kas.a_hat->hat_htable, ht, khat_start,
2720Sstevel@tonic-gate 		    khat_entries);
2730Sstevel@tonic-gate #if defined(__i386)
2740Sstevel@tonic-gate 	else if (khat_entries > 0)
2750Sstevel@tonic-gate 		bcopy(vlp_page + khat_start, hat->hat_vlp_ptes + khat_start,
2760Sstevel@tonic-gate 		    khat_entries * sizeof (x86pte_t));
2770Sstevel@tonic-gate #endif
2780Sstevel@tonic-gate 	hat->hat_htable = ht;
2790Sstevel@tonic-gate 
2800Sstevel@tonic-gate #if defined(__i386)
2810Sstevel@tonic-gate 	/*
2820Sstevel@tonic-gate 	 * PAE32 HAT alignment is less restrictive than the others to keep
2830Sstevel@tonic-gate 	 * the kernel from using too much VA. Because of this we may need
2840Sstevel@tonic-gate 	 * one layer further down when kernelbase isn't 1Gig aligned.
2850Sstevel@tonic-gate 	 * See hat_free_end() for the htable_release() that goes with this
2860Sstevel@tonic-gate 	 * htable_create()
2870Sstevel@tonic-gate 	 */
2880Sstevel@tonic-gate 	if (khat_pae32_htable != NULL) {
2890Sstevel@tonic-gate 		ht = htable_create(hat, kernelbase,
2900Sstevel@tonic-gate 		    khat_pae32_htable->ht_level, NULL);
2910Sstevel@tonic-gate 		x86pte_copy(khat_pae32_htable, ht, khat_pae32_start,
2920Sstevel@tonic-gate 		    khat_pae32_entries);
2930Sstevel@tonic-gate 		ht->ht_valid_cnt = khat_pae32_entries;
2940Sstevel@tonic-gate 	}
2950Sstevel@tonic-gate #endif
2960Sstevel@tonic-gate 
2970Sstevel@tonic-gate 	/*
2981747Sjosephb 	 * Put it at the start of the global list of all hats (used by stealing)
2991747Sjosephb 	 *
3001747Sjosephb 	 * kas.a_hat is not in the list but is instead used to find the
3011747Sjosephb 	 * first and last items in the list.
3021747Sjosephb 	 *
3031747Sjosephb 	 * - kas.a_hat->hat_next points to the start of the user hats.
3041747Sjosephb 	 *   The list ends where hat->hat_next == NULL
3051747Sjosephb 	 *
3061747Sjosephb 	 * - kas.a_hat->hat_prev points to the last of the user hats.
3071747Sjosephb 	 *   The list begins where hat->hat_prev == NULL
3080Sstevel@tonic-gate 	 */
3090Sstevel@tonic-gate 	mutex_enter(&hat_list_lock);
3101747Sjosephb 	hat->hat_prev = NULL;
3111747Sjosephb 	hat->hat_next = kas.a_hat->hat_next;
3121747Sjosephb 	if (hat->hat_next)
3131747Sjosephb 		hat->hat_next->hat_prev = hat;
3141747Sjosephb 	else
3151747Sjosephb 		kas.a_hat->hat_prev = hat;
3160Sstevel@tonic-gate 	kas.a_hat->hat_next = hat;
3170Sstevel@tonic-gate 	mutex_exit(&hat_list_lock);
3180Sstevel@tonic-gate 
3190Sstevel@tonic-gate 	return (hat);
3200Sstevel@tonic-gate }
3210Sstevel@tonic-gate 
3220Sstevel@tonic-gate /*
3230Sstevel@tonic-gate  * process has finished executing but as has not been cleaned up yet.
3240Sstevel@tonic-gate  */
3250Sstevel@tonic-gate /*ARGSUSED*/
3260Sstevel@tonic-gate void
3270Sstevel@tonic-gate hat_free_start(hat_t *hat)
3280Sstevel@tonic-gate {
3290Sstevel@tonic-gate 	ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));
3301747Sjosephb 
3311747Sjosephb 	/*
3321747Sjosephb 	 * If the hat is currently a stealing victim, wait for the stealing
3331747Sjosephb 	 * to finish.  Once we mark it as HAT_FREEING, htable_steal()
3341747Sjosephb 	 * won't look at its pagetables anymore.
3351747Sjosephb 	 */
3360Sstevel@tonic-gate 	mutex_enter(&hat_list_lock);
3371747Sjosephb 	while (hat->hat_flags & HAT_VICTIM)
3381747Sjosephb 		cv_wait(&hat_list_cv, &hat_list_lock);
3390Sstevel@tonic-gate 	hat->hat_flags |= HAT_FREEING;
3400Sstevel@tonic-gate 	mutex_exit(&hat_list_lock);
3410Sstevel@tonic-gate }
3420Sstevel@tonic-gate 
3430Sstevel@tonic-gate /*
3440Sstevel@tonic-gate  * An address space is being destroyed, so we destroy the associated hat.
3450Sstevel@tonic-gate  */
3460Sstevel@tonic-gate void
3470Sstevel@tonic-gate hat_free_end(hat_t *hat)
3480Sstevel@tonic-gate {
3490Sstevel@tonic-gate 	int i;
3500Sstevel@tonic-gate 	kmem_cache_t *cache;
3510Sstevel@tonic-gate 
3520Sstevel@tonic-gate #ifdef DEBUG
3530Sstevel@tonic-gate 	for (i = 0; i <= mmu.max_page_level; i++)
3540Sstevel@tonic-gate 		ASSERT(hat->hat_pages_mapped[i] == 0);
3550Sstevel@tonic-gate #endif
3560Sstevel@tonic-gate 	ASSERT(hat->hat_flags & HAT_FREEING);
3570Sstevel@tonic-gate 
3580Sstevel@tonic-gate 	/*
3590Sstevel@tonic-gate 	 * must not be running on the given hat
3600Sstevel@tonic-gate 	 */
3610Sstevel@tonic-gate 	ASSERT(CPU->cpu_current_hat != hat);
3620Sstevel@tonic-gate 
3630Sstevel@tonic-gate 	/*
3641747Sjosephb 	 * Remove it from the list of HATs
3650Sstevel@tonic-gate 	 */
3660Sstevel@tonic-gate 	mutex_enter(&hat_list_lock);
3671747Sjosephb 	if (hat->hat_prev)
3681747Sjosephb 		hat->hat_prev->hat_next = hat->hat_next;
3691747Sjosephb 	else
3700Sstevel@tonic-gate 		kas.a_hat->hat_next = hat->hat_next;
3711747Sjosephb 	if (hat->hat_next)
3721747Sjosephb 		hat->hat_next->hat_prev = hat->hat_prev;
3731747Sjosephb 	else
3741747Sjosephb 		kas.a_hat->hat_prev = hat->hat_prev;
3750Sstevel@tonic-gate 	mutex_exit(&hat_list_lock);
3761747Sjosephb 	hat->hat_next = hat->hat_prev = NULL;
3770Sstevel@tonic-gate 
3780Sstevel@tonic-gate 	/*
3790Sstevel@tonic-gate 	 * Make a pass through the htables freeing them all up.
3800Sstevel@tonic-gate 	 */
3810Sstevel@tonic-gate 	htable_purge_hat(hat);
3820Sstevel@tonic-gate 
3830Sstevel@tonic-gate 	/*
3840Sstevel@tonic-gate 	 * Decide which kmem cache the hash table came from, then free it.
3850Sstevel@tonic-gate 	 */
3860Sstevel@tonic-gate 	if (hat->hat_flags & HAT_VLP)
3870Sstevel@tonic-gate 		cache = vlp_hash_cache;
3880Sstevel@tonic-gate 	else
3890Sstevel@tonic-gate 		cache = hat_hash_cache;
3900Sstevel@tonic-gate 	kmem_cache_free(cache, hat->hat_ht_hash);
3910Sstevel@tonic-gate 	hat->hat_ht_hash = NULL;
3920Sstevel@tonic-gate 
3930Sstevel@tonic-gate 	hat->hat_flags = 0;
3940Sstevel@tonic-gate 	kmem_cache_free(hat_cache, hat);
3950Sstevel@tonic-gate }
3960Sstevel@tonic-gate 
3970Sstevel@tonic-gate /*
3980Sstevel@tonic-gate  * round kernelbase down to a supported value to use for _userlimit
3990Sstevel@tonic-gate  *
4000Sstevel@tonic-gate  * userlimit must be aligned down to an entry in the top level htable.
4010Sstevel@tonic-gate  * The one exception is for 32 bit HAT's running PAE.
4020Sstevel@tonic-gate  */
4030Sstevel@tonic-gate uintptr_t
4040Sstevel@tonic-gate hat_kernelbase(uintptr_t va)
4050Sstevel@tonic-gate {
4060Sstevel@tonic-gate #if defined(__i386)
4070Sstevel@tonic-gate 	va &= LEVEL_MASK(1);
4080Sstevel@tonic-gate #endif
4090Sstevel@tonic-gate 	if (IN_VA_HOLE(va))
4100Sstevel@tonic-gate 		panic("_userlimit %p will fall in VA hole\n", (void *)va);
4110Sstevel@tonic-gate 	return (va);
4120Sstevel@tonic-gate }
4130Sstevel@tonic-gate 
4140Sstevel@tonic-gate /*
4150Sstevel@tonic-gate  * Initialize hat data structures based on processor MMU information.
4160Sstevel@tonic-gate  */
4170Sstevel@tonic-gate void
4180Sstevel@tonic-gate mmu_init(void)
4190Sstevel@tonic-gate {
4200Sstevel@tonic-gate 	uint_t max_htables;
4210Sstevel@tonic-gate 	uint_t pa_bits;
4220Sstevel@tonic-gate 	uint_t va_bits;
4230Sstevel@tonic-gate 	int i;
4240Sstevel@tonic-gate 
4250Sstevel@tonic-gate 	/*
4263446Smrj 	 * If CPU enabled the page table global bit, use it for the kernel
4273446Smrj 	 * This is bit 7 in CR4 (PGE - Page Global Enable).
4280Sstevel@tonic-gate 	 */
4293446Smrj 	if ((x86_feature & X86_PGE) != 0 && (getcr4() & CR4_PGE) != 0)
4300Sstevel@tonic-gate 		mmu.pt_global = PT_GLOBAL;
4310Sstevel@tonic-gate 
4320Sstevel@tonic-gate 	/*
4333446Smrj 	 * Detect NX and PAE usage.
4340Sstevel@tonic-gate 	 */
4353446Smrj 	mmu.pae_hat = kbm_pae_support;
4363446Smrj 	if (kbm_nx_support)
4370Sstevel@tonic-gate 		mmu.pt_nx = PT_NX;
4383446Smrj 	else
4390Sstevel@tonic-gate 		mmu.pt_nx = 0;
4400Sstevel@tonic-gate 
4410Sstevel@tonic-gate 	/*
4420Sstevel@tonic-gate 	 * Use CPU info to set various MMU parameters
4430Sstevel@tonic-gate 	 */
4440Sstevel@tonic-gate 	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
4450Sstevel@tonic-gate 
4460Sstevel@tonic-gate 	if (va_bits < sizeof (void *) * NBBY) {
4470Sstevel@tonic-gate 		mmu.hole_start = (1ul << (va_bits - 1));
4480Sstevel@tonic-gate 		mmu.hole_end = 0ul - mmu.hole_start - 1;
4490Sstevel@tonic-gate 	} else {
4500Sstevel@tonic-gate 		mmu.hole_end = 0;
4510Sstevel@tonic-gate 		mmu.hole_start = mmu.hole_end - 1;
4520Sstevel@tonic-gate 	}
4530Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_121)
4540Sstevel@tonic-gate 	/*
4550Sstevel@tonic-gate 	 * If erratum 121 has already been detected at this time, hole_start
4560Sstevel@tonic-gate 	 * contains the value to be subtracted from mmu.hole_start.
4570Sstevel@tonic-gate 	 */
4580Sstevel@tonic-gate 	ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
4590Sstevel@tonic-gate 	hole_start = mmu.hole_start - hole_start;
4600Sstevel@tonic-gate #else
4610Sstevel@tonic-gate 	hole_start = mmu.hole_start;
4620Sstevel@tonic-gate #endif
4630Sstevel@tonic-gate 	hole_end = mmu.hole_end;
4640Sstevel@tonic-gate 
4650Sstevel@tonic-gate 	mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
4660Sstevel@tonic-gate 	if (mmu.pae_hat == 0 && pa_bits > 32)
4670Sstevel@tonic-gate 		mmu.highest_pfn = PFN_4G - 1;
4680Sstevel@tonic-gate 
4690Sstevel@tonic-gate 	if (mmu.pae_hat) {
4700Sstevel@tonic-gate 		mmu.pte_size = 8;	/* 8 byte PTEs */
4710Sstevel@tonic-gate 		mmu.pte_size_shift = 3;
4720Sstevel@tonic-gate 	} else {
4730Sstevel@tonic-gate 		mmu.pte_size = 4;	/* 4 byte PTEs */
4740Sstevel@tonic-gate 		mmu.pte_size_shift = 2;
4750Sstevel@tonic-gate 	}
4760Sstevel@tonic-gate 
4770Sstevel@tonic-gate 	if (mmu.pae_hat && (x86_feature & X86_PAE) == 0)
4780Sstevel@tonic-gate 		panic("Processor does not support PAE");
4790Sstevel@tonic-gate 
4800Sstevel@tonic-gate 	if ((x86_feature & X86_CX8) == 0)
4810Sstevel@tonic-gate 		panic("Processor does not support cmpxchg8b instruction");
4820Sstevel@tonic-gate 
4830Sstevel@tonic-gate 	/*
4840Sstevel@tonic-gate 	 * Initialize parameters based on the 64 or 32 bit kernels and
4850Sstevel@tonic-gate 	 * for the 32 bit kernel decide if we should use PAE.
4860Sstevel@tonic-gate 	 */
4873446Smrj 	if (kbm_largepage_support)
4880Sstevel@tonic-gate 		mmu.max_page_level = 1;
4890Sstevel@tonic-gate 	else
4900Sstevel@tonic-gate 		mmu.max_page_level = 0;
4910Sstevel@tonic-gate 	mmu_page_sizes = mmu.max_page_level + 1;
4920Sstevel@tonic-gate 	mmu_exported_page_sizes = mmu_page_sizes;
4930Sstevel@tonic-gate 
4940Sstevel@tonic-gate #if defined(__amd64)
4950Sstevel@tonic-gate 
4960Sstevel@tonic-gate 	mmu.num_level = 4;
4970Sstevel@tonic-gate 	mmu.max_level = 3;
4980Sstevel@tonic-gate 	mmu.ptes_per_table = 512;
4990Sstevel@tonic-gate 	mmu.top_level_count = 512;
5000Sstevel@tonic-gate 
5010Sstevel@tonic-gate 	mmu.level_shift[0] = 12;
5020Sstevel@tonic-gate 	mmu.level_shift[1] = 21;
5030Sstevel@tonic-gate 	mmu.level_shift[2] = 30;
5040Sstevel@tonic-gate 	mmu.level_shift[3] = 39;
5050Sstevel@tonic-gate 
5060Sstevel@tonic-gate #elif defined(__i386)
5070Sstevel@tonic-gate 
5080Sstevel@tonic-gate 	if (mmu.pae_hat) {
5090Sstevel@tonic-gate 		mmu.num_level = 3;
5100Sstevel@tonic-gate 		mmu.max_level = 2;
5110Sstevel@tonic-gate 		mmu.ptes_per_table = 512;
5120Sstevel@tonic-gate 		mmu.top_level_count = 4;
5130Sstevel@tonic-gate 
5140Sstevel@tonic-gate 		mmu.level_shift[0] = 12;
5150Sstevel@tonic-gate 		mmu.level_shift[1] = 21;
5160Sstevel@tonic-gate 		mmu.level_shift[2] = 30;
5170Sstevel@tonic-gate 
5180Sstevel@tonic-gate 	} else {
5190Sstevel@tonic-gate 		mmu.num_level = 2;
5200Sstevel@tonic-gate 		mmu.max_level = 1;
5210Sstevel@tonic-gate 		mmu.ptes_per_table = 1024;
5220Sstevel@tonic-gate 		mmu.top_level_count = 1024;
5230Sstevel@tonic-gate 
5240Sstevel@tonic-gate 		mmu.level_shift[0] = 12;
5250Sstevel@tonic-gate 		mmu.level_shift[1] = 22;
5260Sstevel@tonic-gate 	}
5270Sstevel@tonic-gate 
5280Sstevel@tonic-gate #endif	/* __i386 */
5290Sstevel@tonic-gate 
5300Sstevel@tonic-gate 	for (i = 0; i < mmu.num_level; ++i) {
5310Sstevel@tonic-gate 		mmu.level_size[i] = 1UL << mmu.level_shift[i];
5320Sstevel@tonic-gate 		mmu.level_offset[i] = mmu.level_size[i] - 1;
5330Sstevel@tonic-gate 		mmu.level_mask[i] = ~mmu.level_offset[i];
5340Sstevel@tonic-gate 	}
5350Sstevel@tonic-gate 
5363446Smrj 	for (i = 0; i <= mmu.max_page_level; ++i) {
5373446Smrj 		mmu.pte_bits[i] = PT_VALID;
5383446Smrj 		if (i > 0)
5393446Smrj 			mmu.pte_bits[i] |= PT_PAGESIZE;
5403446Smrj 	}
5410Sstevel@tonic-gate 
5420Sstevel@tonic-gate 	/*
5430Sstevel@tonic-gate 	 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
5440Sstevel@tonic-gate 	 */
5450Sstevel@tonic-gate 	for (i = 1; i < mmu.num_level; ++i)
5460Sstevel@tonic-gate 		mmu.ptp_bits[i] = PT_PTPBITS;
5473446Smrj 
5480Sstevel@tonic-gate #if defined(__i386)
5490Sstevel@tonic-gate 	mmu.ptp_bits[2] = PT_VALID;
5500Sstevel@tonic-gate #endif
5510Sstevel@tonic-gate 
5520Sstevel@tonic-gate 	/*
5530Sstevel@tonic-gate 	 * Compute how many hash table entries to have per process for htables.
5540Sstevel@tonic-gate 	 * We start with 1 page's worth of entries.
5550Sstevel@tonic-gate 	 *
5560Sstevel@tonic-gate 	 * If physical memory is small, reduce the amount need to cover it.
5570Sstevel@tonic-gate 	 */
5580Sstevel@tonic-gate 	max_htables = physmax / mmu.ptes_per_table;
5590Sstevel@tonic-gate 	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
5600Sstevel@tonic-gate 	while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
5610Sstevel@tonic-gate 		mmu.hash_cnt >>= 1;
5620Sstevel@tonic-gate 	mmu.vlp_hash_cnt = mmu.hash_cnt;
5630Sstevel@tonic-gate 
5640Sstevel@tonic-gate #if defined(__amd64)
5650Sstevel@tonic-gate 	/*
5660Sstevel@tonic-gate 	 * If running in 64 bits and physical memory is large,
5670Sstevel@tonic-gate 	 * increase the size of the cache to cover all of memory for
5680Sstevel@tonic-gate 	 * a 64 bit process.
5690Sstevel@tonic-gate 	 */
5700Sstevel@tonic-gate #define	HASH_MAX_LENGTH 4
5710Sstevel@tonic-gate 	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
5720Sstevel@tonic-gate 		mmu.hash_cnt <<= 1;
5730Sstevel@tonic-gate #endif
5740Sstevel@tonic-gate }
5750Sstevel@tonic-gate 
5760Sstevel@tonic-gate 
5770Sstevel@tonic-gate /*
5780Sstevel@tonic-gate  * initialize hat data structures
5790Sstevel@tonic-gate  */
5800Sstevel@tonic-gate void
5810Sstevel@tonic-gate hat_init()
5820Sstevel@tonic-gate {
5830Sstevel@tonic-gate #if defined(__i386)
5840Sstevel@tonic-gate 	/*
5850Sstevel@tonic-gate 	 * _userlimit must be aligned correctly
5860Sstevel@tonic-gate 	 */
5870Sstevel@tonic-gate 	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
5880Sstevel@tonic-gate 		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
5890Sstevel@tonic-gate 		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
5900Sstevel@tonic-gate 		halt("hat_init(): Unable to continue");
5910Sstevel@tonic-gate 	}
5920Sstevel@tonic-gate #endif
5930Sstevel@tonic-gate 
5940Sstevel@tonic-gate 	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
5950Sstevel@tonic-gate 
5960Sstevel@tonic-gate 	/*
5970Sstevel@tonic-gate 	 * initialize kmem caches
5980Sstevel@tonic-gate 	 */
5990Sstevel@tonic-gate 	htable_init();
6000Sstevel@tonic-gate 	hment_init();
6010Sstevel@tonic-gate 
6020Sstevel@tonic-gate 	hat_cache = kmem_cache_create("hat_t",
6030Sstevel@tonic-gate 	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
6040Sstevel@tonic-gate 	    NULL, 0, 0);
6050Sstevel@tonic-gate 
6060Sstevel@tonic-gate 	hat_hash_cache = kmem_cache_create("HatHash",
6070Sstevel@tonic-gate 	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
6080Sstevel@tonic-gate 	    NULL, 0, 0);
6090Sstevel@tonic-gate 
6100Sstevel@tonic-gate 	/*
6110Sstevel@tonic-gate 	 * VLP hats can use a smaller hash table size on large memroy machines
6120Sstevel@tonic-gate 	 */
6130Sstevel@tonic-gate 	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
6140Sstevel@tonic-gate 		vlp_hash_cache = hat_hash_cache;
6150Sstevel@tonic-gate 	} else {
6160Sstevel@tonic-gate 		vlp_hash_cache = kmem_cache_create("HatVlpHash",
6170Sstevel@tonic-gate 		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
6180Sstevel@tonic-gate 		    NULL, 0, 0);
6190Sstevel@tonic-gate 	}
6200Sstevel@tonic-gate 
6210Sstevel@tonic-gate 	/*
6220Sstevel@tonic-gate 	 * Set up the kernel's hat
6230Sstevel@tonic-gate 	 */
6240Sstevel@tonic-gate 	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
6250Sstevel@tonic-gate 	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
6260Sstevel@tonic-gate 	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
6270Sstevel@tonic-gate 	kas.a_hat->hat_as = &kas;
6280Sstevel@tonic-gate 	kas.a_hat->hat_flags = 0;
6290Sstevel@tonic-gate 	AS_LOCK_EXIT(&kas, &kas.a_lock);
6300Sstevel@tonic-gate 
6310Sstevel@tonic-gate 	CPUSET_ZERO(khat_cpuset);
6320Sstevel@tonic-gate 	CPUSET_ADD(khat_cpuset, CPU->cpu_id);
6330Sstevel@tonic-gate 
6340Sstevel@tonic-gate 	/*
6350Sstevel@tonic-gate 	 * The kernel hat's next pointer serves as the head of the hat list .
6361747Sjosephb 	 * The kernel hat's prev pointer tracks the last hat on the list for
6371747Sjosephb 	 * htable_steal() to use.
6380Sstevel@tonic-gate 	 */
6390Sstevel@tonic-gate 	kas.a_hat->hat_next = NULL;
6401747Sjosephb 	kas.a_hat->hat_prev = NULL;
6410Sstevel@tonic-gate 
6420Sstevel@tonic-gate 	/*
6430Sstevel@tonic-gate 	 * Allocate an htable hash bucket for the kernel
6440Sstevel@tonic-gate 	 * XX64 - tune for 64 bit procs
6450Sstevel@tonic-gate 	 */
6460Sstevel@tonic-gate 	kas.a_hat->hat_num_hash = mmu.hash_cnt;
6470Sstevel@tonic-gate 	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
6480Sstevel@tonic-gate 	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
6490Sstevel@tonic-gate 
6500Sstevel@tonic-gate 	/*
6510Sstevel@tonic-gate 	 * zero out the top level and cached htable pointers
6520Sstevel@tonic-gate 	 */
6530Sstevel@tonic-gate 	kas.a_hat->hat_ht_cached = NULL;
6540Sstevel@tonic-gate 	kas.a_hat->hat_htable = NULL;
6553258Strevtom 
6563258Strevtom 	/*
6573258Strevtom 	 * Pre-allocate hrm_hashtab before enabling the collection of
6583258Strevtom 	 * refmod statistics.  Allocating on the fly would mean us
6593258Strevtom 	 * running the risk of suffering recursive mutex enters or
6603258Strevtom 	 * deadlocks.
6613258Strevtom 	 */
6623258Strevtom 	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
6633258Strevtom 	    KM_SLEEP);
6640Sstevel@tonic-gate }
6650Sstevel@tonic-gate 
6660Sstevel@tonic-gate /*
6670Sstevel@tonic-gate  * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
6680Sstevel@tonic-gate  *
6690Sstevel@tonic-gate  * Each CPU has a set of 2 pagetables that are reused for any 32 bit
6700Sstevel@tonic-gate  * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
6710Sstevel@tonic-gate  * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
6720Sstevel@tonic-gate  */
6730Sstevel@tonic-gate /*ARGSUSED*/
6740Sstevel@tonic-gate static void
6750Sstevel@tonic-gate hat_vlp_setup(struct cpu *cpu)
6760Sstevel@tonic-gate {
6770Sstevel@tonic-gate #if defined(__amd64)
6780Sstevel@tonic-gate 	struct hat_cpu_info *hci = cpu->cpu_hat_info;
6790Sstevel@tonic-gate 	pfn_t pfn;
6800Sstevel@tonic-gate 
6810Sstevel@tonic-gate 	/*
6820Sstevel@tonic-gate 	 * allocate the level==2 page table for the bottom most
6830Sstevel@tonic-gate 	 * 512Gig of address space (this is where 32 bit apps live)
6840Sstevel@tonic-gate 	 */
6850Sstevel@tonic-gate 	ASSERT(hci != NULL);
6860Sstevel@tonic-gate 	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
6870Sstevel@tonic-gate 
6880Sstevel@tonic-gate 	/*
6890Sstevel@tonic-gate 	 * Allocate a top level pagetable and copy the kernel's
6900Sstevel@tonic-gate 	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
6910Sstevel@tonic-gate 	 */
6920Sstevel@tonic-gate 	hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
6930Sstevel@tonic-gate 	hci->hci_vlp_pfn =
6940Sstevel@tonic-gate 	    hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
6950Sstevel@tonic-gate 	ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
6960Sstevel@tonic-gate 	bcopy(vlp_page + khat_start, hci->hci_vlp_l3ptes + khat_start,
6970Sstevel@tonic-gate 	    khat_entries * sizeof (x86pte_t));
6980Sstevel@tonic-gate 
6990Sstevel@tonic-gate 	pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
7000Sstevel@tonic-gate 	ASSERT(pfn != PFN_INVALID);
7010Sstevel@tonic-gate 	hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
7020Sstevel@tonic-gate #endif /* __amd64 */
7030Sstevel@tonic-gate }
7040Sstevel@tonic-gate 
7053446Smrj /*ARGSUSED*/
7063446Smrj static void
7073446Smrj hat_vlp_teardown(cpu_t *cpu)
7083446Smrj {
7093446Smrj #if defined(__amd64)
7103446Smrj 	struct hat_cpu_info *hci;
7113446Smrj 
7123446Smrj 	if ((hci = cpu->cpu_hat_info) == NULL)
7133446Smrj 		return;
7143446Smrj 	if (hci->hci_vlp_l2ptes)
7153446Smrj 		kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
7163446Smrj 	if (hci->hci_vlp_l3ptes)
7173446Smrj 		kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
7183446Smrj #endif	/* __amd64 */
7193446Smrj }
7203446Smrj 
7210Sstevel@tonic-gate /*
7220Sstevel@tonic-gate  * Finish filling in the kernel hat.
7230Sstevel@tonic-gate  * Pre fill in all top level kernel page table entries for the kernel's
7240Sstevel@tonic-gate  * part of the address range.  From this point on we can't use any new
7250Sstevel@tonic-gate  * kernel large pages if they need PTE's at max_level
7263446Smrj  *
7273446Smrj  * create the kmap mappings.
7280Sstevel@tonic-gate  */
7290Sstevel@tonic-gate void
7300Sstevel@tonic-gate hat_init_finish(void)
7310Sstevel@tonic-gate {
7320Sstevel@tonic-gate 	htable_t	*top = kas.a_hat->hat_htable;
7330Sstevel@tonic-gate 	htable_t	*ht;
7340Sstevel@tonic-gate 	uint_t		e;
7350Sstevel@tonic-gate 	x86pte_t	pte;
7360Sstevel@tonic-gate 	uintptr_t	va = kernelbase;
7373446Smrj 	size_t		size;
7380Sstevel@tonic-gate 
7390Sstevel@tonic-gate 
7400Sstevel@tonic-gate #if defined(__i386)
7410Sstevel@tonic-gate 	ASSERT((va & LEVEL_MASK(1)) == va);
7420Sstevel@tonic-gate 
7430Sstevel@tonic-gate 	/*
7440Sstevel@tonic-gate 	 * Deal with kernelbase not 1Gig aligned for 32 bit PAE hats.
7450Sstevel@tonic-gate 	 */
7460Sstevel@tonic-gate 	if (!mmu.pae_hat || (va & LEVEL_OFFSET(mmu.max_level)) == 0) {
7470Sstevel@tonic-gate 		khat_pae32_htable = NULL;
7480Sstevel@tonic-gate 	} else {
7490Sstevel@tonic-gate 		ASSERT(mmu.max_level == 2);
7500Sstevel@tonic-gate 		ASSERT((va & LEVEL_OFFSET(mmu.max_level - 1)) == 0);
7510Sstevel@tonic-gate 		khat_pae32_htable =
7520Sstevel@tonic-gate 		    htable_create(kas.a_hat, va, mmu.max_level - 1, NULL);
7530Sstevel@tonic-gate 		khat_pae32_start = htable_va2entry(va, khat_pae32_htable);
7540Sstevel@tonic-gate 		khat_pae32_entries = mmu.ptes_per_table - khat_pae32_start;
7550Sstevel@tonic-gate 		for (e = khat_pae32_start; e < mmu.ptes_per_table;
7560Sstevel@tonic-gate 		    ++e, va += LEVEL_SIZE(mmu.max_level - 1)) {
7570Sstevel@tonic-gate 			pte = x86pte_get(khat_pae32_htable, e);
7580Sstevel@tonic-gate 			if (PTE_ISVALID(pte))
7590Sstevel@tonic-gate 				continue;
7600Sstevel@tonic-gate 			ht = htable_create(kas.a_hat, va, mmu.max_level - 2,
7610Sstevel@tonic-gate 			    NULL);
7620Sstevel@tonic-gate 			ASSERT(ht != NULL);
7630Sstevel@tonic-gate 		}
7640Sstevel@tonic-gate 	}
7650Sstevel@tonic-gate #endif
7660Sstevel@tonic-gate 
7670Sstevel@tonic-gate 	/*
7680Sstevel@tonic-gate 	 * The kernel hat will need fixed values in the highest level
7690Sstevel@tonic-gate 	 * ptable for copying to all other hat's. This implies
7700Sstevel@tonic-gate 	 * alignment restrictions on _userlimit.
7710Sstevel@tonic-gate 	 *
7720Sstevel@tonic-gate 	 * Note we don't htable_release() these htables. This keeps them
7730Sstevel@tonic-gate 	 * from ever being stolen or free'd.
7740Sstevel@tonic-gate 	 *
7750Sstevel@tonic-gate 	 * top_level_count is used instead of ptes_per_table, since
7760Sstevel@tonic-gate 	 * on 32-bit PAE we only have 4 usable entries at the top level ptable.
7770Sstevel@tonic-gate 	 */
7780Sstevel@tonic-gate 	if (va == 0)
7790Sstevel@tonic-gate 		khat_start = mmu.top_level_count;
7800Sstevel@tonic-gate 	else
7810Sstevel@tonic-gate 		khat_start = htable_va2entry(va, kas.a_hat->hat_htable);
7820Sstevel@tonic-gate 	khat_entries = mmu.top_level_count - khat_start;
7830Sstevel@tonic-gate 	for (e = khat_start; e < mmu.top_level_count;
7840Sstevel@tonic-gate 	    ++e, va += LEVEL_SIZE(mmu.max_level)) {
7853446Smrj 		if (IN_HYPERVISOR_VA(va))
7863446Smrj 			continue;
7870Sstevel@tonic-gate 		pte = x86pte_get(top, e);
7880Sstevel@tonic-gate 		if (PTE_ISVALID(pte))
7890Sstevel@tonic-gate 			continue;
7900Sstevel@tonic-gate 		ht = htable_create(kas.a_hat, va, mmu.max_level - 1, NULL);
7910Sstevel@tonic-gate 		ASSERT(ht != NULL);
7920Sstevel@tonic-gate 	}
7930Sstevel@tonic-gate 
7940Sstevel@tonic-gate 	/*
7950Sstevel@tonic-gate 	 * We are now effectively running on the kernel hat.
7960Sstevel@tonic-gate 	 * Clearing use_boot_reserve shuts off using the pre-allocated boot
7970Sstevel@tonic-gate 	 * reserve for all HAT allocations.  From here on, the reserves are
7980Sstevel@tonic-gate 	 * only used when mapping in memory for the hat's own allocations.
7990Sstevel@tonic-gate 	 */
8000Sstevel@tonic-gate 	use_boot_reserve = 0;
8010Sstevel@tonic-gate 	htable_adjust_reserve();
8020Sstevel@tonic-gate 
8030Sstevel@tonic-gate 	/*
8040Sstevel@tonic-gate 	 * 32 bit kernels use only 4 of the 512 entries in its top level
8050Sstevel@tonic-gate 	 * pagetable. We'll use the remainder for the "per CPU" page tables
8060Sstevel@tonic-gate 	 * for VLP processes.
8070Sstevel@tonic-gate 	 *
8083446Smrj 	 * We also map the top level kernel pagetable into the kernel to make
8093446Smrj 	 * it easy to use bcopy to initialize new address spaces.
8100Sstevel@tonic-gate 	 */
8110Sstevel@tonic-gate 	if (mmu.pae_hat) {
8120Sstevel@tonic-gate 		vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
8130Sstevel@tonic-gate 		hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
8140Sstevel@tonic-gate 		    kas.a_hat->hat_htable->ht_pfn,
8153446Smrj 		    PROT_WRITE |
8163446Smrj 		    PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
8170Sstevel@tonic-gate 		    HAT_LOAD | HAT_LOAD_NOCONSIST);
8180Sstevel@tonic-gate 	}
8190Sstevel@tonic-gate 	hat_vlp_setup(CPU);
8203446Smrj 
8213446Smrj 	/*
8223446Smrj 	 * Create kmap (cached mappings of kernel PTEs)
8233446Smrj 	 * for 32 bit we map from segmap_start .. ekernelheap
8243446Smrj 	 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
8253446Smrj 	 */
8263446Smrj #if defined(__i386)
8273446Smrj 	size = (uintptr_t)ekernelheap - segmap_start;
8283446Smrj #elif defined(__amd64)
8293446Smrj 	size = segmapsize;
8303446Smrj #endif
8313446Smrj 	hat_kmap_init((uintptr_t)segmap_start, size);
8320Sstevel@tonic-gate }
8330Sstevel@tonic-gate 
8340Sstevel@tonic-gate /*
8350Sstevel@tonic-gate  * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
8360Sstevel@tonic-gate  * are 32 bit, so for safety we must use cas64() to install these.
8370Sstevel@tonic-gate  */
8380Sstevel@tonic-gate #ifdef __i386
8390Sstevel@tonic-gate static void
8400Sstevel@tonic-gate reload_pae32(hat_t *hat, cpu_t *cpu)
8410Sstevel@tonic-gate {
8420Sstevel@tonic-gate 	x86pte_t *src;
8430Sstevel@tonic-gate 	x86pte_t *dest;
8440Sstevel@tonic-gate 	x86pte_t pte;
8450Sstevel@tonic-gate 	int i;
8460Sstevel@tonic-gate 
8470Sstevel@tonic-gate 	/*
8480Sstevel@tonic-gate 	 * Load the 4 entries of the level 2 page table into this
8490Sstevel@tonic-gate 	 * cpu's range of the vlp_page and point cr3 at them.
8500Sstevel@tonic-gate 	 */
8510Sstevel@tonic-gate 	ASSERT(mmu.pae_hat);
8520Sstevel@tonic-gate 	src = hat->hat_vlp_ptes;
8530Sstevel@tonic-gate 	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
8540Sstevel@tonic-gate 	for (i = 0; i < VLP_NUM_PTES; ++i) {
8550Sstevel@tonic-gate 		for (;;) {
8560Sstevel@tonic-gate 			pte = dest[i];
8570Sstevel@tonic-gate 			if (pte == src[i])
8580Sstevel@tonic-gate 				break;
8590Sstevel@tonic-gate 			if (cas64(dest + i, pte, src[i]) != src[i])
8600Sstevel@tonic-gate 				break;
8610Sstevel@tonic-gate 		}
8620Sstevel@tonic-gate 	}
8630Sstevel@tonic-gate }
8640Sstevel@tonic-gate #endif
8650Sstevel@tonic-gate 
8660Sstevel@tonic-gate /*
8670Sstevel@tonic-gate  * Switch to a new active hat, maintaining bit masks to track active CPUs.
8680Sstevel@tonic-gate  */
8690Sstevel@tonic-gate void
8700Sstevel@tonic-gate hat_switch(hat_t *hat)
8710Sstevel@tonic-gate {
8720Sstevel@tonic-gate 	uintptr_t	newcr3;
8730Sstevel@tonic-gate 	cpu_t		*cpu = CPU;
8740Sstevel@tonic-gate 	hat_t		*old = cpu->cpu_current_hat;
8750Sstevel@tonic-gate 
8760Sstevel@tonic-gate 	/*
8770Sstevel@tonic-gate 	 * set up this information first, so we don't miss any cross calls
8780Sstevel@tonic-gate 	 */
8790Sstevel@tonic-gate 	if (old != NULL) {
8800Sstevel@tonic-gate 		if (old == hat)
8810Sstevel@tonic-gate 			return;
8820Sstevel@tonic-gate 		if (old != kas.a_hat)
8830Sstevel@tonic-gate 			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
8840Sstevel@tonic-gate 	}
8850Sstevel@tonic-gate 
8860Sstevel@tonic-gate 	/*
8874191Sjosephb 	 * Add this CPU to the active set for this HAT.
8880Sstevel@tonic-gate 	 */
8890Sstevel@tonic-gate 	if (hat != kas.a_hat) {
8900Sstevel@tonic-gate 		CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
8910Sstevel@tonic-gate 	}
8920Sstevel@tonic-gate 	cpu->cpu_current_hat = hat;
8930Sstevel@tonic-gate 
8940Sstevel@tonic-gate 	/*
8950Sstevel@tonic-gate 	 * now go ahead and load cr3
8960Sstevel@tonic-gate 	 */
8970Sstevel@tonic-gate 	if (hat->hat_flags & HAT_VLP) {
8980Sstevel@tonic-gate #if defined(__amd64)
8990Sstevel@tonic-gate 		x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
9000Sstevel@tonic-gate 
9010Sstevel@tonic-gate 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
9020Sstevel@tonic-gate 		newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
9030Sstevel@tonic-gate #elif defined(__i386)
9040Sstevel@tonic-gate 		reload_pae32(hat, cpu);
9050Sstevel@tonic-gate 		newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
9060Sstevel@tonic-gate 		    (cpu->cpu_id + 1) * VLP_SIZE;
9070Sstevel@tonic-gate #endif
9080Sstevel@tonic-gate 	} else {
9090Sstevel@tonic-gate 		newcr3 = MAKECR3(hat->hat_htable->ht_pfn);
9100Sstevel@tonic-gate 	}
9110Sstevel@tonic-gate 	setcr3(newcr3);
9120Sstevel@tonic-gate 	ASSERT(cpu == CPU);
9130Sstevel@tonic-gate }
9140Sstevel@tonic-gate 
9150Sstevel@tonic-gate /*
9160Sstevel@tonic-gate  * Utility to return a valid x86pte_t from protections, pfn, and level number
9170Sstevel@tonic-gate  */
9180Sstevel@tonic-gate static x86pte_t
9190Sstevel@tonic-gate hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
9200Sstevel@tonic-gate {
9210Sstevel@tonic-gate 	x86pte_t	pte;
9220Sstevel@tonic-gate 	uint_t		cache_attr = attr & HAT_ORDER_MASK;
9230Sstevel@tonic-gate 
9240Sstevel@tonic-gate 	pte = MAKEPTE(pfn, level);
9250Sstevel@tonic-gate 
9260Sstevel@tonic-gate 	if (attr & PROT_WRITE)
9270Sstevel@tonic-gate 		PTE_SET(pte, PT_WRITABLE);
9280Sstevel@tonic-gate 
9290Sstevel@tonic-gate 	if (attr & PROT_USER)
9300Sstevel@tonic-gate 		PTE_SET(pte, PT_USER);
9310Sstevel@tonic-gate 
9320Sstevel@tonic-gate 	if (!(attr & PROT_EXEC))
9330Sstevel@tonic-gate 		PTE_SET(pte, mmu.pt_nx);
9340Sstevel@tonic-gate 
9350Sstevel@tonic-gate 	/*
9363446Smrj 	 * Set the software bits used track ref/mod sync's and hments.
9373446Smrj 	 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
9380Sstevel@tonic-gate 	 */
9390Sstevel@tonic-gate 	if (flags & HAT_LOAD_NOCONSIST)
9403446Smrj 		PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
9413446Smrj 	else if (attr & HAT_NOSYNC)
9423446Smrj 		PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);
9430Sstevel@tonic-gate 
9440Sstevel@tonic-gate 	/*
9450Sstevel@tonic-gate 	 * Set the caching attributes in the PTE. The combination
9460Sstevel@tonic-gate 	 * of attributes are poorly defined, so we pay attention
9470Sstevel@tonic-gate 	 * to them in the given order.
9480Sstevel@tonic-gate 	 *
9490Sstevel@tonic-gate 	 * The test for HAT_STRICTORDER is different because it's defined
9500Sstevel@tonic-gate 	 * as "0" - which was a stupid thing to do, but is too late to change!
9510Sstevel@tonic-gate 	 */
9520Sstevel@tonic-gate 	if (cache_attr == HAT_STRICTORDER) {
9530Sstevel@tonic-gate 		PTE_SET(pte, PT_NOCACHE);
9540Sstevel@tonic-gate 	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
9550Sstevel@tonic-gate 	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
9560Sstevel@tonic-gate 		/* nothing to set */;
9570Sstevel@tonic-gate 	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
9580Sstevel@tonic-gate 		PTE_SET(pte, PT_NOCACHE);
9590Sstevel@tonic-gate 		if (x86_feature & X86_PAT)
9600Sstevel@tonic-gate 			PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
9610Sstevel@tonic-gate 		else
9620Sstevel@tonic-gate 			PTE_SET(pte, PT_WRITETHRU);
9630Sstevel@tonic-gate 	} else {
9640Sstevel@tonic-gate 		panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
9650Sstevel@tonic-gate 	}
9660Sstevel@tonic-gate 
9670Sstevel@tonic-gate 	return (pte);
9680Sstevel@tonic-gate }
9690Sstevel@tonic-gate 
9700Sstevel@tonic-gate /*
9710Sstevel@tonic-gate  * Duplicate address translations of the parent to the child.
9720Sstevel@tonic-gate  * This function really isn't used anymore.
9730Sstevel@tonic-gate  */
9740Sstevel@tonic-gate /*ARGSUSED*/
9750Sstevel@tonic-gate int
9760Sstevel@tonic-gate hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
9770Sstevel@tonic-gate {
9780Sstevel@tonic-gate 	ASSERT((uintptr_t)addr < kernelbase);
9790Sstevel@tonic-gate 	ASSERT(new != kas.a_hat);
9800Sstevel@tonic-gate 	ASSERT(old != kas.a_hat);
9810Sstevel@tonic-gate 	return (0);
9820Sstevel@tonic-gate }
9830Sstevel@tonic-gate 
9840Sstevel@tonic-gate /*
9850Sstevel@tonic-gate  * Allocate any hat resources required for a process being swapped in.
9860Sstevel@tonic-gate  */
9870Sstevel@tonic-gate /*ARGSUSED*/
9880Sstevel@tonic-gate void
9890Sstevel@tonic-gate hat_swapin(hat_t *hat)
9900Sstevel@tonic-gate {
9910Sstevel@tonic-gate 	/* do nothing - we let everything fault back in */
9920Sstevel@tonic-gate }
9930Sstevel@tonic-gate 
9940Sstevel@tonic-gate /*
9950Sstevel@tonic-gate  * Unload all translations associated with an address space of a process
9960Sstevel@tonic-gate  * that is being swapped out.
9970Sstevel@tonic-gate  */
9980Sstevel@tonic-gate void
9990Sstevel@tonic-gate hat_swapout(hat_t *hat)
10000Sstevel@tonic-gate {
10010Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)0;
10020Sstevel@tonic-gate 	uintptr_t	eaddr = _userlimit;
10030Sstevel@tonic-gate 	htable_t	*ht = NULL;
10040Sstevel@tonic-gate 	level_t		l;
10050Sstevel@tonic-gate 
10060Sstevel@tonic-gate 	/*
10070Sstevel@tonic-gate 	 * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
10080Sstevel@tonic-gate 	 * seg_spt and shared pagetables can't be swapped out.
10090Sstevel@tonic-gate 	 * Take a look at segspt_shmswapout() - it's a big no-op.
10100Sstevel@tonic-gate 	 *
10110Sstevel@tonic-gate 	 * Instead we'll walk through all the address space and unload
10120Sstevel@tonic-gate 	 * any mappings which we are sure are not shared, not locked.
10130Sstevel@tonic-gate 	 */
10140Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
10150Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
10160Sstevel@tonic-gate 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
10170Sstevel@tonic-gate 	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
10180Sstevel@tonic-gate 		eaddr = (uintptr_t)hat->hat_as->a_userlimit;
10190Sstevel@tonic-gate 
10200Sstevel@tonic-gate 	while (vaddr < eaddr) {
10210Sstevel@tonic-gate 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
10220Sstevel@tonic-gate 		if (ht == NULL)
10230Sstevel@tonic-gate 			break;
10240Sstevel@tonic-gate 
10250Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
10260Sstevel@tonic-gate 
10270Sstevel@tonic-gate 		/*
10280Sstevel@tonic-gate 		 * If the page table is shared skip its entire range.
10290Sstevel@tonic-gate 		 * This code knows that only level 0 page tables are shared
10300Sstevel@tonic-gate 		 */
10310Sstevel@tonic-gate 		l = ht->ht_level;
10320Sstevel@tonic-gate 		if (ht->ht_flags & HTABLE_SHARED_PFN) {
10330Sstevel@tonic-gate 			ASSERT(l == 0);
10340Sstevel@tonic-gate 			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
10350Sstevel@tonic-gate 			htable_release(ht);
10360Sstevel@tonic-gate 			ht = NULL;
10370Sstevel@tonic-gate 			continue;
10380Sstevel@tonic-gate 		}
10390Sstevel@tonic-gate 
10400Sstevel@tonic-gate 		/*
10410Sstevel@tonic-gate 		 * If the page table has no locked entries, unload this one.
10420Sstevel@tonic-gate 		 */
10430Sstevel@tonic-gate 		if (ht->ht_lock_cnt == 0)
10440Sstevel@tonic-gate 			hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
10450Sstevel@tonic-gate 			    HAT_UNLOAD_UNMAP);
10460Sstevel@tonic-gate 
10470Sstevel@tonic-gate 		/*
10480Sstevel@tonic-gate 		 * If we have a level 0 page table with locked entries,
10490Sstevel@tonic-gate 		 * skip the entire page table, otherwise skip just one entry.
10500Sstevel@tonic-gate 		 */
10510Sstevel@tonic-gate 		if (ht->ht_lock_cnt > 0 && l == 0)
10520Sstevel@tonic-gate 			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
10530Sstevel@tonic-gate 		else
10540Sstevel@tonic-gate 			vaddr += LEVEL_SIZE(l);
10550Sstevel@tonic-gate 	}
10560Sstevel@tonic-gate 	if (ht)
10570Sstevel@tonic-gate 		htable_release(ht);
10580Sstevel@tonic-gate 
10590Sstevel@tonic-gate 	/*
10600Sstevel@tonic-gate 	 * We're in swapout because the system is low on memory, so
10610Sstevel@tonic-gate 	 * go back and flush all the htables off the cached list.
10620Sstevel@tonic-gate 	 */
10630Sstevel@tonic-gate 	htable_purge_hat(hat);
10640Sstevel@tonic-gate }
10650Sstevel@tonic-gate 
10660Sstevel@tonic-gate /*
10670Sstevel@tonic-gate  * returns number of bytes that have valid mappings in hat.
10680Sstevel@tonic-gate  */
10690Sstevel@tonic-gate size_t
10700Sstevel@tonic-gate hat_get_mapped_size(hat_t *hat)
10710Sstevel@tonic-gate {
10720Sstevel@tonic-gate 	size_t total = 0;
10730Sstevel@tonic-gate 	int l;
10740Sstevel@tonic-gate 
10750Sstevel@tonic-gate 	for (l = 0; l <= mmu.max_page_level; l++)
10760Sstevel@tonic-gate 		total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
10774381Sjosephb 	total += hat->hat_ism_pgcnt;
10780Sstevel@tonic-gate 
10790Sstevel@tonic-gate 	return (total);
10800Sstevel@tonic-gate }
10810Sstevel@tonic-gate 
10820Sstevel@tonic-gate /*
10830Sstevel@tonic-gate  * enable/disable collection of stats for hat.
10840Sstevel@tonic-gate  */
10850Sstevel@tonic-gate int
10860Sstevel@tonic-gate hat_stats_enable(hat_t *hat)
10870Sstevel@tonic-gate {
10880Sstevel@tonic-gate 	atomic_add_32(&hat->hat_stats, 1);
10890Sstevel@tonic-gate 	return (1);
10900Sstevel@tonic-gate }
10910Sstevel@tonic-gate 
10920Sstevel@tonic-gate void
10930Sstevel@tonic-gate hat_stats_disable(hat_t *hat)
10940Sstevel@tonic-gate {
10950Sstevel@tonic-gate 	atomic_add_32(&hat->hat_stats, -1);
10960Sstevel@tonic-gate }
10970Sstevel@tonic-gate 
10980Sstevel@tonic-gate /*
10990Sstevel@tonic-gate  * Utility to sync the ref/mod bits from a page table entry to the page_t
11000Sstevel@tonic-gate  * We must be holding the mapping list lock when this is called.
11010Sstevel@tonic-gate  */
11020Sstevel@tonic-gate static void
11030Sstevel@tonic-gate hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
11040Sstevel@tonic-gate {
11050Sstevel@tonic-gate 	uint_t	rm = 0;
11060Sstevel@tonic-gate 	pgcnt_t	pgcnt;
11070Sstevel@tonic-gate 
11083446Smrj 	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
11090Sstevel@tonic-gate 		return;
11100Sstevel@tonic-gate 
11110Sstevel@tonic-gate 	if (PTE_GET(pte, PT_REF))
11120Sstevel@tonic-gate 		rm |= P_REF;
11130Sstevel@tonic-gate 
11140Sstevel@tonic-gate 	if (PTE_GET(pte, PT_MOD))
11150Sstevel@tonic-gate 		rm |= P_MOD;
11160Sstevel@tonic-gate 
11170Sstevel@tonic-gate 	if (rm == 0)
11180Sstevel@tonic-gate 		return;
11190Sstevel@tonic-gate 
11200Sstevel@tonic-gate 	/*
11210Sstevel@tonic-gate 	 * sync to all constituent pages of a large page
11220Sstevel@tonic-gate 	 */
11230Sstevel@tonic-gate 	ASSERT(x86_hm_held(pp));
11240Sstevel@tonic-gate 	pgcnt = page_get_pagecnt(level);
11250Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
11260Sstevel@tonic-gate 	for (; pgcnt > 0; --pgcnt) {
11270Sstevel@tonic-gate 		/*
11280Sstevel@tonic-gate 		 * hat_page_demote() can't decrease
11290Sstevel@tonic-gate 		 * pszc below this mapping size
11300Sstevel@tonic-gate 		 * since this large mapping existed after we
11310Sstevel@tonic-gate 		 * took mlist lock.
11320Sstevel@tonic-gate 		 */
11330Sstevel@tonic-gate 		ASSERT(pp->p_szc >= level);
11340Sstevel@tonic-gate 		hat_page_setattr(pp, rm);
11350Sstevel@tonic-gate 		++pp;
11360Sstevel@tonic-gate 	}
11370Sstevel@tonic-gate }
11380Sstevel@tonic-gate 
11390Sstevel@tonic-gate /*
11400Sstevel@tonic-gate  * This the set of PTE bits for PFN, permissions and caching
11413446Smrj  * that require a TLB flush (hat_tlb_inval) if changed on a HAT_LOAD_REMAP
11420Sstevel@tonic-gate  */
11430Sstevel@tonic-gate #define	PT_REMAP_BITS							\
11440Sstevel@tonic-gate 	(PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |		\
11450Sstevel@tonic-gate 	PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE)
11460Sstevel@tonic-gate 
1147510Skchow #define	REMAPASSERT(EX)	if (!(EX)) panic("hati_pte_map: " #EX)
11480Sstevel@tonic-gate /*
11490Sstevel@tonic-gate  * Do the low-level work to get a mapping entered into a HAT's pagetables
11500Sstevel@tonic-gate  * and in the mapping list of the associated page_t.
11510Sstevel@tonic-gate  */
11523446Smrj static int
11530Sstevel@tonic-gate hati_pte_map(
11540Sstevel@tonic-gate 	htable_t	*ht,
11550Sstevel@tonic-gate 	uint_t		entry,
11560Sstevel@tonic-gate 	page_t		*pp,
11570Sstevel@tonic-gate 	x86pte_t	pte,
11580Sstevel@tonic-gate 	int		flags,
11590Sstevel@tonic-gate 	void		*pte_ptr)
11600Sstevel@tonic-gate {
11610Sstevel@tonic-gate 	hat_t		*hat = ht->ht_hat;
11620Sstevel@tonic-gate 	x86pte_t	old_pte;
11630Sstevel@tonic-gate 	level_t		l = ht->ht_level;
11640Sstevel@tonic-gate 	hment_t		*hm;
11650Sstevel@tonic-gate 	uint_t		is_consist;
11663446Smrj 	int		rv = 0;
11670Sstevel@tonic-gate 
11680Sstevel@tonic-gate 	/*
11690Sstevel@tonic-gate 	 * Is this a consistant (ie. need mapping list lock) mapping?
11700Sstevel@tonic-gate 	 */
11710Sstevel@tonic-gate 	is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
11720Sstevel@tonic-gate 
11730Sstevel@tonic-gate 	/*
11740Sstevel@tonic-gate 	 * Track locked mapping count in the htable.  Do this first,
11750Sstevel@tonic-gate 	 * as we track locking even if there already is a mapping present.
11760Sstevel@tonic-gate 	 */
11770Sstevel@tonic-gate 	if ((flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat)
11780Sstevel@tonic-gate 		HTABLE_LOCK_INC(ht);
11790Sstevel@tonic-gate 
11800Sstevel@tonic-gate 	/*
11810Sstevel@tonic-gate 	 * Acquire the page's mapping list lock and get an hment to use.
11820Sstevel@tonic-gate 	 * Note that hment_prepare() might return NULL.
11830Sstevel@tonic-gate 	 */
11840Sstevel@tonic-gate 	if (is_consist) {
11850Sstevel@tonic-gate 		x86_hm_enter(pp);
11860Sstevel@tonic-gate 		hm = hment_prepare(ht, entry, pp);
11870Sstevel@tonic-gate 	}
11880Sstevel@tonic-gate 
11890Sstevel@tonic-gate 	/*
11900Sstevel@tonic-gate 	 * Set the new pte, retrieving the old one at the same time.
11910Sstevel@tonic-gate 	 */
11920Sstevel@tonic-gate 	old_pte = x86pte_set(ht, entry, pte, pte_ptr);
11930Sstevel@tonic-gate 
11940Sstevel@tonic-gate 	/*
11953446Smrj 	 * did we get a large page / page table collision?
11963446Smrj 	 */
11973446Smrj 	if (old_pte == LPAGE_ERROR) {
11983446Smrj 		rv = -1;
11993446Smrj 		goto done;
12003446Smrj 	}
12013446Smrj 
12023446Smrj 	/*
12030Sstevel@tonic-gate 	 * If the mapping didn't change there is nothing more to do.
12040Sstevel@tonic-gate 	 */
12053446Smrj 	if (PTE_EQUIV(pte, old_pte))
12063446Smrj 		goto done;
12070Sstevel@tonic-gate 
12080Sstevel@tonic-gate 	/*
12090Sstevel@tonic-gate 	 * Install a new mapping in the page's mapping list
12100Sstevel@tonic-gate 	 */
12110Sstevel@tonic-gate 	if (!PTE_ISVALID(old_pte)) {
12120Sstevel@tonic-gate 		if (is_consist) {
12130Sstevel@tonic-gate 			hment_assign(ht, entry, pp, hm);
12140Sstevel@tonic-gate 			x86_hm_exit(pp);
12150Sstevel@tonic-gate 		} else {
12160Sstevel@tonic-gate 			ASSERT(flags & HAT_LOAD_NOCONSIST);
12170Sstevel@tonic-gate 		}
12180Sstevel@tonic-gate 		HTABLE_INC(ht->ht_valid_cnt);
12190Sstevel@tonic-gate 		PGCNT_INC(hat, l);
12203446Smrj 		return (rv);
12210Sstevel@tonic-gate 	}
12220Sstevel@tonic-gate 
12230Sstevel@tonic-gate 	/*
12240Sstevel@tonic-gate 	 * Remap's are more complicated:
12250Sstevel@tonic-gate 	 *  - HAT_LOAD_REMAP must be specified if changing the pfn.
12260Sstevel@tonic-gate 	 *    We also require that NOCONSIST be specified.
12270Sstevel@tonic-gate 	 *  - Otherwise only permission or caching bits may change.
12280Sstevel@tonic-gate 	 */
12290Sstevel@tonic-gate 	if (!PTE_ISPAGE(old_pte, l))
12300Sstevel@tonic-gate 		panic("non-null/page mapping pte=" FMT_PTE, old_pte);
12310Sstevel@tonic-gate 
12320Sstevel@tonic-gate 	if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1233510Skchow 		REMAPASSERT(flags & HAT_LOAD_REMAP);
1234510Skchow 		REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
12353446Smrj 		REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1236510Skchow 		REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
12370Sstevel@tonic-gate 		    pf_is_memory(PTE2PFN(pte, l)));
1238510Skchow 		REMAPASSERT(!is_consist);
12390Sstevel@tonic-gate 	}
12400Sstevel@tonic-gate 
12410Sstevel@tonic-gate 	/*
12420Sstevel@tonic-gate 	 * We only let remaps change the bits for PFNs, permissions
12430Sstevel@tonic-gate 	 * or caching type.
12440Sstevel@tonic-gate 	 */
12450Sstevel@tonic-gate 	ASSERT(PTE_GET(old_pte, ~(PT_REMAP_BITS | PT_REF | PT_MOD)) ==
12460Sstevel@tonic-gate 	    PTE_GET(pte, ~PT_REMAP_BITS));
12470Sstevel@tonic-gate 
12480Sstevel@tonic-gate 	/*
12490Sstevel@tonic-gate 	 * We don't create any mapping list entries on a remap, so release
12500Sstevel@tonic-gate 	 * any allocated hment after we drop the mapping list lock.
12510Sstevel@tonic-gate 	 */
12523446Smrj done:
12530Sstevel@tonic-gate 	if (is_consist) {
12540Sstevel@tonic-gate 		x86_hm_exit(pp);
12550Sstevel@tonic-gate 		if (hm != NULL)
12560Sstevel@tonic-gate 			hment_free(hm);
12570Sstevel@tonic-gate 	}
12583446Smrj 	return (rv);
12590Sstevel@tonic-gate }
12600Sstevel@tonic-gate 
12610Sstevel@tonic-gate /*
12623446Smrj  * Internal routine to load a single page table entry. This only fails if
12633446Smrj  * we attempt to overwrite a page table link with a large page.
12640Sstevel@tonic-gate  */
12653446Smrj static int
12660Sstevel@tonic-gate hati_load_common(
12670Sstevel@tonic-gate 	hat_t		*hat,
12680Sstevel@tonic-gate 	uintptr_t	va,
12690Sstevel@tonic-gate 	page_t		*pp,
12700Sstevel@tonic-gate 	uint_t		attr,
12710Sstevel@tonic-gate 	uint_t		flags,
12720Sstevel@tonic-gate 	level_t		level,
12730Sstevel@tonic-gate 	pfn_t		pfn)
12740Sstevel@tonic-gate {
12750Sstevel@tonic-gate 	htable_t	*ht;
12760Sstevel@tonic-gate 	uint_t		entry;
12770Sstevel@tonic-gate 	x86pte_t	pte;
12783446Smrj 	int		rv = 0;
12790Sstevel@tonic-gate 
12804004Sjosephb 	/*
12814004Sjosephb 	 * The number 16 is arbitrary and here to catch a recursion problem
12824004Sjosephb 	 * early before we blow out the kernel stack.
12834004Sjosephb 	 */
12844004Sjosephb 	++curthread->t_hatdepth;
12854004Sjosephb 	ASSERT(curthread->t_hatdepth < 16);
12864004Sjosephb 
12870Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
12880Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
12890Sstevel@tonic-gate 
12900Sstevel@tonic-gate 	if (flags & HAT_LOAD_SHARE)
12910Sstevel@tonic-gate 		hat->hat_flags |= HAT_SHARED;
12920Sstevel@tonic-gate 
12930Sstevel@tonic-gate 	/*
12940Sstevel@tonic-gate 	 * Find the page table that maps this page if it already exists.
12950Sstevel@tonic-gate 	 */
12960Sstevel@tonic-gate 	ht = htable_lookup(hat, va, level);
12970Sstevel@tonic-gate 
12980Sstevel@tonic-gate 	/*
12994004Sjosephb 	 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
13000Sstevel@tonic-gate 	 */
13014004Sjosephb 	if (pp == NULL)
13020Sstevel@tonic-gate 		flags |= HAT_LOAD_NOCONSIST;
13030Sstevel@tonic-gate 
13040Sstevel@tonic-gate 	if (ht == NULL) {
13050Sstevel@tonic-gate 		ht = htable_create(hat, va, level, NULL);
13060Sstevel@tonic-gate 		ASSERT(ht != NULL);
13070Sstevel@tonic-gate 	}
13080Sstevel@tonic-gate 	entry = htable_va2entry(va, ht);
13090Sstevel@tonic-gate 
13100Sstevel@tonic-gate 	/*
13110Sstevel@tonic-gate 	 * a bunch of paranoid error checking
13120Sstevel@tonic-gate 	 */
13130Sstevel@tonic-gate 	ASSERT(ht->ht_busy > 0);
13140Sstevel@tonic-gate 	if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
13150Sstevel@tonic-gate 		panic("hati_load_common: bad htable %p, va %p", ht, (void *)va);
13160Sstevel@tonic-gate 	ASSERT(ht->ht_level == level);
13170Sstevel@tonic-gate 
13180Sstevel@tonic-gate 	/*
13190Sstevel@tonic-gate 	 * construct the new PTE
13200Sstevel@tonic-gate 	 */
13210Sstevel@tonic-gate 	if (hat == kas.a_hat)
13220Sstevel@tonic-gate 		attr &= ~PROT_USER;
13230Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, level, flags);
13240Sstevel@tonic-gate 	if (hat == kas.a_hat && va >= kernelbase)
13250Sstevel@tonic-gate 		PTE_SET(pte, mmu.pt_global);
13260Sstevel@tonic-gate 
13270Sstevel@tonic-gate 	/*
13280Sstevel@tonic-gate 	 * establish the mapping
13290Sstevel@tonic-gate 	 */
13303446Smrj 	rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
13310Sstevel@tonic-gate 
13320Sstevel@tonic-gate 	/*
13330Sstevel@tonic-gate 	 * release the htable and any reserves
13340Sstevel@tonic-gate 	 */
13350Sstevel@tonic-gate 	htable_release(ht);
13364004Sjosephb 	--curthread->t_hatdepth;
13373446Smrj 	return (rv);
13380Sstevel@tonic-gate }
13390Sstevel@tonic-gate 
13400Sstevel@tonic-gate /*
13410Sstevel@tonic-gate  * special case of hat_memload to deal with some kernel addrs for performance
13420Sstevel@tonic-gate  */
13430Sstevel@tonic-gate static void
13440Sstevel@tonic-gate hat_kmap_load(
13450Sstevel@tonic-gate 	caddr_t		addr,
13460Sstevel@tonic-gate 	page_t		*pp,
13470Sstevel@tonic-gate 	uint_t		attr,
13480Sstevel@tonic-gate 	uint_t		flags)
13490Sstevel@tonic-gate {
13500Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
13510Sstevel@tonic-gate 	x86pte_t	pte;
13520Sstevel@tonic-gate 	pfn_t		pfn = page_pptonum(pp);
13530Sstevel@tonic-gate 	pgcnt_t		pg_off = mmu_btop(va - mmu.kmap_addr);
13540Sstevel@tonic-gate 	htable_t	*ht;
13550Sstevel@tonic-gate 	uint_t		entry;
13560Sstevel@tonic-gate 	void		*pte_ptr;
13570Sstevel@tonic-gate 
13580Sstevel@tonic-gate 	/*
13590Sstevel@tonic-gate 	 * construct the requested PTE
13600Sstevel@tonic-gate 	 */
13610Sstevel@tonic-gate 	attr &= ~PROT_USER;
13620Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
13630Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, 0, flags);
13640Sstevel@tonic-gate 	PTE_SET(pte, mmu.pt_global);
13650Sstevel@tonic-gate 
13660Sstevel@tonic-gate 	/*
13670Sstevel@tonic-gate 	 * Figure out the pte_ptr and htable and use common code to finish up
13680Sstevel@tonic-gate 	 */
13690Sstevel@tonic-gate 	if (mmu.pae_hat)
13700Sstevel@tonic-gate 		pte_ptr = mmu.kmap_ptes + pg_off;
13710Sstevel@tonic-gate 	else
13720Sstevel@tonic-gate 		pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
13730Sstevel@tonic-gate 	ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
13740Sstevel@tonic-gate 	    LEVEL_SHIFT(1)];
13750Sstevel@tonic-gate 	entry = htable_va2entry(va, ht);
13764004Sjosephb 	++curthread->t_hatdepth;
13774004Sjosephb 	ASSERT(curthread->t_hatdepth < 16);
13783446Smrj 	(void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
13794004Sjosephb 	--curthread->t_hatdepth;
13800Sstevel@tonic-gate }
13810Sstevel@tonic-gate 
13820Sstevel@tonic-gate /*
13830Sstevel@tonic-gate  * hat_memload() - load a translation to the given page struct
13840Sstevel@tonic-gate  *
13850Sstevel@tonic-gate  * Flags for hat_memload/hat_devload/hat_*attr.
13860Sstevel@tonic-gate  *
13870Sstevel@tonic-gate  * 	HAT_LOAD	Default flags to load a translation to the page.
13880Sstevel@tonic-gate  *
13890Sstevel@tonic-gate  * 	HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
13900Sstevel@tonic-gate  *			and hat_devload().
13910Sstevel@tonic-gate  *
13920Sstevel@tonic-gate  *	HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
13933446Smrj  *			sets PT_NOCONSIST
13940Sstevel@tonic-gate  *
13950Sstevel@tonic-gate  *	HAT_LOAD_SHARE	A flag to hat_memload() to indicate h/w page tables
13960Sstevel@tonic-gate  *			that map some user pages (not kas) is shared by more
13970Sstevel@tonic-gate  *			than one process (eg. ISM).
13980Sstevel@tonic-gate  *
13990Sstevel@tonic-gate  *	HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
14000Sstevel@tonic-gate  *
14010Sstevel@tonic-gate  *	HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
14020Sstevel@tonic-gate  *			point, it's setting up mapping to allocate internal
14030Sstevel@tonic-gate  *			hat layer data structures.  This flag forces hat layer
14040Sstevel@tonic-gate  *			to tap its reserves in order to prevent infinite
14050Sstevel@tonic-gate  *			recursion.
14060Sstevel@tonic-gate  *
14070Sstevel@tonic-gate  * The following is a protection attribute (like PROT_READ, etc.)
14080Sstevel@tonic-gate  *
14093446Smrj  *	HAT_NOSYNC	set PT_NOSYNC - this mapping's ref/mod bits
14100Sstevel@tonic-gate  *			are never cleared.
14110Sstevel@tonic-gate  *
14120Sstevel@tonic-gate  * Installing new valid PTE's and creation of the mapping list
14130Sstevel@tonic-gate  * entry are controlled under the same lock. It's derived from the
14140Sstevel@tonic-gate  * page_t being mapped.
14150Sstevel@tonic-gate  */
14160Sstevel@tonic-gate static uint_t supported_memload_flags =
14170Sstevel@tonic-gate 	HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
14180Sstevel@tonic-gate 	HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
14190Sstevel@tonic-gate 
14200Sstevel@tonic-gate void
14210Sstevel@tonic-gate hat_memload(
14220Sstevel@tonic-gate 	hat_t		*hat,
14230Sstevel@tonic-gate 	caddr_t		addr,
14240Sstevel@tonic-gate 	page_t		*pp,
14250Sstevel@tonic-gate 	uint_t		attr,
14260Sstevel@tonic-gate 	uint_t		flags)
14270Sstevel@tonic-gate {
14280Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
14290Sstevel@tonic-gate 	level_t		level = 0;
14300Sstevel@tonic-gate 	pfn_t		pfn = page_pptonum(pp);
14310Sstevel@tonic-gate 
14320Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
14333446Smrj 	ASSERT(hat == kas.a_hat || va < _userlimit);
14340Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
14350Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
14360Sstevel@tonic-gate 	ASSERT((flags & supported_memload_flags) == flags);
14370Sstevel@tonic-gate 
14380Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
14390Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
14400Sstevel@tonic-gate 
14410Sstevel@tonic-gate 	/*
14420Sstevel@tonic-gate 	 * kernel address special case for performance.
14430Sstevel@tonic-gate 	 */
14440Sstevel@tonic-gate 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
14450Sstevel@tonic-gate 		ASSERT(hat == kas.a_hat);
14460Sstevel@tonic-gate 		hat_kmap_load(addr, pp, attr, flags);
14470Sstevel@tonic-gate 		return;
14480Sstevel@tonic-gate 	}
14490Sstevel@tonic-gate 
14500Sstevel@tonic-gate 	/*
14510Sstevel@tonic-gate 	 * This is used for memory with normal caching enabled, so
14520Sstevel@tonic-gate 	 * always set HAT_STORECACHING_OK.
14530Sstevel@tonic-gate 	 */
14540Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
14553446Smrj 	if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
14563446Smrj 		panic("unexpected hati_load_common() failure");
14570Sstevel@tonic-gate }
14580Sstevel@tonic-gate 
14594528Spaulsan /* ARGSUSED */
14604528Spaulsan void
14614528Spaulsan hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
14624528Spaulsan     uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
14634528Spaulsan {
14644528Spaulsan 	hat_memload(hat, addr, pp, attr, flags);
14654528Spaulsan }
14664528Spaulsan 
14670Sstevel@tonic-gate /*
14680Sstevel@tonic-gate  * Load the given array of page structs using large pages when possible
14690Sstevel@tonic-gate  */
14700Sstevel@tonic-gate void
14710Sstevel@tonic-gate hat_memload_array(
14720Sstevel@tonic-gate 	hat_t		*hat,
14730Sstevel@tonic-gate 	caddr_t		addr,
14740Sstevel@tonic-gate 	size_t		len,
14750Sstevel@tonic-gate 	page_t		**pages,
14760Sstevel@tonic-gate 	uint_t		attr,
14770Sstevel@tonic-gate 	uint_t		flags)
14780Sstevel@tonic-gate {
14790Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
14800Sstevel@tonic-gate 	uintptr_t	eaddr = va + len;
14810Sstevel@tonic-gate 	level_t		level;
14820Sstevel@tonic-gate 	size_t		pgsize;
14830Sstevel@tonic-gate 	pgcnt_t		pgindx = 0;
14840Sstevel@tonic-gate 	pfn_t		pfn;
14850Sstevel@tonic-gate 	pgcnt_t		i;
14860Sstevel@tonic-gate 
14870Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
14883446Smrj 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
14890Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
14900Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
14910Sstevel@tonic-gate 	ASSERT((flags & supported_memload_flags) == flags);
14920Sstevel@tonic-gate 
14930Sstevel@tonic-gate 	/*
14940Sstevel@tonic-gate 	 * memload is used for memory with full caching enabled, so
14950Sstevel@tonic-gate 	 * set HAT_STORECACHING_OK.
14960Sstevel@tonic-gate 	 */
14970Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
14980Sstevel@tonic-gate 
14990Sstevel@tonic-gate 	/*
15000Sstevel@tonic-gate 	 * handle all pages using largest possible pagesize
15010Sstevel@tonic-gate 	 */
15020Sstevel@tonic-gate 	while (va < eaddr) {
15030Sstevel@tonic-gate 		/*
15040Sstevel@tonic-gate 		 * decide what level mapping to use (ie. pagesize)
15050Sstevel@tonic-gate 		 */
15060Sstevel@tonic-gate 		pfn = page_pptonum(pages[pgindx]);
15070Sstevel@tonic-gate 		for (level = mmu.max_page_level; ; --level) {
15080Sstevel@tonic-gate 			pgsize = LEVEL_SIZE(level);
15090Sstevel@tonic-gate 			if (level == 0)
15100Sstevel@tonic-gate 				break;
15113446Smrj 
15120Sstevel@tonic-gate 			if (!IS_P2ALIGNED(va, pgsize) ||
15130Sstevel@tonic-gate 			    (eaddr - va) < pgsize ||
15143446Smrj 			    !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
15150Sstevel@tonic-gate 				continue;
15160Sstevel@tonic-gate 
15170Sstevel@tonic-gate 			/*
15180Sstevel@tonic-gate 			 * To use a large mapping of this size, all the
15190Sstevel@tonic-gate 			 * pages we are passed must be sequential subpages
15200Sstevel@tonic-gate 			 * of the large page.
15210Sstevel@tonic-gate 			 * hat_page_demote() can't change p_szc because
15220Sstevel@tonic-gate 			 * all pages are locked.
15230Sstevel@tonic-gate 			 */
15240Sstevel@tonic-gate 			if (pages[pgindx]->p_szc >= level) {
15250Sstevel@tonic-gate 				for (i = 0; i < mmu_btop(pgsize); ++i) {
15260Sstevel@tonic-gate 					if (pfn + i !=
15270Sstevel@tonic-gate 					    page_pptonum(pages[pgindx + i]))
15280Sstevel@tonic-gate 						break;
15290Sstevel@tonic-gate 					ASSERT(pages[pgindx + i]->p_szc >=
15300Sstevel@tonic-gate 					    level);
15310Sstevel@tonic-gate 					ASSERT(pages[pgindx] + i ==
15320Sstevel@tonic-gate 					    pages[pgindx + i]);
15330Sstevel@tonic-gate 				}
15340Sstevel@tonic-gate 				if (i == mmu_btop(pgsize))
15350Sstevel@tonic-gate 					break;
15360Sstevel@tonic-gate 			}
15370Sstevel@tonic-gate 		}
15380Sstevel@tonic-gate 
15390Sstevel@tonic-gate 		/*
15403446Smrj 		 * Load this page mapping. If the load fails, try a smaller
15413446Smrj 		 * pagesize.
15420Sstevel@tonic-gate 		 */
15430Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(va));
15443446Smrj 		while (hati_load_common(hat, va, pages[pgindx], attr,
15454381Sjosephb 		    flags, level, pfn) != 0) {
15463446Smrj 			if (level == 0)
15473446Smrj 				panic("unexpected hati_load_common() failure");
15483446Smrj 			--level;
15493446Smrj 			pgsize = LEVEL_SIZE(level);
15503446Smrj 		}
15510Sstevel@tonic-gate 
15520Sstevel@tonic-gate 		/*
15530Sstevel@tonic-gate 		 * move to next page
15540Sstevel@tonic-gate 		 */
15550Sstevel@tonic-gate 		va += pgsize;
15560Sstevel@tonic-gate 		pgindx += mmu_btop(pgsize);
15570Sstevel@tonic-gate 	}
15580Sstevel@tonic-gate }
15590Sstevel@tonic-gate 
15604528Spaulsan /* ARGSUSED */
15614528Spaulsan void
15624528Spaulsan hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
15634528Spaulsan     struct page **pps, uint_t attr, uint_t flags,
15644528Spaulsan     hat_region_cookie_t rcookie)
15654528Spaulsan {
15664528Spaulsan 	hat_memload_array(hat, addr, len, pps, attr, flags);
15674528Spaulsan }
15684528Spaulsan 
15690Sstevel@tonic-gate /*
15700Sstevel@tonic-gate  * void hat_devload(hat, addr, len, pf, attr, flags)
15710Sstevel@tonic-gate  *	load/lock the given page frame number
15720Sstevel@tonic-gate  *
15730Sstevel@tonic-gate  * Advisory ordering attributes. Apply only to device mappings.
15740Sstevel@tonic-gate  *
15750Sstevel@tonic-gate  * HAT_STRICTORDER: the CPU must issue the references in order, as the
15760Sstevel@tonic-gate  *	programmer specified.  This is the default.
15770Sstevel@tonic-gate  * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
15780Sstevel@tonic-gate  *	of reordering; store or load with store or load).
15790Sstevel@tonic-gate  * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
15800Sstevel@tonic-gate  *	to consecutive locations (for example, turn two consecutive byte
15810Sstevel@tonic-gate  *	stores into one halfword store), and it may batch individual loads
15820Sstevel@tonic-gate  *	(for example, turn two consecutive byte loads into one halfword load).
15830Sstevel@tonic-gate  *	This also implies re-ordering.
15840Sstevel@tonic-gate  * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
15850Sstevel@tonic-gate  *	until another store occurs.  The default is to fetch new data
15860Sstevel@tonic-gate  *	on every load.  This also implies merging.
15870Sstevel@tonic-gate  * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
15880Sstevel@tonic-gate  *	the device (perhaps with other data) at a later time.  The default is
15890Sstevel@tonic-gate  *	to push the data right away.  This also implies load caching.
15900Sstevel@tonic-gate  *
15910Sstevel@tonic-gate  * Equivalent of hat_memload(), but can be used for device memory where
15920Sstevel@tonic-gate  * there are no page_t's and we support additional flags (write merging, etc).
15930Sstevel@tonic-gate  * Note that we can have large page mappings with this interface.
15940Sstevel@tonic-gate  */
15950Sstevel@tonic-gate int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
15960Sstevel@tonic-gate 	HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
15970Sstevel@tonic-gate 	HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
15980Sstevel@tonic-gate 
15990Sstevel@tonic-gate void
16000Sstevel@tonic-gate hat_devload(
16010Sstevel@tonic-gate 	hat_t		*hat,
16020Sstevel@tonic-gate 	caddr_t		addr,
16030Sstevel@tonic-gate 	size_t		len,
16040Sstevel@tonic-gate 	pfn_t		pfn,
16050Sstevel@tonic-gate 	uint_t		attr,
16060Sstevel@tonic-gate 	int		flags)
16070Sstevel@tonic-gate {
16080Sstevel@tonic-gate 	uintptr_t	va = ALIGN2PAGE(addr);
16090Sstevel@tonic-gate 	uintptr_t	eva = va + len;
16100Sstevel@tonic-gate 	level_t		level;
16110Sstevel@tonic-gate 	size_t		pgsize;
16120Sstevel@tonic-gate 	page_t		*pp;
16130Sstevel@tonic-gate 	int		f;	/* per PTE copy of flags  - maybe modified */
16140Sstevel@tonic-gate 	uint_t		a;	/* per PTE copy of attr */
16150Sstevel@tonic-gate 
16160Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
16173446Smrj 	ASSERT(hat == kas.a_hat || eva <= _userlimit);
16180Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
16190Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
16200Sstevel@tonic-gate 	ASSERT((flags & supported_devload_flags) == flags);
16210Sstevel@tonic-gate 
16220Sstevel@tonic-gate 	/*
16230Sstevel@tonic-gate 	 * handle all pages
16240Sstevel@tonic-gate 	 */
16250Sstevel@tonic-gate 	while (va < eva) {
16260Sstevel@tonic-gate 
16270Sstevel@tonic-gate 		/*
16280Sstevel@tonic-gate 		 * decide what level mapping to use (ie. pagesize)
16290Sstevel@tonic-gate 		 */
16300Sstevel@tonic-gate 		for (level = mmu.max_page_level; ; --level) {
16310Sstevel@tonic-gate 			pgsize = LEVEL_SIZE(level);
16320Sstevel@tonic-gate 			if (level == 0)
16330Sstevel@tonic-gate 				break;
16340Sstevel@tonic-gate 			if (IS_P2ALIGNED(va, pgsize) &&
16350Sstevel@tonic-gate 			    (eva - va) >= pgsize &&
16360Sstevel@tonic-gate 			    IS_P2ALIGNED(pfn, mmu_btop(pgsize)))
16370Sstevel@tonic-gate 				break;
16380Sstevel@tonic-gate 		}
16390Sstevel@tonic-gate 
16400Sstevel@tonic-gate 		/*
16413446Smrj 		 * If this is just memory then allow caching (this happens
16420Sstevel@tonic-gate 		 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
16433446Smrj 		 * to override that. If we don't have a page_t then make sure
16440Sstevel@tonic-gate 		 * NOCONSIST is set.
16450Sstevel@tonic-gate 		 */
16460Sstevel@tonic-gate 		a = attr;
16470Sstevel@tonic-gate 		f = flags;
16480Sstevel@tonic-gate 		if (pf_is_memory(pfn)) {
16490Sstevel@tonic-gate 			if (!(a & HAT_PLAT_NOCACHE))
16500Sstevel@tonic-gate 				a |= HAT_STORECACHING_OK;
16510Sstevel@tonic-gate 
16520Sstevel@tonic-gate 			if (f & HAT_LOAD_NOCONSIST)
16530Sstevel@tonic-gate 				pp = NULL;
16540Sstevel@tonic-gate 			else
16550Sstevel@tonic-gate 				pp = page_numtopp_nolock(pfn);
16560Sstevel@tonic-gate 		} else {
16570Sstevel@tonic-gate 			pp = NULL;
16580Sstevel@tonic-gate 			f |= HAT_LOAD_NOCONSIST;
16590Sstevel@tonic-gate 		}
16600Sstevel@tonic-gate 
16610Sstevel@tonic-gate 		/*
16620Sstevel@tonic-gate 		 * load this page mapping
16630Sstevel@tonic-gate 		 */
16640Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(va));
16653446Smrj 		while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
16663446Smrj 			if (level == 0)
16673446Smrj 				panic("unexpected hati_load_common() failure");
16683446Smrj 			--level;
16693446Smrj 			pgsize = LEVEL_SIZE(level);
16703446Smrj 		}
16710Sstevel@tonic-gate 
16720Sstevel@tonic-gate 		/*
16730Sstevel@tonic-gate 		 * move to next page
16740Sstevel@tonic-gate 		 */
16750Sstevel@tonic-gate 		va += pgsize;
16760Sstevel@tonic-gate 		pfn += mmu_btop(pgsize);
16770Sstevel@tonic-gate 	}
16780Sstevel@tonic-gate }
16790Sstevel@tonic-gate 
16800Sstevel@tonic-gate /*
16810Sstevel@tonic-gate  * void hat_unlock(hat, addr, len)
16820Sstevel@tonic-gate  *	unlock the mappings to a given range of addresses
16830Sstevel@tonic-gate  *
16840Sstevel@tonic-gate  * Locks are tracked by ht_lock_cnt in the htable.
16850Sstevel@tonic-gate  */
16860Sstevel@tonic-gate void
16870Sstevel@tonic-gate hat_unlock(hat_t *hat, caddr_t addr, size_t len)
16880Sstevel@tonic-gate {
16890Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
16900Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
16910Sstevel@tonic-gate 	htable_t	*ht = NULL;
16920Sstevel@tonic-gate 
16930Sstevel@tonic-gate 	/*
16940Sstevel@tonic-gate 	 * kernel entries are always locked, we don't track lock counts
16950Sstevel@tonic-gate 	 */
16963446Smrj 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
16970Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
16980Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
16990Sstevel@tonic-gate 	if (hat == kas.a_hat)
17000Sstevel@tonic-gate 		return;
17010Sstevel@tonic-gate 	if (eaddr > _userlimit)
17020Sstevel@tonic-gate 		panic("hat_unlock() address out of range - above _userlimit");
17030Sstevel@tonic-gate 
17040Sstevel@tonic-gate 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
17050Sstevel@tonic-gate 	while (vaddr < eaddr) {
17060Sstevel@tonic-gate 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
17070Sstevel@tonic-gate 		if (ht == NULL)
17080Sstevel@tonic-gate 			break;
17090Sstevel@tonic-gate 
17100Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
17110Sstevel@tonic-gate 
17120Sstevel@tonic-gate 		if (ht->ht_lock_cnt < 1)
17130Sstevel@tonic-gate 			panic("hat_unlock(): lock_cnt < 1, "
17140Sstevel@tonic-gate 			    "htable=%p, vaddr=%p\n", ht, (caddr_t)vaddr);
17150Sstevel@tonic-gate 		HTABLE_LOCK_DEC(ht);
17160Sstevel@tonic-gate 
17170Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
17180Sstevel@tonic-gate 	}
17190Sstevel@tonic-gate 	if (ht)
17200Sstevel@tonic-gate 		htable_release(ht);
17210Sstevel@tonic-gate }
17220Sstevel@tonic-gate 
17234528Spaulsan /* ARGSUSED */
17244528Spaulsan void
17254528Spaulsan hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len,
17264528Spaulsan     hat_region_cookie_t rcookie)
17274528Spaulsan {
17284528Spaulsan 	panic("No shared region support on x86");
17294528Spaulsan }
17304528Spaulsan 
17310Sstevel@tonic-gate /*
17320Sstevel@tonic-gate  * Cross call service routine to demap a virtual page on
17330Sstevel@tonic-gate  * the current CPU or flush all mappings in TLB.
17340Sstevel@tonic-gate  */
17350Sstevel@tonic-gate /*ARGSUSED*/
17360Sstevel@tonic-gate static int
17370Sstevel@tonic-gate hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
17380Sstevel@tonic-gate {
17390Sstevel@tonic-gate 	hat_t	*hat = (hat_t *)a1;
17400Sstevel@tonic-gate 	caddr_t	addr = (caddr_t)a2;
17410Sstevel@tonic-gate 
17420Sstevel@tonic-gate 	/*
17430Sstevel@tonic-gate 	 * If the target hat isn't the kernel and this CPU isn't operating
17440Sstevel@tonic-gate 	 * in the target hat, we can ignore the cross call.
17450Sstevel@tonic-gate 	 */
17460Sstevel@tonic-gate 	if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
17470Sstevel@tonic-gate 		return (0);
17480Sstevel@tonic-gate 
17490Sstevel@tonic-gate 	/*
17500Sstevel@tonic-gate 	 * For a normal address, we just flush one page mapping
17510Sstevel@tonic-gate 	 */
17520Sstevel@tonic-gate 	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
17533446Smrj 		mmu_tlbflush_entry(addr);
17540Sstevel@tonic-gate 		return (0);
17550Sstevel@tonic-gate 	}
17560Sstevel@tonic-gate 
17570Sstevel@tonic-gate 	/*
17580Sstevel@tonic-gate 	 * Otherwise we reload cr3 to effect a complete TLB flush.
17590Sstevel@tonic-gate 	 *
17600Sstevel@tonic-gate 	 * A reload of cr3 on a VLP process also means we must also recopy in
17610Sstevel@tonic-gate 	 * the pte values from the struct hat
17620Sstevel@tonic-gate 	 */
17630Sstevel@tonic-gate 	if (hat->hat_flags & HAT_VLP) {
17640Sstevel@tonic-gate #if defined(__amd64)
17650Sstevel@tonic-gate 		x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
17660Sstevel@tonic-gate 
17670Sstevel@tonic-gate 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
17680Sstevel@tonic-gate #elif defined(__i386)
17690Sstevel@tonic-gate 		reload_pae32(hat, CPU);
17700Sstevel@tonic-gate #endif
17710Sstevel@tonic-gate 	}
17720Sstevel@tonic-gate 	reload_cr3();
17730Sstevel@tonic-gate 	return (0);
17740Sstevel@tonic-gate }
17750Sstevel@tonic-gate 
17760Sstevel@tonic-gate /*
17774191Sjosephb  * Flush all TLB entries, including global (ie. kernel) ones.
17784191Sjosephb  */
17794191Sjosephb static void
17804191Sjosephb flush_all_tlb_entries(void)
17814191Sjosephb {
17824191Sjosephb 	ulong_t cr4 = getcr4();
17834191Sjosephb 
17844191Sjosephb 	if (cr4 & CR4_PGE) {
17854191Sjosephb 		setcr4(cr4 & ~(ulong_t)CR4_PGE);
17864191Sjosephb 		setcr4(cr4);
17874191Sjosephb 
17884191Sjosephb 		/*
17894191Sjosephb 		 * 32 bit PAE also needs to always reload_cr3()
17904191Sjosephb 		 */
17914191Sjosephb 		if (mmu.max_level == 2)
17924191Sjosephb 			reload_cr3();
17934191Sjosephb 	} else {
17944191Sjosephb 		reload_cr3();
17954191Sjosephb 	}
17964191Sjosephb }
17974191Sjosephb 
17984191Sjosephb #define	TLB_CPU_HALTED	(01ul)
17994191Sjosephb #define	TLB_INVAL_ALL	(02ul)
18004191Sjosephb #define	CAS_TLB_INFO(cpu, old, new)	\
18014191Sjosephb 	caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
18024191Sjosephb 
18034191Sjosephb /*
18044191Sjosephb  * Record that a CPU is going idle
18054191Sjosephb  */
18064191Sjosephb void
18074191Sjosephb tlb_going_idle(void)
18084191Sjosephb {
18094191Sjosephb 	atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
18104191Sjosephb }
18114191Sjosephb 
18124191Sjosephb /*
18134191Sjosephb  * Service a delayed TLB flush if coming out of being idle.
18144191Sjosephb  */
18154191Sjosephb void
18164191Sjosephb tlb_service(void)
18174191Sjosephb {
18184191Sjosephb 	ulong_t flags = getflags();
18194191Sjosephb 	ulong_t tlb_info;
18204191Sjosephb 	ulong_t found;
18214191Sjosephb 
18224191Sjosephb 	/*
18234191Sjosephb 	 * Be sure interrupts are off while doing this so that
18244191Sjosephb 	 * higher level interrupts correctly wait for flushes to finish.
18254191Sjosephb 	 */
18264191Sjosephb 	if (flags & PS_IE)
18274191Sjosephb 		flags = intr_clear();
18284191Sjosephb 
18294191Sjosephb 	/*
18304191Sjosephb 	 * We only have to do something if coming out of being idle.
18314191Sjosephb 	 */
18324191Sjosephb 	tlb_info = CPU->cpu_m.mcpu_tlb_info;
18334191Sjosephb 	if (tlb_info & TLB_CPU_HALTED) {
18344191Sjosephb 		ASSERT(CPU->cpu_current_hat == kas.a_hat);
18354191Sjosephb 
18364191Sjosephb 		/*
18374191Sjosephb 		 * Atomic clear and fetch of old state.
18384191Sjosephb 		 */
18394191Sjosephb 		while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
18404191Sjosephb 			ASSERT(found & TLB_CPU_HALTED);
18414191Sjosephb 			tlb_info = found;
18424191Sjosephb 			SMT_PAUSE();
18434191Sjosephb 		}
18444191Sjosephb 		if (tlb_info & TLB_INVAL_ALL)
18454191Sjosephb 			flush_all_tlb_entries();
18464191Sjosephb 	}
18474191Sjosephb 
18484191Sjosephb 	/*
18494191Sjosephb 	 * Restore interrupt enable control bit.
18504191Sjosephb 	 */
18514191Sjosephb 	if (flags & PS_IE)
18524191Sjosephb 		sti();
18534191Sjosephb }
18544191Sjosephb 
18554191Sjosephb /*
18560Sstevel@tonic-gate  * Internal routine to do cross calls to invalidate a range of pages on
18570Sstevel@tonic-gate  * all CPUs using a given hat.
18580Sstevel@tonic-gate  */
18590Sstevel@tonic-gate void
18603446Smrj hat_tlb_inval(hat_t *hat, uintptr_t va)
18610Sstevel@tonic-gate {
18620Sstevel@tonic-gate 	extern int	flushes_require_xcalls;	/* from mp_startup.c */
18630Sstevel@tonic-gate 	cpuset_t	justme;
18644191Sjosephb 	cpuset_t	check_cpus;
18653446Smrj 	cpuset_t	cpus_to_shootdown;
18664191Sjosephb 	cpu_t		*cpup;
18674191Sjosephb 	int		c;
18680Sstevel@tonic-gate 
18690Sstevel@tonic-gate 	/*
18700Sstevel@tonic-gate 	 * If the hat is being destroyed, there are no more users, so
18710Sstevel@tonic-gate 	 * demap need not do anything.
18720Sstevel@tonic-gate 	 */
18730Sstevel@tonic-gate 	if (hat->hat_flags & HAT_FREEING)
18740Sstevel@tonic-gate 		return;
18750Sstevel@tonic-gate 
18760Sstevel@tonic-gate 	/*
18770Sstevel@tonic-gate 	 * If demapping from a shared pagetable, we best demap the
18780Sstevel@tonic-gate 	 * entire set of user TLBs, since we don't know what addresses
18790Sstevel@tonic-gate 	 * these were shared at.
18800Sstevel@tonic-gate 	 */
18810Sstevel@tonic-gate 	if (hat->hat_flags & HAT_SHARED) {
18820Sstevel@tonic-gate 		hat = kas.a_hat;
18830Sstevel@tonic-gate 		va = DEMAP_ALL_ADDR;
18840Sstevel@tonic-gate 	}
18850Sstevel@tonic-gate 
18860Sstevel@tonic-gate 	/*
18870Sstevel@tonic-gate 	 * if not running with multiple CPUs, don't use cross calls
18880Sstevel@tonic-gate 	 */
18890Sstevel@tonic-gate 	if (panicstr || !flushes_require_xcalls) {
18900Sstevel@tonic-gate 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
18910Sstevel@tonic-gate 		return;
18920Sstevel@tonic-gate 	}
18930Sstevel@tonic-gate 
18940Sstevel@tonic-gate 
18950Sstevel@tonic-gate 	/*
18963446Smrj 	 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
18973446Smrj 	 * Otherwise it's just CPUs currently executing in this hat.
18980Sstevel@tonic-gate 	 */
18990Sstevel@tonic-gate 	kpreempt_disable();
19000Sstevel@tonic-gate 	CPUSET_ONLY(justme, CPU->cpu_id);
19013446Smrj 	if (hat == kas.a_hat)
19023446Smrj 		cpus_to_shootdown = khat_cpuset;
19030Sstevel@tonic-gate 	else
19043446Smrj 		cpus_to_shootdown = hat->hat_cpus;
19053446Smrj 
19064191Sjosephb 	/*
19074191Sjosephb 	 * If any CPUs in the set are idle, just request a delayed flush
19084191Sjosephb 	 * and avoid waking them up.
19094191Sjosephb 	 */
19104191Sjosephb 	check_cpus = cpus_to_shootdown;
19114191Sjosephb 	for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
19124191Sjosephb 		ulong_t tlb_info;
19134191Sjosephb 
19144191Sjosephb 		if (!CPU_IN_SET(check_cpus, c))
19154191Sjosephb 			continue;
19164191Sjosephb 		CPUSET_DEL(check_cpus, c);
19174191Sjosephb 		cpup = cpu[c];
19184191Sjosephb 		if (cpup == NULL)
19194191Sjosephb 			continue;
19204191Sjosephb 
19214191Sjosephb 		tlb_info = cpup->cpu_m.mcpu_tlb_info;
19224191Sjosephb 		while (tlb_info == TLB_CPU_HALTED) {
19234191Sjosephb 			(void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
19244381Sjosephb 			    TLB_CPU_HALTED | TLB_INVAL_ALL);
19254191Sjosephb 			SMT_PAUSE();
19264191Sjosephb 			tlb_info = cpup->cpu_m.mcpu_tlb_info;
19274191Sjosephb 		}
19284191Sjosephb 		if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
19294191Sjosephb 			HATSTAT_INC(hs_tlb_inval_delayed);
19304191Sjosephb 			CPUSET_DEL(cpus_to_shootdown, c);
19314191Sjosephb 		}
19324191Sjosephb 	}
19334191Sjosephb 
19343446Smrj 	if (CPUSET_ISNULL(cpus_to_shootdown) ||
19353446Smrj 	    CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
19363446Smrj 
19373446Smrj 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
19383446Smrj 
19393446Smrj 	} else {
19403446Smrj 
19413446Smrj 		CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
19423446Smrj 		xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL, X_CALL_HIPRI,
19433446Smrj 		    cpus_to_shootdown, hati_demap_func);
19443446Smrj 
19453446Smrj 	}
19460Sstevel@tonic-gate 	kpreempt_enable();
19470Sstevel@tonic-gate }
19480Sstevel@tonic-gate 
19490Sstevel@tonic-gate /*
19500Sstevel@tonic-gate  * Interior routine for HAT_UNLOADs from hat_unload_callback(),
19510Sstevel@tonic-gate  * hat_kmap_unload() OR from hat_steal() code.  This routine doesn't
19520Sstevel@tonic-gate  * handle releasing of the htables.
19530Sstevel@tonic-gate  */
19540Sstevel@tonic-gate void
19550Sstevel@tonic-gate hat_pte_unmap(
19560Sstevel@tonic-gate 	htable_t	*ht,
19570Sstevel@tonic-gate 	uint_t		entry,
19580Sstevel@tonic-gate 	uint_t		flags,
19590Sstevel@tonic-gate 	x86pte_t	old_pte,
19600Sstevel@tonic-gate 	void		*pte_ptr)
19610Sstevel@tonic-gate {
19620Sstevel@tonic-gate 	hat_t		*hat = ht->ht_hat;
19630Sstevel@tonic-gate 	hment_t		*hm = NULL;
19640Sstevel@tonic-gate 	page_t		*pp = NULL;
19650Sstevel@tonic-gate 	level_t		l = ht->ht_level;
19660Sstevel@tonic-gate 	pfn_t		pfn;
19670Sstevel@tonic-gate 
19680Sstevel@tonic-gate 	/*
19690Sstevel@tonic-gate 	 * We always track the locking counts, even if nothing is unmapped
19700Sstevel@tonic-gate 	 */
19710Sstevel@tonic-gate 	if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
19720Sstevel@tonic-gate 		ASSERT(ht->ht_lock_cnt > 0);
19730Sstevel@tonic-gate 		HTABLE_LOCK_DEC(ht);
19740Sstevel@tonic-gate 	}
19750Sstevel@tonic-gate 
19760Sstevel@tonic-gate 	/*
19770Sstevel@tonic-gate 	 * Figure out which page's mapping list lock to acquire using the PFN
19780Sstevel@tonic-gate 	 * passed in "old" PTE. We then attempt to invalidate the PTE.
19790Sstevel@tonic-gate 	 * If another thread, probably a hat_pageunload, has asynchronously
19800Sstevel@tonic-gate 	 * unmapped/remapped this address we'll loop here.
19810Sstevel@tonic-gate 	 */
19820Sstevel@tonic-gate 	ASSERT(ht->ht_busy > 0);
19830Sstevel@tonic-gate 	while (PTE_ISVALID(old_pte)) {
19840Sstevel@tonic-gate 		pfn = PTE2PFN(old_pte, l);
19853446Smrj 		if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
19860Sstevel@tonic-gate 			pp = NULL;
19870Sstevel@tonic-gate 		} else {
19880Sstevel@tonic-gate 			pp = page_numtopp_nolock(pfn);
198947Sjosephb 			if (pp == NULL) {
199047Sjosephb 				panic("no page_t, not NOCONSIST: old_pte="
199147Sjosephb 				    FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
199247Sjosephb 				    old_pte, (uintptr_t)ht, entry,
199347Sjosephb 				    (uintptr_t)pte_ptr);
199447Sjosephb 			}
19950Sstevel@tonic-gate 			x86_hm_enter(pp);
19960Sstevel@tonic-gate 		}
199747Sjosephb 
199847Sjosephb 		/*
199947Sjosephb 		 * If freeing the address space, check that the PTE
200047Sjosephb 		 * hasn't changed, as the mappings are no longer in use by
200147Sjosephb 		 * any thread, invalidation is unnecessary.
200247Sjosephb 		 * If not freeing, do a full invalidate.
200347Sjosephb 		 */
200447Sjosephb 		if (hat->hat_flags & HAT_FREEING)
200547Sjosephb 			old_pte = x86pte_get(ht, entry);
200647Sjosephb 		else
20073446Smrj 			old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr);
20080Sstevel@tonic-gate 
20090Sstevel@tonic-gate 		/*
20100Sstevel@tonic-gate 		 * If the page hadn't changed we've unmapped it and can proceed
20110Sstevel@tonic-gate 		 */
20120Sstevel@tonic-gate 		if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
20130Sstevel@tonic-gate 			break;
20140Sstevel@tonic-gate 
20150Sstevel@tonic-gate 		/*
20160Sstevel@tonic-gate 		 * Otherwise, we'll have to retry with the current old_pte.
20170Sstevel@tonic-gate 		 * Drop the hment lock, since the pfn may have changed.
20180Sstevel@tonic-gate 		 */
20190Sstevel@tonic-gate 		if (pp != NULL) {
20200Sstevel@tonic-gate 			x86_hm_exit(pp);
20210Sstevel@tonic-gate 			pp = NULL;
20220Sstevel@tonic-gate 		} else {
20233446Smrj 			ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
20240Sstevel@tonic-gate 		}
20250Sstevel@tonic-gate 	}
20260Sstevel@tonic-gate 
20270Sstevel@tonic-gate 	/*
20280Sstevel@tonic-gate 	 * If the old mapping wasn't valid, there's nothing more to do
20290Sstevel@tonic-gate 	 */
20300Sstevel@tonic-gate 	if (!PTE_ISVALID(old_pte)) {
20310Sstevel@tonic-gate 		if (pp != NULL)
20320Sstevel@tonic-gate 			x86_hm_exit(pp);
20330Sstevel@tonic-gate 		return;
20340Sstevel@tonic-gate 	}
20350Sstevel@tonic-gate 
20360Sstevel@tonic-gate 	/*
20370Sstevel@tonic-gate 	 * Take care of syncing any MOD/REF bits and removing the hment.
20380Sstevel@tonic-gate 	 */
20390Sstevel@tonic-gate 	if (pp != NULL) {
20400Sstevel@tonic-gate 		if (!(flags & HAT_UNLOAD_NOSYNC))
20410Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, old_pte, l);
20420Sstevel@tonic-gate 		hm = hment_remove(pp, ht, entry);
20430Sstevel@tonic-gate 		x86_hm_exit(pp);
20440Sstevel@tonic-gate 		if (hm != NULL)
20450Sstevel@tonic-gate 			hment_free(hm);
20460Sstevel@tonic-gate 	}
20470Sstevel@tonic-gate 
20480Sstevel@tonic-gate 	/*
20490Sstevel@tonic-gate 	 * Handle book keeping in the htable and hat
20500Sstevel@tonic-gate 	 */
20510Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
20520Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
20530Sstevel@tonic-gate 	PGCNT_DEC(hat, l);
20540Sstevel@tonic-gate }
20550Sstevel@tonic-gate 
20560Sstevel@tonic-gate /*
20570Sstevel@tonic-gate  * very cheap unload implementation to special case some kernel addresses
20580Sstevel@tonic-gate  */
20590Sstevel@tonic-gate static void
20600Sstevel@tonic-gate hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
20610Sstevel@tonic-gate {
20620Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
20630Sstevel@tonic-gate 	uintptr_t	eva = va + len;
20643446Smrj 	pgcnt_t		pg_index;
20650Sstevel@tonic-gate 	htable_t	*ht;
20660Sstevel@tonic-gate 	uint_t		entry;
20673446Smrj 	x86pte_t	*pte_ptr;
20680Sstevel@tonic-gate 	x86pte_t	old_pte;
20690Sstevel@tonic-gate 
20700Sstevel@tonic-gate 	for (; va < eva; va += MMU_PAGESIZE) {
20710Sstevel@tonic-gate 		/*
20720Sstevel@tonic-gate 		 * Get the PTE
20730Sstevel@tonic-gate 		 */
20743446Smrj 		pg_index = mmu_btop(va - mmu.kmap_addr);
20753446Smrj 		pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
20763446Smrj 		old_pte = GET_PTE(pte_ptr);
20770Sstevel@tonic-gate 
20780Sstevel@tonic-gate 		/*
20790Sstevel@tonic-gate 		 * get the htable / entry
20800Sstevel@tonic-gate 		 */
20810Sstevel@tonic-gate 		ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
20820Sstevel@tonic-gate 		    >> LEVEL_SHIFT(1)];
20830Sstevel@tonic-gate 		entry = htable_va2entry(va, ht);
20840Sstevel@tonic-gate 
20850Sstevel@tonic-gate 		/*
20860Sstevel@tonic-gate 		 * use mostly common code to unmap it.
20870Sstevel@tonic-gate 		 */
20880Sstevel@tonic-gate 		hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr);
20890Sstevel@tonic-gate 	}
20900Sstevel@tonic-gate }
20910Sstevel@tonic-gate 
20920Sstevel@tonic-gate 
20930Sstevel@tonic-gate /*
20940Sstevel@tonic-gate  * unload a range of virtual address space (no callback)
20950Sstevel@tonic-gate  */
20960Sstevel@tonic-gate void
20970Sstevel@tonic-gate hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
20980Sstevel@tonic-gate {
20990Sstevel@tonic-gate 	uintptr_t va = (uintptr_t)addr;
21003446Smrj 
21013446Smrj 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
21020Sstevel@tonic-gate 
21030Sstevel@tonic-gate 	/*
21040Sstevel@tonic-gate 	 * special case for performance.
21050Sstevel@tonic-gate 	 */
21060Sstevel@tonic-gate 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
21070Sstevel@tonic-gate 		ASSERT(hat == kas.a_hat);
21080Sstevel@tonic-gate 		hat_kmap_unload(addr, len, flags);
21093446Smrj 	} else {
21103446Smrj 		hat_unload_callback(hat, addr, len, flags, NULL);
21110Sstevel@tonic-gate 	}
21120Sstevel@tonic-gate }
21130Sstevel@tonic-gate 
21140Sstevel@tonic-gate /*
21150Sstevel@tonic-gate  * Do the callbacks for ranges being unloaded.
21160Sstevel@tonic-gate  */
21170Sstevel@tonic-gate typedef struct range_info {
21180Sstevel@tonic-gate 	uintptr_t	rng_va;
21190Sstevel@tonic-gate 	ulong_t		rng_cnt;
21200Sstevel@tonic-gate 	level_t		rng_level;
21210Sstevel@tonic-gate } range_info_t;
21220Sstevel@tonic-gate 
21230Sstevel@tonic-gate static void
21240Sstevel@tonic-gate handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
21250Sstevel@tonic-gate {
21260Sstevel@tonic-gate 	/*
21270Sstevel@tonic-gate 	 * do callbacks to upper level VM system
21280Sstevel@tonic-gate 	 */
21290Sstevel@tonic-gate 	while (cb != NULL && cnt > 0) {
21300Sstevel@tonic-gate 		--cnt;
21310Sstevel@tonic-gate 		cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
21320Sstevel@tonic-gate 		cb->hcb_end_addr = cb->hcb_start_addr;
21330Sstevel@tonic-gate 		cb->hcb_end_addr +=
21340Sstevel@tonic-gate 		    range[cnt].rng_cnt << LEVEL_SIZE(range[cnt].rng_level);
21350Sstevel@tonic-gate 		cb->hcb_function(cb);
21360Sstevel@tonic-gate 	}
21370Sstevel@tonic-gate }
21380Sstevel@tonic-gate 
21390Sstevel@tonic-gate /*
21400Sstevel@tonic-gate  * Unload a given range of addresses (has optional callback)
21410Sstevel@tonic-gate  *
21420Sstevel@tonic-gate  * Flags:
21430Sstevel@tonic-gate  * define	HAT_UNLOAD		0x00
21440Sstevel@tonic-gate  * define	HAT_UNLOAD_NOSYNC	0x02
21450Sstevel@tonic-gate  * define	HAT_UNLOAD_UNLOCK	0x04
21460Sstevel@tonic-gate  * define	HAT_UNLOAD_OTHER	0x08 - not used
21470Sstevel@tonic-gate  * define	HAT_UNLOAD_UNMAP	0x10 - same as HAT_UNLOAD
21480Sstevel@tonic-gate  */
21490Sstevel@tonic-gate #define	MAX_UNLOAD_CNT (8)
21500Sstevel@tonic-gate void
21510Sstevel@tonic-gate hat_unload_callback(
21520Sstevel@tonic-gate 	hat_t		*hat,
21530Sstevel@tonic-gate 	caddr_t		addr,
21540Sstevel@tonic-gate 	size_t		len,
21550Sstevel@tonic-gate 	uint_t		flags,
21560Sstevel@tonic-gate 	hat_callback_t	*cb)
21570Sstevel@tonic-gate {
21580Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
21590Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
21600Sstevel@tonic-gate 	htable_t	*ht = NULL;
21610Sstevel@tonic-gate 	uint_t		entry;
216247Sjosephb 	uintptr_t	contig_va = (uintptr_t)-1L;
21630Sstevel@tonic-gate 	range_info_t	r[MAX_UNLOAD_CNT];
21640Sstevel@tonic-gate 	uint_t		r_cnt = 0;
21650Sstevel@tonic-gate 	x86pte_t	old_pte;
21660Sstevel@tonic-gate 
21673446Smrj 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
21680Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
21690Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
21700Sstevel@tonic-gate 
21713446Smrj 	/*
21723446Smrj 	 * Special case a single page being unloaded for speed. This happens
21733446Smrj 	 * quite frequently, COW faults after a fork() for example.
21743446Smrj 	 */
21753446Smrj 	if (cb == NULL && len == MMU_PAGESIZE) {
21763446Smrj 		ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
21773446Smrj 		if (ht != NULL) {
21783446Smrj 			if (PTE_ISVALID(old_pte))
21793446Smrj 				hat_pte_unmap(ht, entry, flags, old_pte, NULL);
21803446Smrj 			htable_release(ht);
21813446Smrj 		}
21823446Smrj 		return;
21833446Smrj 	}
21843446Smrj 
21850Sstevel@tonic-gate 	while (vaddr < eaddr) {
21860Sstevel@tonic-gate 		old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
21870Sstevel@tonic-gate 		if (ht == NULL)
21880Sstevel@tonic-gate 			break;
21890Sstevel@tonic-gate 
21900Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
21910Sstevel@tonic-gate 
21920Sstevel@tonic-gate 		if (vaddr < (uintptr_t)addr)
21930Sstevel@tonic-gate 			panic("hat_unload_callback(): unmap inside large page");
21940Sstevel@tonic-gate 
21950Sstevel@tonic-gate 		/*
21960Sstevel@tonic-gate 		 * We'll do the call backs for contiguous ranges
21970Sstevel@tonic-gate 		 */
219847Sjosephb 		if (vaddr != contig_va ||
21990Sstevel@tonic-gate 		    (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
22000Sstevel@tonic-gate 			if (r_cnt == MAX_UNLOAD_CNT) {
22010Sstevel@tonic-gate 				handle_ranges(cb, r_cnt, r);
22020Sstevel@tonic-gate 				r_cnt = 0;
22030Sstevel@tonic-gate 			}
22040Sstevel@tonic-gate 			r[r_cnt].rng_va = vaddr;
22050Sstevel@tonic-gate 			r[r_cnt].rng_cnt = 0;
22060Sstevel@tonic-gate 			r[r_cnt].rng_level = ht->ht_level;
22070Sstevel@tonic-gate 			++r_cnt;
22080Sstevel@tonic-gate 		}
22090Sstevel@tonic-gate 
22100Sstevel@tonic-gate 		/*
22110Sstevel@tonic-gate 		 * Unload one mapping from the page tables.
22120Sstevel@tonic-gate 		 */
22130Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
22140Sstevel@tonic-gate 		hat_pte_unmap(ht, entry, flags, old_pte, NULL);
22150Sstevel@tonic-gate 		ASSERT(ht->ht_level <= mmu.max_page_level);
22160Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
221747Sjosephb 		contig_va = vaddr;
22180Sstevel@tonic-gate 		++r[r_cnt - 1].rng_cnt;
22190Sstevel@tonic-gate 	}
22200Sstevel@tonic-gate 	if (ht)
22210Sstevel@tonic-gate 		htable_release(ht);
22220Sstevel@tonic-gate 
22230Sstevel@tonic-gate 	/*
22240Sstevel@tonic-gate 	 * handle last range for callbacks
22250Sstevel@tonic-gate 	 */
22260Sstevel@tonic-gate 	if (r_cnt > 0)
22270Sstevel@tonic-gate 		handle_ranges(cb, r_cnt, r);
22280Sstevel@tonic-gate }
22290Sstevel@tonic-gate 
22300Sstevel@tonic-gate /*
22310Sstevel@tonic-gate  * synchronize mapping with software data structures
22320Sstevel@tonic-gate  *
22330Sstevel@tonic-gate  * This interface is currently only used by the working set monitor
22340Sstevel@tonic-gate  * driver.
22350Sstevel@tonic-gate  */
22360Sstevel@tonic-gate /*ARGSUSED*/
22370Sstevel@tonic-gate void
22380Sstevel@tonic-gate hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
22390Sstevel@tonic-gate {
22400Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
22410Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
22420Sstevel@tonic-gate 	htable_t	*ht = NULL;
22430Sstevel@tonic-gate 	uint_t		entry;
22440Sstevel@tonic-gate 	x86pte_t	pte;
22450Sstevel@tonic-gate 	x86pte_t	save_pte;
22460Sstevel@tonic-gate 	x86pte_t	new;
22470Sstevel@tonic-gate 	page_t		*pp;
22480Sstevel@tonic-gate 
22490Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(vaddr));
22500Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
22510Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
22523446Smrj 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
22530Sstevel@tonic-gate 
22540Sstevel@tonic-gate 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
22550Sstevel@tonic-gate try_again:
22560Sstevel@tonic-gate 		pte = htable_walk(hat, &ht, &vaddr, eaddr);
22570Sstevel@tonic-gate 		if (ht == NULL)
22580Sstevel@tonic-gate 			break;
22590Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
22600Sstevel@tonic-gate 
22613446Smrj 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
22620Sstevel@tonic-gate 		    PTE_GET(pte, PT_REF | PT_MOD) == 0)
22630Sstevel@tonic-gate 			continue;
22640Sstevel@tonic-gate 
22650Sstevel@tonic-gate 		/*
22660Sstevel@tonic-gate 		 * We need to acquire the mapping list lock to protect
22670Sstevel@tonic-gate 		 * against hat_pageunload(), hat_unload(), etc.
22680Sstevel@tonic-gate 		 */
22690Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
22700Sstevel@tonic-gate 		if (pp == NULL)
22710Sstevel@tonic-gate 			break;
22720Sstevel@tonic-gate 		x86_hm_enter(pp);
22730Sstevel@tonic-gate 		save_pte = pte;
22740Sstevel@tonic-gate 		pte = x86pte_get(ht, entry);
22750Sstevel@tonic-gate 		if (pte != save_pte) {
22760Sstevel@tonic-gate 			x86_hm_exit(pp);
22770Sstevel@tonic-gate 			goto try_again;
22780Sstevel@tonic-gate 		}
22793446Smrj 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
22800Sstevel@tonic-gate 		    PTE_GET(pte, PT_REF | PT_MOD) == 0) {
22810Sstevel@tonic-gate 			x86_hm_exit(pp);
22820Sstevel@tonic-gate 			continue;
22830Sstevel@tonic-gate 		}
22840Sstevel@tonic-gate 
22850Sstevel@tonic-gate 		/*
22860Sstevel@tonic-gate 		 * Need to clear ref or mod bits. We may compete with
22870Sstevel@tonic-gate 		 * hardware updating the R/M bits and have to try again.
22880Sstevel@tonic-gate 		 */
22890Sstevel@tonic-gate 		if (flags == HAT_SYNC_ZERORM) {
22900Sstevel@tonic-gate 			new = pte;
22910Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD);
22920Sstevel@tonic-gate 			pte = hati_update_pte(ht, entry, pte, new);
22930Sstevel@tonic-gate 			if (pte != 0) {
22940Sstevel@tonic-gate 				x86_hm_exit(pp);
22950Sstevel@tonic-gate 				goto try_again;
22960Sstevel@tonic-gate 			}
22970Sstevel@tonic-gate 		} else {
22980Sstevel@tonic-gate 			/*
22990Sstevel@tonic-gate 			 * sync the PTE to the page_t
23000Sstevel@tonic-gate 			 */
23010Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
23020Sstevel@tonic-gate 		}
23030Sstevel@tonic-gate 		x86_hm_exit(pp);
23040Sstevel@tonic-gate 	}
23050Sstevel@tonic-gate 	if (ht)
23060Sstevel@tonic-gate 		htable_release(ht);
23070Sstevel@tonic-gate }
23080Sstevel@tonic-gate 
23090Sstevel@tonic-gate /*
23100Sstevel@tonic-gate  * void	hat_map(hat, addr, len, flags)
23110Sstevel@tonic-gate  */
23120Sstevel@tonic-gate /*ARGSUSED*/
23130Sstevel@tonic-gate void
23140Sstevel@tonic-gate hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
23150Sstevel@tonic-gate {
23160Sstevel@tonic-gate 	/* does nothing */
23170Sstevel@tonic-gate }
23180Sstevel@tonic-gate 
23190Sstevel@tonic-gate /*
23200Sstevel@tonic-gate  * uint_t hat_getattr(hat, addr, *attr)
23210Sstevel@tonic-gate  *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
23220Sstevel@tonic-gate  *	mapping and *attr is valid, nonzero if there was no mapping and
23230Sstevel@tonic-gate  *	*attr is not valid.
23240Sstevel@tonic-gate  */
23250Sstevel@tonic-gate uint_t
23260Sstevel@tonic-gate hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
23270Sstevel@tonic-gate {
23280Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
23290Sstevel@tonic-gate 	htable_t	*ht = NULL;
23300Sstevel@tonic-gate 	x86pte_t	pte;
23310Sstevel@tonic-gate 
23323446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
23330Sstevel@tonic-gate 
23340Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
23350Sstevel@tonic-gate 		return ((uint_t)-1);
23360Sstevel@tonic-gate 
23373446Smrj 	ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
23380Sstevel@tonic-gate 	if (ht == NULL)
23390Sstevel@tonic-gate 		return ((uint_t)-1);
23400Sstevel@tonic-gate 
23410Sstevel@tonic-gate 	if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
23420Sstevel@tonic-gate 		htable_release(ht);
23430Sstevel@tonic-gate 		return ((uint_t)-1);
23440Sstevel@tonic-gate 	}
23450Sstevel@tonic-gate 
23460Sstevel@tonic-gate 	*attr = PROT_READ;
23470Sstevel@tonic-gate 	if (PTE_GET(pte, PT_WRITABLE))
23480Sstevel@tonic-gate 		*attr |= PROT_WRITE;
23490Sstevel@tonic-gate 	if (PTE_GET(pte, PT_USER))
23500Sstevel@tonic-gate 		*attr |= PROT_USER;
23510Sstevel@tonic-gate 	if (!PTE_GET(pte, mmu.pt_nx))
23520Sstevel@tonic-gate 		*attr |= PROT_EXEC;
23533446Smrj 	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
23540Sstevel@tonic-gate 		*attr |= HAT_NOSYNC;
23550Sstevel@tonic-gate 	htable_release(ht);
23560Sstevel@tonic-gate 	return (0);
23570Sstevel@tonic-gate }
23580Sstevel@tonic-gate 
23590Sstevel@tonic-gate /*
23600Sstevel@tonic-gate  * hat_updateattr() applies the given attribute change to an existing mapping
23610Sstevel@tonic-gate  */
23620Sstevel@tonic-gate #define	HAT_LOAD_ATTR		1
23630Sstevel@tonic-gate #define	HAT_SET_ATTR		2
23640Sstevel@tonic-gate #define	HAT_CLR_ATTR		3
23650Sstevel@tonic-gate 
23660Sstevel@tonic-gate static void
23670Sstevel@tonic-gate hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
23680Sstevel@tonic-gate {
23690Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
23700Sstevel@tonic-gate 	uintptr_t	eaddr = (uintptr_t)addr + len;
23710Sstevel@tonic-gate 	htable_t	*ht = NULL;
23720Sstevel@tonic-gate 	uint_t		entry;
23730Sstevel@tonic-gate 	x86pte_t	oldpte, newpte;
23740Sstevel@tonic-gate 	page_t		*pp;
23750Sstevel@tonic-gate 
23760Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
23770Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
23780Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
23790Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
23800Sstevel@tonic-gate 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
23810Sstevel@tonic-gate try_again:
23820Sstevel@tonic-gate 		oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
23830Sstevel@tonic-gate 		if (ht == NULL)
23840Sstevel@tonic-gate 			break;
23853446Smrj 		if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
23860Sstevel@tonic-gate 			continue;
23870Sstevel@tonic-gate 
23880Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
23890Sstevel@tonic-gate 		if (pp == NULL)
23900Sstevel@tonic-gate 			continue;
23910Sstevel@tonic-gate 		x86_hm_enter(pp);
23920Sstevel@tonic-gate 
23930Sstevel@tonic-gate 		newpte = oldpte;
23940Sstevel@tonic-gate 		/*
23950Sstevel@tonic-gate 		 * We found a page table entry in the desired range,
23960Sstevel@tonic-gate 		 * figure out the new attributes.
23970Sstevel@tonic-gate 		 */
23980Sstevel@tonic-gate 		if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
23990Sstevel@tonic-gate 			if ((attr & PROT_WRITE) &&
24000Sstevel@tonic-gate 			    !PTE_GET(oldpte, PT_WRITABLE))
24010Sstevel@tonic-gate 				newpte |= PT_WRITABLE;
24020Sstevel@tonic-gate 
24033446Smrj 			if ((attr & HAT_NOSYNC) &&
24043446Smrj 			    PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
24050Sstevel@tonic-gate 				newpte |= PT_NOSYNC;
24060Sstevel@tonic-gate 
24070Sstevel@tonic-gate 			if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
24080Sstevel@tonic-gate 				newpte &= ~mmu.pt_nx;
24090Sstevel@tonic-gate 		}
24100Sstevel@tonic-gate 
24110Sstevel@tonic-gate 		if (what == HAT_LOAD_ATTR) {
24120Sstevel@tonic-gate 			if (!(attr & PROT_WRITE) &&
24130Sstevel@tonic-gate 			    PTE_GET(oldpte, PT_WRITABLE))
24140Sstevel@tonic-gate 				newpte &= ~PT_WRITABLE;
24150Sstevel@tonic-gate 
24163446Smrj 			if (!(attr & HAT_NOSYNC) &&
24173446Smrj 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
24183446Smrj 				newpte &= ~PT_SOFTWARE;
24190Sstevel@tonic-gate 
24200Sstevel@tonic-gate 			if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
24210Sstevel@tonic-gate 				newpte |= mmu.pt_nx;
24220Sstevel@tonic-gate 		}
24230Sstevel@tonic-gate 
24240Sstevel@tonic-gate 		if (what == HAT_CLR_ATTR) {
24250Sstevel@tonic-gate 			if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
24260Sstevel@tonic-gate 				newpte &= ~PT_WRITABLE;
24270Sstevel@tonic-gate 
24283446Smrj 			if ((attr & HAT_NOSYNC) &&
24293446Smrj 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
24303446Smrj 				newpte &= ~PT_SOFTWARE;
24310Sstevel@tonic-gate 
24320Sstevel@tonic-gate 			if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
24330Sstevel@tonic-gate 				newpte |= mmu.pt_nx;
24340Sstevel@tonic-gate 		}
24350Sstevel@tonic-gate 
24360Sstevel@tonic-gate 		/*
24373446Smrj 		 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
24383446Smrj 		 * x86pte_set() depends on this.
24393446Smrj 		 */
24403446Smrj 		if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
24413446Smrj 			newpte |= PT_REF | PT_MOD;
24423446Smrj 
24433446Smrj 		/*
24440Sstevel@tonic-gate 		 * what about PROT_READ or others? this code only handles:
24450Sstevel@tonic-gate 		 * EXEC, WRITE, NOSYNC
24460Sstevel@tonic-gate 		 */
24470Sstevel@tonic-gate 
24480Sstevel@tonic-gate 		/*
24490Sstevel@tonic-gate 		 * If new PTE really changed, update the table.
24500Sstevel@tonic-gate 		 */
24510Sstevel@tonic-gate 		if (newpte != oldpte) {
24520Sstevel@tonic-gate 			entry = htable_va2entry(vaddr, ht);
24530Sstevel@tonic-gate 			oldpte = hati_update_pte(ht, entry, oldpte, newpte);
24540Sstevel@tonic-gate 			if (oldpte != 0) {
24550Sstevel@tonic-gate 				x86_hm_exit(pp);
24560Sstevel@tonic-gate 				goto try_again;
24570Sstevel@tonic-gate 			}
24580Sstevel@tonic-gate 		}
24590Sstevel@tonic-gate 		x86_hm_exit(pp);
24600Sstevel@tonic-gate 	}
24610Sstevel@tonic-gate 	if (ht)
24620Sstevel@tonic-gate 		htable_release(ht);
24630Sstevel@tonic-gate }
24640Sstevel@tonic-gate 
24650Sstevel@tonic-gate /*
24660Sstevel@tonic-gate  * Various wrappers for hat_updateattr()
24670Sstevel@tonic-gate  */
24680Sstevel@tonic-gate void
24690Sstevel@tonic-gate hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
24700Sstevel@tonic-gate {
24713446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
24720Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
24730Sstevel@tonic-gate }
24740Sstevel@tonic-gate 
24750Sstevel@tonic-gate void
24760Sstevel@tonic-gate hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
24770Sstevel@tonic-gate {
24783446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
24790Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
24800Sstevel@tonic-gate }
24810Sstevel@tonic-gate 
24820Sstevel@tonic-gate void
24830Sstevel@tonic-gate hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
24840Sstevel@tonic-gate {
24853446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
24860Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
24870Sstevel@tonic-gate }
24880Sstevel@tonic-gate 
24890Sstevel@tonic-gate void
24900Sstevel@tonic-gate hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
24910Sstevel@tonic-gate {
24923446Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
24930Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
24940Sstevel@tonic-gate }
24950Sstevel@tonic-gate 
24960Sstevel@tonic-gate /*
24970Sstevel@tonic-gate  * size_t hat_getpagesize(hat, addr)
24980Sstevel@tonic-gate  *	returns pagesize in bytes for <hat, addr>. returns -1 of there is
24990Sstevel@tonic-gate  *	no mapping. This is an advisory call.
25000Sstevel@tonic-gate  */
25010Sstevel@tonic-gate ssize_t
25020Sstevel@tonic-gate hat_getpagesize(hat_t *hat, caddr_t addr)
25030Sstevel@tonic-gate {
25040Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
25050Sstevel@tonic-gate 	htable_t	*ht;
25060Sstevel@tonic-gate 	size_t		pagesize;
25070Sstevel@tonic-gate 
25083446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
25090Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
25100Sstevel@tonic-gate 		return (-1);
25110Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, NULL);
25120Sstevel@tonic-gate 	if (ht == NULL)
25130Sstevel@tonic-gate 		return (-1);
25140Sstevel@tonic-gate 	pagesize = LEVEL_SIZE(ht->ht_level);
25150Sstevel@tonic-gate 	htable_release(ht);
25160Sstevel@tonic-gate 	return (pagesize);
25170Sstevel@tonic-gate }
25180Sstevel@tonic-gate 
25190Sstevel@tonic-gate 
25200Sstevel@tonic-gate 
25210Sstevel@tonic-gate /*
25220Sstevel@tonic-gate  * pfn_t hat_getpfnum(hat, addr)
25230Sstevel@tonic-gate  *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
25240Sstevel@tonic-gate  */
25250Sstevel@tonic-gate pfn_t
25260Sstevel@tonic-gate hat_getpfnum(hat_t *hat, caddr_t addr)
25270Sstevel@tonic-gate {
25280Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
25290Sstevel@tonic-gate 	htable_t	*ht;
25300Sstevel@tonic-gate 	uint_t		entry;
25310Sstevel@tonic-gate 	pfn_t		pfn = PFN_INVALID;
25320Sstevel@tonic-gate 
25333446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
25340Sstevel@tonic-gate 	if (khat_running == 0)
25353446Smrj 		return (PFN_INVALID);
25360Sstevel@tonic-gate 
25370Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
25380Sstevel@tonic-gate 		return (PFN_INVALID);
25390Sstevel@tonic-gate 
25400Sstevel@tonic-gate 	/*
25410Sstevel@tonic-gate 	 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
25420Sstevel@tonic-gate 	 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
25430Sstevel@tonic-gate 	 * this up.
25440Sstevel@tonic-gate 	 */
25450Sstevel@tonic-gate 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
25460Sstevel@tonic-gate 		x86pte_t pte;
25473446Smrj 		pgcnt_t pg_index;
25483446Smrj 
25493446Smrj 		pg_index = mmu_btop(vaddr - mmu.kmap_addr);
25503446Smrj 		pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
25510Sstevel@tonic-gate 		if (!PTE_ISVALID(pte))
25520Sstevel@tonic-gate 			return (PFN_INVALID);
25530Sstevel@tonic-gate 		/*LINTED [use of constant 0 causes a silly lint warning] */
25540Sstevel@tonic-gate 		return (PTE2PFN(pte, 0));
25550Sstevel@tonic-gate 	}
25560Sstevel@tonic-gate 
25570Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, &entry);
25580Sstevel@tonic-gate 	if (ht == NULL)
25590Sstevel@tonic-gate 		return (PFN_INVALID);
25600Sstevel@tonic-gate 	ASSERT(vaddr >= ht->ht_vaddr);
25610Sstevel@tonic-gate 	ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
25620Sstevel@tonic-gate 	pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
25630Sstevel@tonic-gate 	if (ht->ht_level > 0)
25640Sstevel@tonic-gate 		pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
25650Sstevel@tonic-gate 	htable_release(ht);
25660Sstevel@tonic-gate 	return (pfn);
25670Sstevel@tonic-gate }
25680Sstevel@tonic-gate 
25690Sstevel@tonic-gate /*
25700Sstevel@tonic-gate  * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
25710Sstevel@tonic-gate  * Use hat_getpfnum(kas.a_hat, ...) instead.
25720Sstevel@tonic-gate  *
25730Sstevel@tonic-gate  * We'd like to return PFN_INVALID if the mappings have underlying page_t's
25740Sstevel@tonic-gate  * but can't right now due to the fact that some software has grown to use
25750Sstevel@tonic-gate  * this interface incorrectly. So for now when the interface is misused,
25760Sstevel@tonic-gate  * return a warning to the user that in the future it won't work in the
25770Sstevel@tonic-gate  * way they're abusing it, and carry on.
25780Sstevel@tonic-gate  *
25790Sstevel@tonic-gate  * Note that hat_getkpfnum() is never supported on amd64.
25800Sstevel@tonic-gate  */
25810Sstevel@tonic-gate #if !defined(__amd64)
25820Sstevel@tonic-gate pfn_t
25830Sstevel@tonic-gate hat_getkpfnum(caddr_t addr)
25840Sstevel@tonic-gate {
25850Sstevel@tonic-gate 	pfn_t	pfn;
25860Sstevel@tonic-gate 	int badcaller = 0;
25870Sstevel@tonic-gate 
25880Sstevel@tonic-gate 	if (khat_running == 0)
25890Sstevel@tonic-gate 		panic("hat_getkpfnum(): called too early\n");
25900Sstevel@tonic-gate 	if ((uintptr_t)addr < kernelbase)
25910Sstevel@tonic-gate 		return (PFN_INVALID);
25920Sstevel@tonic-gate 
25930Sstevel@tonic-gate 
25940Sstevel@tonic-gate 	if (segkpm && IS_KPM_ADDR(addr)) {
25950Sstevel@tonic-gate 		badcaller = 1;
25960Sstevel@tonic-gate 		pfn = hat_kpm_va2pfn(addr);
25970Sstevel@tonic-gate 	} else {
25980Sstevel@tonic-gate 		pfn = hat_getpfnum(kas.a_hat, addr);
25990Sstevel@tonic-gate 		badcaller = pf_is_memory(pfn);
26000Sstevel@tonic-gate 	}
26010Sstevel@tonic-gate 
26020Sstevel@tonic-gate 	if (badcaller)
26030Sstevel@tonic-gate 		hat_getkpfnum_badcall(caller());
26040Sstevel@tonic-gate 	return (pfn);
26050Sstevel@tonic-gate }
26060Sstevel@tonic-gate #endif /* __amd64 */
26070Sstevel@tonic-gate 
26080Sstevel@tonic-gate /*
26090Sstevel@tonic-gate  * int hat_probe(hat, addr)
26100Sstevel@tonic-gate  *	return 0 if no valid mapping is present.  Faster version
26110Sstevel@tonic-gate  *	of hat_getattr in certain architectures.
26120Sstevel@tonic-gate  */
26130Sstevel@tonic-gate int
26140Sstevel@tonic-gate hat_probe(hat_t *hat, caddr_t addr)
26150Sstevel@tonic-gate {
26160Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
26170Sstevel@tonic-gate 	uint_t		entry;
26180Sstevel@tonic-gate 	htable_t	*ht;
26190Sstevel@tonic-gate 	pgcnt_t		pg_off;
26200Sstevel@tonic-gate 
26213446Smrj 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
26220Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
26230Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
26240Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
26250Sstevel@tonic-gate 		return (0);
26260Sstevel@tonic-gate 
26270Sstevel@tonic-gate 	/*
26280Sstevel@tonic-gate 	 * Most common use of hat_probe is from segmap. We special case it
26290Sstevel@tonic-gate 	 * for performance.
26300Sstevel@tonic-gate 	 */
26310Sstevel@tonic-gate 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
26320Sstevel@tonic-gate 		pg_off = mmu_btop(vaddr - mmu.kmap_addr);
26330Sstevel@tonic-gate 		if (mmu.pae_hat)
26340Sstevel@tonic-gate 			return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
26350Sstevel@tonic-gate 		else
26360Sstevel@tonic-gate 			return (PTE_ISVALID(
26370Sstevel@tonic-gate 			    ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
26380Sstevel@tonic-gate 	}
26390Sstevel@tonic-gate 
26400Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, &entry);
26410Sstevel@tonic-gate 	if (ht == NULL)
26420Sstevel@tonic-gate 		return (0);
26430Sstevel@tonic-gate 	htable_release(ht);
26440Sstevel@tonic-gate 	return (1);
26450Sstevel@tonic-gate }
26460Sstevel@tonic-gate 
26470Sstevel@tonic-gate /*
26484381Sjosephb  * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
26494381Sjosephb  */
26504381Sjosephb static int
26514381Sjosephb is_it_dism(hat_t *hat, caddr_t va)
26524381Sjosephb {
26534381Sjosephb 	struct seg *seg;
26544381Sjosephb 	struct shm_data *shmd;
26554381Sjosephb 	struct spt_data *sptd;
26564381Sjosephb 
26574381Sjosephb 	seg = as_findseg(hat->hat_as, va, 0);
26584381Sjosephb 	ASSERT(seg != NULL);
26594381Sjosephb 	ASSERT(seg->s_base <= va);
26604381Sjosephb 	shmd = (struct shm_data *)seg->s_data;
26614381Sjosephb 	ASSERT(shmd != NULL);
26624381Sjosephb 	sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
26634381Sjosephb 	ASSERT(sptd != NULL);
26644381Sjosephb 	if (sptd->spt_flags & SHM_PAGEABLE)
26654381Sjosephb 		return (1);
26664381Sjosephb 	return (0);
26674381Sjosephb }
26684381Sjosephb 
26694381Sjosephb /*
26704381Sjosephb  * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
26710Sstevel@tonic-gate  * except that we use the ism_hat's existing mappings to determine the pages
26724381Sjosephb  * and protections to use for this hat. If we find a full properly aligned
26734381Sjosephb  * and sized pagetable, we will attempt to share the pagetable itself.
26740Sstevel@tonic-gate  */
26750Sstevel@tonic-gate /*ARGSUSED*/
26760Sstevel@tonic-gate int
26770Sstevel@tonic-gate hat_share(
26780Sstevel@tonic-gate 	hat_t		*hat,
26790Sstevel@tonic-gate 	caddr_t		addr,
26800Sstevel@tonic-gate 	hat_t		*ism_hat,
26810Sstevel@tonic-gate 	caddr_t		src_addr,
26820Sstevel@tonic-gate 	size_t		len,	/* almost useless value, see below.. */
26830Sstevel@tonic-gate 	uint_t		ismszc)
26840Sstevel@tonic-gate {
26850Sstevel@tonic-gate 	uintptr_t	vaddr_start = (uintptr_t)addr;
26860Sstevel@tonic-gate 	uintptr_t	vaddr;
26870Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr_start + len;
26880Sstevel@tonic-gate 	uintptr_t	ism_addr_start = (uintptr_t)src_addr;
26890Sstevel@tonic-gate 	uintptr_t	ism_addr = ism_addr_start;
26900Sstevel@tonic-gate 	uintptr_t	e_ism_addr = ism_addr + len;
26910Sstevel@tonic-gate 	htable_t	*ism_ht = NULL;
26920Sstevel@tonic-gate 	htable_t	*ht;
26930Sstevel@tonic-gate 	x86pte_t	pte;
26940Sstevel@tonic-gate 	page_t		*pp;
26950Sstevel@tonic-gate 	pfn_t		pfn;
26960Sstevel@tonic-gate 	level_t		l;
26970Sstevel@tonic-gate 	pgcnt_t		pgcnt;
26980Sstevel@tonic-gate 	uint_t		prot;
26994381Sjosephb 	int		is_dism;
27004381Sjosephb 	int		flags;
27010Sstevel@tonic-gate 
27020Sstevel@tonic-gate 	/*
27030Sstevel@tonic-gate 	 * We might be asked to share an empty DISM hat by as_dup()
27040Sstevel@tonic-gate 	 */
27050Sstevel@tonic-gate 	ASSERT(hat != kas.a_hat);
27063446Smrj 	ASSERT(eaddr <= _userlimit);
27070Sstevel@tonic-gate 	if (!(ism_hat->hat_flags & HAT_SHARED)) {
27080Sstevel@tonic-gate 		ASSERT(hat_get_mapped_size(ism_hat) == 0);
27090Sstevel@tonic-gate 		return (0);
27100Sstevel@tonic-gate 	}
27110Sstevel@tonic-gate 
27120Sstevel@tonic-gate 	/*
27130Sstevel@tonic-gate 	 * The SPT segment driver often passes us a size larger than there are
27140Sstevel@tonic-gate 	 * valid mappings. That's because it rounds the segment size up to a
27150Sstevel@tonic-gate 	 * large pagesize, even if the actual memory mapped by ism_hat is less.
27160Sstevel@tonic-gate 	 */
27170Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr_start));
27180Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(ism_addr_start));
27190Sstevel@tonic-gate 	ASSERT(ism_hat->hat_flags & HAT_SHARED);
27204381Sjosephb 	is_dism = is_it_dism(hat, addr);
27210Sstevel@tonic-gate 	while (ism_addr < e_ism_addr) {
27220Sstevel@tonic-gate 		/*
27230Sstevel@tonic-gate 		 * use htable_walk to get the next valid ISM mapping
27240Sstevel@tonic-gate 		 */
27250Sstevel@tonic-gate 		pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
27260Sstevel@tonic-gate 		if (ism_ht == NULL)
27270Sstevel@tonic-gate 			break;
27280Sstevel@tonic-gate 
27290Sstevel@tonic-gate 		/*
27304381Sjosephb 		 * First check to see if we already share the page table.
27314381Sjosephb 		 */
27324381Sjosephb 		l = ism_ht->ht_level;
27334381Sjosephb 		vaddr = vaddr_start + (ism_addr - ism_addr_start);
27344381Sjosephb 		ht = htable_lookup(hat, vaddr, l);
27354381Sjosephb 		if (ht != NULL) {
27364381Sjosephb 			if (ht->ht_flags & HTABLE_SHARED_PFN)
27374381Sjosephb 				goto shared;
27384381Sjosephb 			htable_release(ht);
27394381Sjosephb 			goto not_shared;
27404381Sjosephb 		}
27414381Sjosephb 
27424381Sjosephb 		/*
27434381Sjosephb 		 * Can't ever share top table.
27444381Sjosephb 		 */
27454381Sjosephb 		if (l == mmu.max_level)
27464381Sjosephb 			goto not_shared;
27474381Sjosephb 
27484381Sjosephb 		/*
27494381Sjosephb 		 * Avoid level mismatches later due to DISM faults.
27504381Sjosephb 		 */
27514381Sjosephb 		if (is_dism && l > 0)
27524381Sjosephb 			goto not_shared;
27534381Sjosephb 
27544381Sjosephb 		/*
27554381Sjosephb 		 * addresses and lengths must align
27564381Sjosephb 		 * table must be fully populated
27574381Sjosephb 		 * no lower level page tables
27584381Sjosephb 		 */
27594381Sjosephb 		if (ism_addr != ism_ht->ht_vaddr ||
27604381Sjosephb 		    (vaddr & LEVEL_OFFSET(l + 1)) != 0)
27614381Sjosephb 			goto not_shared;
27624381Sjosephb 
27634381Sjosephb 		/*
27644381Sjosephb 		 * The range of address space must cover a full table.
27650Sstevel@tonic-gate 		 */
27664381Sjosephb 		if (e_ism_addr - ism_addr < LEVEL_SIZE(1 + 1))
27674381Sjosephb 			goto not_shared;
27684381Sjosephb 
27694381Sjosephb 		/*
27704381Sjosephb 		 * All entries in the ISM page table must be leaf PTEs.
27714381Sjosephb 		 */
27724381Sjosephb 		if (l > 0) {
27734381Sjosephb 			int e;
27744381Sjosephb 
27754381Sjosephb 			/*
27764381Sjosephb 			 * We know the 0th is from htable_walk() above.
27774381Sjosephb 			 */
27784381Sjosephb 			for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
27794381Sjosephb 				x86pte_t pte;
27804381Sjosephb 				pte = x86pte_get(ism_ht, e);
27814381Sjosephb 				if (!PTE_ISPAGE(pte, l))
27824381Sjosephb 					goto not_shared;
27834381Sjosephb 			}
27844381Sjosephb 		}
27854381Sjosephb 
27864381Sjosephb 		/*
27874381Sjosephb 		 * share the page table
27884381Sjosephb 		 */
27894381Sjosephb 		ht = htable_create(hat, vaddr, l, ism_ht);
27904381Sjosephb shared:
27914381Sjosephb 		ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
27924381Sjosephb 		ASSERT(ht->ht_shares == ism_ht);
27934381Sjosephb 		hat->hat_ism_pgcnt +=
27944381Sjosephb 		    (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
27954381Sjosephb 		    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
27964381Sjosephb 		ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
27974381Sjosephb 		htable_release(ht);
27984381Sjosephb 		ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
27994381Sjosephb 		htable_release(ism_ht);
28004381Sjosephb 		ism_ht = NULL;
28014381Sjosephb 		continue;
28024381Sjosephb 
28034381Sjosephb not_shared:
28044381Sjosephb 		/*
28054381Sjosephb 		 * Unable to share the page table. Instead we will
28064381Sjosephb 		 * create new mappings from the values in the ISM mappings.
28074381Sjosephb 		 * Figure out what level size mappings to use;
28084381Sjosephb 		 */
28090Sstevel@tonic-gate 		for (l = ism_ht->ht_level; l > 0; --l) {
28100Sstevel@tonic-gate 			if (LEVEL_SIZE(l) <= eaddr - vaddr &&
28110Sstevel@tonic-gate 			    (vaddr & LEVEL_OFFSET(l)) == 0)
28120Sstevel@tonic-gate 				break;
28130Sstevel@tonic-gate 		}
28140Sstevel@tonic-gate 
28150Sstevel@tonic-gate 		/*
28160Sstevel@tonic-gate 		 * The ISM mapping might be larger than the share area,
28174381Sjosephb 		 * be careful to truncate it if needed.
28180Sstevel@tonic-gate 		 */
28190Sstevel@tonic-gate 		if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
28200Sstevel@tonic-gate 			pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
28210Sstevel@tonic-gate 		} else {
28220Sstevel@tonic-gate 			pgcnt = mmu_btop(eaddr - vaddr);
28230Sstevel@tonic-gate 			l = 0;
28240Sstevel@tonic-gate 		}
28250Sstevel@tonic-gate 
28260Sstevel@tonic-gate 		pfn = PTE2PFN(pte, ism_ht->ht_level);
28270Sstevel@tonic-gate 		ASSERT(pfn != PFN_INVALID);
28280Sstevel@tonic-gate 		while (pgcnt > 0) {
28290Sstevel@tonic-gate 			/*
28300Sstevel@tonic-gate 			 * Make a new pte for the PFN for this level.
28310Sstevel@tonic-gate 			 * Copy protections for the pte from the ISM pte.
28320Sstevel@tonic-gate 			 */
28330Sstevel@tonic-gate 			pp = page_numtopp_nolock(pfn);
28340Sstevel@tonic-gate 			ASSERT(pp != NULL);
28350Sstevel@tonic-gate 
28360Sstevel@tonic-gate 			prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
28370Sstevel@tonic-gate 			if (PTE_GET(pte, PT_WRITABLE))
28380Sstevel@tonic-gate 				prot |= PROT_WRITE;
28390Sstevel@tonic-gate 			if (!PTE_GET(pte, PT_NX))
28400Sstevel@tonic-gate 				prot |= PROT_EXEC;
28410Sstevel@tonic-gate 
28424381Sjosephb 			flags = HAT_LOAD;
28434381Sjosephb 			if (!is_dism)
28444381Sjosephb 				flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
28454381Sjosephb 			while (hati_load_common(hat, vaddr, pp, prot, flags,
28463446Smrj 			    l, pfn) != 0) {
28473446Smrj 				if (l == 0)
28483446Smrj 					panic("hati_load_common() failure");
28493446Smrj 				--l;
28503446Smrj 			}
28510Sstevel@tonic-gate 
28520Sstevel@tonic-gate 			vaddr += LEVEL_SIZE(l);
28530Sstevel@tonic-gate 			ism_addr += LEVEL_SIZE(l);
28540Sstevel@tonic-gate 			pfn += mmu_btop(LEVEL_SIZE(l));
28550Sstevel@tonic-gate 			pgcnt -= mmu_btop(LEVEL_SIZE(l));
28560Sstevel@tonic-gate 		}
28570Sstevel@tonic-gate 	}
28580Sstevel@tonic-gate 	if (ism_ht != NULL)
28590Sstevel@tonic-gate 		htable_release(ism_ht);
28600Sstevel@tonic-gate 	return (0);
28610Sstevel@tonic-gate }
28620Sstevel@tonic-gate 
28630Sstevel@tonic-gate 
28640Sstevel@tonic-gate /*
28650Sstevel@tonic-gate  * hat_unshare() is similar to hat_unload_callback(), but
28660Sstevel@tonic-gate  * we have to look for empty shared pagetables. Note that
28670Sstevel@tonic-gate  * hat_unshare() is always invoked against an entire segment.
28680Sstevel@tonic-gate  */
28690Sstevel@tonic-gate /*ARGSUSED*/
28700Sstevel@tonic-gate void
28710Sstevel@tonic-gate hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
28720Sstevel@tonic-gate {
2873*4654Sjosephb 	uint64_t	vaddr = (uintptr_t)addr;
28740Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
28750Sstevel@tonic-gate 	htable_t	*ht = NULL;
28760Sstevel@tonic-gate 	uint_t		need_demaps = 0;
28774381Sjosephb 	int		flags = HAT_UNLOAD_UNMAP;
28784381Sjosephb 	level_t		l;
28790Sstevel@tonic-gate 
28800Sstevel@tonic-gate 	ASSERT(hat != kas.a_hat);
28813446Smrj 	ASSERT(eaddr <= _userlimit);
28820Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
28830Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
28840Sstevel@tonic-gate 
28850Sstevel@tonic-gate 	/*
28860Sstevel@tonic-gate 	 * First go through and remove any shared pagetables.
28870Sstevel@tonic-gate 	 *
28883446Smrj 	 * Note that it's ok to delay the TLB shootdown till the entire range is
28890Sstevel@tonic-gate 	 * finished, because if hat_pageunload() were to unload a shared
28903446Smrj 	 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate.
28910Sstevel@tonic-gate 	 */
28924381Sjosephb 	l = mmu.max_page_level;
28934381Sjosephb 	if (l == mmu.max_level)
28944381Sjosephb 		--l;
28954381Sjosephb 	for (; l >= 0; --l) {
28964381Sjosephb 		for (vaddr = (uintptr_t)addr; vaddr < eaddr;
28974381Sjosephb 		    vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
28984381Sjosephb 			ASSERT(!IN_VA_HOLE(vaddr));
28994381Sjosephb 			/*
29004381Sjosephb 			 * find a pagetable that maps the current address
29014381Sjosephb 			 */
29024381Sjosephb 			ht = htable_lookup(hat, vaddr, l);
29034381Sjosephb 			if (ht == NULL)
29044381Sjosephb 				continue;
29050Sstevel@tonic-gate 			if (ht->ht_flags & HTABLE_SHARED_PFN) {
29060Sstevel@tonic-gate 				/*
29074381Sjosephb 				 * clear page count, set valid_cnt to 0,
29084381Sjosephb 				 * let htable_release() finish the job
29090Sstevel@tonic-gate 				 */
29104381Sjosephb 				hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
29114381Sjosephb 				    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
29120Sstevel@tonic-gate 				ht->ht_valid_cnt = 0;
29130Sstevel@tonic-gate 				need_demaps = 1;
29140Sstevel@tonic-gate 			}
29150Sstevel@tonic-gate 			htable_release(ht);
29160Sstevel@tonic-gate 		}
29170Sstevel@tonic-gate 	}
29180Sstevel@tonic-gate 
29190Sstevel@tonic-gate 	/*
29200Sstevel@tonic-gate 	 * flush the TLBs - since we're probably dealing with MANY mappings
29210Sstevel@tonic-gate 	 * we do just one CR3 reload.
29220Sstevel@tonic-gate 	 */
29230Sstevel@tonic-gate 	if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
29243446Smrj 		hat_tlb_inval(hat, DEMAP_ALL_ADDR);
29250Sstevel@tonic-gate 
29260Sstevel@tonic-gate 	/*
29270Sstevel@tonic-gate 	 * Now go back and clean up any unaligned mappings that
29280Sstevel@tonic-gate 	 * couldn't share pagetables.
29290Sstevel@tonic-gate 	 */
29304381Sjosephb 	if (!is_it_dism(hat, addr))
29314381Sjosephb 		flags |= HAT_UNLOAD_UNLOCK;
29324381Sjosephb 	hat_unload(hat, addr, len, flags);
29330Sstevel@tonic-gate }
29340Sstevel@tonic-gate 
29350Sstevel@tonic-gate 
29360Sstevel@tonic-gate /*
29370Sstevel@tonic-gate  * hat_reserve() does nothing
29380Sstevel@tonic-gate  */
29390Sstevel@tonic-gate /*ARGSUSED*/
29400Sstevel@tonic-gate void
29410Sstevel@tonic-gate hat_reserve(struct as *as, caddr_t addr, size_t len)
29420Sstevel@tonic-gate {
29430Sstevel@tonic-gate }
29440Sstevel@tonic-gate 
29450Sstevel@tonic-gate 
29460Sstevel@tonic-gate /*
29470Sstevel@tonic-gate  * Called when all mappings to a page should have write permission removed.
29480Sstevel@tonic-gate  * Mostly stolem from hat_pagesync()
29490Sstevel@tonic-gate  */
29500Sstevel@tonic-gate static void
29510Sstevel@tonic-gate hati_page_clrwrt(struct page *pp)
29520Sstevel@tonic-gate {
29530Sstevel@tonic-gate 	hment_t		*hm = NULL;
29540Sstevel@tonic-gate 	htable_t	*ht;
29550Sstevel@tonic-gate 	uint_t		entry;
29560Sstevel@tonic-gate 	x86pte_t	old;
29570Sstevel@tonic-gate 	x86pte_t	new;
29580Sstevel@tonic-gate 	uint_t		pszc = 0;
29590Sstevel@tonic-gate 
29600Sstevel@tonic-gate next_size:
29610Sstevel@tonic-gate 	/*
29620Sstevel@tonic-gate 	 * walk thru the mapping list clearing write permission
29630Sstevel@tonic-gate 	 */
29640Sstevel@tonic-gate 	x86_hm_enter(pp);
29650Sstevel@tonic-gate 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
29660Sstevel@tonic-gate 		if (ht->ht_level < pszc)
29670Sstevel@tonic-gate 			continue;
29680Sstevel@tonic-gate 		old = x86pte_get(ht, entry);
29690Sstevel@tonic-gate 
29700Sstevel@tonic-gate 		for (;;) {
29710Sstevel@tonic-gate 			/*
29720Sstevel@tonic-gate 			 * Is this mapping of interest?
29730Sstevel@tonic-gate 			 */
29740Sstevel@tonic-gate 			if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
29750Sstevel@tonic-gate 			    PTE_GET(old, PT_WRITABLE) == 0)
29760Sstevel@tonic-gate 				break;
29770Sstevel@tonic-gate 
29780Sstevel@tonic-gate 			/*
29790Sstevel@tonic-gate 			 * Clear ref/mod writable bits. This requires cross
29800Sstevel@tonic-gate 			 * calls to ensure any executing TLBs see cleared bits.
29810Sstevel@tonic-gate 			 */
29820Sstevel@tonic-gate 			new = old;
29830Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
29840Sstevel@tonic-gate 			old = hati_update_pte(ht, entry, old, new);
29850Sstevel@tonic-gate 			if (old != 0)
29860Sstevel@tonic-gate 				continue;
29870Sstevel@tonic-gate 
29880Sstevel@tonic-gate 			break;
29890Sstevel@tonic-gate 		}
29900Sstevel@tonic-gate 	}
29910Sstevel@tonic-gate 	x86_hm_exit(pp);
29920Sstevel@tonic-gate 	while (pszc < pp->p_szc) {
29930Sstevel@tonic-gate 		page_t *tpp;
29940Sstevel@tonic-gate 		pszc++;
29950Sstevel@tonic-gate 		tpp = PP_GROUPLEADER(pp, pszc);
29960Sstevel@tonic-gate 		if (pp != tpp) {
29970Sstevel@tonic-gate 			pp = tpp;
29980Sstevel@tonic-gate 			goto next_size;
29990Sstevel@tonic-gate 		}
30000Sstevel@tonic-gate 	}
30010Sstevel@tonic-gate }
30020Sstevel@tonic-gate 
30030Sstevel@tonic-gate /*
30040Sstevel@tonic-gate  * void hat_page_setattr(pp, flag)
30050Sstevel@tonic-gate  * void hat_page_clrattr(pp, flag)
30060Sstevel@tonic-gate  *	used to set/clr ref/mod bits.
30070Sstevel@tonic-gate  */
30080Sstevel@tonic-gate void
30090Sstevel@tonic-gate hat_page_setattr(struct page *pp, uint_t flag)
30100Sstevel@tonic-gate {
30110Sstevel@tonic-gate 	vnode_t		*vp = pp->p_vnode;
30120Sstevel@tonic-gate 	kmutex_t	*vphm = NULL;
30130Sstevel@tonic-gate 	page_t		**listp;
30144324Sqiao 	int		noshuffle;
30154324Sqiao 
30164324Sqiao 	noshuffle = flag & P_NSH;
30174324Sqiao 	flag &= ~P_NSH;
30180Sstevel@tonic-gate 
30190Sstevel@tonic-gate 	if (PP_GETRM(pp, flag) == flag)
30200Sstevel@tonic-gate 		return;
30210Sstevel@tonic-gate 
30224324Sqiao 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
30234324Sqiao 	    !noshuffle) {
30240Sstevel@tonic-gate 		vphm = page_vnode_mutex(vp);
30250Sstevel@tonic-gate 		mutex_enter(vphm);
30260Sstevel@tonic-gate 	}
30270Sstevel@tonic-gate 
30280Sstevel@tonic-gate 	PP_SETRM(pp, flag);
30290Sstevel@tonic-gate 
30300Sstevel@tonic-gate 	if (vphm != NULL) {
30310Sstevel@tonic-gate 
30320Sstevel@tonic-gate 		/*
30330Sstevel@tonic-gate 		 * Some File Systems examine v_pages for NULL w/o
30340Sstevel@tonic-gate 		 * grabbing the vphm mutex. Must not let it become NULL when
30350Sstevel@tonic-gate 		 * pp is the only page on the list.
30360Sstevel@tonic-gate 		 */
30370Sstevel@tonic-gate 		if (pp->p_vpnext != pp) {
30380Sstevel@tonic-gate 			page_vpsub(&vp->v_pages, pp);
30390Sstevel@tonic-gate 			if (vp->v_pages != NULL)
30400Sstevel@tonic-gate 				listp = &vp->v_pages->p_vpprev->p_vpnext;
30410Sstevel@tonic-gate 			else
30420Sstevel@tonic-gate 				listp = &vp->v_pages;
30430Sstevel@tonic-gate 			page_vpadd(listp, pp);
30440Sstevel@tonic-gate 		}
30450Sstevel@tonic-gate 		mutex_exit(vphm);
30460Sstevel@tonic-gate 	}
30470Sstevel@tonic-gate }
30480Sstevel@tonic-gate 
30490Sstevel@tonic-gate void
30500Sstevel@tonic-gate hat_page_clrattr(struct page *pp, uint_t flag)
30510Sstevel@tonic-gate {
30520Sstevel@tonic-gate 	vnode_t		*vp = pp->p_vnode;
30530Sstevel@tonic-gate 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
30540Sstevel@tonic-gate 
30550Sstevel@tonic-gate 	/*
30562999Sstans 	 * Caller is expected to hold page's io lock for VMODSORT to work
30572999Sstans 	 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
30582999Sstans 	 * bit is cleared.
30592999Sstans 	 * We don't have assert to avoid tripping some existing third party
30602999Sstans 	 * code. The dirty page is moved back to top of the v_page list
30612999Sstans 	 * after IO is done in pvn_write_done().
30620Sstevel@tonic-gate 	 */
30630Sstevel@tonic-gate 	PP_CLRRM(pp, flag);
30640Sstevel@tonic-gate 
30652999Sstans 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
30660Sstevel@tonic-gate 
30670Sstevel@tonic-gate 		/*
30680Sstevel@tonic-gate 		 * VMODSORT works by removing write permissions and getting
30690Sstevel@tonic-gate 		 * a fault when a page is made dirty. At this point
30700Sstevel@tonic-gate 		 * we need to remove write permission from all mappings
30710Sstevel@tonic-gate 		 * to this page.
30720Sstevel@tonic-gate 		 */
30730Sstevel@tonic-gate 		hati_page_clrwrt(pp);
30740Sstevel@tonic-gate 	}
30750Sstevel@tonic-gate }
30760Sstevel@tonic-gate 
30770Sstevel@tonic-gate /*
30780Sstevel@tonic-gate  *	If flag is specified, returns 0 if attribute is disabled
30790Sstevel@tonic-gate  *	and non zero if enabled.  If flag specifes multiple attributs
30800Sstevel@tonic-gate  *	then returns 0 if ALL atriibutes are disabled.  This is an advisory
30810Sstevel@tonic-gate  *	call.
30820Sstevel@tonic-gate  */
30830Sstevel@tonic-gate uint_t
30840Sstevel@tonic-gate hat_page_getattr(struct page *pp, uint_t flag)
30850Sstevel@tonic-gate {
30860Sstevel@tonic-gate 	return (PP_GETRM(pp, flag));
30870Sstevel@tonic-gate }
30880Sstevel@tonic-gate 
30890Sstevel@tonic-gate 
30900Sstevel@tonic-gate /*
30910Sstevel@tonic-gate  * common code used by hat_pageunload() and hment_steal()
30920Sstevel@tonic-gate  */
30930Sstevel@tonic-gate hment_t *
30940Sstevel@tonic-gate hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
30950Sstevel@tonic-gate {
30960Sstevel@tonic-gate 	x86pte_t old_pte;
30970Sstevel@tonic-gate 	pfn_t pfn = pp->p_pagenum;
30980Sstevel@tonic-gate 	hment_t *hm;
30990Sstevel@tonic-gate 
31000Sstevel@tonic-gate 	/*
31010Sstevel@tonic-gate 	 * We need to acquire a hold on the htable in order to
31020Sstevel@tonic-gate 	 * do the invalidate. We know the htable must exist, since
31030Sstevel@tonic-gate 	 * unmap's don't release the htable until after removing any
31040Sstevel@tonic-gate 	 * hment. Having x86_hm_enter() keeps that from proceeding.
31050Sstevel@tonic-gate 	 */
31060Sstevel@tonic-gate 	htable_acquire(ht);
31070Sstevel@tonic-gate 
31080Sstevel@tonic-gate 	/*
31090Sstevel@tonic-gate 	 * Invalidate the PTE and remove the hment.
31100Sstevel@tonic-gate 	 */
31113446Smrj 	old_pte = x86pte_inval(ht, entry, 0, NULL);
311247Sjosephb 	if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
31133446Smrj 		panic("x86pte_inval() failure found PTE = " FMT_PTE
311447Sjosephb 		    " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
311547Sjosephb 		    old_pte, pfn, (uintptr_t)ht, entry);
311647Sjosephb 	}
31170Sstevel@tonic-gate 
31180Sstevel@tonic-gate 	/*
31190Sstevel@tonic-gate 	 * Clean up all the htable information for this mapping
31200Sstevel@tonic-gate 	 */
31210Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
31220Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
31230Sstevel@tonic-gate 	PGCNT_DEC(ht->ht_hat, ht->ht_level);
31240Sstevel@tonic-gate 
31250Sstevel@tonic-gate 	/*
31260Sstevel@tonic-gate 	 * sync ref/mod bits to the page_t
31270Sstevel@tonic-gate 	 */
31283446Smrj 	if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
31290Sstevel@tonic-gate 		hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
31300Sstevel@tonic-gate 
31310Sstevel@tonic-gate 	/*
31320Sstevel@tonic-gate 	 * Remove the mapping list entry for this page.
31330Sstevel@tonic-gate 	 */
31340Sstevel@tonic-gate 	hm = hment_remove(pp, ht, entry);
31350Sstevel@tonic-gate 
31360Sstevel@tonic-gate 	/*
31370Sstevel@tonic-gate 	 * drop the mapping list lock so that we might free the
31380Sstevel@tonic-gate 	 * hment and htable.
31390Sstevel@tonic-gate 	 */
31400Sstevel@tonic-gate 	x86_hm_exit(pp);
31410Sstevel@tonic-gate 	htable_release(ht);
31420Sstevel@tonic-gate 	return (hm);
31430Sstevel@tonic-gate }
31440Sstevel@tonic-gate 
31451841Spraks extern int	vpm_enable;
31460Sstevel@tonic-gate /*
31470Sstevel@tonic-gate  * Unload all translations to a page. If the page is a subpage of a large
31480Sstevel@tonic-gate  * page, the large page mappings are also removed.
31490Sstevel@tonic-gate  *
31500Sstevel@tonic-gate  * The forceflags are unused.
31510Sstevel@tonic-gate  */
31520Sstevel@tonic-gate 
31530Sstevel@tonic-gate /*ARGSUSED*/
31540Sstevel@tonic-gate static int
31550Sstevel@tonic-gate hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
31560Sstevel@tonic-gate {
31570Sstevel@tonic-gate 	page_t		*cur_pp = pp;
31580Sstevel@tonic-gate 	hment_t		*hm;
31590Sstevel@tonic-gate 	hment_t		*prev;
31600Sstevel@tonic-gate 	htable_t	*ht;
31610Sstevel@tonic-gate 	uint_t		entry;
31620Sstevel@tonic-gate 	level_t		level;
31630Sstevel@tonic-gate 
31641841Spraks #if defined(__amd64)
31651841Spraks 	/*
31661841Spraks 	 * clear the vpm ref.
31671841Spraks 	 */
31681841Spraks 	if (vpm_enable) {
31691841Spraks 		pp->p_vpmref = 0;
31701841Spraks 	}
31711841Spraks #endif
31720Sstevel@tonic-gate 	/*
31730Sstevel@tonic-gate 	 * The loop with next_size handles pages with multiple pagesize mappings
31740Sstevel@tonic-gate 	 */
31750Sstevel@tonic-gate next_size:
31760Sstevel@tonic-gate 	for (;;) {
31770Sstevel@tonic-gate 
31780Sstevel@tonic-gate 		/*
31790Sstevel@tonic-gate 		 * Get a mapping list entry
31800Sstevel@tonic-gate 		 */
31810Sstevel@tonic-gate 		x86_hm_enter(cur_pp);
31820Sstevel@tonic-gate 		for (prev = NULL; ; prev = hm) {
31830Sstevel@tonic-gate 			hm = hment_walk(cur_pp, &ht, &entry, prev);
31840Sstevel@tonic-gate 			if (hm == NULL) {
31850Sstevel@tonic-gate 				x86_hm_exit(cur_pp);
31860Sstevel@tonic-gate 
31870Sstevel@tonic-gate 				/*
31880Sstevel@tonic-gate 				 * If not part of a larger page, we're done.
31890Sstevel@tonic-gate 				 */
31903446Smrj 				if (cur_pp->p_szc <= pg_szcd) {
31910Sstevel@tonic-gate 					return (0);
31923446Smrj 				}
31930Sstevel@tonic-gate 
31940Sstevel@tonic-gate 				/*
31950Sstevel@tonic-gate 				 * Else check the next larger page size.
31960Sstevel@tonic-gate 				 * hat_page_demote() may decrease p_szc
31970Sstevel@tonic-gate 				 * but that's ok we'll just take an extra
31980Sstevel@tonic-gate 				 * trip discover there're no larger mappings
31990Sstevel@tonic-gate 				 * and return.
32000Sstevel@tonic-gate 				 */
32010Sstevel@tonic-gate 				++pg_szcd;
32020Sstevel@tonic-gate 				cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
32030Sstevel@tonic-gate 				goto next_size;
32040Sstevel@tonic-gate 			}
32050Sstevel@tonic-gate 
32060Sstevel@tonic-gate 			/*
32070Sstevel@tonic-gate 			 * If this mapping size matches, remove it.
32080Sstevel@tonic-gate 			 */
32090Sstevel@tonic-gate 			level = ht->ht_level;
32100Sstevel@tonic-gate 			if (level == pg_szcd)
32110Sstevel@tonic-gate 				break;
32120Sstevel@tonic-gate 		}
32130Sstevel@tonic-gate 
32140Sstevel@tonic-gate 		/*
32150Sstevel@tonic-gate 		 * Remove the mapping list entry for this page.
32160Sstevel@tonic-gate 		 * Note this does the x86_hm_exit() for us.
32170Sstevel@tonic-gate 		 */
32180Sstevel@tonic-gate 		hm = hati_page_unmap(cur_pp, ht, entry);
32190Sstevel@tonic-gate 		if (hm != NULL)
32200Sstevel@tonic-gate 			hment_free(hm);
32210Sstevel@tonic-gate 	}
32220Sstevel@tonic-gate }
32230Sstevel@tonic-gate 
32240Sstevel@tonic-gate int
32250Sstevel@tonic-gate hat_pageunload(struct page *pp, uint_t forceflag)
32260Sstevel@tonic-gate {
32270Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp));
32280Sstevel@tonic-gate 	return (hati_pageunload(pp, 0, forceflag));
32290Sstevel@tonic-gate }
32300Sstevel@tonic-gate 
32310Sstevel@tonic-gate /*
32320Sstevel@tonic-gate  * Unload all large mappings to pp and reduce by 1 p_szc field of every large
32330Sstevel@tonic-gate  * page level that included pp.
32340Sstevel@tonic-gate  *
32350Sstevel@tonic-gate  * pp must be locked EXCL. Even though no other constituent pages are locked
32360Sstevel@tonic-gate  * it's legal to unload large mappings to pp because all constituent pages of
32370Sstevel@tonic-gate  * large locked mappings have to be locked SHARED.  therefore if we have EXCL
32380Sstevel@tonic-gate  * lock on one of constituent pages none of the large mappings to pp are
32390Sstevel@tonic-gate  * locked.
32400Sstevel@tonic-gate  *
32410Sstevel@tonic-gate  * Change (always decrease) p_szc field starting from the last constituent
32420Sstevel@tonic-gate  * page and ending with root constituent page so that root's pszc always shows
32430Sstevel@tonic-gate  * the area where hat_page_demote() may be active.
32440Sstevel@tonic-gate  *
32450Sstevel@tonic-gate  * This mechanism is only used for file system pages where it's not always
32460Sstevel@tonic-gate  * possible to get EXCL locks on all constituent pages to demote the size code
32470Sstevel@tonic-gate  * (as is done for anonymous or kernel large pages).
32480Sstevel@tonic-gate  */
32490Sstevel@tonic-gate void
32500Sstevel@tonic-gate hat_page_demote(page_t *pp)
32510Sstevel@tonic-gate {
32520Sstevel@tonic-gate 	uint_t		pszc;
32530Sstevel@tonic-gate 	uint_t		rszc;
32540Sstevel@tonic-gate 	uint_t		szc;
32550Sstevel@tonic-gate 	page_t		*rootpp;
32560Sstevel@tonic-gate 	page_t		*firstpp;
32570Sstevel@tonic-gate 	page_t		*lastpp;
32580Sstevel@tonic-gate 	pgcnt_t		pgcnt;
32590Sstevel@tonic-gate 
32600Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp));
32610Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
32620Sstevel@tonic-gate 	ASSERT(page_szc_lock_assert(pp));
32630Sstevel@tonic-gate 
32640Sstevel@tonic-gate 	if (pp->p_szc == 0)
32650Sstevel@tonic-gate 		return;
32660Sstevel@tonic-gate 
32670Sstevel@tonic-gate 	rootpp = PP_GROUPLEADER(pp, 1);
32680Sstevel@tonic-gate 	(void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
32690Sstevel@tonic-gate 
32700Sstevel@tonic-gate 	/*
32710Sstevel@tonic-gate 	 * all large mappings to pp are gone
32720Sstevel@tonic-gate 	 * and no new can be setup since pp is locked exclusively.
32730Sstevel@tonic-gate 	 *
32740Sstevel@tonic-gate 	 * Lock the root to make sure there's only one hat_page_demote()
32750Sstevel@tonic-gate 	 * outstanding within the area of this root's pszc.
32760Sstevel@tonic-gate 	 *
32770Sstevel@tonic-gate 	 * Second potential hat_page_demote() is already eliminated by upper
32780Sstevel@tonic-gate 	 * VM layer via page_szc_lock() but we don't rely on it and use our
32790Sstevel@tonic-gate 	 * own locking (so that upper layer locking can be changed without
32800Sstevel@tonic-gate 	 * assumptions that hat depends on upper layer VM to prevent multiple
32810Sstevel@tonic-gate 	 * hat_page_demote() to be issued simultaneously to the same large
32820Sstevel@tonic-gate 	 * page).
32830Sstevel@tonic-gate 	 */
32840Sstevel@tonic-gate again:
32850Sstevel@tonic-gate 	pszc = pp->p_szc;
32860Sstevel@tonic-gate 	if (pszc == 0)
32870Sstevel@tonic-gate 		return;
32880Sstevel@tonic-gate 	rootpp = PP_GROUPLEADER(pp, pszc);
32890Sstevel@tonic-gate 	x86_hm_enter(rootpp);
32900Sstevel@tonic-gate 	/*
32910Sstevel@tonic-gate 	 * If root's p_szc is different from pszc we raced with another
32920Sstevel@tonic-gate 	 * hat_page_demote().  Drop the lock and try to find the root again.
32930Sstevel@tonic-gate 	 * If root's p_szc is greater than pszc previous hat_page_demote() is
32940Sstevel@tonic-gate 	 * not done yet.  Take and release mlist lock of root's root to wait
32950Sstevel@tonic-gate 	 * for previous hat_page_demote() to complete.
32960Sstevel@tonic-gate 	 */
32970Sstevel@tonic-gate 	if ((rszc = rootpp->p_szc) != pszc) {
32980Sstevel@tonic-gate 		x86_hm_exit(rootpp);
32990Sstevel@tonic-gate 		if (rszc > pszc) {
33000Sstevel@tonic-gate 			/* p_szc of a locked non free page can't increase */
33010Sstevel@tonic-gate 			ASSERT(pp != rootpp);
33020Sstevel@tonic-gate 
33030Sstevel@tonic-gate 			rootpp = PP_GROUPLEADER(rootpp, rszc);
33040Sstevel@tonic-gate 			x86_hm_enter(rootpp);
33050Sstevel@tonic-gate 			x86_hm_exit(rootpp);
33060Sstevel@tonic-gate 		}
33070Sstevel@tonic-gate 		goto again;
33080Sstevel@tonic-gate 	}
33090Sstevel@tonic-gate 	ASSERT(pp->p_szc == pszc);
33100Sstevel@tonic-gate 
33110Sstevel@tonic-gate 	/*
33120Sstevel@tonic-gate 	 * Decrement by 1 p_szc of every constituent page of a region that
33130Sstevel@tonic-gate 	 * covered pp. For example if original szc is 3 it gets changed to 2
33140Sstevel@tonic-gate 	 * everywhere except in region 2 that covered pp. Region 2 that
33150Sstevel@tonic-gate 	 * covered pp gets demoted to 1 everywhere except in region 1 that
33160Sstevel@tonic-gate 	 * covered pp. The region 1 that covered pp is demoted to region
33170Sstevel@tonic-gate 	 * 0. It's done this way because from region 3 we removed level 3
33180Sstevel@tonic-gate 	 * mappings, from region 2 that covered pp we removed level 2 mappings
33190Sstevel@tonic-gate 	 * and from region 1 that covered pp we removed level 1 mappings.  All
33200Sstevel@tonic-gate 	 * changes are done from from high pfn's to low pfn's so that roots
33210Sstevel@tonic-gate 	 * are changed last allowing one to know the largest region where
33220Sstevel@tonic-gate 	 * hat_page_demote() is stil active by only looking at the root page.
33230Sstevel@tonic-gate 	 *
33240Sstevel@tonic-gate 	 * This algorithm is implemented in 2 while loops. First loop changes
33250Sstevel@tonic-gate 	 * p_szc of pages to the right of pp's level 1 region and second
33260Sstevel@tonic-gate 	 * loop changes p_szc of pages of level 1 region that covers pp
33270Sstevel@tonic-gate 	 * and all pages to the left of level 1 region that covers pp.
33280Sstevel@tonic-gate 	 * In the first loop p_szc keeps dropping with every iteration
33290Sstevel@tonic-gate 	 * and in the second loop it keeps increasing with every iteration.
33300Sstevel@tonic-gate 	 *
33310Sstevel@tonic-gate 	 * First loop description: Demote pages to the right of pp outside of
33320Sstevel@tonic-gate 	 * level 1 region that covers pp.  In every iteration of the while
33330Sstevel@tonic-gate 	 * loop below find the last page of szc region and the first page of
33340Sstevel@tonic-gate 	 * (szc - 1) region that is immediately to the right of (szc - 1)
33350Sstevel@tonic-gate 	 * region that covers pp.  From last such page to first such page
33360Sstevel@tonic-gate 	 * change every page's szc to szc - 1. Decrement szc and continue
33370Sstevel@tonic-gate 	 * looping until szc is 1. If pp belongs to the last (szc - 1) region
33380Sstevel@tonic-gate 	 * of szc region skip to the next iteration.
33390Sstevel@tonic-gate 	 */
33400Sstevel@tonic-gate 	szc = pszc;
33410Sstevel@tonic-gate 	while (szc > 1) {
33420Sstevel@tonic-gate 		lastpp = PP_GROUPLEADER(pp, szc);
33430Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(szc);
33440Sstevel@tonic-gate 		lastpp += pgcnt - 1;
33450Sstevel@tonic-gate 		firstpp = PP_GROUPLEADER(pp, (szc - 1));
33460Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(szc - 1);
33470Sstevel@tonic-gate 		if (lastpp - firstpp < pgcnt) {
33480Sstevel@tonic-gate 			szc--;
33490Sstevel@tonic-gate 			continue;
33500Sstevel@tonic-gate 		}
33510Sstevel@tonic-gate 		firstpp += pgcnt;
33520Sstevel@tonic-gate 		while (lastpp != firstpp) {
33530Sstevel@tonic-gate 			ASSERT(lastpp->p_szc == pszc);
33540Sstevel@tonic-gate 			lastpp->p_szc = szc - 1;
33550Sstevel@tonic-gate 			lastpp--;
33560Sstevel@tonic-gate 		}
33570Sstevel@tonic-gate 		firstpp->p_szc = szc - 1;
33580Sstevel@tonic-gate 		szc--;
33590Sstevel@tonic-gate 	}
33600Sstevel@tonic-gate 
33610Sstevel@tonic-gate 	/*
33620Sstevel@tonic-gate 	 * Second loop description:
33630Sstevel@tonic-gate 	 * First iteration changes p_szc to 0 of every
33640Sstevel@tonic-gate 	 * page of level 1 region that covers pp.
33650Sstevel@tonic-gate 	 * Subsequent iterations find last page of szc region
33660Sstevel@tonic-gate 	 * immediately to the left of szc region that covered pp
33670Sstevel@tonic-gate 	 * and first page of (szc + 1) region that covers pp.
33680Sstevel@tonic-gate 	 * From last to first page change p_szc of every page to szc.
33690Sstevel@tonic-gate 	 * Increment szc and continue looping until szc is pszc.
33700Sstevel@tonic-gate 	 * If pp belongs to the fist szc region of (szc + 1) region
33710Sstevel@tonic-gate 	 * skip to the next iteration.
33720Sstevel@tonic-gate 	 *
33730Sstevel@tonic-gate 	 */
33740Sstevel@tonic-gate 	szc = 0;
33750Sstevel@tonic-gate 	while (szc < pszc) {
33760Sstevel@tonic-gate 		firstpp = PP_GROUPLEADER(pp, (szc + 1));
33770Sstevel@tonic-gate 		if (szc == 0) {
33780Sstevel@tonic-gate 			pgcnt = page_get_pagecnt(1);
33790Sstevel@tonic-gate 			lastpp = firstpp + (pgcnt - 1);
33800Sstevel@tonic-gate 		} else {
33810Sstevel@tonic-gate 			lastpp = PP_GROUPLEADER(pp, szc);
33820Sstevel@tonic-gate 			if (firstpp == lastpp) {
33830Sstevel@tonic-gate 				szc++;
33840Sstevel@tonic-gate 				continue;
33850Sstevel@tonic-gate 			}
33860Sstevel@tonic-gate 			lastpp--;
33870Sstevel@tonic-gate 			pgcnt = page_get_pagecnt(szc);
33880Sstevel@tonic-gate 		}
33890Sstevel@tonic-gate 		while (lastpp != firstpp) {
33900Sstevel@tonic-gate 			ASSERT(lastpp->p_szc == pszc);
33910Sstevel@tonic-gate 			lastpp->p_szc = szc;
33920Sstevel@tonic-gate 			lastpp--;
33930Sstevel@tonic-gate 		}
33940Sstevel@tonic-gate 		firstpp->p_szc = szc;
33950Sstevel@tonic-gate 		if (firstpp == rootpp)
33960Sstevel@tonic-gate 			break;
33970Sstevel@tonic-gate 		szc++;
33980Sstevel@tonic-gate 	}
33990Sstevel@tonic-gate 	x86_hm_exit(rootpp);
34000Sstevel@tonic-gate }
34010Sstevel@tonic-gate 
34020Sstevel@tonic-gate /*
34030Sstevel@tonic-gate  * get hw stats from hardware into page struct and reset hw stats
34040Sstevel@tonic-gate  * returns attributes of page
34050Sstevel@tonic-gate  * Flags for hat_pagesync, hat_getstat, hat_sync
34060Sstevel@tonic-gate  *
34070Sstevel@tonic-gate  * define	HAT_SYNC_ZERORM		0x01
34080Sstevel@tonic-gate  *
34090Sstevel@tonic-gate  * Additional flags for hat_pagesync
34100Sstevel@tonic-gate  *
34110Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_REF	0x02
34120Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_MOD	0x04
34130Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_RM	0x06
34140Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_SHARED	0x08
34150Sstevel@tonic-gate  */
34160Sstevel@tonic-gate uint_t
34170Sstevel@tonic-gate hat_pagesync(struct page *pp, uint_t flags)
34180Sstevel@tonic-gate {
34190Sstevel@tonic-gate 	hment_t		*hm = NULL;
34200Sstevel@tonic-gate 	htable_t	*ht;
34210Sstevel@tonic-gate 	uint_t		entry;
34220Sstevel@tonic-gate 	x86pte_t	old, save_old;
34230Sstevel@tonic-gate 	x86pte_t	new;
34240Sstevel@tonic-gate 	uchar_t		nrmbits = P_REF|P_MOD|P_RO;
34250Sstevel@tonic-gate 	extern ulong_t	po_share;
34260Sstevel@tonic-gate 	page_t		*save_pp = pp;
34270Sstevel@tonic-gate 	uint_t		pszc = 0;
34280Sstevel@tonic-gate 
34290Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(pp) || panicstr);
34300Sstevel@tonic-gate 
34310Sstevel@tonic-gate 	if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
34320Sstevel@tonic-gate 		return (pp->p_nrm & nrmbits);
34330Sstevel@tonic-gate 
34340Sstevel@tonic-gate 	if ((flags & HAT_SYNC_ZERORM) == 0) {
34350Sstevel@tonic-gate 
34360Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
34370Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
34380Sstevel@tonic-gate 
34390Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
34400Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
34410Sstevel@tonic-gate 
34420Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
34430Sstevel@tonic-gate 		    hat_page_getshare(pp) > po_share) {
34440Sstevel@tonic-gate 			if (PP_ISRO(pp))
34450Sstevel@tonic-gate 				PP_SETREF(pp);
34460Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
34470Sstevel@tonic-gate 		}
34480Sstevel@tonic-gate 	}
34490Sstevel@tonic-gate 
34500Sstevel@tonic-gate next_size:
34510Sstevel@tonic-gate 	/*
34520Sstevel@tonic-gate 	 * walk thru the mapping list syncing (and clearing) ref/mod bits.
34530Sstevel@tonic-gate 	 */
34540Sstevel@tonic-gate 	x86_hm_enter(pp);
34550Sstevel@tonic-gate 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
34560Sstevel@tonic-gate 		if (ht->ht_level < pszc)
34570Sstevel@tonic-gate 			continue;
34580Sstevel@tonic-gate 		old = x86pte_get(ht, entry);
34590Sstevel@tonic-gate try_again:
34600Sstevel@tonic-gate 
34610Sstevel@tonic-gate 		ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
34620Sstevel@tonic-gate 
34630Sstevel@tonic-gate 		if (PTE_GET(old, PT_REF | PT_MOD) == 0)
34640Sstevel@tonic-gate 			continue;
34650Sstevel@tonic-gate 
34660Sstevel@tonic-gate 		save_old = old;
34670Sstevel@tonic-gate 		if ((flags & HAT_SYNC_ZERORM) != 0) {
34680Sstevel@tonic-gate 
34690Sstevel@tonic-gate 			/*
34700Sstevel@tonic-gate 			 * Need to clear ref or mod bits. Need to demap
34710Sstevel@tonic-gate 			 * to make sure any executing TLBs see cleared bits.
34720Sstevel@tonic-gate 			 */
34730Sstevel@tonic-gate 			new = old;
34740Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD);
34750Sstevel@tonic-gate 			old = hati_update_pte(ht, entry, old, new);
34760Sstevel@tonic-gate 			if (old != 0)
34770Sstevel@tonic-gate 				goto try_again;
34780Sstevel@tonic-gate 
34790Sstevel@tonic-gate 			old = save_old;
34800Sstevel@tonic-gate 		}
34810Sstevel@tonic-gate 
34820Sstevel@tonic-gate 		/*
34830Sstevel@tonic-gate 		 * Sync the PTE
34840Sstevel@tonic-gate 		 */
34853446Smrj 		if (!(flags & HAT_SYNC_ZERORM) &&
34863446Smrj 		    PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
34870Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, old, ht->ht_level);
34880Sstevel@tonic-gate 
34890Sstevel@tonic-gate 		/*
34900Sstevel@tonic-gate 		 * can stop short if we found a ref'd or mod'd page
34910Sstevel@tonic-gate 		 */
34920Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
34930Sstevel@tonic-gate 		    (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
34940Sstevel@tonic-gate 			x86_hm_exit(pp);
34953446Smrj 			goto done;
34960Sstevel@tonic-gate 		}
34970Sstevel@tonic-gate 	}
34980Sstevel@tonic-gate 	x86_hm_exit(pp);
34990Sstevel@tonic-gate 	while (pszc < pp->p_szc) {
35000Sstevel@tonic-gate 		page_t *tpp;
35010Sstevel@tonic-gate 		pszc++;
35020Sstevel@tonic-gate 		tpp = PP_GROUPLEADER(pp, pszc);
35030Sstevel@tonic-gate 		if (pp != tpp) {
35040Sstevel@tonic-gate 			pp = tpp;
35050Sstevel@tonic-gate 			goto next_size;
35060Sstevel@tonic-gate 		}
35070Sstevel@tonic-gate 	}
35083446Smrj done:
35090Sstevel@tonic-gate 	return (save_pp->p_nrm & nrmbits);
35100Sstevel@tonic-gate }
35110Sstevel@tonic-gate 
35120Sstevel@tonic-gate /*
35130Sstevel@tonic-gate  * returns approx number of mappings to this pp.  A return of 0 implies
35140Sstevel@tonic-gate  * there are no mappings to the page.
35150Sstevel@tonic-gate  */
35160Sstevel@tonic-gate ulong_t
35170Sstevel@tonic-gate hat_page_getshare(page_t *pp)
35180Sstevel@tonic-gate {
35190Sstevel@tonic-gate 	uint_t cnt;
35200Sstevel@tonic-gate 	cnt = hment_mapcnt(pp);
35211841Spraks #if defined(__amd64)
35221841Spraks 	if (vpm_enable && pp->p_vpmref) {
35231841Spraks 		cnt += 1;
35241841Spraks 	}
35251841Spraks #endif
35260Sstevel@tonic-gate 	return (cnt);
35270Sstevel@tonic-gate }
35280Sstevel@tonic-gate 
35290Sstevel@tonic-gate /*
35304528Spaulsan  * Return 1 the number of mappings exceeds sh_thresh. Return 0
35314528Spaulsan  * otherwise.
35324528Spaulsan  */
35334528Spaulsan int
35344528Spaulsan hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
35354528Spaulsan {
35364528Spaulsan 	return (hat_page_getshare(pp) > sh_thresh);
35374528Spaulsan }
35384528Spaulsan 
35394528Spaulsan /*
35400Sstevel@tonic-gate  * hat_softlock isn't supported anymore
35410Sstevel@tonic-gate  */
35420Sstevel@tonic-gate /*ARGSUSED*/
35430Sstevel@tonic-gate faultcode_t
35440Sstevel@tonic-gate hat_softlock(
35450Sstevel@tonic-gate 	hat_t *hat,
35460Sstevel@tonic-gate 	caddr_t addr,
35470Sstevel@tonic-gate 	size_t *len,
35480Sstevel@tonic-gate 	struct page **page_array,
35490Sstevel@tonic-gate 	uint_t flags)
35500Sstevel@tonic-gate {
35510Sstevel@tonic-gate 	return (FC_NOSUPPORT);
35520Sstevel@tonic-gate }
35530Sstevel@tonic-gate 
35540Sstevel@tonic-gate 
35550Sstevel@tonic-gate 
35560Sstevel@tonic-gate /*
35570Sstevel@tonic-gate  * Routine to expose supported HAT features to platform independent code.
35580Sstevel@tonic-gate  */
35590Sstevel@tonic-gate /*ARGSUSED*/
35600Sstevel@tonic-gate int
35610Sstevel@tonic-gate hat_supported(enum hat_features feature, void *arg)
35620Sstevel@tonic-gate {
35630Sstevel@tonic-gate 	switch (feature) {
35640Sstevel@tonic-gate 
35650Sstevel@tonic-gate 	case HAT_SHARED_PT:	/* this is really ISM */
35660Sstevel@tonic-gate 		return (1);
35670Sstevel@tonic-gate 
35680Sstevel@tonic-gate 	case HAT_DYNAMIC_ISM_UNMAP:
35690Sstevel@tonic-gate 		return (0);
35700Sstevel@tonic-gate 
35710Sstevel@tonic-gate 	case HAT_VMODSORT:
35720Sstevel@tonic-gate 		return (1);
35730Sstevel@tonic-gate 
35744528Spaulsan 	case HAT_SHARED_REGIONS:
35754528Spaulsan 		return (0);
35764528Spaulsan 
35770Sstevel@tonic-gate 	default:
35780Sstevel@tonic-gate 		panic("hat_supported() - unknown feature");
35790Sstevel@tonic-gate 	}
35800Sstevel@tonic-gate 	return (0);
35810Sstevel@tonic-gate }
35820Sstevel@tonic-gate 
35830Sstevel@tonic-gate /*
35840Sstevel@tonic-gate  * Called when a thread is exiting and has been switched to the kernel AS
35850Sstevel@tonic-gate  */
35860Sstevel@tonic-gate void
35870Sstevel@tonic-gate hat_thread_exit(kthread_t *thd)
35880Sstevel@tonic-gate {
35890Sstevel@tonic-gate 	ASSERT(thd->t_procp->p_as == &kas);
35900Sstevel@tonic-gate 	hat_switch(thd->t_procp->p_as->a_hat);
35910Sstevel@tonic-gate }
35920Sstevel@tonic-gate 
35930Sstevel@tonic-gate /*
35940Sstevel@tonic-gate  * Setup the given brand new hat structure as the new HAT on this cpu's mmu.
35950Sstevel@tonic-gate  */
35960Sstevel@tonic-gate /*ARGSUSED*/
35970Sstevel@tonic-gate void
35980Sstevel@tonic-gate hat_setup(hat_t *hat, int flags)
35990Sstevel@tonic-gate {
36000Sstevel@tonic-gate 	kpreempt_disable();
36010Sstevel@tonic-gate 
36020Sstevel@tonic-gate 	hat_switch(hat);
36030Sstevel@tonic-gate 
36040Sstevel@tonic-gate 	kpreempt_enable();
36050Sstevel@tonic-gate }
36060Sstevel@tonic-gate 
36070Sstevel@tonic-gate /*
36080Sstevel@tonic-gate  * Prepare for a CPU private mapping for the given address.
36090Sstevel@tonic-gate  *
36100Sstevel@tonic-gate  * The address can only be used from a single CPU and can be remapped
36110Sstevel@tonic-gate  * using hat_mempte_remap().  Return the address of the PTE.
36120Sstevel@tonic-gate  *
36130Sstevel@tonic-gate  * We do the htable_create() if necessary and increment the valid count so
36140Sstevel@tonic-gate  * the htable can't disappear.  We also hat_devload() the page table into
36150Sstevel@tonic-gate  * kernel so that the PTE is quickly accessed.
36160Sstevel@tonic-gate  */
36173446Smrj hat_mempte_t
36183446Smrj hat_mempte_setup(caddr_t addr)
36190Sstevel@tonic-gate {
36200Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
36210Sstevel@tonic-gate 	htable_t	*ht;
36220Sstevel@tonic-gate 	uint_t		entry;
36230Sstevel@tonic-gate 	x86pte_t	oldpte;
36243446Smrj 	hat_mempte_t	p;
36250Sstevel@tonic-gate 
36260Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
36270Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
36284004Sjosephb 	++curthread->t_hatdepth;
36290Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
36300Sstevel@tonic-gate 	if (ht == NULL) {
36310Sstevel@tonic-gate 		ht = htable_create(kas.a_hat, va, 0, NULL);
36320Sstevel@tonic-gate 		entry = htable_va2entry(va, ht);
36330Sstevel@tonic-gate 		ASSERT(ht->ht_level == 0);
36340Sstevel@tonic-gate 		oldpte = x86pte_get(ht, entry);
36350Sstevel@tonic-gate 	}
36360Sstevel@tonic-gate 	if (PTE_ISVALID(oldpte))
36370Sstevel@tonic-gate 		panic("hat_mempte_setup(): address already mapped"
36380Sstevel@tonic-gate 		    "ht=%p, entry=%d, pte=" FMT_PTE, ht, entry, oldpte);
36390Sstevel@tonic-gate 
36400Sstevel@tonic-gate 	/*
36410Sstevel@tonic-gate 	 * increment ht_valid_cnt so that the pagetable can't disappear
36420Sstevel@tonic-gate 	 */
36430Sstevel@tonic-gate 	HTABLE_INC(ht->ht_valid_cnt);
36440Sstevel@tonic-gate 
36450Sstevel@tonic-gate 	/*
36463446Smrj 	 * return the PTE physical address to the caller.
36470Sstevel@tonic-gate 	 */
36480Sstevel@tonic-gate 	htable_release(ht);
36493446Smrj 	p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
36504004Sjosephb 	--curthread->t_hatdepth;
36513446Smrj 	return (p);
36520Sstevel@tonic-gate }
36530Sstevel@tonic-gate 
36540Sstevel@tonic-gate /*
36550Sstevel@tonic-gate  * Release a CPU private mapping for the given address.
36560Sstevel@tonic-gate  * We decrement the htable valid count so it might be destroyed.
36570Sstevel@tonic-gate  */
36583446Smrj /*ARGSUSED1*/
36590Sstevel@tonic-gate void
36603446Smrj hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
36610Sstevel@tonic-gate {
36620Sstevel@tonic-gate 	htable_t	*ht;
36630Sstevel@tonic-gate 
36640Sstevel@tonic-gate 	/*
36653446Smrj 	 * invalidate any left over mapping and decrement the htable valid count
36660Sstevel@tonic-gate 	 */
36673446Smrj 	{
36683446Smrj 		x86pte_t *pteptr;
36693446Smrj 
36703446Smrj 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
36713446Smrj 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
36723446Smrj 		if (mmu.pae_hat)
36733446Smrj 			*pteptr = 0;
36743446Smrj 		else
36753446Smrj 			*(x86pte32_t *)pteptr = 0;
36763446Smrj 		mmu_tlbflush_entry(addr);
36773446Smrj 		x86pte_mapout();
36783446Smrj 	}
36793446Smrj 
36800Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
36810Sstevel@tonic-gate 	if (ht == NULL)
36820Sstevel@tonic-gate 		panic("hat_mempte_release(): invalid address");
36830Sstevel@tonic-gate 	ASSERT(ht->ht_level == 0);
36840Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
36850Sstevel@tonic-gate 	htable_release(ht);
36860Sstevel@tonic-gate }
36870Sstevel@tonic-gate 
36880Sstevel@tonic-gate /*
36890Sstevel@tonic-gate  * Apply a temporary CPU private mapping to a page. We flush the TLB only
36900Sstevel@tonic-gate  * on this CPU, so this ought to have been called with preemption disabled.
36910Sstevel@tonic-gate  */
36920Sstevel@tonic-gate void
36930Sstevel@tonic-gate hat_mempte_remap(
36943446Smrj 	pfn_t		pfn,
36953446Smrj 	caddr_t		addr,
36963446Smrj 	hat_mempte_t	pte_pa,
36973446Smrj 	uint_t		attr,
36983446Smrj 	uint_t		flags)
36990Sstevel@tonic-gate {
37000Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
37010Sstevel@tonic-gate 	x86pte_t	pte;
37020Sstevel@tonic-gate 
37030Sstevel@tonic-gate 	/*
37040Sstevel@tonic-gate 	 * Remap the given PTE to the new page's PFN. Invalidate only
37050Sstevel@tonic-gate 	 * on this CPU.
37060Sstevel@tonic-gate 	 */
37070Sstevel@tonic-gate #ifdef DEBUG
37080Sstevel@tonic-gate 	htable_t	*ht;
37090Sstevel@tonic-gate 	uint_t		entry;
37100Sstevel@tonic-gate 
37110Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
37120Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
37130Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
37140Sstevel@tonic-gate 	ASSERT(ht != NULL);
37150Sstevel@tonic-gate 	ASSERT(ht->ht_level == 0);
37160Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
37173446Smrj 	ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
37180Sstevel@tonic-gate 	htable_release(ht);
37190Sstevel@tonic-gate #endif
37200Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, 0, flags);
37213446Smrj 	{
37223446Smrj 		x86pte_t *pteptr;
37233446Smrj 
37243446Smrj 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
37253446Smrj 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
37263446Smrj 		if (mmu.pae_hat)
37273446Smrj 			*(x86pte_t *)pteptr = pte;
37283446Smrj 		else
37293446Smrj 			*(x86pte32_t *)pteptr = (x86pte32_t)pte;
37303446Smrj 		mmu_tlbflush_entry(addr);
37313446Smrj 		x86pte_mapout();
37323446Smrj 	}
37330Sstevel@tonic-gate }
37340Sstevel@tonic-gate 
37350Sstevel@tonic-gate 
37360Sstevel@tonic-gate 
37370Sstevel@tonic-gate /*
37380Sstevel@tonic-gate  * Hat locking functions
37390Sstevel@tonic-gate  * XXX - these two functions are currently being used by hatstats
37400Sstevel@tonic-gate  * 	they can be removed by using a per-as mutex for hatstats.
37410Sstevel@tonic-gate  */
37420Sstevel@tonic-gate void
37430Sstevel@tonic-gate hat_enter(hat_t *hat)
37440Sstevel@tonic-gate {
37450Sstevel@tonic-gate 	mutex_enter(&hat->hat_mutex);
37460Sstevel@tonic-gate }
37470Sstevel@tonic-gate 
37480Sstevel@tonic-gate void
37490Sstevel@tonic-gate hat_exit(hat_t *hat)
37500Sstevel@tonic-gate {
37510Sstevel@tonic-gate 	mutex_exit(&hat->hat_mutex);
37520Sstevel@tonic-gate }
37530Sstevel@tonic-gate 
37540Sstevel@tonic-gate /*
37553446Smrj  * HAT part of cpu initialization.
37560Sstevel@tonic-gate  */
37570Sstevel@tonic-gate void
37580Sstevel@tonic-gate hat_cpu_online(struct cpu *cpup)
37590Sstevel@tonic-gate {
37600Sstevel@tonic-gate 	if (cpup != CPU) {
37613446Smrj 		x86pte_cpu_init(cpup);
37620Sstevel@tonic-gate 		hat_vlp_setup(cpup);
37630Sstevel@tonic-gate 	}
37640Sstevel@tonic-gate 	CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
37650Sstevel@tonic-gate }
37660Sstevel@tonic-gate 
37670Sstevel@tonic-gate /*
37683446Smrj  * HAT part of cpu deletion.
37693446Smrj  * (currently, we only call this after the cpu is safely passivated.)
37703446Smrj  */
37713446Smrj void
37723446Smrj hat_cpu_offline(struct cpu *cpup)
37733446Smrj {
37743446Smrj 	ASSERT(cpup != CPU);
37753446Smrj 
37763446Smrj 	CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
37773446Smrj 	x86pte_cpu_fini(cpup);
37783446Smrj 	hat_vlp_teardown(cpup);
37793446Smrj }
37803446Smrj 
37813446Smrj /*
37820Sstevel@tonic-gate  * Function called after all CPUs are brought online.
37830Sstevel@tonic-gate  * Used to remove low address boot mappings.
37840Sstevel@tonic-gate  */
37850Sstevel@tonic-gate void
37860Sstevel@tonic-gate clear_boot_mappings(uintptr_t low, uintptr_t high)
37870Sstevel@tonic-gate {
37880Sstevel@tonic-gate 	uintptr_t vaddr = low;
37890Sstevel@tonic-gate 	htable_t *ht = NULL;
37900Sstevel@tonic-gate 	level_t level;
37910Sstevel@tonic-gate 	uint_t entry;
37920Sstevel@tonic-gate 	x86pte_t pte;
37930Sstevel@tonic-gate 
37940Sstevel@tonic-gate 	/*
37950Sstevel@tonic-gate 	 * On 1st CPU we can unload the prom mappings, basically we blow away
37963446Smrj 	 * all virtual mappings under _userlimit.
37970Sstevel@tonic-gate 	 */
37980Sstevel@tonic-gate 	while (vaddr < high) {
37990Sstevel@tonic-gate 		pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
38000Sstevel@tonic-gate 		if (ht == NULL)
38010Sstevel@tonic-gate 			break;
38020Sstevel@tonic-gate 
38030Sstevel@tonic-gate 		level = ht->ht_level;
38040Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
38050Sstevel@tonic-gate 		ASSERT(level <= mmu.max_page_level);
38060Sstevel@tonic-gate 		ASSERT(PTE_ISPAGE(pte, level));
38070Sstevel@tonic-gate 
38080Sstevel@tonic-gate 		/*
38090Sstevel@tonic-gate 		 * Unload the mapping from the page tables.
38100Sstevel@tonic-gate 		 */
38113446Smrj 		(void) x86pte_inval(ht, entry, 0, NULL);
38120Sstevel@tonic-gate 		ASSERT(ht->ht_valid_cnt > 0);
38130Sstevel@tonic-gate 		HTABLE_DEC(ht->ht_valid_cnt);
38140Sstevel@tonic-gate 		PGCNT_DEC(ht->ht_hat, ht->ht_level);
38150Sstevel@tonic-gate 
38160Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
38170Sstevel@tonic-gate 	}
38180Sstevel@tonic-gate 	if (ht)
38190Sstevel@tonic-gate 		htable_release(ht);
38200Sstevel@tonic-gate }
38210Sstevel@tonic-gate 
38220Sstevel@tonic-gate /*
38230Sstevel@tonic-gate  * Atomically update a new translation for a single page.  If the
38240Sstevel@tonic-gate  * currently installed PTE doesn't match the value we expect to find,
38250Sstevel@tonic-gate  * it's not updated and we return the PTE we found.
38260Sstevel@tonic-gate  *
38270Sstevel@tonic-gate  * If activating nosync or NOWRITE and the page was modified we need to sync
38280Sstevel@tonic-gate  * with the page_t. Also sync with page_t if clearing ref/mod bits.
38290Sstevel@tonic-gate  */
38300Sstevel@tonic-gate static x86pte_t
38310Sstevel@tonic-gate hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
38320Sstevel@tonic-gate {
38330Sstevel@tonic-gate 	page_t		*pp;
38340Sstevel@tonic-gate 	uint_t		rm = 0;
38350Sstevel@tonic-gate 	x86pte_t	replaced;
38360Sstevel@tonic-gate 
38373446Smrj 	if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
38380Sstevel@tonic-gate 	    PTE_GET(expected, PT_MOD | PT_REF) &&
38390Sstevel@tonic-gate 	    (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
38404381Sjosephb 	    !PTE_GET(new, PT_MOD | PT_REF))) {
38410Sstevel@tonic-gate 
38423446Smrj 		ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
38430Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
38440Sstevel@tonic-gate 		ASSERT(pp != NULL);
38450Sstevel@tonic-gate 		if (PTE_GET(expected, PT_MOD))
38460Sstevel@tonic-gate 			rm |= P_MOD;
38470Sstevel@tonic-gate 		if (PTE_GET(expected, PT_REF))
38480Sstevel@tonic-gate 			rm |= P_REF;
38490Sstevel@tonic-gate 		PTE_CLR(new, PT_MOD | PT_REF);
38500Sstevel@tonic-gate 	}
38510Sstevel@tonic-gate 
38520Sstevel@tonic-gate 	replaced = x86pte_update(ht, entry, expected, new);
38530Sstevel@tonic-gate 	if (replaced != expected)
38540Sstevel@tonic-gate 		return (replaced);
38550Sstevel@tonic-gate 
38560Sstevel@tonic-gate 	if (rm) {
38570Sstevel@tonic-gate 		/*
38580Sstevel@tonic-gate 		 * sync to all constituent pages of a large page
38590Sstevel@tonic-gate 		 */
38600Sstevel@tonic-gate 		pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
38610Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
38620Sstevel@tonic-gate 		while (pgcnt-- > 0) {
38630Sstevel@tonic-gate 			/*
38640Sstevel@tonic-gate 			 * hat_page_demote() can't decrease
38650Sstevel@tonic-gate 			 * pszc below this mapping size
38660Sstevel@tonic-gate 			 * since large mapping existed after we
38670Sstevel@tonic-gate 			 * took mlist lock.
38680Sstevel@tonic-gate 			 */
38690Sstevel@tonic-gate 			ASSERT(pp->p_szc >= ht->ht_level);
38700Sstevel@tonic-gate 			hat_page_setattr(pp, rm);
38710Sstevel@tonic-gate 			++pp;
38720Sstevel@tonic-gate 		}
38730Sstevel@tonic-gate 	}
38740Sstevel@tonic-gate 
38750Sstevel@tonic-gate 	return (0);
38760Sstevel@tonic-gate }
38770Sstevel@tonic-gate 
38784528Spaulsan /* ARGSUSED */
38794528Spaulsan void
38804528Spaulsan hat_join_srd(struct hat *sfmmup, vnode_t *evp)
38814528Spaulsan {
38824528Spaulsan }
38834528Spaulsan 
38844528Spaulsan /* ARGSUSED */
38854528Spaulsan hat_region_cookie_t
38864528Spaulsan hat_join_region(struct hat *sfmmup,
38874528Spaulsan     caddr_t r_saddr,
38884528Spaulsan     size_t r_size,
38894528Spaulsan     void *r_obj,
38904528Spaulsan     u_offset_t r_objoff,
38914528Spaulsan     uchar_t r_perm,
38924528Spaulsan     uchar_t r_pgszc,
38934528Spaulsan     hat_rgn_cb_func_t r_cb_function,
38944528Spaulsan     uint_t flags)
38954528Spaulsan {
38964528Spaulsan 	panic("No shared region support on x86");
38974528Spaulsan 	return (HAT_INVALID_REGION_COOKIE);
38984528Spaulsan }
38994528Spaulsan 
39004528Spaulsan /* ARGSUSED */
39014528Spaulsan void
39024528Spaulsan hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags)
39034528Spaulsan {
39044528Spaulsan 	panic("No shared region support on x86");
39054528Spaulsan }
39064528Spaulsan 
39074528Spaulsan /* ARGSUSED */
39084528Spaulsan void
39094528Spaulsan hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie)
39104528Spaulsan {
39114528Spaulsan 	panic("No shared region support on x86");
39124528Spaulsan }
39134528Spaulsan 
39144528Spaulsan 
39150Sstevel@tonic-gate /*
39160Sstevel@tonic-gate  * Kernel Physical Mapping (kpm) facility
39170Sstevel@tonic-gate  *
39180Sstevel@tonic-gate  * Most of the routines needed to support segkpm are almost no-ops on the
39190Sstevel@tonic-gate  * x86 platform.  We map in the entire segment when it is created and leave
39200Sstevel@tonic-gate  * it mapped in, so there is no additional work required to set up and tear
39210Sstevel@tonic-gate  * down individual mappings.  All of these routines were created to support
39220Sstevel@tonic-gate  * SPARC platforms that have to avoid aliasing in their virtually indexed
39230Sstevel@tonic-gate  * caches.
39240Sstevel@tonic-gate  *
39250Sstevel@tonic-gate  * Most of the routines have sanity checks in them (e.g. verifying that the
39260Sstevel@tonic-gate  * passed-in page is locked).  We don't actually care about most of these
39270Sstevel@tonic-gate  * checks on x86, but we leave them in place to identify problems in the
39280Sstevel@tonic-gate  * upper levels.
39290Sstevel@tonic-gate  */
39300Sstevel@tonic-gate 
39310Sstevel@tonic-gate /*
39320Sstevel@tonic-gate  * Map in a locked page and return the vaddr.
39330Sstevel@tonic-gate  */
39340Sstevel@tonic-gate /*ARGSUSED*/
39350Sstevel@tonic-gate caddr_t
39360Sstevel@tonic-gate hat_kpm_mapin(struct page *pp, struct kpme *kpme)
39370Sstevel@tonic-gate {
39380Sstevel@tonic-gate 	caddr_t		vaddr;
39390Sstevel@tonic-gate 
39400Sstevel@tonic-gate #ifdef DEBUG
39410Sstevel@tonic-gate 	if (kpm_enable == 0) {
39420Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
39430Sstevel@tonic-gate 		return ((caddr_t)NULL);
39440Sstevel@tonic-gate 	}
39450Sstevel@tonic-gate 
39460Sstevel@tonic-gate 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
39470Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
39480Sstevel@tonic-gate 		return ((caddr_t)NULL);
39490Sstevel@tonic-gate 	}
39500Sstevel@tonic-gate #endif
39510Sstevel@tonic-gate 
39520Sstevel@tonic-gate 	vaddr = hat_kpm_page2va(pp, 1);
39530Sstevel@tonic-gate 
39540Sstevel@tonic-gate 	return (vaddr);
39550Sstevel@tonic-gate }
39560Sstevel@tonic-gate 
39570Sstevel@tonic-gate /*
39580Sstevel@tonic-gate  * Mapout a locked page.
39590Sstevel@tonic-gate  */
39600Sstevel@tonic-gate /*ARGSUSED*/
39610Sstevel@tonic-gate void
39620Sstevel@tonic-gate hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
39630Sstevel@tonic-gate {
39640Sstevel@tonic-gate #ifdef DEBUG
39650Sstevel@tonic-gate 	if (kpm_enable == 0) {
39660Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
39670Sstevel@tonic-gate 		return;
39680Sstevel@tonic-gate 	}
39690Sstevel@tonic-gate 
39700Sstevel@tonic-gate 	if (IS_KPM_ADDR(vaddr) == 0) {
39710Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
39720Sstevel@tonic-gate 		return;
39730Sstevel@tonic-gate 	}
39740Sstevel@tonic-gate 
39750Sstevel@tonic-gate 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
39760Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
39770Sstevel@tonic-gate 		return;
39780Sstevel@tonic-gate 	}
39790Sstevel@tonic-gate #endif
39800Sstevel@tonic-gate }
39810Sstevel@tonic-gate 
39820Sstevel@tonic-gate /*
39830Sstevel@tonic-gate  * Return the kpm virtual address for a specific pfn
39840Sstevel@tonic-gate  */
39850Sstevel@tonic-gate caddr_t
39860Sstevel@tonic-gate hat_kpm_pfn2va(pfn_t pfn)
39870Sstevel@tonic-gate {
39883446Smrj 	uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
39890Sstevel@tonic-gate 
39900Sstevel@tonic-gate 	return ((caddr_t)vaddr);
39910Sstevel@tonic-gate }
39920Sstevel@tonic-gate 
39930Sstevel@tonic-gate /*
39940Sstevel@tonic-gate  * Return the kpm virtual address for the page at pp.
39950Sstevel@tonic-gate  */
39960Sstevel@tonic-gate /*ARGSUSED*/
39970Sstevel@tonic-gate caddr_t
39980Sstevel@tonic-gate hat_kpm_page2va(struct page *pp, int checkswap)
39990Sstevel@tonic-gate {
40000Sstevel@tonic-gate 	return (hat_kpm_pfn2va(pp->p_pagenum));
40010Sstevel@tonic-gate }
40020Sstevel@tonic-gate 
40030Sstevel@tonic-gate /*
40040Sstevel@tonic-gate  * Return the page frame number for the kpm virtual address vaddr.
40050Sstevel@tonic-gate  */
40060Sstevel@tonic-gate pfn_t
40070Sstevel@tonic-gate hat_kpm_va2pfn(caddr_t vaddr)
40080Sstevel@tonic-gate {
40090Sstevel@tonic-gate 	pfn_t		pfn;
40100Sstevel@tonic-gate 
40110Sstevel@tonic-gate 	ASSERT(IS_KPM_ADDR(vaddr));
40120Sstevel@tonic-gate 
40130Sstevel@tonic-gate 	pfn = (pfn_t)btop(vaddr - kpm_vbase);
40140Sstevel@tonic-gate 
40150Sstevel@tonic-gate 	return (pfn);
40160Sstevel@tonic-gate }
40170Sstevel@tonic-gate 
40180Sstevel@tonic-gate 
40190Sstevel@tonic-gate /*
40200Sstevel@tonic-gate  * Return the page for the kpm virtual address vaddr.
40210Sstevel@tonic-gate  */
40220Sstevel@tonic-gate page_t *
40230Sstevel@tonic-gate hat_kpm_vaddr2page(caddr_t vaddr)
40240Sstevel@tonic-gate {
40250Sstevel@tonic-gate 	pfn_t		pfn;
40260Sstevel@tonic-gate 
40270Sstevel@tonic-gate 	ASSERT(IS_KPM_ADDR(vaddr));
40280Sstevel@tonic-gate 
40290Sstevel@tonic-gate 	pfn = hat_kpm_va2pfn(vaddr);
40300Sstevel@tonic-gate 
40310Sstevel@tonic-gate 	return (page_numtopp_nolock(pfn));
40320Sstevel@tonic-gate }
40330Sstevel@tonic-gate 
40340Sstevel@tonic-gate /*
40350Sstevel@tonic-gate  * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
40360Sstevel@tonic-gate  * KPM page.  This should never happen on x86
40370Sstevel@tonic-gate  */
40380Sstevel@tonic-gate int
40390Sstevel@tonic-gate hat_kpm_fault(hat_t *hat, caddr_t vaddr)
40400Sstevel@tonic-gate {
40410Sstevel@tonic-gate 	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p", hat, vaddr);
40420Sstevel@tonic-gate 
40430Sstevel@tonic-gate 	return (0);
40440Sstevel@tonic-gate }
40450Sstevel@tonic-gate 
40460Sstevel@tonic-gate /*ARGSUSED*/
40470Sstevel@tonic-gate void
40480Sstevel@tonic-gate hat_kpm_mseghash_clear(int nentries)
40490Sstevel@tonic-gate {}
40500Sstevel@tonic-gate 
40510Sstevel@tonic-gate /*ARGSUSED*/
40520Sstevel@tonic-gate void
40530Sstevel@tonic-gate hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
40540Sstevel@tonic-gate {}
4055