xref: /onnv-gate/usr/src/uts/i86pc/vm/hat_i86.c (revision 510:133cdd195ce9)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
50Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
60Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
70Sstevel@tonic-gate  * with the License.
80Sstevel@tonic-gate  *
90Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
100Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
110Sstevel@tonic-gate  * See the License for the specific language governing permissions
120Sstevel@tonic-gate  * and limitations under the License.
130Sstevel@tonic-gate  *
140Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
150Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
160Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
170Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
180Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
190Sstevel@tonic-gate  *
200Sstevel@tonic-gate  * CDDL HEADER END
210Sstevel@tonic-gate  */
220Sstevel@tonic-gate /*
230Sstevel@tonic-gate  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
240Sstevel@tonic-gate  * Use is subject to license terms.
250Sstevel@tonic-gate  */
260Sstevel@tonic-gate 
270Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
280Sstevel@tonic-gate 
290Sstevel@tonic-gate /*
300Sstevel@tonic-gate  * VM - Hardware Address Translation management for i386 and amd64
310Sstevel@tonic-gate  *
320Sstevel@tonic-gate  * Implementation of the interfaces described in <common/vm/hat.h>
330Sstevel@tonic-gate  *
340Sstevel@tonic-gate  * Nearly all the details of how the hardware is managed should not be
350Sstevel@tonic-gate  * visible outside this layer except for misc. machine specific functions
360Sstevel@tonic-gate  * that work in conjunction with this code.
370Sstevel@tonic-gate  *
380Sstevel@tonic-gate  * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
390Sstevel@tonic-gate  */
400Sstevel@tonic-gate 
410Sstevel@tonic-gate #include <sys/machparam.h>
420Sstevel@tonic-gate #include <sys/machsystm.h>
430Sstevel@tonic-gate #include <sys/mman.h>
440Sstevel@tonic-gate #include <sys/types.h>
450Sstevel@tonic-gate #include <sys/systm.h>
460Sstevel@tonic-gate #include <sys/cpuvar.h>
470Sstevel@tonic-gate #include <sys/thread.h>
480Sstevel@tonic-gate #include <sys/proc.h>
490Sstevel@tonic-gate #include <sys/cpu.h>
500Sstevel@tonic-gate #include <sys/kmem.h>
510Sstevel@tonic-gate #include <sys/disp.h>
520Sstevel@tonic-gate #include <sys/shm.h>
530Sstevel@tonic-gate #include <sys/sysmacros.h>
540Sstevel@tonic-gate #include <sys/machparam.h>
550Sstevel@tonic-gate #include <sys/vmem.h>
560Sstevel@tonic-gate #include <sys/vmsystm.h>
570Sstevel@tonic-gate #include <sys/promif.h>
580Sstevel@tonic-gate #include <sys/var.h>
590Sstevel@tonic-gate #include <sys/x86_archext.h>
600Sstevel@tonic-gate #include <sys/atomic.h>
610Sstevel@tonic-gate #include <sys/bitmap.h>
620Sstevel@tonic-gate 
630Sstevel@tonic-gate #include <vm/seg_kmem.h>
640Sstevel@tonic-gate #include <vm/hat_i86.h>
650Sstevel@tonic-gate #include <vm/as.h>
660Sstevel@tonic-gate #include <vm/seg.h>
670Sstevel@tonic-gate #include <vm/page.h>
680Sstevel@tonic-gate #include <vm/seg_kp.h>
690Sstevel@tonic-gate #include <vm/seg_kpm.h>
700Sstevel@tonic-gate #include <vm/vm_dep.h>
710Sstevel@tonic-gate 
720Sstevel@tonic-gate #include <sys/cmn_err.h>
730Sstevel@tonic-gate 
740Sstevel@tonic-gate 
750Sstevel@tonic-gate /*
760Sstevel@tonic-gate  * Basic parameters for hat operation.
770Sstevel@tonic-gate  */
780Sstevel@tonic-gate struct hat_mmu_info mmu;
790Sstevel@tonic-gate uint_t force_pae_off = 0;	/* for testing, change with kernel debugger */
800Sstevel@tonic-gate uint_t force_pae_on = 0;	/* for testing, change with kernel debugger */
810Sstevel@tonic-gate 
820Sstevel@tonic-gate /*
830Sstevel@tonic-gate  * The page that is the kernel's top level pagetable.
840Sstevel@tonic-gate  *
850Sstevel@tonic-gate  * For 32 bit VLP support, the kernel hat will use the 1st 4 entries
860Sstevel@tonic-gate  * on this 4K page for its top level page table. The remaining groups of
870Sstevel@tonic-gate  * 4 entries are used for per processor copies of user VLP pagetables for
880Sstevel@tonic-gate  * running threads.  See hat_switch() and reload_pae32() for details.
890Sstevel@tonic-gate  *
900Sstevel@tonic-gate  * vlp_page[0] - 0th level==2 PTE for kernel HAT (will be zero)
910Sstevel@tonic-gate  * vlp_page[1] - 1st level==2 PTE for kernel HAT (will be zero)
920Sstevel@tonic-gate  * vlp_page[2] - 2nd level==2 PTE for kernel HAT (zero for small memory)
930Sstevel@tonic-gate  * vlp_page[3] - 3rd level==2 PTE for kernel
940Sstevel@tonic-gate  *
950Sstevel@tonic-gate  * vlp_page[4] - 0th level==2 PTE for user thread on cpu 0
960Sstevel@tonic-gate  * vlp_page[5] - 1st level==2 PTE for user thread on cpu 0
970Sstevel@tonic-gate  * vlp_page[6] - 2nd level==2 PTE for user thread on cpu 0
980Sstevel@tonic-gate  * vlp_page[7] - probably copy of kernel PTE
990Sstevel@tonic-gate  *
1000Sstevel@tonic-gate  * vlp_page[8]  - 0th level==2 PTE for user thread on cpu 1
1010Sstevel@tonic-gate  * vlp_page[9]  - 1st level==2 PTE for user thread on cpu 1
1020Sstevel@tonic-gate  * vlp_page[10] - 2nd level==2 PTE for user thread on cpu 1
1030Sstevel@tonic-gate  * vlp_page[11] - probably copy of kernel PTE
1040Sstevel@tonic-gate  * ...
1050Sstevel@tonic-gate  *
1060Sstevel@tonic-gate  * when / where the kernel PTE's are (entry 2 or 3 or none) depends
1070Sstevel@tonic-gate  * on kernelbase.
1080Sstevel@tonic-gate  */
1090Sstevel@tonic-gate static x86pte_t *vlp_page;
1100Sstevel@tonic-gate 
1110Sstevel@tonic-gate /*
1120Sstevel@tonic-gate  * forward declaration of internal utility routines
1130Sstevel@tonic-gate  */
1140Sstevel@tonic-gate static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
1150Sstevel@tonic-gate 	x86pte_t new);
1160Sstevel@tonic-gate 
1170Sstevel@tonic-gate /*
1180Sstevel@tonic-gate  * The kernel address space exists in all HATs. To implement this the
1190Sstevel@tonic-gate  * kernel reserves a fixed number of entries in every topmost level page
1200Sstevel@tonic-gate  * table. The values are setup in hat_init() and then copied to every hat
1210Sstevel@tonic-gate  * created by hat_alloc(). This means that kernelbase must be:
1220Sstevel@tonic-gate  *
1230Sstevel@tonic-gate  *	  4Meg aligned for 32 bit kernels
1240Sstevel@tonic-gate  *	512Gig aligned for x86_64 64 bit kernel
1250Sstevel@tonic-gate  *
1260Sstevel@tonic-gate  * The PAE 32 bit hat is handled as a special case. Otherwise requiring 1Gig
1270Sstevel@tonic-gate  * alignment would use too much VA for the kernel.
1280Sstevel@tonic-gate  *
1290Sstevel@tonic-gate  */
1300Sstevel@tonic-gate static uint_t	khat_start;	/* index of 1st entry in kernel's top ptable */
1310Sstevel@tonic-gate static uint_t	khat_entries;	/* number of entries in kernel's top ptable */
1320Sstevel@tonic-gate 
1330Sstevel@tonic-gate #if defined(__i386)
1340Sstevel@tonic-gate 
1350Sstevel@tonic-gate static htable_t	*khat_pae32_htable = NULL;
1360Sstevel@tonic-gate static uint_t	khat_pae32_start;
1370Sstevel@tonic-gate static uint_t	khat_pae32_entries;
1380Sstevel@tonic-gate 
1390Sstevel@tonic-gate #endif
1400Sstevel@tonic-gate 
1410Sstevel@tonic-gate /*
1420Sstevel@tonic-gate  * Locks, etc. to control use of the hat reserves when recursively
1430Sstevel@tonic-gate  * allocating pagetables for the hat data structures.
1440Sstevel@tonic-gate  */
1450Sstevel@tonic-gate static kmutex_t hat_reserves_lock;
1460Sstevel@tonic-gate static kcondvar_t hat_reserves_cv;
1470Sstevel@tonic-gate kthread_t *hat_reserves_thread;
1480Sstevel@tonic-gate uint_t use_boot_reserve = 1;	/* cleared after early boot process */
1490Sstevel@tonic-gate uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */
1500Sstevel@tonic-gate 
1510Sstevel@tonic-gate /*
1520Sstevel@tonic-gate  * A cpuset for all cpus. This is used for kernel address cross calls, since
1530Sstevel@tonic-gate  * the kernel addresses apply to all cpus.
1540Sstevel@tonic-gate  */
1550Sstevel@tonic-gate cpuset_t khat_cpuset;
1560Sstevel@tonic-gate 
1570Sstevel@tonic-gate /*
1580Sstevel@tonic-gate  * management stuff for hat structures
1590Sstevel@tonic-gate  */
1600Sstevel@tonic-gate kmutex_t	hat_list_lock;
1610Sstevel@tonic-gate kcondvar_t	hat_list_cv;
1620Sstevel@tonic-gate kmem_cache_t	*hat_cache;
1630Sstevel@tonic-gate kmem_cache_t	*hat_hash_cache;
1640Sstevel@tonic-gate kmem_cache_t	*vlp_hash_cache;
1650Sstevel@tonic-gate 
1660Sstevel@tonic-gate /*
1670Sstevel@tonic-gate  * Simple statistics
1680Sstevel@tonic-gate  */
1690Sstevel@tonic-gate struct hatstats hatstat;
1700Sstevel@tonic-gate 
1710Sstevel@tonic-gate /*
1720Sstevel@tonic-gate  * macros to detect addresses in use by kernel only during boot
1730Sstevel@tonic-gate  */
1740Sstevel@tonic-gate #if defined(__amd64)
1750Sstevel@tonic-gate 
1760Sstevel@tonic-gate #define	BOOT_VA(va) ((va) < kernelbase ||			\
1770Sstevel@tonic-gate 	((va) >= BOOT_DOUBLEMAP_BASE &&				\
1780Sstevel@tonic-gate 	(va) < BOOT_DOUBLEMAP_BASE + BOOT_DOUBLEMAP_SIZE))
1790Sstevel@tonic-gate 
1800Sstevel@tonic-gate #elif defined(__i386)
1810Sstevel@tonic-gate 
1820Sstevel@tonic-gate #define	BOOT_VA(va) ((va) < kernelbase)
1830Sstevel@tonic-gate 
1840Sstevel@tonic-gate #endif	/* __i386 */
1850Sstevel@tonic-gate 
1860Sstevel@tonic-gate /*
1870Sstevel@tonic-gate  * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
1880Sstevel@tonic-gate  */
1890Sstevel@tonic-gate extern void atomic_orb(uchar_t *addr, uchar_t val);
1900Sstevel@tonic-gate extern void atomic_andb(uchar_t *addr, uchar_t val);
1910Sstevel@tonic-gate 
1920Sstevel@tonic-gate #define	PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
1930Sstevel@tonic-gate #define	PP_ISMOD(pp)		PP_GETRM(pp, P_MOD)
1940Sstevel@tonic-gate #define	PP_ISREF(pp)		PP_GETRM(pp, P_REF)
1950Sstevel@tonic-gate #define	PP_ISRO(pp)		PP_GETRM(pp, P_RO)
1960Sstevel@tonic-gate 
1970Sstevel@tonic-gate #define	PP_SETRM(pp, rm)	atomic_orb(&(pp->p_nrm), rm)
1980Sstevel@tonic-gate #define	PP_SETMOD(pp)		PP_SETRM(pp, P_MOD)
1990Sstevel@tonic-gate #define	PP_SETREF(pp)		PP_SETRM(pp, P_REF)
2000Sstevel@tonic-gate #define	PP_SETRO(pp)		PP_SETRM(pp, P_RO)
2010Sstevel@tonic-gate 
2020Sstevel@tonic-gate #define	PP_CLRRM(pp, rm)	atomic_andb(&(pp->p_nrm), ~(rm))
2030Sstevel@tonic-gate #define	PP_CLRMOD(pp)   	PP_CLRRM(pp, P_MOD)
2040Sstevel@tonic-gate #define	PP_CLRREF(pp)   	PP_CLRRM(pp, P_REF)
2050Sstevel@tonic-gate #define	PP_CLRRO(pp)    	PP_CLRRM(pp, P_RO)
2060Sstevel@tonic-gate #define	PP_CLRALL(pp)		PP_CLRRM(pp, P_MOD | P_REF | P_RO)
2070Sstevel@tonic-gate 
2080Sstevel@tonic-gate /*
2090Sstevel@tonic-gate  * some useful tracing macros
2100Sstevel@tonic-gate  */
2110Sstevel@tonic-gate 
2120Sstevel@tonic-gate int hattrace = 0;
2130Sstevel@tonic-gate #ifdef DEBUG
2140Sstevel@tonic-gate 
2150Sstevel@tonic-gate #define	HATIN(r, h, a, l)	\
2160Sstevel@tonic-gate 	if (hattrace) prom_printf("->%s hat=%p, adr=%p, len=%lx\n", #r, h, a, l)
2170Sstevel@tonic-gate 
2180Sstevel@tonic-gate #define	HATOUT(r, h, a)		\
2190Sstevel@tonic-gate 	if (hattrace) prom_printf("<-%s hat=%p, adr=%p\n", #r, h, a)
2200Sstevel@tonic-gate #else
2210Sstevel@tonic-gate 
2220Sstevel@tonic-gate #define	HATIN(r, h, a, l)
2230Sstevel@tonic-gate #define	HATOUT(r, h, a)
2240Sstevel@tonic-gate 
2250Sstevel@tonic-gate #endif
2260Sstevel@tonic-gate 
2270Sstevel@tonic-gate 
2280Sstevel@tonic-gate /*
2290Sstevel@tonic-gate  * kmem cache constructor for struct hat
2300Sstevel@tonic-gate  */
2310Sstevel@tonic-gate /*ARGSUSED*/
2320Sstevel@tonic-gate static int
2330Sstevel@tonic-gate hati_constructor(void *buf, void *handle, int kmflags)
2340Sstevel@tonic-gate {
2350Sstevel@tonic-gate 	hat_t	*hat = buf;
2360Sstevel@tonic-gate 
2370Sstevel@tonic-gate 	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
2380Sstevel@tonic-gate 	bzero(hat->hat_pages_mapped,
2390Sstevel@tonic-gate 	    sizeof (pgcnt_t) * (mmu.max_page_level + 1));
2400Sstevel@tonic-gate 	hat->hat_stats = 0;
2410Sstevel@tonic-gate 	hat->hat_flags = 0;
2420Sstevel@tonic-gate 	mutex_init(&hat->hat_switch_mutex, NULL, MUTEX_DRIVER,
2430Sstevel@tonic-gate 	    (void *)ipltospl(DISP_LEVEL));
2440Sstevel@tonic-gate 	CPUSET_ZERO(hat->hat_cpus);
2450Sstevel@tonic-gate 	hat->hat_htable = NULL;
2460Sstevel@tonic-gate 	hat->hat_ht_hash = NULL;
2470Sstevel@tonic-gate 	return (0);
2480Sstevel@tonic-gate }
2490Sstevel@tonic-gate 
2500Sstevel@tonic-gate /*
2510Sstevel@tonic-gate  * Allocate a hat structure for as. We also create the top level
2520Sstevel@tonic-gate  * htable and initialize it to contain the kernel hat entries.
2530Sstevel@tonic-gate  */
2540Sstevel@tonic-gate hat_t *
2550Sstevel@tonic-gate hat_alloc(struct as *as)
2560Sstevel@tonic-gate {
2570Sstevel@tonic-gate 	hat_t		*hat;
2580Sstevel@tonic-gate 	htable_t	*ht;	/* top level htable */
2590Sstevel@tonic-gate 	uint_t		use_vlp;
2600Sstevel@tonic-gate 
2610Sstevel@tonic-gate 	/*
2620Sstevel@tonic-gate 	 * Once we start creating user process HATs we can enable
2630Sstevel@tonic-gate 	 * the htable_steal() code.
2640Sstevel@tonic-gate 	 */
2650Sstevel@tonic-gate 	if (can_steal_post_boot == 0)
2660Sstevel@tonic-gate 		can_steal_post_boot = 1;
2670Sstevel@tonic-gate 
2680Sstevel@tonic-gate 	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
2690Sstevel@tonic-gate 	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
2700Sstevel@tonic-gate 	hat->hat_as = as;
2710Sstevel@tonic-gate 	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
2720Sstevel@tonic-gate 	ASSERT(hat->hat_flags == 0);
2730Sstevel@tonic-gate 
2740Sstevel@tonic-gate 	/*
2750Sstevel@tonic-gate 	 * a 32 bit process uses a VLP style hat when using PAE
2760Sstevel@tonic-gate 	 */
2770Sstevel@tonic-gate #if defined(__amd64)
2780Sstevel@tonic-gate 	use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
2790Sstevel@tonic-gate #elif defined(__i386)
2800Sstevel@tonic-gate 	use_vlp = mmu.pae_hat;
2810Sstevel@tonic-gate #endif
2820Sstevel@tonic-gate 	if (use_vlp) {
2830Sstevel@tonic-gate 		hat->hat_flags = HAT_VLP;
2840Sstevel@tonic-gate 		bzero(hat->hat_vlp_ptes, VLP_SIZE);
2850Sstevel@tonic-gate 	}
2860Sstevel@tonic-gate 
2870Sstevel@tonic-gate 	/*
2880Sstevel@tonic-gate 	 * Allocate the htable hash
2890Sstevel@tonic-gate 	 */
2900Sstevel@tonic-gate 	if ((hat->hat_flags & HAT_VLP)) {
2910Sstevel@tonic-gate 		hat->hat_num_hash = mmu.vlp_hash_cnt;
2920Sstevel@tonic-gate 		hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
2930Sstevel@tonic-gate 	} else {
2940Sstevel@tonic-gate 		hat->hat_num_hash = mmu.hash_cnt;
2950Sstevel@tonic-gate 		hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
2960Sstevel@tonic-gate 	}
2970Sstevel@tonic-gate 	bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
2980Sstevel@tonic-gate 
2990Sstevel@tonic-gate 	/*
3000Sstevel@tonic-gate 	 * Initialize Kernel HAT entries at the top of the top level page
3010Sstevel@tonic-gate 	 * table for the new hat.
3020Sstevel@tonic-gate 	 *
3030Sstevel@tonic-gate 	 * Note that we don't call htable_release() for the top level, that
3040Sstevel@tonic-gate 	 * happens when the hat is destroyed in hat_free_end()
3050Sstevel@tonic-gate 	 */
3060Sstevel@tonic-gate 	hat->hat_htable = NULL;
3070Sstevel@tonic-gate 	hat->hat_ht_cached = NULL;
3080Sstevel@tonic-gate 	ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
3090Sstevel@tonic-gate 	if (!(hat->hat_flags & HAT_VLP))
3100Sstevel@tonic-gate 		x86pte_copy(kas.a_hat->hat_htable, ht, khat_start,
3110Sstevel@tonic-gate 		    khat_entries);
3120Sstevel@tonic-gate #if defined(__i386)
3130Sstevel@tonic-gate 	else if (khat_entries > 0)
3140Sstevel@tonic-gate 		bcopy(vlp_page + khat_start, hat->hat_vlp_ptes + khat_start,
3150Sstevel@tonic-gate 		    khat_entries * sizeof (x86pte_t));
3160Sstevel@tonic-gate #endif
3170Sstevel@tonic-gate 	hat->hat_htable = ht;
3180Sstevel@tonic-gate 
3190Sstevel@tonic-gate #if defined(__i386)
3200Sstevel@tonic-gate 	/*
3210Sstevel@tonic-gate 	 * PAE32 HAT alignment is less restrictive than the others to keep
3220Sstevel@tonic-gate 	 * the kernel from using too much VA. Because of this we may need
3230Sstevel@tonic-gate 	 * one layer further down when kernelbase isn't 1Gig aligned.
3240Sstevel@tonic-gate 	 * See hat_free_end() for the htable_release() that goes with this
3250Sstevel@tonic-gate 	 * htable_create()
3260Sstevel@tonic-gate 	 */
3270Sstevel@tonic-gate 	if (khat_pae32_htable != NULL) {
3280Sstevel@tonic-gate 		ht = htable_create(hat, kernelbase,
3290Sstevel@tonic-gate 		    khat_pae32_htable->ht_level, NULL);
3300Sstevel@tonic-gate 		x86pte_copy(khat_pae32_htable, ht, khat_pae32_start,
3310Sstevel@tonic-gate 		    khat_pae32_entries);
3320Sstevel@tonic-gate 		ht->ht_valid_cnt = khat_pae32_entries;
3330Sstevel@tonic-gate 	}
3340Sstevel@tonic-gate #endif
3350Sstevel@tonic-gate 
3360Sstevel@tonic-gate 	/*
3370Sstevel@tonic-gate 	 * Put it in the global list of all hats (used by stealing, etc.)
3380Sstevel@tonic-gate 	 */
3390Sstevel@tonic-gate 	mutex_enter(&hat_list_lock);
3400Sstevel@tonic-gate 	if (kas.a_hat->hat_next != NULL) {
3410Sstevel@tonic-gate 		hat->hat_next = kas.a_hat->hat_next;
3420Sstevel@tonic-gate 		hat->hat_prev = kas.a_hat->hat_next->hat_prev;
3430Sstevel@tonic-gate 		kas.a_hat->hat_next->hat_prev->hat_next = hat;
3440Sstevel@tonic-gate 		kas.a_hat->hat_next->hat_prev = hat;
3450Sstevel@tonic-gate 	} else {
3460Sstevel@tonic-gate 		hat->hat_next = hat;
3470Sstevel@tonic-gate 		hat->hat_prev = hat;
3480Sstevel@tonic-gate 	}
3490Sstevel@tonic-gate 	kas.a_hat->hat_next = hat;
3500Sstevel@tonic-gate 	mutex_exit(&hat_list_lock);
3510Sstevel@tonic-gate 
3520Sstevel@tonic-gate 
3530Sstevel@tonic-gate 	return (hat);
3540Sstevel@tonic-gate }
3550Sstevel@tonic-gate 
3560Sstevel@tonic-gate /*
3570Sstevel@tonic-gate  * process has finished executing but as has not been cleaned up yet.
3580Sstevel@tonic-gate  */
3590Sstevel@tonic-gate /*ARGSUSED*/
3600Sstevel@tonic-gate void
3610Sstevel@tonic-gate hat_free_start(hat_t *hat)
3620Sstevel@tonic-gate {
3630Sstevel@tonic-gate 	ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));
3640Sstevel@tonic-gate 	mutex_enter(&hat_list_lock);
3650Sstevel@tonic-gate 	hat->hat_flags |= HAT_FREEING;
3660Sstevel@tonic-gate 	mutex_exit(&hat_list_lock);
3670Sstevel@tonic-gate }
3680Sstevel@tonic-gate 
3690Sstevel@tonic-gate /*
3700Sstevel@tonic-gate  * An address space is being destroyed, so we destroy the associated hat.
3710Sstevel@tonic-gate  */
3720Sstevel@tonic-gate void
3730Sstevel@tonic-gate hat_free_end(hat_t *hat)
3740Sstevel@tonic-gate {
3750Sstevel@tonic-gate 	int i;
3760Sstevel@tonic-gate 	kmem_cache_t *cache;
3770Sstevel@tonic-gate 
3780Sstevel@tonic-gate #ifdef DEBUG
3790Sstevel@tonic-gate 	for (i = 0; i <= mmu.max_page_level; i++)
3800Sstevel@tonic-gate 		ASSERT(hat->hat_pages_mapped[i] == 0);
3810Sstevel@tonic-gate #endif
3820Sstevel@tonic-gate 	ASSERT(hat->hat_flags & HAT_FREEING);
3830Sstevel@tonic-gate 
3840Sstevel@tonic-gate 	/*
3850Sstevel@tonic-gate 	 * must not be running on the given hat
3860Sstevel@tonic-gate 	 */
3870Sstevel@tonic-gate 	ASSERT(CPU->cpu_current_hat != hat);
3880Sstevel@tonic-gate 
3890Sstevel@tonic-gate 	/*
3900Sstevel@tonic-gate 	 * If the hat is currently a stealing victim, wait for the stealing
3910Sstevel@tonic-gate 	 * to finish.  Once we've removed it from the list, nobody can
3920Sstevel@tonic-gate 	 * find these htables anymore.
3930Sstevel@tonic-gate 	 */
3940Sstevel@tonic-gate 	mutex_enter(&hat_list_lock);
3950Sstevel@tonic-gate 	while (hat->hat_flags & HAT_VICTIM)
3960Sstevel@tonic-gate 		cv_wait(&hat_list_cv, &hat_list_lock);
3970Sstevel@tonic-gate 	hat->hat_next->hat_prev = hat->hat_prev;
3980Sstevel@tonic-gate 	hat->hat_prev->hat_next = hat->hat_next;
3990Sstevel@tonic-gate 	if (kas.a_hat->hat_next == hat) {
4000Sstevel@tonic-gate 		kas.a_hat->hat_next = hat->hat_next;
4010Sstevel@tonic-gate 		if (kas.a_hat->hat_next == hat)
4020Sstevel@tonic-gate 			kas.a_hat->hat_next = NULL;
4030Sstevel@tonic-gate 	}
4040Sstevel@tonic-gate 	mutex_exit(&hat_list_lock);
4050Sstevel@tonic-gate 
4060Sstevel@tonic-gate 	/*
4070Sstevel@tonic-gate 	 * Make a pass through the htables freeing them all up.
4080Sstevel@tonic-gate 	 */
4090Sstevel@tonic-gate 	htable_purge_hat(hat);
4100Sstevel@tonic-gate 
4110Sstevel@tonic-gate 	/*
4120Sstevel@tonic-gate 	 * Decide which kmem cache the hash table came from, then free it.
4130Sstevel@tonic-gate 	 */
4140Sstevel@tonic-gate 	if (hat->hat_flags & HAT_VLP)
4150Sstevel@tonic-gate 		cache = vlp_hash_cache;
4160Sstevel@tonic-gate 	else
4170Sstevel@tonic-gate 		cache = hat_hash_cache;
4180Sstevel@tonic-gate 	kmem_cache_free(cache, hat->hat_ht_hash);
4190Sstevel@tonic-gate 	hat->hat_ht_hash = NULL;
4200Sstevel@tonic-gate 
4210Sstevel@tonic-gate 	hat->hat_flags = 0;
4220Sstevel@tonic-gate 	kmem_cache_free(hat_cache, hat);
4230Sstevel@tonic-gate }
4240Sstevel@tonic-gate 
4250Sstevel@tonic-gate /*
4260Sstevel@tonic-gate  * round kernelbase down to a supported value to use for _userlimit
4270Sstevel@tonic-gate  *
4280Sstevel@tonic-gate  * userlimit must be aligned down to an entry in the top level htable.
4290Sstevel@tonic-gate  * The one exception is for 32 bit HAT's running PAE.
4300Sstevel@tonic-gate  */
4310Sstevel@tonic-gate uintptr_t
4320Sstevel@tonic-gate hat_kernelbase(uintptr_t va)
4330Sstevel@tonic-gate {
4340Sstevel@tonic-gate #if defined(__i386)
4350Sstevel@tonic-gate 	va &= LEVEL_MASK(1);
4360Sstevel@tonic-gate #endif
4370Sstevel@tonic-gate 	if (IN_VA_HOLE(va))
4380Sstevel@tonic-gate 		panic("_userlimit %p will fall in VA hole\n", (void *)va);
4390Sstevel@tonic-gate 	return (va);
4400Sstevel@tonic-gate }
4410Sstevel@tonic-gate 
4420Sstevel@tonic-gate /*
4430Sstevel@tonic-gate  * Initialize hat data structures based on processor MMU information.
4440Sstevel@tonic-gate  */
4450Sstevel@tonic-gate void
4460Sstevel@tonic-gate mmu_init(void)
4470Sstevel@tonic-gate {
4480Sstevel@tonic-gate 	uint_t max_htables;
4490Sstevel@tonic-gate 	uint_t pa_bits;
4500Sstevel@tonic-gate 	uint_t va_bits;
4510Sstevel@tonic-gate 	int i;
4520Sstevel@tonic-gate 
4530Sstevel@tonic-gate 	/*
4540Sstevel@tonic-gate 	 * if CPU enabled the page table global bit, use it for the kernel
4550Sstevel@tonic-gate 	 * This is bit 7 in CR4 (PGE - Page Global Enable)
4560Sstevel@tonic-gate 	 */
4570Sstevel@tonic-gate 	if ((x86_feature & X86_PGE) != 0 && (getcr4() & 0x80) != 0)
4580Sstevel@tonic-gate 		mmu.pt_global = PT_GLOBAL;
4590Sstevel@tonic-gate 
4600Sstevel@tonic-gate 	/*
4610Sstevel@tonic-gate 	 * We use PAE except when we aren't on an AMD64 and this is
4620Sstevel@tonic-gate 	 * a 32 bit kernel with all physical addresses less than 4 Gig.
4630Sstevel@tonic-gate 	 */
4640Sstevel@tonic-gate 	mmu.pae_hat = 1;
4650Sstevel@tonic-gate 	if (x86_feature & X86_NX) {
4660Sstevel@tonic-gate 		mmu.pt_nx = PT_NX;
4670Sstevel@tonic-gate 	} else {
4680Sstevel@tonic-gate 		mmu.pt_nx = 0;
4690Sstevel@tonic-gate #if defined(__i386)
4700Sstevel@tonic-gate 		if (!PFN_ABOVE4G(physmax))
4710Sstevel@tonic-gate 			mmu.pae_hat = 0;
4720Sstevel@tonic-gate #endif
4730Sstevel@tonic-gate 	}
4740Sstevel@tonic-gate 
4750Sstevel@tonic-gate #if defined(__i386)
4760Sstevel@tonic-gate 	/*
4770Sstevel@tonic-gate 	 * Setting one of these two lets you force testing of the different
4780Sstevel@tonic-gate 	 * hat modes for 32 bit, regardless of the hardware setup.
4790Sstevel@tonic-gate 	 */
4800Sstevel@tonic-gate 	if (force_pae_on) {
4810Sstevel@tonic-gate 		mmu.pae_hat = 1;
4820Sstevel@tonic-gate 	} else if (force_pae_off) {
4830Sstevel@tonic-gate 		mmu.pae_hat = 0;
4840Sstevel@tonic-gate 		mmu.pt_nx = 0;
4850Sstevel@tonic-gate 	}
4860Sstevel@tonic-gate #endif
4870Sstevel@tonic-gate 
4880Sstevel@tonic-gate 	/*
4890Sstevel@tonic-gate 	 * Use CPU info to set various MMU parameters
4900Sstevel@tonic-gate 	 */
4910Sstevel@tonic-gate 	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
4920Sstevel@tonic-gate 
4930Sstevel@tonic-gate 	if (va_bits < sizeof (void *) * NBBY) {
4940Sstevel@tonic-gate 		mmu.hole_start = (1ul << (va_bits - 1));
4950Sstevel@tonic-gate 		mmu.hole_end = 0ul - mmu.hole_start - 1;
4960Sstevel@tonic-gate 	} else {
4970Sstevel@tonic-gate 		mmu.hole_end = 0;
4980Sstevel@tonic-gate 		mmu.hole_start = mmu.hole_end - 1;
4990Sstevel@tonic-gate 	}
5000Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_121)
5010Sstevel@tonic-gate 	/*
5020Sstevel@tonic-gate 	 * If erratum 121 has already been detected at this time, hole_start
5030Sstevel@tonic-gate 	 * contains the value to be subtracted from mmu.hole_start.
5040Sstevel@tonic-gate 	 */
5050Sstevel@tonic-gate 	ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
5060Sstevel@tonic-gate 	hole_start = mmu.hole_start - hole_start;
5070Sstevel@tonic-gate #else
5080Sstevel@tonic-gate 	hole_start = mmu.hole_start;
5090Sstevel@tonic-gate #endif
5100Sstevel@tonic-gate 	hole_end = mmu.hole_end;
5110Sstevel@tonic-gate 
5120Sstevel@tonic-gate 	mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
5130Sstevel@tonic-gate 	if (mmu.pae_hat == 0 && pa_bits > 32)
5140Sstevel@tonic-gate 		mmu.highest_pfn = PFN_4G - 1;
5150Sstevel@tonic-gate 
5160Sstevel@tonic-gate 	if (mmu.pae_hat) {
5170Sstevel@tonic-gate 		mmu.pte_size = 8;	/* 8 byte PTEs */
5180Sstevel@tonic-gate 		mmu.pte_size_shift = 3;
5190Sstevel@tonic-gate 	} else {
5200Sstevel@tonic-gate 		mmu.pte_size = 4;	/* 4 byte PTEs */
5210Sstevel@tonic-gate 		mmu.pte_size_shift = 2;
5220Sstevel@tonic-gate 	}
5230Sstevel@tonic-gate 
5240Sstevel@tonic-gate 	if (mmu.pae_hat && (x86_feature & X86_PAE) == 0)
5250Sstevel@tonic-gate 		panic("Processor does not support PAE");
5260Sstevel@tonic-gate 
5270Sstevel@tonic-gate 	if ((x86_feature & X86_CX8) == 0)
5280Sstevel@tonic-gate 		panic("Processor does not support cmpxchg8b instruction");
5290Sstevel@tonic-gate 
5300Sstevel@tonic-gate 	/*
5310Sstevel@tonic-gate 	 * Initialize parameters based on the 64 or 32 bit kernels and
5320Sstevel@tonic-gate 	 * for the 32 bit kernel decide if we should use PAE.
5330Sstevel@tonic-gate 	 */
5340Sstevel@tonic-gate 	if (x86_feature & X86_LARGEPAGE)
5350Sstevel@tonic-gate 		mmu.max_page_level = 1;
5360Sstevel@tonic-gate 	else
5370Sstevel@tonic-gate 		mmu.max_page_level = 0;
5380Sstevel@tonic-gate 	mmu_page_sizes = mmu.max_page_level + 1;
5390Sstevel@tonic-gate 	mmu_exported_page_sizes = mmu_page_sizes;
5400Sstevel@tonic-gate 
5410Sstevel@tonic-gate #if defined(__amd64)
5420Sstevel@tonic-gate 
5430Sstevel@tonic-gate 	mmu.num_level = 4;
5440Sstevel@tonic-gate 	mmu.max_level = 3;
5450Sstevel@tonic-gate 	mmu.ptes_per_table = 512;
5460Sstevel@tonic-gate 	mmu.top_level_count = 512;
5470Sstevel@tonic-gate 
5480Sstevel@tonic-gate 	mmu.level_shift[0] = 12;
5490Sstevel@tonic-gate 	mmu.level_shift[1] = 21;
5500Sstevel@tonic-gate 	mmu.level_shift[2] = 30;
5510Sstevel@tonic-gate 	mmu.level_shift[3] = 39;
5520Sstevel@tonic-gate 
5530Sstevel@tonic-gate #elif defined(__i386)
5540Sstevel@tonic-gate 
5550Sstevel@tonic-gate 	if (mmu.pae_hat) {
5560Sstevel@tonic-gate 		mmu.num_level = 3;
5570Sstevel@tonic-gate 		mmu.max_level = 2;
5580Sstevel@tonic-gate 		mmu.ptes_per_table = 512;
5590Sstevel@tonic-gate 		mmu.top_level_count = 4;
5600Sstevel@tonic-gate 
5610Sstevel@tonic-gate 		mmu.level_shift[0] = 12;
5620Sstevel@tonic-gate 		mmu.level_shift[1] = 21;
5630Sstevel@tonic-gate 		mmu.level_shift[2] = 30;
5640Sstevel@tonic-gate 
5650Sstevel@tonic-gate 	} else {
5660Sstevel@tonic-gate 		mmu.num_level = 2;
5670Sstevel@tonic-gate 		mmu.max_level = 1;
5680Sstevel@tonic-gate 		mmu.ptes_per_table = 1024;
5690Sstevel@tonic-gate 		mmu.top_level_count = 1024;
5700Sstevel@tonic-gate 
5710Sstevel@tonic-gate 		mmu.level_shift[0] = 12;
5720Sstevel@tonic-gate 		mmu.level_shift[1] = 22;
5730Sstevel@tonic-gate 	}
5740Sstevel@tonic-gate 
5750Sstevel@tonic-gate #endif	/* __i386 */
5760Sstevel@tonic-gate 
5770Sstevel@tonic-gate 	for (i = 0; i < mmu.num_level; ++i) {
5780Sstevel@tonic-gate 		mmu.level_size[i] = 1UL << mmu.level_shift[i];
5790Sstevel@tonic-gate 		mmu.level_offset[i] = mmu.level_size[i] - 1;
5800Sstevel@tonic-gate 		mmu.level_mask[i] = ~mmu.level_offset[i];
5810Sstevel@tonic-gate 	}
5820Sstevel@tonic-gate 
5830Sstevel@tonic-gate 	mmu.pte_bits[0] = PT_VALID;
5840Sstevel@tonic-gate 	for (i = 1; i <= mmu.max_page_level; ++i)
5850Sstevel@tonic-gate 		mmu.pte_bits[i] = PT_VALID | PT_PAGESIZE;
5860Sstevel@tonic-gate 
5870Sstevel@tonic-gate 	/*
5880Sstevel@tonic-gate 	 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
5890Sstevel@tonic-gate 	 */
5900Sstevel@tonic-gate 	for (i = 1; i < mmu.num_level; ++i)
5910Sstevel@tonic-gate 		mmu.ptp_bits[i] = PT_PTPBITS;
5920Sstevel@tonic-gate #if defined(__i386)
5930Sstevel@tonic-gate 	mmu.ptp_bits[2] = PT_VALID;
5940Sstevel@tonic-gate #endif
5950Sstevel@tonic-gate 
5960Sstevel@tonic-gate 	/*
5970Sstevel@tonic-gate 	 * Compute how many hash table entries to have per process for htables.
5980Sstevel@tonic-gate 	 * We start with 1 page's worth of entries.
5990Sstevel@tonic-gate 	 *
6000Sstevel@tonic-gate 	 * If physical memory is small, reduce the amount need to cover it.
6010Sstevel@tonic-gate 	 */
6020Sstevel@tonic-gate 	max_htables = physmax / mmu.ptes_per_table;
6030Sstevel@tonic-gate 	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
6040Sstevel@tonic-gate 	while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
6050Sstevel@tonic-gate 		mmu.hash_cnt >>= 1;
6060Sstevel@tonic-gate 	mmu.vlp_hash_cnt = mmu.hash_cnt;
6070Sstevel@tonic-gate 
6080Sstevel@tonic-gate #if defined(__amd64)
6090Sstevel@tonic-gate 	/*
6100Sstevel@tonic-gate 	 * If running in 64 bits and physical memory is large,
6110Sstevel@tonic-gate 	 * increase the size of the cache to cover all of memory for
6120Sstevel@tonic-gate 	 * a 64 bit process.
6130Sstevel@tonic-gate 	 */
6140Sstevel@tonic-gate #define	HASH_MAX_LENGTH 4
6150Sstevel@tonic-gate 	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
6160Sstevel@tonic-gate 		mmu.hash_cnt <<= 1;
6170Sstevel@tonic-gate #endif
6180Sstevel@tonic-gate 
6190Sstevel@tonic-gate 	/*
6200Sstevel@tonic-gate 	 * This code knows that there are only 2 pagesizes.
6210Sstevel@tonic-gate 	 * We ignore 4MB (non-PAE) for now. The value is only used
6220Sstevel@tonic-gate 	 * for optimizing demaps across large ranges.
6230Sstevel@tonic-gate 	 * These return zero if no information is known.
6240Sstevel@tonic-gate 	 */
6250Sstevel@tonic-gate 	mmu.tlb_entries[0] = cpuid_get_dtlb_nent(NULL, MMU_PAGESIZE);
6260Sstevel@tonic-gate 	mmu.tlb_entries[1] = cpuid_get_dtlb_nent(NULL, 2 * 1024 * 1024);
6270Sstevel@tonic-gate }
6280Sstevel@tonic-gate 
6290Sstevel@tonic-gate 
6300Sstevel@tonic-gate /*
6310Sstevel@tonic-gate  * initialize hat data structures
6320Sstevel@tonic-gate  */
6330Sstevel@tonic-gate void
6340Sstevel@tonic-gate hat_init()
6350Sstevel@tonic-gate {
6360Sstevel@tonic-gate #if defined(__i386)
6370Sstevel@tonic-gate 	/*
6380Sstevel@tonic-gate 	 * _userlimit must be aligned correctly
6390Sstevel@tonic-gate 	 */
6400Sstevel@tonic-gate 	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
6410Sstevel@tonic-gate 		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
6420Sstevel@tonic-gate 		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
6430Sstevel@tonic-gate 		halt("hat_init(): Unable to continue");
6440Sstevel@tonic-gate 	}
6450Sstevel@tonic-gate #endif
6460Sstevel@tonic-gate 
6470Sstevel@tonic-gate 	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
6480Sstevel@tonic-gate 
6490Sstevel@tonic-gate 	/*
6500Sstevel@tonic-gate 	 * initialize kmem caches
6510Sstevel@tonic-gate 	 */
6520Sstevel@tonic-gate 	htable_init();
6530Sstevel@tonic-gate 	hment_init();
6540Sstevel@tonic-gate 
6550Sstevel@tonic-gate 	hat_cache = kmem_cache_create("hat_t",
6560Sstevel@tonic-gate 	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
6570Sstevel@tonic-gate 	    NULL, 0, 0);
6580Sstevel@tonic-gate 
6590Sstevel@tonic-gate 	hat_hash_cache = kmem_cache_create("HatHash",
6600Sstevel@tonic-gate 	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
6610Sstevel@tonic-gate 	    NULL, 0, 0);
6620Sstevel@tonic-gate 
6630Sstevel@tonic-gate 	/*
6640Sstevel@tonic-gate 	 * VLP hats can use a smaller hash table size on large memroy machines
6650Sstevel@tonic-gate 	 */
6660Sstevel@tonic-gate 	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
6670Sstevel@tonic-gate 		vlp_hash_cache = hat_hash_cache;
6680Sstevel@tonic-gate 	} else {
6690Sstevel@tonic-gate 		vlp_hash_cache = kmem_cache_create("HatVlpHash",
6700Sstevel@tonic-gate 		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
6710Sstevel@tonic-gate 		    NULL, 0, 0);
6720Sstevel@tonic-gate 	}
6730Sstevel@tonic-gate 
6740Sstevel@tonic-gate 	/*
6750Sstevel@tonic-gate 	 * Set up the kernel's hat
6760Sstevel@tonic-gate 	 */
6770Sstevel@tonic-gate 	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
6780Sstevel@tonic-gate 	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
6790Sstevel@tonic-gate 	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
6800Sstevel@tonic-gate 	kas.a_hat->hat_as = &kas;
6810Sstevel@tonic-gate 	kas.a_hat->hat_flags = 0;
6820Sstevel@tonic-gate 	AS_LOCK_EXIT(&kas, &kas.a_lock);
6830Sstevel@tonic-gate 
6840Sstevel@tonic-gate 	CPUSET_ZERO(khat_cpuset);
6850Sstevel@tonic-gate 	CPUSET_ADD(khat_cpuset, CPU->cpu_id);
6860Sstevel@tonic-gate 
6870Sstevel@tonic-gate 	/*
6880Sstevel@tonic-gate 	 * The kernel hat's next pointer serves as the head of the hat list .
6890Sstevel@tonic-gate 	 */
6900Sstevel@tonic-gate 	kas.a_hat->hat_next = NULL;
6910Sstevel@tonic-gate 
6920Sstevel@tonic-gate 	/*
6930Sstevel@tonic-gate 	 * Allocate an htable hash bucket for the kernel
6940Sstevel@tonic-gate 	 * XX64 - tune for 64 bit procs
6950Sstevel@tonic-gate 	 */
6960Sstevel@tonic-gate 	kas.a_hat->hat_num_hash = mmu.hash_cnt;
6970Sstevel@tonic-gate 	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
6980Sstevel@tonic-gate 	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
6990Sstevel@tonic-gate 
7000Sstevel@tonic-gate 	/*
7010Sstevel@tonic-gate 	 * zero out the top level and cached htable pointers
7020Sstevel@tonic-gate 	 */
7030Sstevel@tonic-gate 	kas.a_hat->hat_ht_cached = NULL;
7040Sstevel@tonic-gate 	kas.a_hat->hat_htable = NULL;
7050Sstevel@tonic-gate }
7060Sstevel@tonic-gate 
7070Sstevel@tonic-gate /*
7080Sstevel@tonic-gate  * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
7090Sstevel@tonic-gate  *
7100Sstevel@tonic-gate  * Each CPU has a set of 2 pagetables that are reused for any 32 bit
7110Sstevel@tonic-gate  * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
7120Sstevel@tonic-gate  * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
7130Sstevel@tonic-gate  */
7140Sstevel@tonic-gate /*ARGSUSED*/
7150Sstevel@tonic-gate static void
7160Sstevel@tonic-gate hat_vlp_setup(struct cpu *cpu)
7170Sstevel@tonic-gate {
7180Sstevel@tonic-gate #if defined(__amd64)
7190Sstevel@tonic-gate 	struct hat_cpu_info *hci = cpu->cpu_hat_info;
7200Sstevel@tonic-gate 	pfn_t pfn;
7210Sstevel@tonic-gate 
7220Sstevel@tonic-gate 	/*
7230Sstevel@tonic-gate 	 * allocate the level==2 page table for the bottom most
7240Sstevel@tonic-gate 	 * 512Gig of address space (this is where 32 bit apps live)
7250Sstevel@tonic-gate 	 */
7260Sstevel@tonic-gate 	ASSERT(hci != NULL);
7270Sstevel@tonic-gate 	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
7280Sstevel@tonic-gate 
7290Sstevel@tonic-gate 	/*
7300Sstevel@tonic-gate 	 * Allocate a top level pagetable and copy the kernel's
7310Sstevel@tonic-gate 	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
7320Sstevel@tonic-gate 	 */
7330Sstevel@tonic-gate 	hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
7340Sstevel@tonic-gate 	hci->hci_vlp_pfn =
7350Sstevel@tonic-gate 	    hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
7360Sstevel@tonic-gate 	ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
7370Sstevel@tonic-gate 	bcopy(vlp_page + khat_start, hci->hci_vlp_l3ptes + khat_start,
7380Sstevel@tonic-gate 	    khat_entries * sizeof (x86pte_t));
7390Sstevel@tonic-gate 
7400Sstevel@tonic-gate 	pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
7410Sstevel@tonic-gate 	ASSERT(pfn != PFN_INVALID);
7420Sstevel@tonic-gate 	hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
7430Sstevel@tonic-gate #endif /* __amd64 */
7440Sstevel@tonic-gate }
7450Sstevel@tonic-gate 
7460Sstevel@tonic-gate /*
7470Sstevel@tonic-gate  * Finish filling in the kernel hat.
7480Sstevel@tonic-gate  * Pre fill in all top level kernel page table entries for the kernel's
7490Sstevel@tonic-gate  * part of the address range.  From this point on we can't use any new
7500Sstevel@tonic-gate  * kernel large pages if they need PTE's at max_level
7510Sstevel@tonic-gate  */
7520Sstevel@tonic-gate void
7530Sstevel@tonic-gate hat_init_finish(void)
7540Sstevel@tonic-gate {
7550Sstevel@tonic-gate 	htable_t	*top = kas.a_hat->hat_htable;
7560Sstevel@tonic-gate 	htable_t	*ht;
7570Sstevel@tonic-gate 	uint_t		e;
7580Sstevel@tonic-gate 	x86pte_t	pte;
7590Sstevel@tonic-gate 	uintptr_t	va = kernelbase;
7600Sstevel@tonic-gate 
7610Sstevel@tonic-gate 
7620Sstevel@tonic-gate #if defined(__i386)
7630Sstevel@tonic-gate 	ASSERT((va & LEVEL_MASK(1)) == va);
7640Sstevel@tonic-gate 
7650Sstevel@tonic-gate 	/*
7660Sstevel@tonic-gate 	 * Deal with kernelbase not 1Gig aligned for 32 bit PAE hats.
7670Sstevel@tonic-gate 	 */
7680Sstevel@tonic-gate 	if (!mmu.pae_hat || (va & LEVEL_OFFSET(mmu.max_level)) == 0) {
7690Sstevel@tonic-gate 		khat_pae32_htable = NULL;
7700Sstevel@tonic-gate 	} else {
7710Sstevel@tonic-gate 		ASSERT(mmu.max_level == 2);
7720Sstevel@tonic-gate 		ASSERT((va & LEVEL_OFFSET(mmu.max_level - 1)) == 0);
7730Sstevel@tonic-gate 		khat_pae32_htable =
7740Sstevel@tonic-gate 		    htable_create(kas.a_hat, va, mmu.max_level - 1, NULL);
7750Sstevel@tonic-gate 		khat_pae32_start = htable_va2entry(va, khat_pae32_htable);
7760Sstevel@tonic-gate 		khat_pae32_entries = mmu.ptes_per_table - khat_pae32_start;
7770Sstevel@tonic-gate 		for (e = khat_pae32_start; e < mmu.ptes_per_table;
7780Sstevel@tonic-gate 		    ++e, va += LEVEL_SIZE(mmu.max_level - 1)) {
7790Sstevel@tonic-gate 			pte = x86pte_get(khat_pae32_htable, e);
7800Sstevel@tonic-gate 			if (PTE_ISVALID(pte))
7810Sstevel@tonic-gate 				continue;
7820Sstevel@tonic-gate 			ht = htable_create(kas.a_hat, va, mmu.max_level - 2,
7830Sstevel@tonic-gate 			    NULL);
7840Sstevel@tonic-gate 			ASSERT(ht != NULL);
7850Sstevel@tonic-gate 		}
7860Sstevel@tonic-gate 	}
7870Sstevel@tonic-gate #endif
7880Sstevel@tonic-gate 
7890Sstevel@tonic-gate 	/*
7900Sstevel@tonic-gate 	 * The kernel hat will need fixed values in the highest level
7910Sstevel@tonic-gate 	 * ptable for copying to all other hat's. This implies
7920Sstevel@tonic-gate 	 * alignment restrictions on _userlimit.
7930Sstevel@tonic-gate 	 *
7940Sstevel@tonic-gate 	 * Note we don't htable_release() these htables. This keeps them
7950Sstevel@tonic-gate 	 * from ever being stolen or free'd.
7960Sstevel@tonic-gate 	 *
7970Sstevel@tonic-gate 	 * top_level_count is used instead of ptes_per_table, since
7980Sstevel@tonic-gate 	 * on 32-bit PAE we only have 4 usable entries at the top level ptable.
7990Sstevel@tonic-gate 	 */
8000Sstevel@tonic-gate 	if (va == 0)
8010Sstevel@tonic-gate 		khat_start = mmu.top_level_count;
8020Sstevel@tonic-gate 	else
8030Sstevel@tonic-gate 		khat_start = htable_va2entry(va, kas.a_hat->hat_htable);
8040Sstevel@tonic-gate 	khat_entries = mmu.top_level_count - khat_start;
8050Sstevel@tonic-gate 	for (e = khat_start; e < mmu.top_level_count;
8060Sstevel@tonic-gate 	    ++e, va += LEVEL_SIZE(mmu.max_level)) {
8070Sstevel@tonic-gate 		pte = x86pte_get(top, e);
8080Sstevel@tonic-gate 		if (PTE_ISVALID(pte))
8090Sstevel@tonic-gate 			continue;
8100Sstevel@tonic-gate 		ht = htable_create(kas.a_hat, va, mmu.max_level - 1, NULL);
8110Sstevel@tonic-gate 		ASSERT(ht != NULL);
8120Sstevel@tonic-gate 	}
8130Sstevel@tonic-gate 
8140Sstevel@tonic-gate 	/*
8150Sstevel@tonic-gate 	 * We are now effectively running on the kernel hat.
8160Sstevel@tonic-gate 	 * Clearing use_boot_reserve shuts off using the pre-allocated boot
8170Sstevel@tonic-gate 	 * reserve for all HAT allocations.  From here on, the reserves are
8180Sstevel@tonic-gate 	 * only used when mapping in memory for the hat's own allocations.
8190Sstevel@tonic-gate 	 */
8200Sstevel@tonic-gate 	use_boot_reserve = 0;
8210Sstevel@tonic-gate 	htable_adjust_reserve();
8220Sstevel@tonic-gate 
8230Sstevel@tonic-gate 	/*
8240Sstevel@tonic-gate 	 * 32 bit kernels use only 4 of the 512 entries in its top level
8250Sstevel@tonic-gate 	 * pagetable. We'll use the remainder for the "per CPU" page tables
8260Sstevel@tonic-gate 	 * for VLP processes.
8270Sstevel@tonic-gate 	 *
8280Sstevel@tonic-gate 	 * We map the top level kernel pagetable into the kernel's AS to make
8290Sstevel@tonic-gate 	 * it easy to use bcopy for kernel entry PTEs.
8300Sstevel@tonic-gate 	 *
8310Sstevel@tonic-gate 	 * We were guaranteed to get a physical address < 4Gig, since the 32 bit
8320Sstevel@tonic-gate 	 * boot loader uses non-PAE page tables.
8330Sstevel@tonic-gate 	 */
8340Sstevel@tonic-gate 	if (mmu.pae_hat) {
8350Sstevel@tonic-gate 		vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
8360Sstevel@tonic-gate 		hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
8370Sstevel@tonic-gate 		    kas.a_hat->hat_htable->ht_pfn,
8380Sstevel@tonic-gate 		    PROT_READ | PROT_WRITE | HAT_NOSYNC | HAT_UNORDERED_OK,
8390Sstevel@tonic-gate 		    HAT_LOAD | HAT_LOAD_NOCONSIST);
8400Sstevel@tonic-gate 	}
8410Sstevel@tonic-gate 	hat_vlp_setup(CPU);
8420Sstevel@tonic-gate }
8430Sstevel@tonic-gate 
8440Sstevel@tonic-gate /*
8450Sstevel@tonic-gate  * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
8460Sstevel@tonic-gate  * are 32 bit, so for safety we must use cas64() to install these.
8470Sstevel@tonic-gate  */
8480Sstevel@tonic-gate #ifdef __i386
8490Sstevel@tonic-gate static void
8500Sstevel@tonic-gate reload_pae32(hat_t *hat, cpu_t *cpu)
8510Sstevel@tonic-gate {
8520Sstevel@tonic-gate 	x86pte_t *src;
8530Sstevel@tonic-gate 	x86pte_t *dest;
8540Sstevel@tonic-gate 	x86pte_t pte;
8550Sstevel@tonic-gate 	int i;
8560Sstevel@tonic-gate 
8570Sstevel@tonic-gate 	/*
8580Sstevel@tonic-gate 	 * Load the 4 entries of the level 2 page table into this
8590Sstevel@tonic-gate 	 * cpu's range of the vlp_page and point cr3 at them.
8600Sstevel@tonic-gate 	 */
8610Sstevel@tonic-gate 	ASSERT(mmu.pae_hat);
8620Sstevel@tonic-gate 	src = hat->hat_vlp_ptes;
8630Sstevel@tonic-gate 	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
8640Sstevel@tonic-gate 	for (i = 0; i < VLP_NUM_PTES; ++i) {
8650Sstevel@tonic-gate 		for (;;) {
8660Sstevel@tonic-gate 			pte = dest[i];
8670Sstevel@tonic-gate 			if (pte == src[i])
8680Sstevel@tonic-gate 				break;
8690Sstevel@tonic-gate 			if (cas64(dest + i, pte, src[i]) != src[i])
8700Sstevel@tonic-gate 				break;
8710Sstevel@tonic-gate 		}
8720Sstevel@tonic-gate 	}
8730Sstevel@tonic-gate }
8740Sstevel@tonic-gate #endif
8750Sstevel@tonic-gate 
8760Sstevel@tonic-gate /*
8770Sstevel@tonic-gate  * Switch to a new active hat, maintaining bit masks to track active CPUs.
8780Sstevel@tonic-gate  */
8790Sstevel@tonic-gate void
8800Sstevel@tonic-gate hat_switch(hat_t *hat)
8810Sstevel@tonic-gate {
8820Sstevel@tonic-gate 	uintptr_t	newcr3;
8830Sstevel@tonic-gate 	cpu_t		*cpu = CPU;
8840Sstevel@tonic-gate 	hat_t		*old = cpu->cpu_current_hat;
8850Sstevel@tonic-gate 
8860Sstevel@tonic-gate 	/*
8870Sstevel@tonic-gate 	 * set up this information first, so we don't miss any cross calls
8880Sstevel@tonic-gate 	 */
8890Sstevel@tonic-gate 	if (old != NULL) {
8900Sstevel@tonic-gate 		if (old == hat)
8910Sstevel@tonic-gate 			return;
8920Sstevel@tonic-gate 		if (old != kas.a_hat)
8930Sstevel@tonic-gate 			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
8940Sstevel@tonic-gate 	}
8950Sstevel@tonic-gate 
8960Sstevel@tonic-gate 	/*
8970Sstevel@tonic-gate 	 * Wait for any in flight pagetable invalidates on this hat to finish.
8980Sstevel@tonic-gate 	 * This is a spin lock at DISP_LEVEL
8990Sstevel@tonic-gate 	 */
9000Sstevel@tonic-gate 	if (hat != kas.a_hat) {
9010Sstevel@tonic-gate 		mutex_enter(&hat->hat_switch_mutex);
9020Sstevel@tonic-gate 		CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
9030Sstevel@tonic-gate 		mutex_exit(&hat->hat_switch_mutex);
9040Sstevel@tonic-gate 	}
9050Sstevel@tonic-gate 	cpu->cpu_current_hat = hat;
9060Sstevel@tonic-gate 
9070Sstevel@tonic-gate 	/*
9080Sstevel@tonic-gate 	 * now go ahead and load cr3
9090Sstevel@tonic-gate 	 */
9100Sstevel@tonic-gate 	if (hat->hat_flags & HAT_VLP) {
9110Sstevel@tonic-gate #if defined(__amd64)
9120Sstevel@tonic-gate 		x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
9130Sstevel@tonic-gate 
9140Sstevel@tonic-gate 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
9150Sstevel@tonic-gate 		newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
9160Sstevel@tonic-gate #elif defined(__i386)
9170Sstevel@tonic-gate 		reload_pae32(hat, cpu);
9180Sstevel@tonic-gate 		newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
9190Sstevel@tonic-gate 		    (cpu->cpu_id + 1) * VLP_SIZE;
9200Sstevel@tonic-gate #endif
9210Sstevel@tonic-gate 	} else {
9220Sstevel@tonic-gate 		newcr3 = MAKECR3(hat->hat_htable->ht_pfn);
9230Sstevel@tonic-gate 	}
9240Sstevel@tonic-gate 	setcr3(newcr3);
9250Sstevel@tonic-gate 	ASSERT(cpu == CPU);
9260Sstevel@tonic-gate }
9270Sstevel@tonic-gate 
9280Sstevel@tonic-gate /*
9290Sstevel@tonic-gate  * Utility to return a valid x86pte_t from protections, pfn, and level number
9300Sstevel@tonic-gate  */
9310Sstevel@tonic-gate static x86pte_t
9320Sstevel@tonic-gate hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
9330Sstevel@tonic-gate {
9340Sstevel@tonic-gate 	x86pte_t	pte;
9350Sstevel@tonic-gate 	uint_t		cache_attr = attr & HAT_ORDER_MASK;
9360Sstevel@tonic-gate 
9370Sstevel@tonic-gate 	pte = MAKEPTE(pfn, level);
9380Sstevel@tonic-gate 
9390Sstevel@tonic-gate 	if (attr & PROT_WRITE)
9400Sstevel@tonic-gate 		PTE_SET(pte, PT_WRITABLE);
9410Sstevel@tonic-gate 
9420Sstevel@tonic-gate 	if (attr & PROT_USER)
9430Sstevel@tonic-gate 		PTE_SET(pte, PT_USER);
9440Sstevel@tonic-gate 
9450Sstevel@tonic-gate 	if (!(attr & PROT_EXEC))
9460Sstevel@tonic-gate 		PTE_SET(pte, mmu.pt_nx);
9470Sstevel@tonic-gate 
9480Sstevel@tonic-gate 	/*
9490Sstevel@tonic-gate 	 * set the software bits used track ref/mod sync's and hments
9500Sstevel@tonic-gate 	 */
9510Sstevel@tonic-gate 	if (attr & HAT_NOSYNC)
9520Sstevel@tonic-gate 		PTE_SET(pte, PT_NOSYNC);
9530Sstevel@tonic-gate 	if (flags & HAT_LOAD_NOCONSIST)
9540Sstevel@tonic-gate 		PTE_SET(pte, PT_NOCONSIST | PT_NOSYNC);
9550Sstevel@tonic-gate 
9560Sstevel@tonic-gate 	/*
9570Sstevel@tonic-gate 	 * Set the caching attributes in the PTE. The combination
9580Sstevel@tonic-gate 	 * of attributes are poorly defined, so we pay attention
9590Sstevel@tonic-gate 	 * to them in the given order.
9600Sstevel@tonic-gate 	 *
9610Sstevel@tonic-gate 	 * The test for HAT_STRICTORDER is different because it's defined
9620Sstevel@tonic-gate 	 * as "0" - which was a stupid thing to do, but is too late to change!
9630Sstevel@tonic-gate 	 */
9640Sstevel@tonic-gate 	if (cache_attr == HAT_STRICTORDER) {
9650Sstevel@tonic-gate 		PTE_SET(pte, PT_NOCACHE);
9660Sstevel@tonic-gate 	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
9670Sstevel@tonic-gate 	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
9680Sstevel@tonic-gate 		/* nothing to set */;
9690Sstevel@tonic-gate 	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
9700Sstevel@tonic-gate 		PTE_SET(pte, PT_NOCACHE);
9710Sstevel@tonic-gate 		if (x86_feature & X86_PAT)
9720Sstevel@tonic-gate 			PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
9730Sstevel@tonic-gate 		else
9740Sstevel@tonic-gate 			PTE_SET(pte, PT_WRITETHRU);
9750Sstevel@tonic-gate 	} else {
9760Sstevel@tonic-gate 		panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
9770Sstevel@tonic-gate 	}
9780Sstevel@tonic-gate 
9790Sstevel@tonic-gate 	return (pte);
9800Sstevel@tonic-gate }
9810Sstevel@tonic-gate 
9820Sstevel@tonic-gate /*
9830Sstevel@tonic-gate  * Duplicate address translations of the parent to the child.
9840Sstevel@tonic-gate  * This function really isn't used anymore.
9850Sstevel@tonic-gate  */
9860Sstevel@tonic-gate /*ARGSUSED*/
9870Sstevel@tonic-gate int
9880Sstevel@tonic-gate hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
9890Sstevel@tonic-gate {
9900Sstevel@tonic-gate 	ASSERT((uintptr_t)addr < kernelbase);
9910Sstevel@tonic-gate 	ASSERT(new != kas.a_hat);
9920Sstevel@tonic-gate 	ASSERT(old != kas.a_hat);
9930Sstevel@tonic-gate 	return (0);
9940Sstevel@tonic-gate }
9950Sstevel@tonic-gate 
9960Sstevel@tonic-gate /*
9970Sstevel@tonic-gate  * Allocate any hat resources required for a process being swapped in.
9980Sstevel@tonic-gate  */
9990Sstevel@tonic-gate /*ARGSUSED*/
10000Sstevel@tonic-gate void
10010Sstevel@tonic-gate hat_swapin(hat_t *hat)
10020Sstevel@tonic-gate {
10030Sstevel@tonic-gate 	/* do nothing - we let everything fault back in */
10040Sstevel@tonic-gate }
10050Sstevel@tonic-gate 
10060Sstevel@tonic-gate /*
10070Sstevel@tonic-gate  * Unload all translations associated with an address space of a process
10080Sstevel@tonic-gate  * that is being swapped out.
10090Sstevel@tonic-gate  */
10100Sstevel@tonic-gate void
10110Sstevel@tonic-gate hat_swapout(hat_t *hat)
10120Sstevel@tonic-gate {
10130Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)0;
10140Sstevel@tonic-gate 	uintptr_t	eaddr = _userlimit;
10150Sstevel@tonic-gate 	htable_t	*ht = NULL;
10160Sstevel@tonic-gate 	level_t		l;
10170Sstevel@tonic-gate 
10180Sstevel@tonic-gate 	/*
10190Sstevel@tonic-gate 	 * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
10200Sstevel@tonic-gate 	 * seg_spt and shared pagetables can't be swapped out.
10210Sstevel@tonic-gate 	 * Take a look at segspt_shmswapout() - it's a big no-op.
10220Sstevel@tonic-gate 	 *
10230Sstevel@tonic-gate 	 * Instead we'll walk through all the address space and unload
10240Sstevel@tonic-gate 	 * any mappings which we are sure are not shared, not locked.
10250Sstevel@tonic-gate 	 */
10260Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
10270Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
10280Sstevel@tonic-gate 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
10290Sstevel@tonic-gate 	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
10300Sstevel@tonic-gate 		eaddr = (uintptr_t)hat->hat_as->a_userlimit;
10310Sstevel@tonic-gate 
10320Sstevel@tonic-gate 	while (vaddr < eaddr) {
10330Sstevel@tonic-gate 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
10340Sstevel@tonic-gate 		if (ht == NULL)
10350Sstevel@tonic-gate 			break;
10360Sstevel@tonic-gate 
10370Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
10380Sstevel@tonic-gate 
10390Sstevel@tonic-gate 		/*
10400Sstevel@tonic-gate 		 * If the page table is shared skip its entire range.
10410Sstevel@tonic-gate 		 * This code knows that only level 0 page tables are shared
10420Sstevel@tonic-gate 		 */
10430Sstevel@tonic-gate 		l = ht->ht_level;
10440Sstevel@tonic-gate 		if (ht->ht_flags & HTABLE_SHARED_PFN) {
10450Sstevel@tonic-gate 			ASSERT(l == 0);
10460Sstevel@tonic-gate 			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
10470Sstevel@tonic-gate 			htable_release(ht);
10480Sstevel@tonic-gate 			ht = NULL;
10490Sstevel@tonic-gate 			continue;
10500Sstevel@tonic-gate 		}
10510Sstevel@tonic-gate 
10520Sstevel@tonic-gate 		/*
10530Sstevel@tonic-gate 		 * If the page table has no locked entries, unload this one.
10540Sstevel@tonic-gate 		 */
10550Sstevel@tonic-gate 		if (ht->ht_lock_cnt == 0)
10560Sstevel@tonic-gate 			hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
10570Sstevel@tonic-gate 			    HAT_UNLOAD_UNMAP);
10580Sstevel@tonic-gate 
10590Sstevel@tonic-gate 		/*
10600Sstevel@tonic-gate 		 * If we have a level 0 page table with locked entries,
10610Sstevel@tonic-gate 		 * skip the entire page table, otherwise skip just one entry.
10620Sstevel@tonic-gate 		 */
10630Sstevel@tonic-gate 		if (ht->ht_lock_cnt > 0 && l == 0)
10640Sstevel@tonic-gate 			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
10650Sstevel@tonic-gate 		else
10660Sstevel@tonic-gate 			vaddr += LEVEL_SIZE(l);
10670Sstevel@tonic-gate 	}
10680Sstevel@tonic-gate 	if (ht)
10690Sstevel@tonic-gate 		htable_release(ht);
10700Sstevel@tonic-gate 
10710Sstevel@tonic-gate 	/*
10720Sstevel@tonic-gate 	 * We're in swapout because the system is low on memory, so
10730Sstevel@tonic-gate 	 * go back and flush all the htables off the cached list.
10740Sstevel@tonic-gate 	 */
10750Sstevel@tonic-gate 	htable_purge_hat(hat);
10760Sstevel@tonic-gate }
10770Sstevel@tonic-gate 
10780Sstevel@tonic-gate /*
10790Sstevel@tonic-gate  * returns number of bytes that have valid mappings in hat.
10800Sstevel@tonic-gate  */
10810Sstevel@tonic-gate size_t
10820Sstevel@tonic-gate hat_get_mapped_size(hat_t *hat)
10830Sstevel@tonic-gate {
10840Sstevel@tonic-gate 	size_t total = 0;
10850Sstevel@tonic-gate 	int l;
10860Sstevel@tonic-gate 
10870Sstevel@tonic-gate 	for (l = 0; l <= mmu.max_page_level; l++)
10880Sstevel@tonic-gate 		total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
10890Sstevel@tonic-gate 
10900Sstevel@tonic-gate 	return (total);
10910Sstevel@tonic-gate }
10920Sstevel@tonic-gate 
10930Sstevel@tonic-gate /*
10940Sstevel@tonic-gate  * enable/disable collection of stats for hat.
10950Sstevel@tonic-gate  */
10960Sstevel@tonic-gate int
10970Sstevel@tonic-gate hat_stats_enable(hat_t *hat)
10980Sstevel@tonic-gate {
10990Sstevel@tonic-gate 	atomic_add_32(&hat->hat_stats, 1);
11000Sstevel@tonic-gate 	return (1);
11010Sstevel@tonic-gate }
11020Sstevel@tonic-gate 
11030Sstevel@tonic-gate void
11040Sstevel@tonic-gate hat_stats_disable(hat_t *hat)
11050Sstevel@tonic-gate {
11060Sstevel@tonic-gate 	atomic_add_32(&hat->hat_stats, -1);
11070Sstevel@tonic-gate }
11080Sstevel@tonic-gate 
11090Sstevel@tonic-gate /*
11100Sstevel@tonic-gate  * Utility to sync the ref/mod bits from a page table entry to the page_t
11110Sstevel@tonic-gate  * We must be holding the mapping list lock when this is called.
11120Sstevel@tonic-gate  */
11130Sstevel@tonic-gate static void
11140Sstevel@tonic-gate hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
11150Sstevel@tonic-gate {
11160Sstevel@tonic-gate 	uint_t	rm = 0;
11170Sstevel@tonic-gate 	pgcnt_t	pgcnt;
11180Sstevel@tonic-gate 
11190Sstevel@tonic-gate 	if (PTE_GET(pte, PT_NOSYNC))
11200Sstevel@tonic-gate 		return;
11210Sstevel@tonic-gate 
11220Sstevel@tonic-gate 	if (PTE_GET(pte, PT_REF))
11230Sstevel@tonic-gate 		rm |= P_REF;
11240Sstevel@tonic-gate 
11250Sstevel@tonic-gate 	if (PTE_GET(pte, PT_MOD))
11260Sstevel@tonic-gate 		rm |= P_MOD;
11270Sstevel@tonic-gate 
11280Sstevel@tonic-gate 	if (rm == 0)
11290Sstevel@tonic-gate 		return;
11300Sstevel@tonic-gate 
11310Sstevel@tonic-gate 	/*
11320Sstevel@tonic-gate 	 * sync to all constituent pages of a large page
11330Sstevel@tonic-gate 	 */
11340Sstevel@tonic-gate 	ASSERT(x86_hm_held(pp));
11350Sstevel@tonic-gate 	pgcnt = page_get_pagecnt(level);
11360Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
11370Sstevel@tonic-gate 	for (; pgcnt > 0; --pgcnt) {
11380Sstevel@tonic-gate 		/*
11390Sstevel@tonic-gate 		 * hat_page_demote() can't decrease
11400Sstevel@tonic-gate 		 * pszc below this mapping size
11410Sstevel@tonic-gate 		 * since this large mapping existed after we
11420Sstevel@tonic-gate 		 * took mlist lock.
11430Sstevel@tonic-gate 		 */
11440Sstevel@tonic-gate 		ASSERT(pp->p_szc >= level);
11450Sstevel@tonic-gate 		hat_page_setattr(pp, rm);
11460Sstevel@tonic-gate 		++pp;
11470Sstevel@tonic-gate 	}
11480Sstevel@tonic-gate }
11490Sstevel@tonic-gate 
11500Sstevel@tonic-gate /*
11510Sstevel@tonic-gate  * This the set of PTE bits for PFN, permissions and caching
11520Sstevel@tonic-gate  * that require a TLB flush (hat_demap) if changed on a HAT_LOAD_REMAP
11530Sstevel@tonic-gate  */
11540Sstevel@tonic-gate #define	PT_REMAP_BITS							\
11550Sstevel@tonic-gate 	(PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |		\
11560Sstevel@tonic-gate 	PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE)
11570Sstevel@tonic-gate 
1158*510Skchow #define	REMAPASSERT(EX)	if (!(EX)) panic("hati_pte_map: " #EX)
11590Sstevel@tonic-gate /*
11600Sstevel@tonic-gate  * Do the low-level work to get a mapping entered into a HAT's pagetables
11610Sstevel@tonic-gate  * and in the mapping list of the associated page_t.
11620Sstevel@tonic-gate  */
11630Sstevel@tonic-gate static void
11640Sstevel@tonic-gate hati_pte_map(
11650Sstevel@tonic-gate 	htable_t	*ht,
11660Sstevel@tonic-gate 	uint_t		entry,
11670Sstevel@tonic-gate 	page_t		*pp,
11680Sstevel@tonic-gate 	x86pte_t	pte,
11690Sstevel@tonic-gate 	int		flags,
11700Sstevel@tonic-gate 	void		*pte_ptr)
11710Sstevel@tonic-gate {
11720Sstevel@tonic-gate 	hat_t		*hat = ht->ht_hat;
11730Sstevel@tonic-gate 	x86pte_t	old_pte;
11740Sstevel@tonic-gate 	level_t		l = ht->ht_level;
11750Sstevel@tonic-gate 	hment_t		*hm;
11760Sstevel@tonic-gate 	uint_t		is_consist;
11770Sstevel@tonic-gate 
11780Sstevel@tonic-gate 	/*
11790Sstevel@tonic-gate 	 * Is this a consistant (ie. need mapping list lock) mapping?
11800Sstevel@tonic-gate 	 */
11810Sstevel@tonic-gate 	is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
11820Sstevel@tonic-gate 
11830Sstevel@tonic-gate 	/*
11840Sstevel@tonic-gate 	 * Track locked mapping count in the htable.  Do this first,
11850Sstevel@tonic-gate 	 * as we track locking even if there already is a mapping present.
11860Sstevel@tonic-gate 	 */
11870Sstevel@tonic-gate 	if ((flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat)
11880Sstevel@tonic-gate 		HTABLE_LOCK_INC(ht);
11890Sstevel@tonic-gate 
11900Sstevel@tonic-gate 	/*
11910Sstevel@tonic-gate 	 * Acquire the page's mapping list lock and get an hment to use.
11920Sstevel@tonic-gate 	 * Note that hment_prepare() might return NULL.
11930Sstevel@tonic-gate 	 */
11940Sstevel@tonic-gate 	if (is_consist) {
11950Sstevel@tonic-gate 		x86_hm_enter(pp);
11960Sstevel@tonic-gate 		hm = hment_prepare(ht, entry, pp);
11970Sstevel@tonic-gate 	}
11980Sstevel@tonic-gate 
11990Sstevel@tonic-gate 	/*
12000Sstevel@tonic-gate 	 * Set the new pte, retrieving the old one at the same time.
12010Sstevel@tonic-gate 	 */
12020Sstevel@tonic-gate 	old_pte = x86pte_set(ht, entry, pte, pte_ptr);
12030Sstevel@tonic-gate 
12040Sstevel@tonic-gate 	/*
12050Sstevel@tonic-gate 	 * If the mapping didn't change there is nothing more to do.
12060Sstevel@tonic-gate 	 */
12070Sstevel@tonic-gate 	if (PTE_EQUIV(pte, old_pte)) {
12080Sstevel@tonic-gate 		if (is_consist) {
12090Sstevel@tonic-gate 			x86_hm_exit(pp);
12100Sstevel@tonic-gate 			if (hm != NULL)
12110Sstevel@tonic-gate 				hment_free(hm);
12120Sstevel@tonic-gate 		}
12130Sstevel@tonic-gate 		return;
12140Sstevel@tonic-gate 	}
12150Sstevel@tonic-gate 
12160Sstevel@tonic-gate 	/*
12170Sstevel@tonic-gate 	 * Install a new mapping in the page's mapping list
12180Sstevel@tonic-gate 	 */
12190Sstevel@tonic-gate 	if (!PTE_ISVALID(old_pte)) {
12200Sstevel@tonic-gate 		if (is_consist) {
12210Sstevel@tonic-gate 			hment_assign(ht, entry, pp, hm);
12220Sstevel@tonic-gate 			x86_hm_exit(pp);
12230Sstevel@tonic-gate 		} else {
12240Sstevel@tonic-gate 			ASSERT(flags & HAT_LOAD_NOCONSIST);
12250Sstevel@tonic-gate 		}
12260Sstevel@tonic-gate 		HTABLE_INC(ht->ht_valid_cnt);
12270Sstevel@tonic-gate 		PGCNT_INC(hat, l);
12280Sstevel@tonic-gate 		return;
12290Sstevel@tonic-gate 	}
12300Sstevel@tonic-gate 
12310Sstevel@tonic-gate 	/*
12320Sstevel@tonic-gate 	 * Remap's are more complicated:
12330Sstevel@tonic-gate 	 *  - HAT_LOAD_REMAP must be specified if changing the pfn.
12340Sstevel@tonic-gate 	 *    We also require that NOCONSIST be specified.
12350Sstevel@tonic-gate 	 *  - Otherwise only permission or caching bits may change.
12360Sstevel@tonic-gate 	 */
12370Sstevel@tonic-gate 	if (!PTE_ISPAGE(old_pte, l))
12380Sstevel@tonic-gate 		panic("non-null/page mapping pte=" FMT_PTE, old_pte);
12390Sstevel@tonic-gate 
12400Sstevel@tonic-gate 	if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1241*510Skchow 		REMAPASSERT(flags & HAT_LOAD_REMAP);
1242*510Skchow 		REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
1243*510Skchow 		REMAPASSERT(PTE_GET(old_pte, PT_NOCONSIST));
1244*510Skchow 		REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
12450Sstevel@tonic-gate 		    pf_is_memory(PTE2PFN(pte, l)));
1246*510Skchow 		REMAPASSERT(!is_consist);
12470Sstevel@tonic-gate 	}
12480Sstevel@tonic-gate 
12490Sstevel@tonic-gate 	/*
12500Sstevel@tonic-gate 	 * We only let remaps change the bits for PFNs, permissions
12510Sstevel@tonic-gate 	 * or caching type.
12520Sstevel@tonic-gate 	 */
12530Sstevel@tonic-gate 	ASSERT(PTE_GET(old_pte, ~(PT_REMAP_BITS | PT_REF | PT_MOD)) ==
12540Sstevel@tonic-gate 	    PTE_GET(pte, ~PT_REMAP_BITS));
12550Sstevel@tonic-gate 
12560Sstevel@tonic-gate 	/*
12570Sstevel@tonic-gate 	 * A remap requires invalidating the TLBs, since remapping the
12580Sstevel@tonic-gate 	 * same PFN requires NOCONSIST, we don't have to sync R/M bits.
12590Sstevel@tonic-gate 	 */
12600Sstevel@tonic-gate 	hat_demap(hat, htable_e2va(ht, entry));
12610Sstevel@tonic-gate 
12620Sstevel@tonic-gate 	/*
12630Sstevel@tonic-gate 	 * We don't create any mapping list entries on a remap, so release
12640Sstevel@tonic-gate 	 * any allocated hment after we drop the mapping list lock.
12650Sstevel@tonic-gate 	 */
12660Sstevel@tonic-gate 	if (is_consist) {
12670Sstevel@tonic-gate 		x86_hm_exit(pp);
12680Sstevel@tonic-gate 		if (hm != NULL)
12690Sstevel@tonic-gate 			hment_free(hm);
12700Sstevel@tonic-gate 	}
12710Sstevel@tonic-gate }
12720Sstevel@tonic-gate 
12730Sstevel@tonic-gate /*
12740Sstevel@tonic-gate  * The t_hatdepth field is an 8-bit counter.  We use the lower seven bits
12750Sstevel@tonic-gate  * to track exactly how deep we are in the memload->kmem_alloc recursion.
12760Sstevel@tonic-gate  * If the depth is greater than 1, that indicates that we are performing a
12770Sstevel@tonic-gate  * hat operation to satisfy another hat operation.  To prevent infinite
12780Sstevel@tonic-gate  * recursion, we switch over to using pre-allocated "reserves" of htables
12790Sstevel@tonic-gate  * and hments.
12800Sstevel@tonic-gate  *
12810Sstevel@tonic-gate  * The uppermost bit is used to indicate that we are transitioning away
12820Sstevel@tonic-gate  * from being the reserves thread.  See hati_reserves_exit() for the
12830Sstevel@tonic-gate  * details.
12840Sstevel@tonic-gate  */
12850Sstevel@tonic-gate #define	EXITING_FLAG		(1 << 7)
12860Sstevel@tonic-gate #define	DEPTH_MASK		(~EXITING_FLAG)
12870Sstevel@tonic-gate #define	HAT_DEPTH(t)		((t)->t_hatdepth & DEPTH_MASK)
12880Sstevel@tonic-gate #define	EXITING_RESERVES(t)	((t)->t_hatdepth & EXITING_FLAG)
12890Sstevel@tonic-gate 
12900Sstevel@tonic-gate /*
12910Sstevel@tonic-gate  * Access to reserves for HAT_NO_KALLOC is single threaded.
12920Sstevel@tonic-gate  * If someone else is in the reserves, we'll politely wait for them
12930Sstevel@tonic-gate  * to finish. This keeps normal hat_memload()s from eating up
12940Sstevel@tonic-gate  * the mappings needed to replenish the reserve.
12950Sstevel@tonic-gate  */
12960Sstevel@tonic-gate static void
12970Sstevel@tonic-gate hati_reserves_enter(uint_t kmem_for_hat)
12980Sstevel@tonic-gate {
12990Sstevel@tonic-gate 	/*
13000Sstevel@tonic-gate 	 * 64 is an arbitrary number to catch serious problems.  I'm not
13010Sstevel@tonic-gate 	 * sure what the absolute maximum depth is, but it should be
13020Sstevel@tonic-gate 	 * substantially less than this.
13030Sstevel@tonic-gate 	 */
13040Sstevel@tonic-gate 	ASSERT(HAT_DEPTH(curthread) < 64);
13050Sstevel@tonic-gate 
13060Sstevel@tonic-gate 	/*
13070Sstevel@tonic-gate 	 * If we are doing a memload to satisfy a kmem operation, we enter
13080Sstevel@tonic-gate 	 * the reserves immediately; we don't wait to recurse to a second
13090Sstevel@tonic-gate 	 * level of memload.
13100Sstevel@tonic-gate 	 */
13110Sstevel@tonic-gate 	ASSERT(kmem_for_hat < 2);
13120Sstevel@tonic-gate 	curthread->t_hatdepth += (1 + kmem_for_hat);
13130Sstevel@tonic-gate 
13140Sstevel@tonic-gate 	if (hat_reserves_thread == curthread || use_boot_reserve)
13150Sstevel@tonic-gate 		return;
13160Sstevel@tonic-gate 
13170Sstevel@tonic-gate 	if (HAT_DEPTH(curthread) > 1 || hat_reserves_thread != NULL) {
13180Sstevel@tonic-gate 		mutex_enter(&hat_reserves_lock);
13190Sstevel@tonic-gate 		while (hat_reserves_thread != NULL)
13200Sstevel@tonic-gate 			cv_wait(&hat_reserves_cv, &hat_reserves_lock);
13210Sstevel@tonic-gate 
13220Sstevel@tonic-gate 		if (HAT_DEPTH(curthread) > 1)
13230Sstevel@tonic-gate 			hat_reserves_thread = curthread;
13240Sstevel@tonic-gate 
13250Sstevel@tonic-gate 		mutex_exit(&hat_reserves_lock);
13260Sstevel@tonic-gate 	}
13270Sstevel@tonic-gate }
13280Sstevel@tonic-gate 
13290Sstevel@tonic-gate /*
13300Sstevel@tonic-gate  * If we are the reserves_thread and we've finally finished with all our
13310Sstevel@tonic-gate  * memloads (ie. no longer doing hat slabs), we can release our use of the
13320Sstevel@tonic-gate  * reserve.
13330Sstevel@tonic-gate  */
13340Sstevel@tonic-gate static void
13350Sstevel@tonic-gate hati_reserves_exit(uint_t kmem_for_hat)
13360Sstevel@tonic-gate {
13370Sstevel@tonic-gate 	ASSERT(kmem_for_hat < 2);
13380Sstevel@tonic-gate 	curthread->t_hatdepth -= (1 + kmem_for_hat);
13390Sstevel@tonic-gate 
13400Sstevel@tonic-gate 	/*
13410Sstevel@tonic-gate 	 * Simple case: either we are not the reserves thread, or we are
13420Sstevel@tonic-gate 	 * the reserves thread and we are nested deeply enough that we
13430Sstevel@tonic-gate 	 * should still be the reserves thread.
13440Sstevel@tonic-gate 	 *
13450Sstevel@tonic-gate 	 * Note: we may not become the reserves thread after we recursively
13460Sstevel@tonic-gate 	 * enter our second HAT routine, but we don't stop being the
13470Sstevel@tonic-gate 	 * reserves thread until we exit the toplevel HAT routine.  This is
13480Sstevel@tonic-gate 	 * to work around vmem's inability to determine when an allocation
13490Sstevel@tonic-gate 	 * should be satisfied from the hat_memload arena, which can lead
13500Sstevel@tonic-gate 	 * to an infinite loop of memload->vmem_populate->memload->.
13510Sstevel@tonic-gate 	 */
13520Sstevel@tonic-gate 	if (curthread != hat_reserves_thread || HAT_DEPTH(curthread) > 0 ||
13530Sstevel@tonic-gate 	    use_boot_reserve)
13540Sstevel@tonic-gate 		return;
13550Sstevel@tonic-gate 
13560Sstevel@tonic-gate 	mutex_enter(&hat_reserves_lock);
13570Sstevel@tonic-gate 	ASSERT(hat_reserves_thread == curthread);
13580Sstevel@tonic-gate 	hat_reserves_thread = NULL;
13590Sstevel@tonic-gate 	cv_broadcast(&hat_reserves_cv);
13600Sstevel@tonic-gate 	mutex_exit(&hat_reserves_lock);
13610Sstevel@tonic-gate 
13620Sstevel@tonic-gate 	/*
13630Sstevel@tonic-gate 	 * As we leave the reserves, we want to be sure the reserve lists
13640Sstevel@tonic-gate 	 * aren't overstocked.  Freeing excess reserves requires that we
13650Sstevel@tonic-gate 	 * call kmem_free(), which may require additional allocations,
13660Sstevel@tonic-gate 	 * causing us to re-enter the reserves.  To avoid infinite
13670Sstevel@tonic-gate 	 * recursion, we only try to adjust reserves at the very top level.
13680Sstevel@tonic-gate 	 */
13690Sstevel@tonic-gate 	if (!kmem_for_hat && !EXITING_RESERVES(curthread)) {
13700Sstevel@tonic-gate 		curthread->t_hatdepth |= EXITING_FLAG;
13710Sstevel@tonic-gate 		htable_adjust_reserve();
13720Sstevel@tonic-gate 		hment_adjust_reserve();
13730Sstevel@tonic-gate 		curthread->t_hatdepth &= (~EXITING_FLAG);
13740Sstevel@tonic-gate 	}
13750Sstevel@tonic-gate 
13760Sstevel@tonic-gate 	/*
13770Sstevel@tonic-gate 	 * just in case something went wrong in doing adjust reserves
13780Sstevel@tonic-gate 	 */
13790Sstevel@tonic-gate 	ASSERT(hat_reserves_thread != curthread);
13800Sstevel@tonic-gate }
13810Sstevel@tonic-gate 
13820Sstevel@tonic-gate /*
13830Sstevel@tonic-gate  * Internal routine to load a single page table entry.
13840Sstevel@tonic-gate  */
13850Sstevel@tonic-gate static void
13860Sstevel@tonic-gate hati_load_common(
13870Sstevel@tonic-gate 	hat_t		*hat,
13880Sstevel@tonic-gate 	uintptr_t	va,
13890Sstevel@tonic-gate 	page_t		*pp,
13900Sstevel@tonic-gate 	uint_t		attr,
13910Sstevel@tonic-gate 	uint_t		flags,
13920Sstevel@tonic-gate 	level_t		level,
13930Sstevel@tonic-gate 	pfn_t		pfn)
13940Sstevel@tonic-gate {
13950Sstevel@tonic-gate 	htable_t	*ht;
13960Sstevel@tonic-gate 	uint_t		entry;
13970Sstevel@tonic-gate 	x86pte_t	pte;
13980Sstevel@tonic-gate 	uint_t		kmem_for_hat = (flags & HAT_NO_KALLOC) ? 1 : 0;
13990Sstevel@tonic-gate 
14000Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
14010Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
14020Sstevel@tonic-gate 
14030Sstevel@tonic-gate 	if (flags & HAT_LOAD_SHARE)
14040Sstevel@tonic-gate 		hat->hat_flags |= HAT_SHARED;
14050Sstevel@tonic-gate 
14060Sstevel@tonic-gate 	/*
14070Sstevel@tonic-gate 	 * Find the page table that maps this page if it already exists.
14080Sstevel@tonic-gate 	 */
14090Sstevel@tonic-gate 	ht = htable_lookup(hat, va, level);
14100Sstevel@tonic-gate 
14110Sstevel@tonic-gate 	/*
14120Sstevel@tonic-gate 	 * All threads go through hati_reserves_enter() to at least wait
14130Sstevel@tonic-gate 	 * for any existing reserves user to finish. This helps reduce
14140Sstevel@tonic-gate 	 * pressure on the reserves. In addition, if this thread needs
14150Sstevel@tonic-gate 	 * to become the new reserve user it will.
14160Sstevel@tonic-gate 	 */
14170Sstevel@tonic-gate 	hati_reserves_enter(kmem_for_hat);
14180Sstevel@tonic-gate 
14190Sstevel@tonic-gate 	ASSERT(HAT_DEPTH(curthread) == 1 || va >= kernelbase);
14200Sstevel@tonic-gate 
14210Sstevel@tonic-gate 	/*
14220Sstevel@tonic-gate 	 * Kernel memloads for HAT data should never use hments!
14230Sstevel@tonic-gate 	 * If it did that would seriously complicate the reserves system, since
14240Sstevel@tonic-gate 	 * hment_alloc() would need to know about HAT_NO_KALLOC.
14250Sstevel@tonic-gate 	 *
14260Sstevel@tonic-gate 	 * We also must have HAT_LOAD_NOCONSIST if page_t is NULL.
14270Sstevel@tonic-gate 	 */
14280Sstevel@tonic-gate 	if (HAT_DEPTH(curthread) > 1 || pp == NULL)
14290Sstevel@tonic-gate 		flags |= HAT_LOAD_NOCONSIST;
14300Sstevel@tonic-gate 
14310Sstevel@tonic-gate 	if (ht == NULL) {
14320Sstevel@tonic-gate 		ht = htable_create(hat, va, level, NULL);
14330Sstevel@tonic-gate 		ASSERT(ht != NULL);
14340Sstevel@tonic-gate 	}
14350Sstevel@tonic-gate 	entry = htable_va2entry(va, ht);
14360Sstevel@tonic-gate 
14370Sstevel@tonic-gate 	/*
14380Sstevel@tonic-gate 	 * a bunch of paranoid error checking
14390Sstevel@tonic-gate 	 */
14400Sstevel@tonic-gate 	ASSERT(ht->ht_busy > 0);
14410Sstevel@tonic-gate 	if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
14420Sstevel@tonic-gate 		panic("hati_load_common: bad htable %p, va %p", ht, (void *)va);
14430Sstevel@tonic-gate 	ASSERT(ht->ht_level == level);
14440Sstevel@tonic-gate 
14450Sstevel@tonic-gate 	/*
14460Sstevel@tonic-gate 	 * construct the new PTE
14470Sstevel@tonic-gate 	 */
14480Sstevel@tonic-gate 	if (hat == kas.a_hat)
14490Sstevel@tonic-gate 		attr &= ~PROT_USER;
14500Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, level, flags);
14510Sstevel@tonic-gate 	if (hat == kas.a_hat && va >= kernelbase)
14520Sstevel@tonic-gate 		PTE_SET(pte, mmu.pt_global);
14530Sstevel@tonic-gate 
14540Sstevel@tonic-gate 	/*
14550Sstevel@tonic-gate 	 * establish the mapping
14560Sstevel@tonic-gate 	 */
14570Sstevel@tonic-gate 	hati_pte_map(ht, entry, pp, pte, flags, NULL);
14580Sstevel@tonic-gate 
14590Sstevel@tonic-gate 	/*
14600Sstevel@tonic-gate 	 * release the htable and any reserves
14610Sstevel@tonic-gate 	 */
14620Sstevel@tonic-gate 	htable_release(ht);
14630Sstevel@tonic-gate 	hati_reserves_exit(kmem_for_hat);
14640Sstevel@tonic-gate }
14650Sstevel@tonic-gate 
14660Sstevel@tonic-gate /*
14670Sstevel@tonic-gate  * special case of hat_memload to deal with some kernel addrs for performance
14680Sstevel@tonic-gate  */
14690Sstevel@tonic-gate static void
14700Sstevel@tonic-gate hat_kmap_load(
14710Sstevel@tonic-gate 	caddr_t		addr,
14720Sstevel@tonic-gate 	page_t		*pp,
14730Sstevel@tonic-gate 	uint_t		attr,
14740Sstevel@tonic-gate 	uint_t		flags)
14750Sstevel@tonic-gate {
14760Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
14770Sstevel@tonic-gate 	x86pte_t	pte;
14780Sstevel@tonic-gate 	pfn_t		pfn = page_pptonum(pp);
14790Sstevel@tonic-gate 	pgcnt_t		pg_off = mmu_btop(va - mmu.kmap_addr);
14800Sstevel@tonic-gate 	htable_t	*ht;
14810Sstevel@tonic-gate 	uint_t		entry;
14820Sstevel@tonic-gate 	void		*pte_ptr;
14830Sstevel@tonic-gate 
14840Sstevel@tonic-gate 	/*
14850Sstevel@tonic-gate 	 * construct the requested PTE
14860Sstevel@tonic-gate 	 */
14870Sstevel@tonic-gate 	attr &= ~PROT_USER;
14880Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
14890Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, 0, flags);
14900Sstevel@tonic-gate 	PTE_SET(pte, mmu.pt_global);
14910Sstevel@tonic-gate 
14920Sstevel@tonic-gate 	/*
14930Sstevel@tonic-gate 	 * Figure out the pte_ptr and htable and use common code to finish up
14940Sstevel@tonic-gate 	 */
14950Sstevel@tonic-gate 	if (mmu.pae_hat)
14960Sstevel@tonic-gate 		pte_ptr = mmu.kmap_ptes + pg_off;
14970Sstevel@tonic-gate 	else
14980Sstevel@tonic-gate 		pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
14990Sstevel@tonic-gate 	ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
15000Sstevel@tonic-gate 	    LEVEL_SHIFT(1)];
15010Sstevel@tonic-gate 	entry = htable_va2entry(va, ht);
15020Sstevel@tonic-gate 	hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
15030Sstevel@tonic-gate }
15040Sstevel@tonic-gate 
15050Sstevel@tonic-gate /*
15060Sstevel@tonic-gate  * hat_memload() - load a translation to the given page struct
15070Sstevel@tonic-gate  *
15080Sstevel@tonic-gate  * Flags for hat_memload/hat_devload/hat_*attr.
15090Sstevel@tonic-gate  *
15100Sstevel@tonic-gate  * 	HAT_LOAD	Default flags to load a translation to the page.
15110Sstevel@tonic-gate  *
15120Sstevel@tonic-gate  * 	HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
15130Sstevel@tonic-gate  *			and hat_devload().
15140Sstevel@tonic-gate  *
15150Sstevel@tonic-gate  *	HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
15160Sstevel@tonic-gate  *			sets PT_NOCONSIST (soft bit)
15170Sstevel@tonic-gate  *
15180Sstevel@tonic-gate  *	HAT_LOAD_SHARE	A flag to hat_memload() to indicate h/w page tables
15190Sstevel@tonic-gate  *			that map some user pages (not kas) is shared by more
15200Sstevel@tonic-gate  *			than one process (eg. ISM).
15210Sstevel@tonic-gate  *
15220Sstevel@tonic-gate  *	HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
15230Sstevel@tonic-gate  *
15240Sstevel@tonic-gate  *	HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
15250Sstevel@tonic-gate  *			point, it's setting up mapping to allocate internal
15260Sstevel@tonic-gate  *			hat layer data structures.  This flag forces hat layer
15270Sstevel@tonic-gate  *			to tap its reserves in order to prevent infinite
15280Sstevel@tonic-gate  *			recursion.
15290Sstevel@tonic-gate  *
15300Sstevel@tonic-gate  * The following is a protection attribute (like PROT_READ, etc.)
15310Sstevel@tonic-gate  *
15320Sstevel@tonic-gate  *	HAT_NOSYNC	set PT_NOSYNC (soft bit) - this mapping's ref/mod bits
15330Sstevel@tonic-gate  *			are never cleared.
15340Sstevel@tonic-gate  *
15350Sstevel@tonic-gate  * Installing new valid PTE's and creation of the mapping list
15360Sstevel@tonic-gate  * entry are controlled under the same lock. It's derived from the
15370Sstevel@tonic-gate  * page_t being mapped.
15380Sstevel@tonic-gate  */
15390Sstevel@tonic-gate static uint_t supported_memload_flags =
15400Sstevel@tonic-gate 	HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
15410Sstevel@tonic-gate 	HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
15420Sstevel@tonic-gate 
15430Sstevel@tonic-gate void
15440Sstevel@tonic-gate hat_memload(
15450Sstevel@tonic-gate 	hat_t		*hat,
15460Sstevel@tonic-gate 	caddr_t		addr,
15470Sstevel@tonic-gate 	page_t		*pp,
15480Sstevel@tonic-gate 	uint_t		attr,
15490Sstevel@tonic-gate 	uint_t		flags)
15500Sstevel@tonic-gate {
15510Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
15520Sstevel@tonic-gate 	level_t		level = 0;
15530Sstevel@tonic-gate 	pfn_t		pfn = page_pptonum(pp);
15540Sstevel@tonic-gate 
15550Sstevel@tonic-gate 	HATIN(hat_memload, hat, addr, (size_t)MMU_PAGESIZE);
15560Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
15570Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || va <= kernelbase);
15580Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
15590Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
15600Sstevel@tonic-gate 	ASSERT((flags & supported_memload_flags) == flags);
15610Sstevel@tonic-gate 
15620Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
15630Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
15640Sstevel@tonic-gate 
15650Sstevel@tonic-gate 	/*
15660Sstevel@tonic-gate 	 * kernel address special case for performance.
15670Sstevel@tonic-gate 	 */
15680Sstevel@tonic-gate 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
15690Sstevel@tonic-gate 		ASSERT(hat == kas.a_hat);
15700Sstevel@tonic-gate 		hat_kmap_load(addr, pp, attr, flags);
15710Sstevel@tonic-gate 		return;
15720Sstevel@tonic-gate 	}
15730Sstevel@tonic-gate 
15740Sstevel@tonic-gate 	/*
15750Sstevel@tonic-gate 	 * This is used for memory with normal caching enabled, so
15760Sstevel@tonic-gate 	 * always set HAT_STORECACHING_OK.
15770Sstevel@tonic-gate 	 */
15780Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
15790Sstevel@tonic-gate 	hati_load_common(hat, va, pp, attr, flags, level, pfn);
15800Sstevel@tonic-gate 	HATOUT(hat_memload, hat, addr);
15810Sstevel@tonic-gate }
15820Sstevel@tonic-gate 
15830Sstevel@tonic-gate /*
15840Sstevel@tonic-gate  * Load the given array of page structs using large pages when possible
15850Sstevel@tonic-gate  */
15860Sstevel@tonic-gate void
15870Sstevel@tonic-gate hat_memload_array(
15880Sstevel@tonic-gate 	hat_t		*hat,
15890Sstevel@tonic-gate 	caddr_t		addr,
15900Sstevel@tonic-gate 	size_t		len,
15910Sstevel@tonic-gate 	page_t		**pages,
15920Sstevel@tonic-gate 	uint_t		attr,
15930Sstevel@tonic-gate 	uint_t		flags)
15940Sstevel@tonic-gate {
15950Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
15960Sstevel@tonic-gate 	uintptr_t	eaddr = va + len;
15970Sstevel@tonic-gate 	level_t		level;
15980Sstevel@tonic-gate 	size_t		pgsize;
15990Sstevel@tonic-gate 	pgcnt_t		pgindx = 0;
16000Sstevel@tonic-gate 	pfn_t		pfn;
16010Sstevel@tonic-gate 	pgcnt_t		i;
16020Sstevel@tonic-gate 
16030Sstevel@tonic-gate 	HATIN(hat_memload_array, hat, addr, len);
16040Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
16050Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || va + len <= kernelbase);
16060Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
16070Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
16080Sstevel@tonic-gate 	ASSERT((flags & supported_memload_flags) == flags);
16090Sstevel@tonic-gate 
16100Sstevel@tonic-gate 	/*
16110Sstevel@tonic-gate 	 * memload is used for memory with full caching enabled, so
16120Sstevel@tonic-gate 	 * set HAT_STORECACHING_OK.
16130Sstevel@tonic-gate 	 */
16140Sstevel@tonic-gate 	attr |= HAT_STORECACHING_OK;
16150Sstevel@tonic-gate 
16160Sstevel@tonic-gate 	/*
16170Sstevel@tonic-gate 	 * handle all pages using largest possible pagesize
16180Sstevel@tonic-gate 	 */
16190Sstevel@tonic-gate 	while (va < eaddr) {
16200Sstevel@tonic-gate 		/*
16210Sstevel@tonic-gate 		 * decide what level mapping to use (ie. pagesize)
16220Sstevel@tonic-gate 		 */
16230Sstevel@tonic-gate 		pfn = page_pptonum(pages[pgindx]);
16240Sstevel@tonic-gate 		for (level = mmu.max_page_level; ; --level) {
16250Sstevel@tonic-gate 			pgsize = LEVEL_SIZE(level);
16260Sstevel@tonic-gate 			if (level == 0)
16270Sstevel@tonic-gate 				break;
16280Sstevel@tonic-gate 			if (!IS_P2ALIGNED(va, pgsize) ||
16290Sstevel@tonic-gate 			    (eaddr - va) < pgsize ||
16300Sstevel@tonic-gate 			    !IS_P2ALIGNED(pfn << MMU_PAGESHIFT, pgsize))
16310Sstevel@tonic-gate 				continue;
16320Sstevel@tonic-gate 
16330Sstevel@tonic-gate 			/*
16340Sstevel@tonic-gate 			 * To use a large mapping of this size, all the
16350Sstevel@tonic-gate 			 * pages we are passed must be sequential subpages
16360Sstevel@tonic-gate 			 * of the large page.
16370Sstevel@tonic-gate 			 * hat_page_demote() can't change p_szc because
16380Sstevel@tonic-gate 			 * all pages are locked.
16390Sstevel@tonic-gate 			 */
16400Sstevel@tonic-gate 			if (pages[pgindx]->p_szc >= level) {
16410Sstevel@tonic-gate 				for (i = 0; i < mmu_btop(pgsize); ++i) {
16420Sstevel@tonic-gate 					if (pfn + i !=
16430Sstevel@tonic-gate 					    page_pptonum(pages[pgindx + i]))
16440Sstevel@tonic-gate 						break;
16450Sstevel@tonic-gate 					ASSERT(pages[pgindx + i]->p_szc >=
16460Sstevel@tonic-gate 					    level);
16470Sstevel@tonic-gate 					ASSERT(pages[pgindx] + i ==
16480Sstevel@tonic-gate 					    pages[pgindx + i]);
16490Sstevel@tonic-gate 				}
16500Sstevel@tonic-gate 				if (i == mmu_btop(pgsize))
16510Sstevel@tonic-gate 					break;
16520Sstevel@tonic-gate 			}
16530Sstevel@tonic-gate 		}
16540Sstevel@tonic-gate 
16550Sstevel@tonic-gate 		/*
16560Sstevel@tonic-gate 		 * Shared page tables for DISM might have a pre-existing
16570Sstevel@tonic-gate 		 * level 0 page table that wasn't unlinked from all the
16580Sstevel@tonic-gate 		 * sharing hats. If we hit this for a large page, back off
16590Sstevel@tonic-gate 		 * to using level 0 pages.
16600Sstevel@tonic-gate 		 *
16610Sstevel@tonic-gate 		 * This can't be made better (ie. use large pages) until we
16620Sstevel@tonic-gate 		 * track all the htable's sharing and rewrite hat_pageunload().
16630Sstevel@tonic-gate 		 * Note that would cost a pointer in htable_t for a rare case.
16640Sstevel@tonic-gate 		 *
16650Sstevel@tonic-gate 		 * Since the 32 bit kernel caches empty page tables, check
16660Sstevel@tonic-gate 		 * the kernel too.
16670Sstevel@tonic-gate 		 */
16680Sstevel@tonic-gate 		if ((hat == kas.a_hat || (hat->hat_flags & HAT_SHARED)) &&
16690Sstevel@tonic-gate 		    level > 0) {
16700Sstevel@tonic-gate 			htable_t *lower;
16710Sstevel@tonic-gate 
16720Sstevel@tonic-gate 			lower = htable_getpte(hat, va, NULL, NULL, level - 1);
16730Sstevel@tonic-gate 			if (lower != NULL) {
16740Sstevel@tonic-gate 				level = 0;
16750Sstevel@tonic-gate 				pgsize = LEVEL_SIZE(0);
16760Sstevel@tonic-gate 				htable_release(lower);
16770Sstevel@tonic-gate 			}
16780Sstevel@tonic-gate 		}
16790Sstevel@tonic-gate 
16800Sstevel@tonic-gate 		/*
16810Sstevel@tonic-gate 		 * load this page mapping
16820Sstevel@tonic-gate 		 */
16830Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(va));
16840Sstevel@tonic-gate 		hati_load_common(hat, va, pages[pgindx], attr, flags,
16850Sstevel@tonic-gate 		    level, pfn);
16860Sstevel@tonic-gate 
16870Sstevel@tonic-gate 		/*
16880Sstevel@tonic-gate 		 * move to next page
16890Sstevel@tonic-gate 		 */
16900Sstevel@tonic-gate 		va += pgsize;
16910Sstevel@tonic-gate 		pgindx += mmu_btop(pgsize);
16920Sstevel@tonic-gate 	}
16930Sstevel@tonic-gate 	HATOUT(hat_memload_array, hat, addr);
16940Sstevel@tonic-gate }
16950Sstevel@tonic-gate 
16960Sstevel@tonic-gate /*
16970Sstevel@tonic-gate  * void hat_devload(hat, addr, len, pf, attr, flags)
16980Sstevel@tonic-gate  *	load/lock the given page frame number
16990Sstevel@tonic-gate  *
17000Sstevel@tonic-gate  * Advisory ordering attributes. Apply only to device mappings.
17010Sstevel@tonic-gate  *
17020Sstevel@tonic-gate  * HAT_STRICTORDER: the CPU must issue the references in order, as the
17030Sstevel@tonic-gate  *	programmer specified.  This is the default.
17040Sstevel@tonic-gate  * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
17050Sstevel@tonic-gate  *	of reordering; store or load with store or load).
17060Sstevel@tonic-gate  * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
17070Sstevel@tonic-gate  *	to consecutive locations (for example, turn two consecutive byte
17080Sstevel@tonic-gate  *	stores into one halfword store), and it may batch individual loads
17090Sstevel@tonic-gate  *	(for example, turn two consecutive byte loads into one halfword load).
17100Sstevel@tonic-gate  *	This also implies re-ordering.
17110Sstevel@tonic-gate  * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
17120Sstevel@tonic-gate  *	until another store occurs.  The default is to fetch new data
17130Sstevel@tonic-gate  *	on every load.  This also implies merging.
17140Sstevel@tonic-gate  * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
17150Sstevel@tonic-gate  *	the device (perhaps with other data) at a later time.  The default is
17160Sstevel@tonic-gate  *	to push the data right away.  This also implies load caching.
17170Sstevel@tonic-gate  *
17180Sstevel@tonic-gate  * Equivalent of hat_memload(), but can be used for device memory where
17190Sstevel@tonic-gate  * there are no page_t's and we support additional flags (write merging, etc).
17200Sstevel@tonic-gate  * Note that we can have large page mappings with this interface.
17210Sstevel@tonic-gate  */
17220Sstevel@tonic-gate int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
17230Sstevel@tonic-gate 	HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
17240Sstevel@tonic-gate 	HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
17250Sstevel@tonic-gate 
17260Sstevel@tonic-gate void
17270Sstevel@tonic-gate hat_devload(
17280Sstevel@tonic-gate 	hat_t		*hat,
17290Sstevel@tonic-gate 	caddr_t		addr,
17300Sstevel@tonic-gate 	size_t		len,
17310Sstevel@tonic-gate 	pfn_t		pfn,
17320Sstevel@tonic-gate 	uint_t		attr,
17330Sstevel@tonic-gate 	int		flags)
17340Sstevel@tonic-gate {
17350Sstevel@tonic-gate 	uintptr_t	va = ALIGN2PAGE(addr);
17360Sstevel@tonic-gate 	uintptr_t	eva = va + len;
17370Sstevel@tonic-gate 	level_t		level;
17380Sstevel@tonic-gate 	size_t		pgsize;
17390Sstevel@tonic-gate 	page_t		*pp;
17400Sstevel@tonic-gate 	int		f;	/* per PTE copy of flags  - maybe modified */
17410Sstevel@tonic-gate 	uint_t		a;	/* per PTE copy of attr */
17420Sstevel@tonic-gate 
17430Sstevel@tonic-gate 	HATIN(hat_devload, hat, addr, len);
17440Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
17450Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || eva <= kernelbase);
17460Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
17470Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
17480Sstevel@tonic-gate 	ASSERT((flags & supported_devload_flags) == flags);
17490Sstevel@tonic-gate 
17500Sstevel@tonic-gate 	/*
17510Sstevel@tonic-gate 	 * handle all pages
17520Sstevel@tonic-gate 	 */
17530Sstevel@tonic-gate 	while (va < eva) {
17540Sstevel@tonic-gate 
17550Sstevel@tonic-gate 		/*
17560Sstevel@tonic-gate 		 * decide what level mapping to use (ie. pagesize)
17570Sstevel@tonic-gate 		 */
17580Sstevel@tonic-gate 		for (level = mmu.max_page_level; ; --level) {
17590Sstevel@tonic-gate 			pgsize = LEVEL_SIZE(level);
17600Sstevel@tonic-gate 			if (level == 0)
17610Sstevel@tonic-gate 				break;
17620Sstevel@tonic-gate 			if (IS_P2ALIGNED(va, pgsize) &&
17630Sstevel@tonic-gate 			    (eva - va) >= pgsize &&
17640Sstevel@tonic-gate 			    IS_P2ALIGNED(pfn, mmu_btop(pgsize)))
17650Sstevel@tonic-gate 				break;
17660Sstevel@tonic-gate 		}
17670Sstevel@tonic-gate 
17680Sstevel@tonic-gate 		/*
17690Sstevel@tonic-gate 		 * Some kernel addresses have permanently existing page tables,
17700Sstevel@tonic-gate 		 * so be sure to use a compatible pagesize.
17710Sstevel@tonic-gate 		 */
17720Sstevel@tonic-gate 		if (hat == kas.a_hat && level > 0) {
17730Sstevel@tonic-gate 			htable_t *lower;
17740Sstevel@tonic-gate 
17750Sstevel@tonic-gate 			lower = htable_getpte(hat, va, NULL, NULL, level - 1);
17760Sstevel@tonic-gate 			if (lower != NULL) {
17770Sstevel@tonic-gate 				level = 0;
17780Sstevel@tonic-gate 				pgsize = LEVEL_SIZE(0);
17790Sstevel@tonic-gate 				htable_release(lower);
17800Sstevel@tonic-gate 			}
17810Sstevel@tonic-gate 		}
17820Sstevel@tonic-gate 
17830Sstevel@tonic-gate 		/*
17840Sstevel@tonic-gate 		 * If it is memory get page_t and allow caching (this happens
17850Sstevel@tonic-gate 		 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
17860Sstevel@tonic-gate 		 * to override that. If we don't have a page_t, make sure
17870Sstevel@tonic-gate 		 * NOCONSIST is set.
17880Sstevel@tonic-gate 		 */
17890Sstevel@tonic-gate 		a = attr;
17900Sstevel@tonic-gate 		f = flags;
17910Sstevel@tonic-gate 		if (pf_is_memory(pfn)) {
17920Sstevel@tonic-gate 			if (!(a & HAT_PLAT_NOCACHE))
17930Sstevel@tonic-gate 				a |= HAT_STORECACHING_OK;
17940Sstevel@tonic-gate 
17950Sstevel@tonic-gate 			if (f & HAT_LOAD_NOCONSIST)
17960Sstevel@tonic-gate 				pp = NULL;
17970Sstevel@tonic-gate 			else
17980Sstevel@tonic-gate 				pp = page_numtopp_nolock(pfn);
17990Sstevel@tonic-gate 		} else {
18000Sstevel@tonic-gate 			pp = NULL;
18010Sstevel@tonic-gate 			f |= HAT_LOAD_NOCONSIST;
18020Sstevel@tonic-gate 		}
18030Sstevel@tonic-gate 
18040Sstevel@tonic-gate 		/*
18050Sstevel@tonic-gate 		 * load this page mapping
18060Sstevel@tonic-gate 		 */
18070Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(va));
18080Sstevel@tonic-gate 		hati_load_common(hat, va, pp, a, f, level, pfn);
18090Sstevel@tonic-gate 
18100Sstevel@tonic-gate 		/*
18110Sstevel@tonic-gate 		 * move to next page
18120Sstevel@tonic-gate 		 */
18130Sstevel@tonic-gate 		va += pgsize;
18140Sstevel@tonic-gate 		pfn += mmu_btop(pgsize);
18150Sstevel@tonic-gate 	}
18160Sstevel@tonic-gate 	HATOUT(hat_devload, hat, addr);
18170Sstevel@tonic-gate }
18180Sstevel@tonic-gate 
18190Sstevel@tonic-gate /*
18200Sstevel@tonic-gate  * void hat_unlock(hat, addr, len)
18210Sstevel@tonic-gate  *	unlock the mappings to a given range of addresses
18220Sstevel@tonic-gate  *
18230Sstevel@tonic-gate  * Locks are tracked by ht_lock_cnt in the htable.
18240Sstevel@tonic-gate  */
18250Sstevel@tonic-gate void
18260Sstevel@tonic-gate hat_unlock(hat_t *hat, caddr_t addr, size_t len)
18270Sstevel@tonic-gate {
18280Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
18290Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
18300Sstevel@tonic-gate 	htable_t	*ht = NULL;
18310Sstevel@tonic-gate 
18320Sstevel@tonic-gate 	/*
18330Sstevel@tonic-gate 	 * kernel entries are always locked, we don't track lock counts
18340Sstevel@tonic-gate 	 */
18350Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || eaddr <= kernelbase);
18360Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
18370Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
18380Sstevel@tonic-gate 	if (hat == kas.a_hat)
18390Sstevel@tonic-gate 		return;
18400Sstevel@tonic-gate 	if (eaddr > _userlimit)
18410Sstevel@tonic-gate 		panic("hat_unlock() address out of range - above _userlimit");
18420Sstevel@tonic-gate 
18430Sstevel@tonic-gate 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
18440Sstevel@tonic-gate 	while (vaddr < eaddr) {
18450Sstevel@tonic-gate 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
18460Sstevel@tonic-gate 		if (ht == NULL)
18470Sstevel@tonic-gate 			break;
18480Sstevel@tonic-gate 
18490Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
18500Sstevel@tonic-gate 
18510Sstevel@tonic-gate 		if (ht->ht_lock_cnt < 1)
18520Sstevel@tonic-gate 			panic("hat_unlock(): lock_cnt < 1, "
18530Sstevel@tonic-gate 			    "htable=%p, vaddr=%p\n", ht, (caddr_t)vaddr);
18540Sstevel@tonic-gate 		HTABLE_LOCK_DEC(ht);
18550Sstevel@tonic-gate 
18560Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
18570Sstevel@tonic-gate 	}
18580Sstevel@tonic-gate 	if (ht)
18590Sstevel@tonic-gate 		htable_release(ht);
18600Sstevel@tonic-gate }
18610Sstevel@tonic-gate 
18620Sstevel@tonic-gate /*
18630Sstevel@tonic-gate  * Cross call service routine to demap a virtual page on
18640Sstevel@tonic-gate  * the current CPU or flush all mappings in TLB.
18650Sstevel@tonic-gate  */
18660Sstevel@tonic-gate /*ARGSUSED*/
18670Sstevel@tonic-gate static int
18680Sstevel@tonic-gate hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
18690Sstevel@tonic-gate {
18700Sstevel@tonic-gate 	hat_t	*hat = (hat_t *)a1;
18710Sstevel@tonic-gate 	caddr_t	addr = (caddr_t)a2;
18720Sstevel@tonic-gate 
18730Sstevel@tonic-gate 	/*
18740Sstevel@tonic-gate 	 * If the target hat isn't the kernel and this CPU isn't operating
18750Sstevel@tonic-gate 	 * in the target hat, we can ignore the cross call.
18760Sstevel@tonic-gate 	 */
18770Sstevel@tonic-gate 	if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
18780Sstevel@tonic-gate 		return (0);
18790Sstevel@tonic-gate 
18800Sstevel@tonic-gate 	/*
18810Sstevel@tonic-gate 	 * For a normal address, we just flush one page mapping
18820Sstevel@tonic-gate 	 */
18830Sstevel@tonic-gate 	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
18840Sstevel@tonic-gate 		mmu_tlbflush_entry((caddr_t)addr);
18850Sstevel@tonic-gate 		return (0);
18860Sstevel@tonic-gate 	}
18870Sstevel@tonic-gate 
18880Sstevel@tonic-gate 	/*
18890Sstevel@tonic-gate 	 * Otherwise we reload cr3 to effect a complete TLB flush.
18900Sstevel@tonic-gate 	 *
18910Sstevel@tonic-gate 	 * A reload of cr3 on a VLP process also means we must also recopy in
18920Sstevel@tonic-gate 	 * the pte values from the struct hat
18930Sstevel@tonic-gate 	 */
18940Sstevel@tonic-gate 	if (hat->hat_flags & HAT_VLP) {
18950Sstevel@tonic-gate #if defined(__amd64)
18960Sstevel@tonic-gate 		x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
18970Sstevel@tonic-gate 
18980Sstevel@tonic-gate 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
18990Sstevel@tonic-gate #elif defined(__i386)
19000Sstevel@tonic-gate 		reload_pae32(hat, CPU);
19010Sstevel@tonic-gate #endif
19020Sstevel@tonic-gate 	}
19030Sstevel@tonic-gate 	reload_cr3();
19040Sstevel@tonic-gate 	return (0);
19050Sstevel@tonic-gate }
19060Sstevel@tonic-gate 
19070Sstevel@tonic-gate /*
19080Sstevel@tonic-gate  * Internal routine to do cross calls to invalidate a range of pages on
19090Sstevel@tonic-gate  * all CPUs using a given hat.
19100Sstevel@tonic-gate  */
19110Sstevel@tonic-gate void
19120Sstevel@tonic-gate hat_demap(hat_t *hat, uintptr_t va)
19130Sstevel@tonic-gate {
19140Sstevel@tonic-gate 	extern int	flushes_require_xcalls;	/* from mp_startup.c */
19150Sstevel@tonic-gate 	cpuset_t	justme;
19160Sstevel@tonic-gate 
19170Sstevel@tonic-gate 	/*
19180Sstevel@tonic-gate 	 * If the hat is being destroyed, there are no more users, so
19190Sstevel@tonic-gate 	 * demap need not do anything.
19200Sstevel@tonic-gate 	 */
19210Sstevel@tonic-gate 	if (hat->hat_flags & HAT_FREEING)
19220Sstevel@tonic-gate 		return;
19230Sstevel@tonic-gate 
19240Sstevel@tonic-gate 	/*
19250Sstevel@tonic-gate 	 * If demapping from a shared pagetable, we best demap the
19260Sstevel@tonic-gate 	 * entire set of user TLBs, since we don't know what addresses
19270Sstevel@tonic-gate 	 * these were shared at.
19280Sstevel@tonic-gate 	 */
19290Sstevel@tonic-gate 	if (hat->hat_flags & HAT_SHARED) {
19300Sstevel@tonic-gate 		hat = kas.a_hat;
19310Sstevel@tonic-gate 		va = DEMAP_ALL_ADDR;
19320Sstevel@tonic-gate 	}
19330Sstevel@tonic-gate 
19340Sstevel@tonic-gate 	/*
19350Sstevel@tonic-gate 	 * if not running with multiple CPUs, don't use cross calls
19360Sstevel@tonic-gate 	 */
19370Sstevel@tonic-gate 	if (panicstr || !flushes_require_xcalls) {
19380Sstevel@tonic-gate 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
19390Sstevel@tonic-gate 		return;
19400Sstevel@tonic-gate 	}
19410Sstevel@tonic-gate 
19420Sstevel@tonic-gate 
19430Sstevel@tonic-gate 	/*
19440Sstevel@tonic-gate 	 * All CPUs must see kernel hat changes.
19450Sstevel@tonic-gate 	 */
19460Sstevel@tonic-gate 	if (hat == kas.a_hat) {
19470Sstevel@tonic-gate 		kpreempt_disable();
19480Sstevel@tonic-gate 		xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL,
19490Sstevel@tonic-gate 		    X_CALL_HIPRI, khat_cpuset, hati_demap_func);
19500Sstevel@tonic-gate 		kpreempt_enable();
19510Sstevel@tonic-gate 		return;
19520Sstevel@tonic-gate 	}
19530Sstevel@tonic-gate 
19540Sstevel@tonic-gate 	/*
19550Sstevel@tonic-gate 	 * Otherwise we notify CPUs currently running in this HAT
19560Sstevel@tonic-gate 	 */
19570Sstevel@tonic-gate 	hat_enter(hat);
19580Sstevel@tonic-gate 	kpreempt_disable();
19590Sstevel@tonic-gate 	CPUSET_ONLY(justme, CPU->cpu_id);
19600Sstevel@tonic-gate 	if (CPUSET_ISEQUAL(hat->hat_cpus, justme))
19610Sstevel@tonic-gate 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
19620Sstevel@tonic-gate 	else
19630Sstevel@tonic-gate 		xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL,
19640Sstevel@tonic-gate 		    X_CALL_HIPRI, hat->hat_cpus, hati_demap_func);
19650Sstevel@tonic-gate 	kpreempt_enable();
19660Sstevel@tonic-gate 	hat_exit(hat);
19670Sstevel@tonic-gate }
19680Sstevel@tonic-gate 
19690Sstevel@tonic-gate /*
19700Sstevel@tonic-gate  * Interior routine for HAT_UNLOADs from hat_unload_callback(),
19710Sstevel@tonic-gate  * hat_kmap_unload() OR from hat_steal() code.  This routine doesn't
19720Sstevel@tonic-gate  * handle releasing of the htables.
19730Sstevel@tonic-gate  */
19740Sstevel@tonic-gate void
19750Sstevel@tonic-gate hat_pte_unmap(
19760Sstevel@tonic-gate 	htable_t	*ht,
19770Sstevel@tonic-gate 	uint_t		entry,
19780Sstevel@tonic-gate 	uint_t		flags,
19790Sstevel@tonic-gate 	x86pte_t	old_pte,
19800Sstevel@tonic-gate 	void		*pte_ptr)
19810Sstevel@tonic-gate {
19820Sstevel@tonic-gate 	hat_t		*hat = ht->ht_hat;
19830Sstevel@tonic-gate 	hment_t		*hm = NULL;
19840Sstevel@tonic-gate 	page_t		*pp = NULL;
19850Sstevel@tonic-gate 	level_t		l = ht->ht_level;
19860Sstevel@tonic-gate 	pfn_t		pfn;
19870Sstevel@tonic-gate 
19880Sstevel@tonic-gate 	/*
19890Sstevel@tonic-gate 	 * We always track the locking counts, even if nothing is unmapped
19900Sstevel@tonic-gate 	 */
19910Sstevel@tonic-gate 	if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
19920Sstevel@tonic-gate 		ASSERT(ht->ht_lock_cnt > 0);
19930Sstevel@tonic-gate 		HTABLE_LOCK_DEC(ht);
19940Sstevel@tonic-gate 	}
19950Sstevel@tonic-gate 
19960Sstevel@tonic-gate 	/*
19970Sstevel@tonic-gate 	 * Figure out which page's mapping list lock to acquire using the PFN
19980Sstevel@tonic-gate 	 * passed in "old" PTE. We then attempt to invalidate the PTE.
19990Sstevel@tonic-gate 	 * If another thread, probably a hat_pageunload, has asynchronously
20000Sstevel@tonic-gate 	 * unmapped/remapped this address we'll loop here.
20010Sstevel@tonic-gate 	 */
20020Sstevel@tonic-gate 	ASSERT(ht->ht_busy > 0);
20030Sstevel@tonic-gate 	while (PTE_ISVALID(old_pte)) {
20040Sstevel@tonic-gate 		pfn = PTE2PFN(old_pte, l);
20050Sstevel@tonic-gate 		if (PTE_GET(old_pte, PT_NOCONSIST)) {
20060Sstevel@tonic-gate 			pp = NULL;
20070Sstevel@tonic-gate 		} else {
20080Sstevel@tonic-gate 			pp = page_numtopp_nolock(pfn);
200947Sjosephb 			if (pp == NULL) {
201047Sjosephb 				panic("no page_t, not NOCONSIST: old_pte="
201147Sjosephb 				    FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
201247Sjosephb 				    old_pte, (uintptr_t)ht, entry,
201347Sjosephb 				    (uintptr_t)pte_ptr);
201447Sjosephb 			}
20150Sstevel@tonic-gate 			x86_hm_enter(pp);
20160Sstevel@tonic-gate 		}
201747Sjosephb 
201847Sjosephb 		/*
201947Sjosephb 		 * If freeing the address space, check that the PTE
202047Sjosephb 		 * hasn't changed, as the mappings are no longer in use by
202147Sjosephb 		 * any thread, invalidation is unnecessary.
202247Sjosephb 		 * If not freeing, do a full invalidate.
202347Sjosephb 		 */
202447Sjosephb 		if (hat->hat_flags & HAT_FREEING)
202547Sjosephb 			old_pte = x86pte_get(ht, entry);
202647Sjosephb 		else
202747Sjosephb 			old_pte =
202847Sjosephb 			    x86pte_invalidate_pfn(ht, entry, pfn, pte_ptr);
20290Sstevel@tonic-gate 
20300Sstevel@tonic-gate 		/*
20310Sstevel@tonic-gate 		 * If the page hadn't changed we've unmapped it and can proceed
20320Sstevel@tonic-gate 		 */
20330Sstevel@tonic-gate 		if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
20340Sstevel@tonic-gate 			break;
20350Sstevel@tonic-gate 
20360Sstevel@tonic-gate 		/*
20370Sstevel@tonic-gate 		 * Otherwise, we'll have to retry with the current old_pte.
20380Sstevel@tonic-gate 		 * Drop the hment lock, since the pfn may have changed.
20390Sstevel@tonic-gate 		 */
20400Sstevel@tonic-gate 		if (pp != NULL) {
20410Sstevel@tonic-gate 			x86_hm_exit(pp);
20420Sstevel@tonic-gate 			pp = NULL;
20430Sstevel@tonic-gate 		} else {
20440Sstevel@tonic-gate 			ASSERT(PTE_GET(old_pte, PT_NOCONSIST));
20450Sstevel@tonic-gate 		}
20460Sstevel@tonic-gate 	}
20470Sstevel@tonic-gate 
20480Sstevel@tonic-gate 	/*
20490Sstevel@tonic-gate 	 * If the old mapping wasn't valid, there's nothing more to do
20500Sstevel@tonic-gate 	 */
20510Sstevel@tonic-gate 	if (!PTE_ISVALID(old_pte)) {
20520Sstevel@tonic-gate 		if (pp != NULL)
20530Sstevel@tonic-gate 			x86_hm_exit(pp);
20540Sstevel@tonic-gate 		return;
20550Sstevel@tonic-gate 	}
20560Sstevel@tonic-gate 
20570Sstevel@tonic-gate 	/*
20580Sstevel@tonic-gate 	 * Take care of syncing any MOD/REF bits and removing the hment.
20590Sstevel@tonic-gate 	 */
20600Sstevel@tonic-gate 	if (pp != NULL) {
20610Sstevel@tonic-gate 		if (!(flags & HAT_UNLOAD_NOSYNC))
20620Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, old_pte, l);
20630Sstevel@tonic-gate 		hm = hment_remove(pp, ht, entry);
20640Sstevel@tonic-gate 		x86_hm_exit(pp);
20650Sstevel@tonic-gate 		if (hm != NULL)
20660Sstevel@tonic-gate 			hment_free(hm);
20670Sstevel@tonic-gate 	}
20680Sstevel@tonic-gate 
20690Sstevel@tonic-gate 	/*
20700Sstevel@tonic-gate 	 * Handle book keeping in the htable and hat
20710Sstevel@tonic-gate 	 */
20720Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
20730Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
20740Sstevel@tonic-gate 	PGCNT_DEC(hat, l);
20750Sstevel@tonic-gate }
20760Sstevel@tonic-gate 
20770Sstevel@tonic-gate /*
20780Sstevel@tonic-gate  * very cheap unload implementation to special case some kernel addresses
20790Sstevel@tonic-gate  */
20800Sstevel@tonic-gate static void
20810Sstevel@tonic-gate hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
20820Sstevel@tonic-gate {
20830Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
20840Sstevel@tonic-gate 	uintptr_t	eva = va + len;
20850Sstevel@tonic-gate 	pgcnt_t		pg_off;
20860Sstevel@tonic-gate 	htable_t	*ht;
20870Sstevel@tonic-gate 	uint_t		entry;
20880Sstevel@tonic-gate 	void		*pte_ptr;
20890Sstevel@tonic-gate 	x86pte_t	old_pte;
20900Sstevel@tonic-gate 
20910Sstevel@tonic-gate 	for (; va < eva; va += MMU_PAGESIZE) {
20920Sstevel@tonic-gate 		/*
20930Sstevel@tonic-gate 		 * Get the PTE
20940Sstevel@tonic-gate 		 */
20950Sstevel@tonic-gate 		pg_off = mmu_btop(va - mmu.kmap_addr);
20960Sstevel@tonic-gate 		if (mmu.pae_hat) {
20970Sstevel@tonic-gate 			pte_ptr = mmu.kmap_ptes + pg_off;
209847Sjosephb 			ATOMIC_LOAD64((x86pte_t *)pte_ptr, old_pte);
20990Sstevel@tonic-gate 		} else {
21000Sstevel@tonic-gate 			pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
21010Sstevel@tonic-gate 			old_pte = *(x86pte32_t *)pte_ptr;
21020Sstevel@tonic-gate 		}
21030Sstevel@tonic-gate 
21040Sstevel@tonic-gate 		/*
21050Sstevel@tonic-gate 		 * get the htable / entry
21060Sstevel@tonic-gate 		 */
21070Sstevel@tonic-gate 		ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
21080Sstevel@tonic-gate 		    >> LEVEL_SHIFT(1)];
21090Sstevel@tonic-gate 		entry = htable_va2entry(va, ht);
21100Sstevel@tonic-gate 
21110Sstevel@tonic-gate 		/*
21120Sstevel@tonic-gate 		 * use mostly common code to unmap it.
21130Sstevel@tonic-gate 		 */
21140Sstevel@tonic-gate 		hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr);
21150Sstevel@tonic-gate 	}
21160Sstevel@tonic-gate }
21170Sstevel@tonic-gate 
21180Sstevel@tonic-gate 
21190Sstevel@tonic-gate /*
21200Sstevel@tonic-gate  * unload a range of virtual address space (no callback)
21210Sstevel@tonic-gate  */
21220Sstevel@tonic-gate void
21230Sstevel@tonic-gate hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
21240Sstevel@tonic-gate {
21250Sstevel@tonic-gate 	uintptr_t va = (uintptr_t)addr;
21260Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || va + len <= kernelbase);
21270Sstevel@tonic-gate 
21280Sstevel@tonic-gate 	/*
21290Sstevel@tonic-gate 	 * special case for performance.
21300Sstevel@tonic-gate 	 */
21310Sstevel@tonic-gate 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
21320Sstevel@tonic-gate 		ASSERT(hat == kas.a_hat);
21330Sstevel@tonic-gate 		hat_kmap_unload(addr, len, flags);
21340Sstevel@tonic-gate 		return;
21350Sstevel@tonic-gate 	}
21360Sstevel@tonic-gate 	hat_unload_callback(hat, addr, len, flags, NULL);
21370Sstevel@tonic-gate }
21380Sstevel@tonic-gate 
21390Sstevel@tonic-gate /*
21400Sstevel@tonic-gate  * Do the callbacks for ranges being unloaded.
21410Sstevel@tonic-gate  */
21420Sstevel@tonic-gate typedef struct range_info {
21430Sstevel@tonic-gate 	uintptr_t	rng_va;
21440Sstevel@tonic-gate 	ulong_t		rng_cnt;
21450Sstevel@tonic-gate 	level_t		rng_level;
21460Sstevel@tonic-gate } range_info_t;
21470Sstevel@tonic-gate 
21480Sstevel@tonic-gate static void
21490Sstevel@tonic-gate handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
21500Sstevel@tonic-gate {
21510Sstevel@tonic-gate 	/*
21520Sstevel@tonic-gate 	 * do callbacks to upper level VM system
21530Sstevel@tonic-gate 	 */
21540Sstevel@tonic-gate 	while (cb != NULL && cnt > 0) {
21550Sstevel@tonic-gate 		--cnt;
21560Sstevel@tonic-gate 		cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
21570Sstevel@tonic-gate 		cb->hcb_end_addr = cb->hcb_start_addr;
21580Sstevel@tonic-gate 		cb->hcb_end_addr +=
21590Sstevel@tonic-gate 		    range[cnt].rng_cnt << LEVEL_SIZE(range[cnt].rng_level);
21600Sstevel@tonic-gate 		cb->hcb_function(cb);
21610Sstevel@tonic-gate 	}
21620Sstevel@tonic-gate }
21630Sstevel@tonic-gate 
21640Sstevel@tonic-gate /*
21650Sstevel@tonic-gate  * Unload a given range of addresses (has optional callback)
21660Sstevel@tonic-gate  *
21670Sstevel@tonic-gate  * Flags:
21680Sstevel@tonic-gate  * define	HAT_UNLOAD		0x00
21690Sstevel@tonic-gate  * define	HAT_UNLOAD_NOSYNC	0x02
21700Sstevel@tonic-gate  * define	HAT_UNLOAD_UNLOCK	0x04
21710Sstevel@tonic-gate  * define	HAT_UNLOAD_OTHER	0x08 - not used
21720Sstevel@tonic-gate  * define	HAT_UNLOAD_UNMAP	0x10 - same as HAT_UNLOAD
21730Sstevel@tonic-gate  */
21740Sstevel@tonic-gate #define	MAX_UNLOAD_CNT (8)
21750Sstevel@tonic-gate void
21760Sstevel@tonic-gate hat_unload_callback(
21770Sstevel@tonic-gate 	hat_t		*hat,
21780Sstevel@tonic-gate 	caddr_t		addr,
21790Sstevel@tonic-gate 	size_t		len,
21800Sstevel@tonic-gate 	uint_t		flags,
21810Sstevel@tonic-gate 	hat_callback_t	*cb)
21820Sstevel@tonic-gate {
21830Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
21840Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
21850Sstevel@tonic-gate 	htable_t	*ht = NULL;
21860Sstevel@tonic-gate 	uint_t		entry;
218747Sjosephb 	uintptr_t	contig_va = (uintptr_t)-1L;
21880Sstevel@tonic-gate 	range_info_t	r[MAX_UNLOAD_CNT];
21890Sstevel@tonic-gate 	uint_t		r_cnt = 0;
21900Sstevel@tonic-gate 	x86pte_t	old_pte;
21910Sstevel@tonic-gate 
21920Sstevel@tonic-gate 	HATIN(hat_unload_callback, hat, addr, len);
21930Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || eaddr <= kernelbase);
21940Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
21950Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
21960Sstevel@tonic-gate 
21970Sstevel@tonic-gate 	while (vaddr < eaddr) {
21980Sstevel@tonic-gate 		old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
21990Sstevel@tonic-gate 		if (ht == NULL)
22000Sstevel@tonic-gate 			break;
22010Sstevel@tonic-gate 
22020Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
22030Sstevel@tonic-gate 
22040Sstevel@tonic-gate 		if (vaddr < (uintptr_t)addr)
22050Sstevel@tonic-gate 			panic("hat_unload_callback(): unmap inside large page");
22060Sstevel@tonic-gate 
22070Sstevel@tonic-gate 		/*
22080Sstevel@tonic-gate 		 * We'll do the call backs for contiguous ranges
22090Sstevel@tonic-gate 		 */
221047Sjosephb 		if (vaddr != contig_va ||
22110Sstevel@tonic-gate 		    (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
22120Sstevel@tonic-gate 			if (r_cnt == MAX_UNLOAD_CNT) {
22130Sstevel@tonic-gate 				handle_ranges(cb, r_cnt, r);
22140Sstevel@tonic-gate 				r_cnt = 0;
22150Sstevel@tonic-gate 			}
22160Sstevel@tonic-gate 			r[r_cnt].rng_va = vaddr;
22170Sstevel@tonic-gate 			r[r_cnt].rng_cnt = 0;
22180Sstevel@tonic-gate 			r[r_cnt].rng_level = ht->ht_level;
22190Sstevel@tonic-gate 			++r_cnt;
22200Sstevel@tonic-gate 		}
22210Sstevel@tonic-gate 
22220Sstevel@tonic-gate 		/*
22230Sstevel@tonic-gate 		 * Unload one mapping from the page tables.
22240Sstevel@tonic-gate 		 */
22250Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
22260Sstevel@tonic-gate 		hat_pte_unmap(ht, entry, flags, old_pte, NULL);
22270Sstevel@tonic-gate 
22280Sstevel@tonic-gate 		ASSERT(ht->ht_level <= mmu.max_page_level);
22290Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
223047Sjosephb 		contig_va = vaddr;
22310Sstevel@tonic-gate 		++r[r_cnt - 1].rng_cnt;
22320Sstevel@tonic-gate 	}
22330Sstevel@tonic-gate 	if (ht)
22340Sstevel@tonic-gate 		htable_release(ht);
22350Sstevel@tonic-gate 
22360Sstevel@tonic-gate 	/*
22370Sstevel@tonic-gate 	 * handle last range for callbacks
22380Sstevel@tonic-gate 	 */
22390Sstevel@tonic-gate 	if (r_cnt > 0)
22400Sstevel@tonic-gate 		handle_ranges(cb, r_cnt, r);
22410Sstevel@tonic-gate 
22420Sstevel@tonic-gate 	HATOUT(hat_unload_callback, hat, addr);
22430Sstevel@tonic-gate }
22440Sstevel@tonic-gate 
22450Sstevel@tonic-gate /*
22460Sstevel@tonic-gate  * synchronize mapping with software data structures
22470Sstevel@tonic-gate  *
22480Sstevel@tonic-gate  * This interface is currently only used by the working set monitor
22490Sstevel@tonic-gate  * driver.
22500Sstevel@tonic-gate  */
22510Sstevel@tonic-gate /*ARGSUSED*/
22520Sstevel@tonic-gate void
22530Sstevel@tonic-gate hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
22540Sstevel@tonic-gate {
22550Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
22560Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
22570Sstevel@tonic-gate 	htable_t	*ht = NULL;
22580Sstevel@tonic-gate 	uint_t		entry;
22590Sstevel@tonic-gate 	x86pte_t	pte;
22600Sstevel@tonic-gate 	x86pte_t	save_pte;
22610Sstevel@tonic-gate 	x86pte_t	new;
22620Sstevel@tonic-gate 	page_t		*pp;
22630Sstevel@tonic-gate 
22640Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(vaddr));
22650Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
22660Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
22670Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || eaddr <= kernelbase);
22680Sstevel@tonic-gate 
22690Sstevel@tonic-gate 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
22700Sstevel@tonic-gate try_again:
22710Sstevel@tonic-gate 		pte = htable_walk(hat, &ht, &vaddr, eaddr);
22720Sstevel@tonic-gate 		if (ht == NULL)
22730Sstevel@tonic-gate 			break;
22740Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
22750Sstevel@tonic-gate 
22760Sstevel@tonic-gate 		if (PTE_GET(pte, PT_NOSYNC) ||
22770Sstevel@tonic-gate 		    PTE_GET(pte, PT_REF | PT_MOD) == 0)
22780Sstevel@tonic-gate 			continue;
22790Sstevel@tonic-gate 
22800Sstevel@tonic-gate 		/*
22810Sstevel@tonic-gate 		 * We need to acquire the mapping list lock to protect
22820Sstevel@tonic-gate 		 * against hat_pageunload(), hat_unload(), etc.
22830Sstevel@tonic-gate 		 */
22840Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
22850Sstevel@tonic-gate 		if (pp == NULL)
22860Sstevel@tonic-gate 			break;
22870Sstevel@tonic-gate 		x86_hm_enter(pp);
22880Sstevel@tonic-gate 		save_pte = pte;
22890Sstevel@tonic-gate 		pte = x86pte_get(ht, entry);
22900Sstevel@tonic-gate 		if (pte != save_pte) {
22910Sstevel@tonic-gate 			x86_hm_exit(pp);
22920Sstevel@tonic-gate 			goto try_again;
22930Sstevel@tonic-gate 		}
22940Sstevel@tonic-gate 		if (PTE_GET(pte, PT_NOSYNC) ||
22950Sstevel@tonic-gate 		    PTE_GET(pte, PT_REF | PT_MOD) == 0) {
22960Sstevel@tonic-gate 			x86_hm_exit(pp);
22970Sstevel@tonic-gate 			continue;
22980Sstevel@tonic-gate 		}
22990Sstevel@tonic-gate 
23000Sstevel@tonic-gate 		/*
23010Sstevel@tonic-gate 		 * Need to clear ref or mod bits. We may compete with
23020Sstevel@tonic-gate 		 * hardware updating the R/M bits and have to try again.
23030Sstevel@tonic-gate 		 */
23040Sstevel@tonic-gate 		if (flags == HAT_SYNC_ZERORM) {
23050Sstevel@tonic-gate 			new = pte;
23060Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD);
23070Sstevel@tonic-gate 			pte = hati_update_pte(ht, entry, pte, new);
23080Sstevel@tonic-gate 			if (pte != 0) {
23090Sstevel@tonic-gate 				x86_hm_exit(pp);
23100Sstevel@tonic-gate 				goto try_again;
23110Sstevel@tonic-gate 			}
23120Sstevel@tonic-gate 		} else {
23130Sstevel@tonic-gate 			/*
23140Sstevel@tonic-gate 			 * sync the PTE to the page_t
23150Sstevel@tonic-gate 			 */
23160Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
23170Sstevel@tonic-gate 		}
23180Sstevel@tonic-gate 		x86_hm_exit(pp);
23190Sstevel@tonic-gate 	}
23200Sstevel@tonic-gate 	if (ht)
23210Sstevel@tonic-gate 		htable_release(ht);
23220Sstevel@tonic-gate }
23230Sstevel@tonic-gate 
23240Sstevel@tonic-gate /*
23250Sstevel@tonic-gate  * void	hat_map(hat, addr, len, flags)
23260Sstevel@tonic-gate  */
23270Sstevel@tonic-gate /*ARGSUSED*/
23280Sstevel@tonic-gate void
23290Sstevel@tonic-gate hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
23300Sstevel@tonic-gate {
23310Sstevel@tonic-gate 	/* does nothing */
23320Sstevel@tonic-gate }
23330Sstevel@tonic-gate 
23340Sstevel@tonic-gate /*
23350Sstevel@tonic-gate  * uint_t hat_getattr(hat, addr, *attr)
23360Sstevel@tonic-gate  *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
23370Sstevel@tonic-gate  *	mapping and *attr is valid, nonzero if there was no mapping and
23380Sstevel@tonic-gate  *	*attr is not valid.
23390Sstevel@tonic-gate  */
23400Sstevel@tonic-gate uint_t
23410Sstevel@tonic-gate hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
23420Sstevel@tonic-gate {
23430Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
23440Sstevel@tonic-gate 	htable_t	*ht = NULL;
23450Sstevel@tonic-gate 	x86pte_t	pte;
23460Sstevel@tonic-gate 
23470Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || vaddr < kernelbase);
23480Sstevel@tonic-gate 
23490Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
23500Sstevel@tonic-gate 		return ((uint_t)-1);
23510Sstevel@tonic-gate 
23520Sstevel@tonic-gate 	ht = htable_getpte(hat, vaddr, NULL, &pte, MAX_PAGE_LEVEL);
23530Sstevel@tonic-gate 	if (ht == NULL)
23540Sstevel@tonic-gate 		return ((uint_t)-1);
23550Sstevel@tonic-gate 
23560Sstevel@tonic-gate 	if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
23570Sstevel@tonic-gate 		htable_release(ht);
23580Sstevel@tonic-gate 		return ((uint_t)-1);
23590Sstevel@tonic-gate 	}
23600Sstevel@tonic-gate 
23610Sstevel@tonic-gate 	*attr = PROT_READ;
23620Sstevel@tonic-gate 	if (PTE_GET(pte, PT_WRITABLE))
23630Sstevel@tonic-gate 		*attr |= PROT_WRITE;
23640Sstevel@tonic-gate 	if (PTE_GET(pte, PT_USER))
23650Sstevel@tonic-gate 		*attr |= PROT_USER;
23660Sstevel@tonic-gate 	if (!PTE_GET(pte, mmu.pt_nx))
23670Sstevel@tonic-gate 		*attr |= PROT_EXEC;
23680Sstevel@tonic-gate 	if (PTE_GET(pte, PT_NOSYNC))
23690Sstevel@tonic-gate 		*attr |= HAT_NOSYNC;
23700Sstevel@tonic-gate 	htable_release(ht);
23710Sstevel@tonic-gate 	return (0);
23720Sstevel@tonic-gate }
23730Sstevel@tonic-gate 
23740Sstevel@tonic-gate /*
23750Sstevel@tonic-gate  * hat_updateattr() applies the given attribute change to an existing mapping
23760Sstevel@tonic-gate  */
23770Sstevel@tonic-gate #define	HAT_LOAD_ATTR		1
23780Sstevel@tonic-gate #define	HAT_SET_ATTR		2
23790Sstevel@tonic-gate #define	HAT_CLR_ATTR		3
23800Sstevel@tonic-gate 
23810Sstevel@tonic-gate static void
23820Sstevel@tonic-gate hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
23830Sstevel@tonic-gate {
23840Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
23850Sstevel@tonic-gate 	uintptr_t	eaddr = (uintptr_t)addr + len;
23860Sstevel@tonic-gate 	htable_t	*ht = NULL;
23870Sstevel@tonic-gate 	uint_t		entry;
23880Sstevel@tonic-gate 	x86pte_t	oldpte, newpte;
23890Sstevel@tonic-gate 	page_t		*pp;
23900Sstevel@tonic-gate 
23910Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
23920Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
23930Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
23940Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
23950Sstevel@tonic-gate 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
23960Sstevel@tonic-gate try_again:
23970Sstevel@tonic-gate 		oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
23980Sstevel@tonic-gate 		if (ht == NULL)
23990Sstevel@tonic-gate 			break;
24000Sstevel@tonic-gate 		if (PTE_GET(oldpte, PT_NOCONSIST))
24010Sstevel@tonic-gate 			continue;
24020Sstevel@tonic-gate 
24030Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
24040Sstevel@tonic-gate 		if (pp == NULL)
24050Sstevel@tonic-gate 			continue;
24060Sstevel@tonic-gate 		x86_hm_enter(pp);
24070Sstevel@tonic-gate 
24080Sstevel@tonic-gate 		newpte = oldpte;
24090Sstevel@tonic-gate 		/*
24100Sstevel@tonic-gate 		 * We found a page table entry in the desired range,
24110Sstevel@tonic-gate 		 * figure out the new attributes.
24120Sstevel@tonic-gate 		 */
24130Sstevel@tonic-gate 		if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
24140Sstevel@tonic-gate 			if ((attr & PROT_WRITE) &&
24150Sstevel@tonic-gate 			    !PTE_GET(oldpte, PT_WRITABLE))
24160Sstevel@tonic-gate 				newpte |= PT_WRITABLE;
24170Sstevel@tonic-gate 
24180Sstevel@tonic-gate 			if ((attr & HAT_NOSYNC) && !PTE_GET(oldpte, PT_NOSYNC))
24190Sstevel@tonic-gate 				newpte |= PT_NOSYNC;
24200Sstevel@tonic-gate 
24210Sstevel@tonic-gate 			if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
24220Sstevel@tonic-gate 				newpte &= ~mmu.pt_nx;
24230Sstevel@tonic-gate 		}
24240Sstevel@tonic-gate 
24250Sstevel@tonic-gate 		if (what == HAT_LOAD_ATTR) {
24260Sstevel@tonic-gate 			if (!(attr & PROT_WRITE) &&
24270Sstevel@tonic-gate 			    PTE_GET(oldpte, PT_WRITABLE))
24280Sstevel@tonic-gate 				newpte &= ~PT_WRITABLE;
24290Sstevel@tonic-gate 
24300Sstevel@tonic-gate 			if (!(attr & HAT_NOSYNC) && PTE_GET(oldpte, PT_NOSYNC))
24310Sstevel@tonic-gate 				newpte &= ~PT_NOSYNC;
24320Sstevel@tonic-gate 
24330Sstevel@tonic-gate 			if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
24340Sstevel@tonic-gate 				newpte |= mmu.pt_nx;
24350Sstevel@tonic-gate 		}
24360Sstevel@tonic-gate 
24370Sstevel@tonic-gate 		if (what == HAT_CLR_ATTR) {
24380Sstevel@tonic-gate 			if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
24390Sstevel@tonic-gate 				newpte &= ~PT_WRITABLE;
24400Sstevel@tonic-gate 
24410Sstevel@tonic-gate 			if ((attr & HAT_NOSYNC) && PTE_GET(oldpte, PT_NOSYNC))
24420Sstevel@tonic-gate 				newpte &= ~PT_NOSYNC;
24430Sstevel@tonic-gate 
24440Sstevel@tonic-gate 			if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
24450Sstevel@tonic-gate 				newpte |= mmu.pt_nx;
24460Sstevel@tonic-gate 		}
24470Sstevel@tonic-gate 
24480Sstevel@tonic-gate 		/*
24490Sstevel@tonic-gate 		 * what about PROT_READ or others? this code only handles:
24500Sstevel@tonic-gate 		 * EXEC, WRITE, NOSYNC
24510Sstevel@tonic-gate 		 */
24520Sstevel@tonic-gate 
24530Sstevel@tonic-gate 		/*
24540Sstevel@tonic-gate 		 * If new PTE really changed, update the table.
24550Sstevel@tonic-gate 		 */
24560Sstevel@tonic-gate 		if (newpte != oldpte) {
24570Sstevel@tonic-gate 			entry = htable_va2entry(vaddr, ht);
24580Sstevel@tonic-gate 			oldpte = hati_update_pte(ht, entry, oldpte, newpte);
24590Sstevel@tonic-gate 			if (oldpte != 0) {
24600Sstevel@tonic-gate 				x86_hm_exit(pp);
24610Sstevel@tonic-gate 				goto try_again;
24620Sstevel@tonic-gate 			}
24630Sstevel@tonic-gate 		}
24640Sstevel@tonic-gate 		x86_hm_exit(pp);
24650Sstevel@tonic-gate 	}
24660Sstevel@tonic-gate 	if (ht)
24670Sstevel@tonic-gate 		htable_release(ht);
24680Sstevel@tonic-gate }
24690Sstevel@tonic-gate 
24700Sstevel@tonic-gate /*
24710Sstevel@tonic-gate  * Various wrappers for hat_updateattr()
24720Sstevel@tonic-gate  */
24730Sstevel@tonic-gate void
24740Sstevel@tonic-gate hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
24750Sstevel@tonic-gate {
24760Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= kernelbase);
24770Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
24780Sstevel@tonic-gate }
24790Sstevel@tonic-gate 
24800Sstevel@tonic-gate void
24810Sstevel@tonic-gate hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
24820Sstevel@tonic-gate {
24830Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= kernelbase);
24840Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
24850Sstevel@tonic-gate }
24860Sstevel@tonic-gate 
24870Sstevel@tonic-gate void
24880Sstevel@tonic-gate hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
24890Sstevel@tonic-gate {
24900Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= kernelbase);
24910Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
24920Sstevel@tonic-gate }
24930Sstevel@tonic-gate 
24940Sstevel@tonic-gate void
24950Sstevel@tonic-gate hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
24960Sstevel@tonic-gate {
24970Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= kernelbase);
24980Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
24990Sstevel@tonic-gate }
25000Sstevel@tonic-gate 
25010Sstevel@tonic-gate /*ARGSUSED*/
25020Sstevel@tonic-gate void
25030Sstevel@tonic-gate hat_chgattr_pagedir(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
25040Sstevel@tonic-gate {
25050Sstevel@tonic-gate 	panic("hat_chgattr_pgdir() not supported - used by 80387 emulation");
25060Sstevel@tonic-gate }
25070Sstevel@tonic-gate 
25080Sstevel@tonic-gate /*
25090Sstevel@tonic-gate  * size_t hat_getpagesize(hat, addr)
25100Sstevel@tonic-gate  *	returns pagesize in bytes for <hat, addr>. returns -1 of there is
25110Sstevel@tonic-gate  *	no mapping. This is an advisory call.
25120Sstevel@tonic-gate  */
25130Sstevel@tonic-gate ssize_t
25140Sstevel@tonic-gate hat_getpagesize(hat_t *hat, caddr_t addr)
25150Sstevel@tonic-gate {
25160Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
25170Sstevel@tonic-gate 	htable_t	*ht;
25180Sstevel@tonic-gate 	size_t		pagesize;
25190Sstevel@tonic-gate 
25200Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || vaddr < kernelbase);
25210Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
25220Sstevel@tonic-gate 		return (-1);
25230Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, NULL);
25240Sstevel@tonic-gate 	if (ht == NULL)
25250Sstevel@tonic-gate 		return (-1);
25260Sstevel@tonic-gate 	pagesize = LEVEL_SIZE(ht->ht_level);
25270Sstevel@tonic-gate 	htable_release(ht);
25280Sstevel@tonic-gate 	return (pagesize);
25290Sstevel@tonic-gate }
25300Sstevel@tonic-gate 
25310Sstevel@tonic-gate 
25320Sstevel@tonic-gate 
25330Sstevel@tonic-gate /*
25340Sstevel@tonic-gate  * pfn_t hat_getpfnum(hat, addr)
25350Sstevel@tonic-gate  *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
25360Sstevel@tonic-gate  */
25370Sstevel@tonic-gate pfn_t
25380Sstevel@tonic-gate hat_getpfnum(hat_t *hat, caddr_t addr)
25390Sstevel@tonic-gate {
25400Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
25410Sstevel@tonic-gate 	htable_t	*ht;
25420Sstevel@tonic-gate 	uint_t		entry;
25430Sstevel@tonic-gate 	pfn_t		pfn = PFN_INVALID;
25440Sstevel@tonic-gate 
25450Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || vaddr < kernelbase);
25460Sstevel@tonic-gate 	if (khat_running == 0)
25470Sstevel@tonic-gate 		panic("hat_getpfnum(): called too early\n");
25480Sstevel@tonic-gate 
25490Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
25500Sstevel@tonic-gate 		return (PFN_INVALID);
25510Sstevel@tonic-gate 
25520Sstevel@tonic-gate 	/*
25530Sstevel@tonic-gate 	 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
25540Sstevel@tonic-gate 	 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
25550Sstevel@tonic-gate 	 * this up.
25560Sstevel@tonic-gate 	 */
25570Sstevel@tonic-gate 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
25580Sstevel@tonic-gate 		x86pte_t pte;
25590Sstevel@tonic-gate 		pgcnt_t pg_off;
25600Sstevel@tonic-gate 
25610Sstevel@tonic-gate 		pg_off = mmu_btop(vaddr - mmu.kmap_addr);
25620Sstevel@tonic-gate 		if (mmu.pae_hat) {
256347Sjosephb 			ATOMIC_LOAD64(mmu.kmap_ptes + pg_off, pte);
25640Sstevel@tonic-gate 		} else {
25650Sstevel@tonic-gate 			pte = ((x86pte32_t *)mmu.kmap_ptes)[pg_off];
25660Sstevel@tonic-gate 		}
25670Sstevel@tonic-gate 		if (!PTE_ISVALID(pte))
25680Sstevel@tonic-gate 			return (PFN_INVALID);
25690Sstevel@tonic-gate 		/*LINTED [use of constant 0 causes a silly lint warning] */
25700Sstevel@tonic-gate 		return (PTE2PFN(pte, 0));
25710Sstevel@tonic-gate 	}
25720Sstevel@tonic-gate 
25730Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, &entry);
25740Sstevel@tonic-gate 	if (ht == NULL)
25750Sstevel@tonic-gate 		return (PFN_INVALID);
25760Sstevel@tonic-gate 	ASSERT(vaddr >= ht->ht_vaddr);
25770Sstevel@tonic-gate 	ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
25780Sstevel@tonic-gate 	pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
25790Sstevel@tonic-gate 	if (ht->ht_level > 0)
25800Sstevel@tonic-gate 		pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
25810Sstevel@tonic-gate 	htable_release(ht);
25820Sstevel@tonic-gate 	return (pfn);
25830Sstevel@tonic-gate }
25840Sstevel@tonic-gate 
25850Sstevel@tonic-gate /*
25860Sstevel@tonic-gate  * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
25870Sstevel@tonic-gate  * Use hat_getpfnum(kas.a_hat, ...) instead.
25880Sstevel@tonic-gate  *
25890Sstevel@tonic-gate  * We'd like to return PFN_INVALID if the mappings have underlying page_t's
25900Sstevel@tonic-gate  * but can't right now due to the fact that some software has grown to use
25910Sstevel@tonic-gate  * this interface incorrectly. So for now when the interface is misused,
25920Sstevel@tonic-gate  * return a warning to the user that in the future it won't work in the
25930Sstevel@tonic-gate  * way they're abusing it, and carry on.
25940Sstevel@tonic-gate  *
25950Sstevel@tonic-gate  * Note that hat_getkpfnum() is never supported on amd64.
25960Sstevel@tonic-gate  */
25970Sstevel@tonic-gate #if !defined(__amd64)
25980Sstevel@tonic-gate pfn_t
25990Sstevel@tonic-gate hat_getkpfnum(caddr_t addr)
26000Sstevel@tonic-gate {
26010Sstevel@tonic-gate 	pfn_t	pfn;
26020Sstevel@tonic-gate 	int badcaller = 0;
26030Sstevel@tonic-gate 
26040Sstevel@tonic-gate 
26050Sstevel@tonic-gate 	if (khat_running == 0)
26060Sstevel@tonic-gate 		panic("hat_getkpfnum(): called too early\n");
26070Sstevel@tonic-gate 	if ((uintptr_t)addr < kernelbase)
26080Sstevel@tonic-gate 		return (PFN_INVALID);
26090Sstevel@tonic-gate 
26100Sstevel@tonic-gate 
26110Sstevel@tonic-gate 	if (segkpm && IS_KPM_ADDR(addr)) {
26120Sstevel@tonic-gate 		badcaller = 1;
26130Sstevel@tonic-gate 		pfn = hat_kpm_va2pfn(addr);
26140Sstevel@tonic-gate 	} else {
26150Sstevel@tonic-gate 		pfn = hat_getpfnum(kas.a_hat, addr);
26160Sstevel@tonic-gate 		badcaller = pf_is_memory(pfn);
26170Sstevel@tonic-gate 	}
26180Sstevel@tonic-gate 
26190Sstevel@tonic-gate 	if (badcaller)
26200Sstevel@tonic-gate 		hat_getkpfnum_badcall(caller());
26210Sstevel@tonic-gate 	return (pfn);
26220Sstevel@tonic-gate }
26230Sstevel@tonic-gate #endif /* __amd64 */
26240Sstevel@tonic-gate 
26250Sstevel@tonic-gate /*
26260Sstevel@tonic-gate  * int hat_probe(hat, addr)
26270Sstevel@tonic-gate  *	return 0 if no valid mapping is present.  Faster version
26280Sstevel@tonic-gate  *	of hat_getattr in certain architectures.
26290Sstevel@tonic-gate  */
26300Sstevel@tonic-gate int
26310Sstevel@tonic-gate hat_probe(hat_t *hat, caddr_t addr)
26320Sstevel@tonic-gate {
26330Sstevel@tonic-gate 	uintptr_t	vaddr = ALIGN2PAGE(addr);
26340Sstevel@tonic-gate 	uint_t		entry;
26350Sstevel@tonic-gate 	htable_t	*ht;
26360Sstevel@tonic-gate 	pgcnt_t		pg_off;
26370Sstevel@tonic-gate 
26380Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat || vaddr < kernelbase);
26390Sstevel@tonic-gate 	ASSERT(hat == kas.a_hat ||
26400Sstevel@tonic-gate 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
26410Sstevel@tonic-gate 	if (IN_VA_HOLE(vaddr))
26420Sstevel@tonic-gate 		return (0);
26430Sstevel@tonic-gate 
26440Sstevel@tonic-gate 	/*
26450Sstevel@tonic-gate 	 * Most common use of hat_probe is from segmap. We special case it
26460Sstevel@tonic-gate 	 * for performance.
26470Sstevel@tonic-gate 	 */
26480Sstevel@tonic-gate 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
26490Sstevel@tonic-gate 		pg_off = mmu_btop(vaddr - mmu.kmap_addr);
26500Sstevel@tonic-gate 		if (mmu.pae_hat)
26510Sstevel@tonic-gate 			return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
26520Sstevel@tonic-gate 		else
26530Sstevel@tonic-gate 			return (PTE_ISVALID(
26540Sstevel@tonic-gate 			    ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
26550Sstevel@tonic-gate 	}
26560Sstevel@tonic-gate 
26570Sstevel@tonic-gate 	ht = htable_getpage(hat, vaddr, &entry);
26580Sstevel@tonic-gate 	if (ht == NULL)
26590Sstevel@tonic-gate 		return (0);
26600Sstevel@tonic-gate 	htable_release(ht);
26610Sstevel@tonic-gate 	return (1);
26620Sstevel@tonic-gate }
26630Sstevel@tonic-gate 
26640Sstevel@tonic-gate /*
26650Sstevel@tonic-gate  * Simple implementation of ISM. hat_share() is just like hat_memload_array(),
26660Sstevel@tonic-gate  * except that we use the ism_hat's existing mappings to determine the pages
26670Sstevel@tonic-gate  * and protections to use for this hat. In case we find a properly aligned
26680Sstevel@tonic-gate  * and sized pagetable of 4K mappings, we will attempt to share the pagetable
26690Sstevel@tonic-gate  * itself.
26700Sstevel@tonic-gate  */
26710Sstevel@tonic-gate /*ARGSUSED*/
26720Sstevel@tonic-gate int
26730Sstevel@tonic-gate hat_share(
26740Sstevel@tonic-gate 	hat_t		*hat,
26750Sstevel@tonic-gate 	caddr_t		addr,
26760Sstevel@tonic-gate 	hat_t		*ism_hat,
26770Sstevel@tonic-gate 	caddr_t		src_addr,
26780Sstevel@tonic-gate 	size_t		len,	/* almost useless value, see below.. */
26790Sstevel@tonic-gate 	uint_t		ismszc)
26800Sstevel@tonic-gate {
26810Sstevel@tonic-gate 	uintptr_t	vaddr_start = (uintptr_t)addr;
26820Sstevel@tonic-gate 	uintptr_t	vaddr;
26830Sstevel@tonic-gate 	uintptr_t	pt_vaddr;
26840Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr_start + len;
26850Sstevel@tonic-gate 	uintptr_t	ism_addr_start = (uintptr_t)src_addr;
26860Sstevel@tonic-gate 	uintptr_t	ism_addr = ism_addr_start;
26870Sstevel@tonic-gate 	uintptr_t	e_ism_addr = ism_addr + len;
26880Sstevel@tonic-gate 	htable_t	*ism_ht = NULL;
26890Sstevel@tonic-gate 	htable_t	*ht;
26900Sstevel@tonic-gate 	x86pte_t	pte;
26910Sstevel@tonic-gate 	page_t		*pp;
26920Sstevel@tonic-gate 	pfn_t		pfn;
26930Sstevel@tonic-gate 	level_t		l;
26940Sstevel@tonic-gate 	pgcnt_t		pgcnt;
26950Sstevel@tonic-gate 	uint_t		prot;
26960Sstevel@tonic-gate 	uint_t		valid_cnt;
26970Sstevel@tonic-gate 
26980Sstevel@tonic-gate 	/*
26990Sstevel@tonic-gate 	 * We might be asked to share an empty DISM hat by as_dup()
27000Sstevel@tonic-gate 	 */
27010Sstevel@tonic-gate 	ASSERT(hat != kas.a_hat);
27020Sstevel@tonic-gate 	ASSERT(eaddr <= kernelbase);
27030Sstevel@tonic-gate 	if (!(ism_hat->hat_flags & HAT_SHARED)) {
27040Sstevel@tonic-gate 		ASSERT(hat_get_mapped_size(ism_hat) == 0);
27050Sstevel@tonic-gate 		return (0);
27060Sstevel@tonic-gate 	}
27070Sstevel@tonic-gate 
27080Sstevel@tonic-gate 	/*
27090Sstevel@tonic-gate 	 * The SPT segment driver often passes us a size larger than there are
27100Sstevel@tonic-gate 	 * valid mappings. That's because it rounds the segment size up to a
27110Sstevel@tonic-gate 	 * large pagesize, even if the actual memory mapped by ism_hat is less.
27120Sstevel@tonic-gate 	 */
27130Sstevel@tonic-gate 	HATIN(hat_share, hat, addr, len);
27140Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr_start));
27150Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(ism_addr_start));
27160Sstevel@tonic-gate 	ASSERT(ism_hat->hat_flags & HAT_SHARED);
27170Sstevel@tonic-gate 	while (ism_addr < e_ism_addr) {
27180Sstevel@tonic-gate 		/*
27190Sstevel@tonic-gate 		 * use htable_walk to get the next valid ISM mapping
27200Sstevel@tonic-gate 		 */
27210Sstevel@tonic-gate 		pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
27220Sstevel@tonic-gate 		if (ism_ht == NULL)
27230Sstevel@tonic-gate 			break;
27240Sstevel@tonic-gate 
27250Sstevel@tonic-gate 		/*
27260Sstevel@tonic-gate 		 * Find the largest page size we can use, based on the
27270Sstevel@tonic-gate 		 * ISM mapping size, our address alignment and the remaining
27280Sstevel@tonic-gate 		 * map length.
27290Sstevel@tonic-gate 		 */
27300Sstevel@tonic-gate 		vaddr = vaddr_start + (ism_addr - ism_addr_start);
27310Sstevel@tonic-gate 		for (l = ism_ht->ht_level; l > 0; --l) {
27320Sstevel@tonic-gate 			if (LEVEL_SIZE(l) <= eaddr - vaddr &&
27330Sstevel@tonic-gate 			    (vaddr & LEVEL_OFFSET(l)) == 0)
27340Sstevel@tonic-gate 				break;
27350Sstevel@tonic-gate 		}
27360Sstevel@tonic-gate 
27370Sstevel@tonic-gate 		/*
27380Sstevel@tonic-gate 		 * attempt to share the pagetable
27390Sstevel@tonic-gate 		 *
27400Sstevel@tonic-gate 		 * - only 4K pagetables are shared (ie. level == 0)
27410Sstevel@tonic-gate 		 * - the hat_share() length must cover the whole pagetable
27420Sstevel@tonic-gate 		 * - the shared address must align at level 1
27430Sstevel@tonic-gate 		 * - a shared PTE for this address already exists OR
27440Sstevel@tonic-gate 		 * - no page table for this address exists yet
27450Sstevel@tonic-gate 		 */
27460Sstevel@tonic-gate 		pt_vaddr =
27470Sstevel@tonic-gate 		    vaddr_start + (ism_ht->ht_vaddr - ism_addr_start);
27480Sstevel@tonic-gate 		if (ism_ht->ht_level == 0 &&
27490Sstevel@tonic-gate 		    ism_ht->ht_vaddr + LEVEL_SIZE(1) <= e_ism_addr &&
27500Sstevel@tonic-gate 		    (pt_vaddr & LEVEL_OFFSET(1)) == 0) {
27510Sstevel@tonic-gate 
27520Sstevel@tonic-gate 			ht = htable_lookup(hat, pt_vaddr, 0);
27530Sstevel@tonic-gate 			if (ht == NULL)
27540Sstevel@tonic-gate 				ht = htable_create(hat, pt_vaddr, 0, ism_ht);
27550Sstevel@tonic-gate 
27560Sstevel@tonic-gate 			if (ht->ht_level > 0 ||
27570Sstevel@tonic-gate 			    !(ht->ht_flags & HTABLE_SHARED_PFN)) {
27580Sstevel@tonic-gate 
27590Sstevel@tonic-gate 				htable_release(ht);
27600Sstevel@tonic-gate 
27610Sstevel@tonic-gate 			} else {
27620Sstevel@tonic-gate 
27630Sstevel@tonic-gate 				/*
27640Sstevel@tonic-gate 				 * share the page table
27650Sstevel@tonic-gate 				 */
27660Sstevel@tonic-gate 				ASSERT(ht->ht_level == 0);
27670Sstevel@tonic-gate 				ASSERT(ht->ht_shares == ism_ht);
27680Sstevel@tonic-gate 				valid_cnt = ism_ht->ht_valid_cnt;
27690Sstevel@tonic-gate 				atomic_add_long(&hat->hat_pages_mapped[0],
27700Sstevel@tonic-gate 				    valid_cnt - ht->ht_valid_cnt);
27710Sstevel@tonic-gate 				ht->ht_valid_cnt = valid_cnt;
27720Sstevel@tonic-gate 				htable_release(ht);
27730Sstevel@tonic-gate 				ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(1);
27740Sstevel@tonic-gate 				htable_release(ism_ht);
27750Sstevel@tonic-gate 				ism_ht = NULL;
27760Sstevel@tonic-gate 				continue;
27770Sstevel@tonic-gate 			}
27780Sstevel@tonic-gate 		}
27790Sstevel@tonic-gate 
27800Sstevel@tonic-gate 		/*
27810Sstevel@tonic-gate 		 * Unable to share the page table. Instead we will
27820Sstevel@tonic-gate 		 * create new mappings from the values in the ISM mappings.
27830Sstevel@tonic-gate 		 *
27840Sstevel@tonic-gate 		 * The ISM mapping might be larger than the share area,
27850Sstevel@tonic-gate 		 * be careful to trunctate it if needed.
27860Sstevel@tonic-gate 		 */
27870Sstevel@tonic-gate 		if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
27880Sstevel@tonic-gate 			pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
27890Sstevel@tonic-gate 		} else {
27900Sstevel@tonic-gate 			pgcnt = mmu_btop(eaddr - vaddr);
27910Sstevel@tonic-gate 			l = 0;
27920Sstevel@tonic-gate 		}
27930Sstevel@tonic-gate 
27940Sstevel@tonic-gate 		pfn = PTE2PFN(pte, ism_ht->ht_level);
27950Sstevel@tonic-gate 		ASSERT(pfn != PFN_INVALID);
27960Sstevel@tonic-gate 		while (pgcnt > 0) {
27970Sstevel@tonic-gate 			/*
27980Sstevel@tonic-gate 			 * Make a new pte for the PFN for this level.
27990Sstevel@tonic-gate 			 * Copy protections for the pte from the ISM pte.
28000Sstevel@tonic-gate 			 */
28010Sstevel@tonic-gate 			pp = page_numtopp_nolock(pfn);
28020Sstevel@tonic-gate 			ASSERT(pp != NULL);
28030Sstevel@tonic-gate 
28040Sstevel@tonic-gate 			prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
28050Sstevel@tonic-gate 			if (PTE_GET(pte, PT_WRITABLE))
28060Sstevel@tonic-gate 				prot |= PROT_WRITE;
28070Sstevel@tonic-gate 			if (!PTE_GET(pte, PT_NX))
28080Sstevel@tonic-gate 				prot |= PROT_EXEC;
28090Sstevel@tonic-gate 
28100Sstevel@tonic-gate 			/*
28110Sstevel@tonic-gate 			 * XX64 -- can shm ever be written to swap?
28120Sstevel@tonic-gate 			 * if not we could use HAT_NOSYNC here.
28130Sstevel@tonic-gate 			 */
28140Sstevel@tonic-gate 			hati_load_common(hat, vaddr, pp, prot,
28150Sstevel@tonic-gate 			    HAT_LOAD, l, pfn);
28160Sstevel@tonic-gate 
28170Sstevel@tonic-gate 			vaddr += LEVEL_SIZE(l);
28180Sstevel@tonic-gate 			ism_addr += LEVEL_SIZE(l);
28190Sstevel@tonic-gate 			pfn += mmu_btop(LEVEL_SIZE(l));
28200Sstevel@tonic-gate 			pgcnt -= mmu_btop(LEVEL_SIZE(l));
28210Sstevel@tonic-gate 		}
28220Sstevel@tonic-gate 	}
28230Sstevel@tonic-gate 	if (ism_ht != NULL)
28240Sstevel@tonic-gate 		htable_release(ism_ht);
28250Sstevel@tonic-gate 
28260Sstevel@tonic-gate 	HATOUT(hat_share, hat, addr);
28270Sstevel@tonic-gate 	return (0);
28280Sstevel@tonic-gate }
28290Sstevel@tonic-gate 
28300Sstevel@tonic-gate 
28310Sstevel@tonic-gate /*
28320Sstevel@tonic-gate  * hat_unshare() is similar to hat_unload_callback(), but
28330Sstevel@tonic-gate  * we have to look for empty shared pagetables. Note that
28340Sstevel@tonic-gate  * hat_unshare() is always invoked against an entire segment.
28350Sstevel@tonic-gate  */
28360Sstevel@tonic-gate /*ARGSUSED*/
28370Sstevel@tonic-gate void
28380Sstevel@tonic-gate hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
28390Sstevel@tonic-gate {
28400Sstevel@tonic-gate 	uintptr_t	vaddr = (uintptr_t)addr;
28410Sstevel@tonic-gate 	uintptr_t	eaddr = vaddr + len;
28420Sstevel@tonic-gate 	htable_t	*ht = NULL;
28430Sstevel@tonic-gate 	uint_t		need_demaps = 0;
28440Sstevel@tonic-gate 
28450Sstevel@tonic-gate 	ASSERT(hat != kas.a_hat);
28460Sstevel@tonic-gate 	ASSERT(eaddr <= kernelbase);
28470Sstevel@tonic-gate 	HATIN(hat_unshare, hat, addr, len);
28480Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(vaddr));
28490Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(eaddr));
28500Sstevel@tonic-gate 
28510Sstevel@tonic-gate 	/*
28520Sstevel@tonic-gate 	 * First go through and remove any shared pagetables.
28530Sstevel@tonic-gate 	 *
28540Sstevel@tonic-gate 	 * Note that it's ok to delay the demap until the entire range is
28550Sstevel@tonic-gate 	 * finished, because if hat_pageunload() were to unload a shared
28560Sstevel@tonic-gate 	 * pagetable page, its hat_demap() will do a global user TLB invalidate.
28570Sstevel@tonic-gate 	 */
28580Sstevel@tonic-gate 	while (vaddr < eaddr) {
28590Sstevel@tonic-gate 		ASSERT(!IN_VA_HOLE(vaddr));
28600Sstevel@tonic-gate 		/*
28610Sstevel@tonic-gate 		 * find the pagetable that would map the current address
28620Sstevel@tonic-gate 		 */
28630Sstevel@tonic-gate 		ht = htable_lookup(hat, vaddr, 0);
28640Sstevel@tonic-gate 		if (ht != NULL) {
28650Sstevel@tonic-gate 			if (ht->ht_flags & HTABLE_SHARED_PFN) {
28660Sstevel@tonic-gate 				/*
28670Sstevel@tonic-gate 				 * clear mapped pages count, set valid_cnt to 0
28680Sstevel@tonic-gate 				 * and let htable_release() finish the job
28690Sstevel@tonic-gate 				 */
28700Sstevel@tonic-gate 				atomic_add_long(&hat->hat_pages_mapped[0],
28710Sstevel@tonic-gate 				    -ht->ht_valid_cnt);
28720Sstevel@tonic-gate 				ht->ht_valid_cnt = 0;
28730Sstevel@tonic-gate 				need_demaps = 1;
28740Sstevel@tonic-gate 			}
28750Sstevel@tonic-gate 			htable_release(ht);
28760Sstevel@tonic-gate 		}
28770Sstevel@tonic-gate 		vaddr = (vaddr & LEVEL_MASK(1)) + LEVEL_SIZE(1);
28780Sstevel@tonic-gate 	}
28790Sstevel@tonic-gate 
28800Sstevel@tonic-gate 	/*
28810Sstevel@tonic-gate 	 * flush the TLBs - since we're probably dealing with MANY mappings
28820Sstevel@tonic-gate 	 * we do just one CR3 reload.
28830Sstevel@tonic-gate 	 */
28840Sstevel@tonic-gate 	if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
28850Sstevel@tonic-gate 		hat_demap(hat, DEMAP_ALL_ADDR);
28860Sstevel@tonic-gate 
28870Sstevel@tonic-gate 	/*
28880Sstevel@tonic-gate 	 * Now go back and clean up any unaligned mappings that
28890Sstevel@tonic-gate 	 * couldn't share pagetables.
28900Sstevel@tonic-gate 	 */
28910Sstevel@tonic-gate 	hat_unload(hat, addr, len, HAT_UNLOAD_UNMAP);
28920Sstevel@tonic-gate 
28930Sstevel@tonic-gate 	HATOUT(hat_unshare, hat, addr);
28940Sstevel@tonic-gate }
28950Sstevel@tonic-gate 
28960Sstevel@tonic-gate 
28970Sstevel@tonic-gate /*
28980Sstevel@tonic-gate  * hat_reserve() does nothing
28990Sstevel@tonic-gate  */
29000Sstevel@tonic-gate /*ARGSUSED*/
29010Sstevel@tonic-gate void
29020Sstevel@tonic-gate hat_reserve(struct as *as, caddr_t addr, size_t len)
29030Sstevel@tonic-gate {
29040Sstevel@tonic-gate }
29050Sstevel@tonic-gate 
29060Sstevel@tonic-gate 
29070Sstevel@tonic-gate /*
29080Sstevel@tonic-gate  * Called when all mappings to a page should have write permission removed.
29090Sstevel@tonic-gate  * Mostly stolem from hat_pagesync()
29100Sstevel@tonic-gate  */
29110Sstevel@tonic-gate static void
29120Sstevel@tonic-gate hati_page_clrwrt(struct page *pp)
29130Sstevel@tonic-gate {
29140Sstevel@tonic-gate 	hment_t		*hm = NULL;
29150Sstevel@tonic-gate 	htable_t	*ht;
29160Sstevel@tonic-gate 	uint_t		entry;
29170Sstevel@tonic-gate 	x86pte_t	old;
29180Sstevel@tonic-gate 	x86pte_t	new;
29190Sstevel@tonic-gate 	uint_t		pszc = 0;
29200Sstevel@tonic-gate 
29210Sstevel@tonic-gate next_size:
29220Sstevel@tonic-gate 	/*
29230Sstevel@tonic-gate 	 * walk thru the mapping list clearing write permission
29240Sstevel@tonic-gate 	 */
29250Sstevel@tonic-gate 	x86_hm_enter(pp);
29260Sstevel@tonic-gate 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
29270Sstevel@tonic-gate 		if (ht->ht_level < pszc)
29280Sstevel@tonic-gate 			continue;
29290Sstevel@tonic-gate 		old = x86pte_get(ht, entry);
29300Sstevel@tonic-gate 
29310Sstevel@tonic-gate 		for (;;) {
29320Sstevel@tonic-gate 			/*
29330Sstevel@tonic-gate 			 * Is this mapping of interest?
29340Sstevel@tonic-gate 			 */
29350Sstevel@tonic-gate 			if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
29360Sstevel@tonic-gate 			    PTE_GET(old, PT_WRITABLE) == 0)
29370Sstevel@tonic-gate 				break;
29380Sstevel@tonic-gate 
29390Sstevel@tonic-gate 			/*
29400Sstevel@tonic-gate 			 * Clear ref/mod writable bits. This requires cross
29410Sstevel@tonic-gate 			 * calls to ensure any executing TLBs see cleared bits.
29420Sstevel@tonic-gate 			 */
29430Sstevel@tonic-gate 			new = old;
29440Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
29450Sstevel@tonic-gate 			old = hati_update_pte(ht, entry, old, new);
29460Sstevel@tonic-gate 			if (old != 0)
29470Sstevel@tonic-gate 				continue;
29480Sstevel@tonic-gate 
29490Sstevel@tonic-gate 			break;
29500Sstevel@tonic-gate 		}
29510Sstevel@tonic-gate 	}
29520Sstevel@tonic-gate 	x86_hm_exit(pp);
29530Sstevel@tonic-gate 	while (pszc < pp->p_szc) {
29540Sstevel@tonic-gate 		page_t *tpp;
29550Sstevel@tonic-gate 		pszc++;
29560Sstevel@tonic-gate 		tpp = PP_GROUPLEADER(pp, pszc);
29570Sstevel@tonic-gate 		if (pp != tpp) {
29580Sstevel@tonic-gate 			pp = tpp;
29590Sstevel@tonic-gate 			goto next_size;
29600Sstevel@tonic-gate 		}
29610Sstevel@tonic-gate 	}
29620Sstevel@tonic-gate }
29630Sstevel@tonic-gate 
29640Sstevel@tonic-gate /*
29650Sstevel@tonic-gate  * void hat_page_setattr(pp, flag)
29660Sstevel@tonic-gate  * void hat_page_clrattr(pp, flag)
29670Sstevel@tonic-gate  *	used to set/clr ref/mod bits.
29680Sstevel@tonic-gate  */
29690Sstevel@tonic-gate void
29700Sstevel@tonic-gate hat_page_setattr(struct page *pp, uint_t flag)
29710Sstevel@tonic-gate {
29720Sstevel@tonic-gate 	vnode_t		*vp = pp->p_vnode;
29730Sstevel@tonic-gate 	kmutex_t	*vphm = NULL;
29740Sstevel@tonic-gate 	page_t		**listp;
29750Sstevel@tonic-gate 
29760Sstevel@tonic-gate 	if (PP_GETRM(pp, flag) == flag)
29770Sstevel@tonic-gate 		return;
29780Sstevel@tonic-gate 
29790Sstevel@tonic-gate 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
29800Sstevel@tonic-gate 		vphm = page_vnode_mutex(vp);
29810Sstevel@tonic-gate 		mutex_enter(vphm);
29820Sstevel@tonic-gate 	}
29830Sstevel@tonic-gate 
29840Sstevel@tonic-gate 	PP_SETRM(pp, flag);
29850Sstevel@tonic-gate 
29860Sstevel@tonic-gate 	if (vphm != NULL) {
29870Sstevel@tonic-gate 
29880Sstevel@tonic-gate 		/*
29890Sstevel@tonic-gate 		 * Some File Systems examine v_pages for NULL w/o
29900Sstevel@tonic-gate 		 * grabbing the vphm mutex. Must not let it become NULL when
29910Sstevel@tonic-gate 		 * pp is the only page on the list.
29920Sstevel@tonic-gate 		 */
29930Sstevel@tonic-gate 		if (pp->p_vpnext != pp) {
29940Sstevel@tonic-gate 			page_vpsub(&vp->v_pages, pp);
29950Sstevel@tonic-gate 			if (vp->v_pages != NULL)
29960Sstevel@tonic-gate 				listp = &vp->v_pages->p_vpprev->p_vpnext;
29970Sstevel@tonic-gate 			else
29980Sstevel@tonic-gate 				listp = &vp->v_pages;
29990Sstevel@tonic-gate 			page_vpadd(listp, pp);
30000Sstevel@tonic-gate 		}
30010Sstevel@tonic-gate 		mutex_exit(vphm);
30020Sstevel@tonic-gate 	}
30030Sstevel@tonic-gate }
30040Sstevel@tonic-gate 
30050Sstevel@tonic-gate void
30060Sstevel@tonic-gate hat_page_clrattr(struct page *pp, uint_t flag)
30070Sstevel@tonic-gate {
30080Sstevel@tonic-gate 	vnode_t		*vp = pp->p_vnode;
30090Sstevel@tonic-gate 	kmutex_t	*vphm = NULL;
30100Sstevel@tonic-gate 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
30110Sstevel@tonic-gate 
30120Sstevel@tonic-gate 	/*
30130Sstevel@tonic-gate 	 * for vnode with a sorted v_pages list, we need to change
30140Sstevel@tonic-gate 	 * the attributes and the v_pages list together under page_vnode_mutex.
30150Sstevel@tonic-gate 	 */
30160Sstevel@tonic-gate 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
30170Sstevel@tonic-gate 		vphm = page_vnode_mutex(vp);
30180Sstevel@tonic-gate 		mutex_enter(vphm);
30190Sstevel@tonic-gate 	}
30200Sstevel@tonic-gate 
30210Sstevel@tonic-gate 	PP_CLRRM(pp, flag);
30220Sstevel@tonic-gate 
30230Sstevel@tonic-gate 	if (vphm != NULL) {
30240Sstevel@tonic-gate 
30250Sstevel@tonic-gate 		/*
30260Sstevel@tonic-gate 		 * Some File Systems examine v_pages for NULL w/o
30270Sstevel@tonic-gate 		 * grabbing the vphm mutex. Must not let it become NULL when
30280Sstevel@tonic-gate 		 * pp is the only page on the list.
30290Sstevel@tonic-gate 		 */
30300Sstevel@tonic-gate 		if (pp->p_vpnext != pp) {
30310Sstevel@tonic-gate 			page_vpsub(&vp->v_pages, pp);
30320Sstevel@tonic-gate 			page_vpadd(&vp->v_pages, pp);
30330Sstevel@tonic-gate 		}
30340Sstevel@tonic-gate 		mutex_exit(vphm);
30350Sstevel@tonic-gate 
30360Sstevel@tonic-gate 		/*
30370Sstevel@tonic-gate 		 * VMODSORT works by removing write permissions and getting
30380Sstevel@tonic-gate 		 * a fault when a page is made dirty. At this point
30390Sstevel@tonic-gate 		 * we need to remove write permission from all mappings
30400Sstevel@tonic-gate 		 * to this page.
30410Sstevel@tonic-gate 		 */
30420Sstevel@tonic-gate 		hati_page_clrwrt(pp);
30430Sstevel@tonic-gate 	}
30440Sstevel@tonic-gate }
30450Sstevel@tonic-gate 
30460Sstevel@tonic-gate /*
30470Sstevel@tonic-gate  *	If flag is specified, returns 0 if attribute is disabled
30480Sstevel@tonic-gate  *	and non zero if enabled.  If flag specifes multiple attributs
30490Sstevel@tonic-gate  *	then returns 0 if ALL atriibutes are disabled.  This is an advisory
30500Sstevel@tonic-gate  *	call.
30510Sstevel@tonic-gate  */
30520Sstevel@tonic-gate uint_t
30530Sstevel@tonic-gate hat_page_getattr(struct page *pp, uint_t flag)
30540Sstevel@tonic-gate {
30550Sstevel@tonic-gate 	return (PP_GETRM(pp, flag));
30560Sstevel@tonic-gate }
30570Sstevel@tonic-gate 
30580Sstevel@tonic-gate 
30590Sstevel@tonic-gate /*
30600Sstevel@tonic-gate  * common code used by hat_pageunload() and hment_steal()
30610Sstevel@tonic-gate  */
30620Sstevel@tonic-gate hment_t *
30630Sstevel@tonic-gate hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
30640Sstevel@tonic-gate {
30650Sstevel@tonic-gate 	x86pte_t old_pte;
30660Sstevel@tonic-gate 	pfn_t pfn = pp->p_pagenum;
30670Sstevel@tonic-gate 	hment_t *hm;
30680Sstevel@tonic-gate 
30690Sstevel@tonic-gate 	/*
30700Sstevel@tonic-gate 	 * We need to acquire a hold on the htable in order to
30710Sstevel@tonic-gate 	 * do the invalidate. We know the htable must exist, since
30720Sstevel@tonic-gate 	 * unmap's don't release the htable until after removing any
30730Sstevel@tonic-gate 	 * hment. Having x86_hm_enter() keeps that from proceeding.
30740Sstevel@tonic-gate 	 */
30750Sstevel@tonic-gate 	htable_acquire(ht);
30760Sstevel@tonic-gate 
30770Sstevel@tonic-gate 	/*
30780Sstevel@tonic-gate 	 * Invalidate the PTE and remove the hment.
30790Sstevel@tonic-gate 	 */
30800Sstevel@tonic-gate 	old_pte = x86pte_invalidate_pfn(ht, entry, pfn, NULL);
308147Sjosephb 	if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
308247Sjosephb 		panic("x86pte_invalidate_pfn() failure found PTE = " FMT_PTE
308347Sjosephb 		    " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
308447Sjosephb 		    old_pte, pfn, (uintptr_t)ht, entry);
308547Sjosephb 	}
30860Sstevel@tonic-gate 
30870Sstevel@tonic-gate 	/*
30880Sstevel@tonic-gate 	 * Clean up all the htable information for this mapping
30890Sstevel@tonic-gate 	 */
30900Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
30910Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
30920Sstevel@tonic-gate 	PGCNT_DEC(ht->ht_hat, ht->ht_level);
30930Sstevel@tonic-gate 
30940Sstevel@tonic-gate 	/*
30950Sstevel@tonic-gate 	 * sync ref/mod bits to the page_t
30960Sstevel@tonic-gate 	 */
30970Sstevel@tonic-gate 	if (PTE_GET(old_pte, PT_NOSYNC) == 0)
30980Sstevel@tonic-gate 		hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
30990Sstevel@tonic-gate 
31000Sstevel@tonic-gate 	/*
31010Sstevel@tonic-gate 	 * Remove the mapping list entry for this page.
31020Sstevel@tonic-gate 	 */
31030Sstevel@tonic-gate 	hm = hment_remove(pp, ht, entry);
31040Sstevel@tonic-gate 
31050Sstevel@tonic-gate 	/*
31060Sstevel@tonic-gate 	 * drop the mapping list lock so that we might free the
31070Sstevel@tonic-gate 	 * hment and htable.
31080Sstevel@tonic-gate 	 */
31090Sstevel@tonic-gate 	x86_hm_exit(pp);
31100Sstevel@tonic-gate 	htable_release(ht);
31110Sstevel@tonic-gate 	return (hm);
31120Sstevel@tonic-gate }
31130Sstevel@tonic-gate 
31140Sstevel@tonic-gate /*
31150Sstevel@tonic-gate  * Unload all translations to a page. If the page is a subpage of a large
31160Sstevel@tonic-gate  * page, the large page mappings are also removed.
31170Sstevel@tonic-gate  *
31180Sstevel@tonic-gate  * The forceflags are unused.
31190Sstevel@tonic-gate  */
31200Sstevel@tonic-gate 
31210Sstevel@tonic-gate /*ARGSUSED*/
31220Sstevel@tonic-gate static int
31230Sstevel@tonic-gate hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
31240Sstevel@tonic-gate {
31250Sstevel@tonic-gate 	page_t		*cur_pp = pp;
31260Sstevel@tonic-gate 	hment_t		*hm;
31270Sstevel@tonic-gate 	hment_t		*prev;
31280Sstevel@tonic-gate 	htable_t	*ht;
31290Sstevel@tonic-gate 	uint_t		entry;
31300Sstevel@tonic-gate 	level_t		level;
31310Sstevel@tonic-gate 
31320Sstevel@tonic-gate 	/*
31330Sstevel@tonic-gate 	 * The loop with next_size handles pages with multiple pagesize mappings
31340Sstevel@tonic-gate 	 */
31350Sstevel@tonic-gate next_size:
31360Sstevel@tonic-gate 	for (;;) {
31370Sstevel@tonic-gate 
31380Sstevel@tonic-gate 		/*
31390Sstevel@tonic-gate 		 * Get a mapping list entry
31400Sstevel@tonic-gate 		 */
31410Sstevel@tonic-gate 		x86_hm_enter(cur_pp);
31420Sstevel@tonic-gate 		for (prev = NULL; ; prev = hm) {
31430Sstevel@tonic-gate 			hm = hment_walk(cur_pp, &ht, &entry, prev);
31440Sstevel@tonic-gate 			if (hm == NULL) {
31450Sstevel@tonic-gate 				x86_hm_exit(cur_pp);
31460Sstevel@tonic-gate 
31470Sstevel@tonic-gate 				/*
31480Sstevel@tonic-gate 				 * If not part of a larger page, we're done.
31490Sstevel@tonic-gate 				 */
31500Sstevel@tonic-gate 				if (cur_pp->p_szc <= pg_szcd)
31510Sstevel@tonic-gate 					return (0);
31520Sstevel@tonic-gate 
31530Sstevel@tonic-gate 				/*
31540Sstevel@tonic-gate 				 * Else check the next larger page size.
31550Sstevel@tonic-gate 				 * hat_page_demote() may decrease p_szc
31560Sstevel@tonic-gate 				 * but that's ok we'll just take an extra
31570Sstevel@tonic-gate 				 * trip discover there're no larger mappings
31580Sstevel@tonic-gate 				 * and return.
31590Sstevel@tonic-gate 				 */
31600Sstevel@tonic-gate 				++pg_szcd;
31610Sstevel@tonic-gate 				cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
31620Sstevel@tonic-gate 				goto next_size;
31630Sstevel@tonic-gate 			}
31640Sstevel@tonic-gate 
31650Sstevel@tonic-gate 			/*
31660Sstevel@tonic-gate 			 * If this mapping size matches, remove it.
31670Sstevel@tonic-gate 			 */
31680Sstevel@tonic-gate 			level = ht->ht_level;
31690Sstevel@tonic-gate 			if (level == pg_szcd)
31700Sstevel@tonic-gate 				break;
31710Sstevel@tonic-gate 		}
31720Sstevel@tonic-gate 
31730Sstevel@tonic-gate 		/*
31740Sstevel@tonic-gate 		 * Remove the mapping list entry for this page.
31750Sstevel@tonic-gate 		 * Note this does the x86_hm_exit() for us.
31760Sstevel@tonic-gate 		 */
31770Sstevel@tonic-gate 		hm = hati_page_unmap(cur_pp, ht, entry);
31780Sstevel@tonic-gate 		if (hm != NULL)
31790Sstevel@tonic-gate 			hment_free(hm);
31800Sstevel@tonic-gate 	}
31810Sstevel@tonic-gate }
31820Sstevel@tonic-gate 
31830Sstevel@tonic-gate int
31840Sstevel@tonic-gate hat_pageunload(struct page *pp, uint_t forceflag)
31850Sstevel@tonic-gate {
31860Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp));
31870Sstevel@tonic-gate 	return (hati_pageunload(pp, 0, forceflag));
31880Sstevel@tonic-gate }
31890Sstevel@tonic-gate 
31900Sstevel@tonic-gate /*
31910Sstevel@tonic-gate  * Unload all large mappings to pp and reduce by 1 p_szc field of every large
31920Sstevel@tonic-gate  * page level that included pp.
31930Sstevel@tonic-gate  *
31940Sstevel@tonic-gate  * pp must be locked EXCL. Even though no other constituent pages are locked
31950Sstevel@tonic-gate  * it's legal to unload large mappings to pp because all constituent pages of
31960Sstevel@tonic-gate  * large locked mappings have to be locked SHARED.  therefore if we have EXCL
31970Sstevel@tonic-gate  * lock on one of constituent pages none of the large mappings to pp are
31980Sstevel@tonic-gate  * locked.
31990Sstevel@tonic-gate  *
32000Sstevel@tonic-gate  * Change (always decrease) p_szc field starting from the last constituent
32010Sstevel@tonic-gate  * page and ending with root constituent page so that root's pszc always shows
32020Sstevel@tonic-gate  * the area where hat_page_demote() may be active.
32030Sstevel@tonic-gate  *
32040Sstevel@tonic-gate  * This mechanism is only used for file system pages where it's not always
32050Sstevel@tonic-gate  * possible to get EXCL locks on all constituent pages to demote the size code
32060Sstevel@tonic-gate  * (as is done for anonymous or kernel large pages).
32070Sstevel@tonic-gate  */
32080Sstevel@tonic-gate void
32090Sstevel@tonic-gate hat_page_demote(page_t *pp)
32100Sstevel@tonic-gate {
32110Sstevel@tonic-gate 	uint_t		pszc;
32120Sstevel@tonic-gate 	uint_t		rszc;
32130Sstevel@tonic-gate 	uint_t		szc;
32140Sstevel@tonic-gate 	page_t		*rootpp;
32150Sstevel@tonic-gate 	page_t		*firstpp;
32160Sstevel@tonic-gate 	page_t		*lastpp;
32170Sstevel@tonic-gate 	pgcnt_t		pgcnt;
32180Sstevel@tonic-gate 
32190Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp));
32200Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
32210Sstevel@tonic-gate 	ASSERT(page_szc_lock_assert(pp));
32220Sstevel@tonic-gate 
32230Sstevel@tonic-gate 	if (pp->p_szc == 0)
32240Sstevel@tonic-gate 		return;
32250Sstevel@tonic-gate 
32260Sstevel@tonic-gate 	rootpp = PP_GROUPLEADER(pp, 1);
32270Sstevel@tonic-gate 	(void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
32280Sstevel@tonic-gate 
32290Sstevel@tonic-gate 	/*
32300Sstevel@tonic-gate 	 * all large mappings to pp are gone
32310Sstevel@tonic-gate 	 * and no new can be setup since pp is locked exclusively.
32320Sstevel@tonic-gate 	 *
32330Sstevel@tonic-gate 	 * Lock the root to make sure there's only one hat_page_demote()
32340Sstevel@tonic-gate 	 * outstanding within the area of this root's pszc.
32350Sstevel@tonic-gate 	 *
32360Sstevel@tonic-gate 	 * Second potential hat_page_demote() is already eliminated by upper
32370Sstevel@tonic-gate 	 * VM layer via page_szc_lock() but we don't rely on it and use our
32380Sstevel@tonic-gate 	 * own locking (so that upper layer locking can be changed without
32390Sstevel@tonic-gate 	 * assumptions that hat depends on upper layer VM to prevent multiple
32400Sstevel@tonic-gate 	 * hat_page_demote() to be issued simultaneously to the same large
32410Sstevel@tonic-gate 	 * page).
32420Sstevel@tonic-gate 	 */
32430Sstevel@tonic-gate again:
32440Sstevel@tonic-gate 	pszc = pp->p_szc;
32450Sstevel@tonic-gate 	if (pszc == 0)
32460Sstevel@tonic-gate 		return;
32470Sstevel@tonic-gate 	rootpp = PP_GROUPLEADER(pp, pszc);
32480Sstevel@tonic-gate 	x86_hm_enter(rootpp);
32490Sstevel@tonic-gate 	/*
32500Sstevel@tonic-gate 	 * If root's p_szc is different from pszc we raced with another
32510Sstevel@tonic-gate 	 * hat_page_demote().  Drop the lock and try to find the root again.
32520Sstevel@tonic-gate 	 * If root's p_szc is greater than pszc previous hat_page_demote() is
32530Sstevel@tonic-gate 	 * not done yet.  Take and release mlist lock of root's root to wait
32540Sstevel@tonic-gate 	 * for previous hat_page_demote() to complete.
32550Sstevel@tonic-gate 	 */
32560Sstevel@tonic-gate 	if ((rszc = rootpp->p_szc) != pszc) {
32570Sstevel@tonic-gate 		x86_hm_exit(rootpp);
32580Sstevel@tonic-gate 		if (rszc > pszc) {
32590Sstevel@tonic-gate 			/* p_szc of a locked non free page can't increase */
32600Sstevel@tonic-gate 			ASSERT(pp != rootpp);
32610Sstevel@tonic-gate 
32620Sstevel@tonic-gate 			rootpp = PP_GROUPLEADER(rootpp, rszc);
32630Sstevel@tonic-gate 			x86_hm_enter(rootpp);
32640Sstevel@tonic-gate 			x86_hm_exit(rootpp);
32650Sstevel@tonic-gate 		}
32660Sstevel@tonic-gate 		goto again;
32670Sstevel@tonic-gate 	}
32680Sstevel@tonic-gate 	ASSERT(pp->p_szc == pszc);
32690Sstevel@tonic-gate 
32700Sstevel@tonic-gate 	/*
32710Sstevel@tonic-gate 	 * Decrement by 1 p_szc of every constituent page of a region that
32720Sstevel@tonic-gate 	 * covered pp. For example if original szc is 3 it gets changed to 2
32730Sstevel@tonic-gate 	 * everywhere except in region 2 that covered pp. Region 2 that
32740Sstevel@tonic-gate 	 * covered pp gets demoted to 1 everywhere except in region 1 that
32750Sstevel@tonic-gate 	 * covered pp. The region 1 that covered pp is demoted to region
32760Sstevel@tonic-gate 	 * 0. It's done this way because from region 3 we removed level 3
32770Sstevel@tonic-gate 	 * mappings, from region 2 that covered pp we removed level 2 mappings
32780Sstevel@tonic-gate 	 * and from region 1 that covered pp we removed level 1 mappings.  All
32790Sstevel@tonic-gate 	 * changes are done from from high pfn's to low pfn's so that roots
32800Sstevel@tonic-gate 	 * are changed last allowing one to know the largest region where
32810Sstevel@tonic-gate 	 * hat_page_demote() is stil active by only looking at the root page.
32820Sstevel@tonic-gate 	 *
32830Sstevel@tonic-gate 	 * This algorithm is implemented in 2 while loops. First loop changes
32840Sstevel@tonic-gate 	 * p_szc of pages to the right of pp's level 1 region and second
32850Sstevel@tonic-gate 	 * loop changes p_szc of pages of level 1 region that covers pp
32860Sstevel@tonic-gate 	 * and all pages to the left of level 1 region that covers pp.
32870Sstevel@tonic-gate 	 * In the first loop p_szc keeps dropping with every iteration
32880Sstevel@tonic-gate 	 * and in the second loop it keeps increasing with every iteration.
32890Sstevel@tonic-gate 	 *
32900Sstevel@tonic-gate 	 * First loop description: Demote pages to the right of pp outside of
32910Sstevel@tonic-gate 	 * level 1 region that covers pp.  In every iteration of the while
32920Sstevel@tonic-gate 	 * loop below find the last page of szc region and the first page of
32930Sstevel@tonic-gate 	 * (szc - 1) region that is immediately to the right of (szc - 1)
32940Sstevel@tonic-gate 	 * region that covers pp.  From last such page to first such page
32950Sstevel@tonic-gate 	 * change every page's szc to szc - 1. Decrement szc and continue
32960Sstevel@tonic-gate 	 * looping until szc is 1. If pp belongs to the last (szc - 1) region
32970Sstevel@tonic-gate 	 * of szc region skip to the next iteration.
32980Sstevel@tonic-gate 	 */
32990Sstevel@tonic-gate 	szc = pszc;
33000Sstevel@tonic-gate 	while (szc > 1) {
33010Sstevel@tonic-gate 		lastpp = PP_GROUPLEADER(pp, szc);
33020Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(szc);
33030Sstevel@tonic-gate 		lastpp += pgcnt - 1;
33040Sstevel@tonic-gate 		firstpp = PP_GROUPLEADER(pp, (szc - 1));
33050Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(szc - 1);
33060Sstevel@tonic-gate 		if (lastpp - firstpp < pgcnt) {
33070Sstevel@tonic-gate 			szc--;
33080Sstevel@tonic-gate 			continue;
33090Sstevel@tonic-gate 		}
33100Sstevel@tonic-gate 		firstpp += pgcnt;
33110Sstevel@tonic-gate 		while (lastpp != firstpp) {
33120Sstevel@tonic-gate 			ASSERT(lastpp->p_szc == pszc);
33130Sstevel@tonic-gate 			lastpp->p_szc = szc - 1;
33140Sstevel@tonic-gate 			lastpp--;
33150Sstevel@tonic-gate 		}
33160Sstevel@tonic-gate 		firstpp->p_szc = szc - 1;
33170Sstevel@tonic-gate 		szc--;
33180Sstevel@tonic-gate 	}
33190Sstevel@tonic-gate 
33200Sstevel@tonic-gate 	/*
33210Sstevel@tonic-gate 	 * Second loop description:
33220Sstevel@tonic-gate 	 * First iteration changes p_szc to 0 of every
33230Sstevel@tonic-gate 	 * page of level 1 region that covers pp.
33240Sstevel@tonic-gate 	 * Subsequent iterations find last page of szc region
33250Sstevel@tonic-gate 	 * immediately to the left of szc region that covered pp
33260Sstevel@tonic-gate 	 * and first page of (szc + 1) region that covers pp.
33270Sstevel@tonic-gate 	 * From last to first page change p_szc of every page to szc.
33280Sstevel@tonic-gate 	 * Increment szc and continue looping until szc is pszc.
33290Sstevel@tonic-gate 	 * If pp belongs to the fist szc region of (szc + 1) region
33300Sstevel@tonic-gate 	 * skip to the next iteration.
33310Sstevel@tonic-gate 	 *
33320Sstevel@tonic-gate 	 */
33330Sstevel@tonic-gate 	szc = 0;
33340Sstevel@tonic-gate 	while (szc < pszc) {
33350Sstevel@tonic-gate 		firstpp = PP_GROUPLEADER(pp, (szc + 1));
33360Sstevel@tonic-gate 		if (szc == 0) {
33370Sstevel@tonic-gate 			pgcnt = page_get_pagecnt(1);
33380Sstevel@tonic-gate 			lastpp = firstpp + (pgcnt - 1);
33390Sstevel@tonic-gate 		} else {
33400Sstevel@tonic-gate 			lastpp = PP_GROUPLEADER(pp, szc);
33410Sstevel@tonic-gate 			if (firstpp == lastpp) {
33420Sstevel@tonic-gate 				szc++;
33430Sstevel@tonic-gate 				continue;
33440Sstevel@tonic-gate 			}
33450Sstevel@tonic-gate 			lastpp--;
33460Sstevel@tonic-gate 			pgcnt = page_get_pagecnt(szc);
33470Sstevel@tonic-gate 		}
33480Sstevel@tonic-gate 		while (lastpp != firstpp) {
33490Sstevel@tonic-gate 			ASSERT(lastpp->p_szc == pszc);
33500Sstevel@tonic-gate 			lastpp->p_szc = szc;
33510Sstevel@tonic-gate 			lastpp--;
33520Sstevel@tonic-gate 		}
33530Sstevel@tonic-gate 		firstpp->p_szc = szc;
33540Sstevel@tonic-gate 		if (firstpp == rootpp)
33550Sstevel@tonic-gate 			break;
33560Sstevel@tonic-gate 		szc++;
33570Sstevel@tonic-gate 	}
33580Sstevel@tonic-gate 	x86_hm_exit(rootpp);
33590Sstevel@tonic-gate }
33600Sstevel@tonic-gate 
33610Sstevel@tonic-gate /*
33620Sstevel@tonic-gate  * get hw stats from hardware into page struct and reset hw stats
33630Sstevel@tonic-gate  * returns attributes of page
33640Sstevel@tonic-gate  * Flags for hat_pagesync, hat_getstat, hat_sync
33650Sstevel@tonic-gate  *
33660Sstevel@tonic-gate  * define	HAT_SYNC_ZERORM		0x01
33670Sstevel@tonic-gate  *
33680Sstevel@tonic-gate  * Additional flags for hat_pagesync
33690Sstevel@tonic-gate  *
33700Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_REF	0x02
33710Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_MOD	0x04
33720Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_RM	0x06
33730Sstevel@tonic-gate  * define	HAT_SYNC_STOPON_SHARED	0x08
33740Sstevel@tonic-gate  */
33750Sstevel@tonic-gate uint_t
33760Sstevel@tonic-gate hat_pagesync(struct page *pp, uint_t flags)
33770Sstevel@tonic-gate {
33780Sstevel@tonic-gate 	hment_t		*hm = NULL;
33790Sstevel@tonic-gate 	htable_t	*ht;
33800Sstevel@tonic-gate 	uint_t		entry;
33810Sstevel@tonic-gate 	x86pte_t	old, save_old;
33820Sstevel@tonic-gate 	x86pte_t	new;
33830Sstevel@tonic-gate 	uchar_t		nrmbits = P_REF|P_MOD|P_RO;
33840Sstevel@tonic-gate 	extern ulong_t	po_share;
33850Sstevel@tonic-gate 	page_t		*save_pp = pp;
33860Sstevel@tonic-gate 	uint_t		pszc = 0;
33870Sstevel@tonic-gate 
33880Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(pp) || panicstr);
33890Sstevel@tonic-gate 
33900Sstevel@tonic-gate 	if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
33910Sstevel@tonic-gate 		return (pp->p_nrm & nrmbits);
33920Sstevel@tonic-gate 
33930Sstevel@tonic-gate 	if ((flags & HAT_SYNC_ZERORM) == 0) {
33940Sstevel@tonic-gate 
33950Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
33960Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
33970Sstevel@tonic-gate 
33980Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
33990Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
34000Sstevel@tonic-gate 
34010Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
34020Sstevel@tonic-gate 		    hat_page_getshare(pp) > po_share) {
34030Sstevel@tonic-gate 			if (PP_ISRO(pp))
34040Sstevel@tonic-gate 				PP_SETREF(pp);
34050Sstevel@tonic-gate 			return (pp->p_nrm & nrmbits);
34060Sstevel@tonic-gate 		}
34070Sstevel@tonic-gate 	}
34080Sstevel@tonic-gate 
34090Sstevel@tonic-gate next_size:
34100Sstevel@tonic-gate 	/*
34110Sstevel@tonic-gate 	 * walk thru the mapping list syncing (and clearing) ref/mod bits.
34120Sstevel@tonic-gate 	 */
34130Sstevel@tonic-gate 	x86_hm_enter(pp);
34140Sstevel@tonic-gate 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
34150Sstevel@tonic-gate 		if (ht->ht_level < pszc)
34160Sstevel@tonic-gate 			continue;
34170Sstevel@tonic-gate 		old = x86pte_get(ht, entry);
34180Sstevel@tonic-gate try_again:
34190Sstevel@tonic-gate 
34200Sstevel@tonic-gate 		ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
34210Sstevel@tonic-gate 
34220Sstevel@tonic-gate 		if (PTE_GET(old, PT_REF | PT_MOD) == 0)
34230Sstevel@tonic-gate 			continue;
34240Sstevel@tonic-gate 
34250Sstevel@tonic-gate 		save_old = old;
34260Sstevel@tonic-gate 		if ((flags & HAT_SYNC_ZERORM) != 0) {
34270Sstevel@tonic-gate 
34280Sstevel@tonic-gate 			/*
34290Sstevel@tonic-gate 			 * Need to clear ref or mod bits. Need to demap
34300Sstevel@tonic-gate 			 * to make sure any executing TLBs see cleared bits.
34310Sstevel@tonic-gate 			 */
34320Sstevel@tonic-gate 			new = old;
34330Sstevel@tonic-gate 			PTE_CLR(new, PT_REF | PT_MOD);
34340Sstevel@tonic-gate 			old = hati_update_pte(ht, entry, old, new);
34350Sstevel@tonic-gate 			if (old != 0)
34360Sstevel@tonic-gate 				goto try_again;
34370Sstevel@tonic-gate 
34380Sstevel@tonic-gate 			old = save_old;
34390Sstevel@tonic-gate 		}
34400Sstevel@tonic-gate 
34410Sstevel@tonic-gate 		/*
34420Sstevel@tonic-gate 		 * Sync the PTE
34430Sstevel@tonic-gate 		 */
34440Sstevel@tonic-gate 		if (!(flags & HAT_SYNC_ZERORM) && PTE_GET(old, PT_NOSYNC) == 0)
34450Sstevel@tonic-gate 			hati_sync_pte_to_page(pp, old, ht->ht_level);
34460Sstevel@tonic-gate 
34470Sstevel@tonic-gate 		/*
34480Sstevel@tonic-gate 		 * can stop short if we found a ref'd or mod'd page
34490Sstevel@tonic-gate 		 */
34500Sstevel@tonic-gate 		if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
34510Sstevel@tonic-gate 		    (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
34520Sstevel@tonic-gate 			x86_hm_exit(pp);
34530Sstevel@tonic-gate 			return (save_pp->p_nrm & nrmbits);
34540Sstevel@tonic-gate 		}
34550Sstevel@tonic-gate 	}
34560Sstevel@tonic-gate 	x86_hm_exit(pp);
34570Sstevel@tonic-gate 	while (pszc < pp->p_szc) {
34580Sstevel@tonic-gate 		page_t *tpp;
34590Sstevel@tonic-gate 		pszc++;
34600Sstevel@tonic-gate 		tpp = PP_GROUPLEADER(pp, pszc);
34610Sstevel@tonic-gate 		if (pp != tpp) {
34620Sstevel@tonic-gate 			pp = tpp;
34630Sstevel@tonic-gate 			goto next_size;
34640Sstevel@tonic-gate 		}
34650Sstevel@tonic-gate 	}
34660Sstevel@tonic-gate 	return (save_pp->p_nrm & nrmbits);
34670Sstevel@tonic-gate }
34680Sstevel@tonic-gate 
34690Sstevel@tonic-gate /*
34700Sstevel@tonic-gate  * returns approx number of mappings to this pp.  A return of 0 implies
34710Sstevel@tonic-gate  * there are no mappings to the page.
34720Sstevel@tonic-gate  */
34730Sstevel@tonic-gate ulong_t
34740Sstevel@tonic-gate hat_page_getshare(page_t *pp)
34750Sstevel@tonic-gate {
34760Sstevel@tonic-gate 	uint_t cnt;
34770Sstevel@tonic-gate 	cnt = hment_mapcnt(pp);
34780Sstevel@tonic-gate 	return (cnt);
34790Sstevel@tonic-gate }
34800Sstevel@tonic-gate 
34810Sstevel@tonic-gate /*
34820Sstevel@tonic-gate  * hat_softlock isn't supported anymore
34830Sstevel@tonic-gate  */
34840Sstevel@tonic-gate /*ARGSUSED*/
34850Sstevel@tonic-gate faultcode_t
34860Sstevel@tonic-gate hat_softlock(
34870Sstevel@tonic-gate 	hat_t *hat,
34880Sstevel@tonic-gate 	caddr_t addr,
34890Sstevel@tonic-gate 	size_t *len,
34900Sstevel@tonic-gate 	struct page **page_array,
34910Sstevel@tonic-gate 	uint_t flags)
34920Sstevel@tonic-gate {
34930Sstevel@tonic-gate 	return (FC_NOSUPPORT);
34940Sstevel@tonic-gate }
34950Sstevel@tonic-gate 
34960Sstevel@tonic-gate 
34970Sstevel@tonic-gate 
34980Sstevel@tonic-gate /*
34990Sstevel@tonic-gate  * Routine to expose supported HAT features to platform independent code.
35000Sstevel@tonic-gate  */
35010Sstevel@tonic-gate /*ARGSUSED*/
35020Sstevel@tonic-gate int
35030Sstevel@tonic-gate hat_supported(enum hat_features feature, void *arg)
35040Sstevel@tonic-gate {
35050Sstevel@tonic-gate 	switch (feature) {
35060Sstevel@tonic-gate 
35070Sstevel@tonic-gate 	case HAT_SHARED_PT:	/* this is really ISM */
35080Sstevel@tonic-gate 		return (1);
35090Sstevel@tonic-gate 
35100Sstevel@tonic-gate 	case HAT_DYNAMIC_ISM_UNMAP:
35110Sstevel@tonic-gate 		return (0);
35120Sstevel@tonic-gate 
35130Sstevel@tonic-gate 	case HAT_VMODSORT:
35140Sstevel@tonic-gate 		return (1);
35150Sstevel@tonic-gate 
35160Sstevel@tonic-gate 	default:
35170Sstevel@tonic-gate 		panic("hat_supported() - unknown feature");
35180Sstevel@tonic-gate 	}
35190Sstevel@tonic-gate 	return (0);
35200Sstevel@tonic-gate }
35210Sstevel@tonic-gate 
35220Sstevel@tonic-gate /*
35230Sstevel@tonic-gate  * Called when a thread is exiting and has been switched to the kernel AS
35240Sstevel@tonic-gate  */
35250Sstevel@tonic-gate void
35260Sstevel@tonic-gate hat_thread_exit(kthread_t *thd)
35270Sstevel@tonic-gate {
35280Sstevel@tonic-gate 	ASSERT(thd->t_procp->p_as == &kas);
35290Sstevel@tonic-gate 	hat_switch(thd->t_procp->p_as->a_hat);
35300Sstevel@tonic-gate }
35310Sstevel@tonic-gate 
35320Sstevel@tonic-gate /*
35330Sstevel@tonic-gate  * Setup the given brand new hat structure as the new HAT on this cpu's mmu.
35340Sstevel@tonic-gate  */
35350Sstevel@tonic-gate /*ARGSUSED*/
35360Sstevel@tonic-gate void
35370Sstevel@tonic-gate hat_setup(hat_t *hat, int flags)
35380Sstevel@tonic-gate {
35390Sstevel@tonic-gate 	kpreempt_disable();
35400Sstevel@tonic-gate 
35410Sstevel@tonic-gate 	hat_switch(hat);
35420Sstevel@tonic-gate 
35430Sstevel@tonic-gate 	kpreempt_enable();
35440Sstevel@tonic-gate }
35450Sstevel@tonic-gate 
35460Sstevel@tonic-gate /*
35470Sstevel@tonic-gate  * Prepare for a CPU private mapping for the given address.
35480Sstevel@tonic-gate  *
35490Sstevel@tonic-gate  * The address can only be used from a single CPU and can be remapped
35500Sstevel@tonic-gate  * using hat_mempte_remap().  Return the address of the PTE.
35510Sstevel@tonic-gate  *
35520Sstevel@tonic-gate  * We do the htable_create() if necessary and increment the valid count so
35530Sstevel@tonic-gate  * the htable can't disappear.  We also hat_devload() the page table into
35540Sstevel@tonic-gate  * kernel so that the PTE is quickly accessed.
35550Sstevel@tonic-gate  */
35560Sstevel@tonic-gate void *
35570Sstevel@tonic-gate hat_mempte_kern_setup(caddr_t addr, void *pt)
35580Sstevel@tonic-gate {
35590Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
35600Sstevel@tonic-gate 	htable_t	*ht;
35610Sstevel@tonic-gate 	uint_t		entry;
35620Sstevel@tonic-gate 	x86pte_t	oldpte;
35630Sstevel@tonic-gate 	caddr_t		p = (caddr_t)pt;
35640Sstevel@tonic-gate 
35650Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
35660Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
35670Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
35680Sstevel@tonic-gate 	if (ht == NULL) {
35690Sstevel@tonic-gate 		/*
35700Sstevel@tonic-gate 		 * Note that we don't need a hat_reserves_exit() check
35710Sstevel@tonic-gate 		 * for this htable_create(), since that'll be done by the
35720Sstevel@tonic-gate 		 * hat_devload() just below.
35730Sstevel@tonic-gate 		 */
35740Sstevel@tonic-gate 		ht = htable_create(kas.a_hat, va, 0, NULL);
35750Sstevel@tonic-gate 		entry = htable_va2entry(va, ht);
35760Sstevel@tonic-gate 		ASSERT(ht->ht_level == 0);
35770Sstevel@tonic-gate 		oldpte = x86pte_get(ht, entry);
35780Sstevel@tonic-gate 	}
35790Sstevel@tonic-gate 	if (PTE_ISVALID(oldpte))
35800Sstevel@tonic-gate 		panic("hat_mempte_setup(): address already mapped"
35810Sstevel@tonic-gate 		    "ht=%p, entry=%d, pte=" FMT_PTE, ht, entry, oldpte);
35820Sstevel@tonic-gate 
35830Sstevel@tonic-gate 	/*
35840Sstevel@tonic-gate 	 * increment ht_valid_cnt so that the pagetable can't disappear
35850Sstevel@tonic-gate 	 */
35860Sstevel@tonic-gate 	HTABLE_INC(ht->ht_valid_cnt);
35870Sstevel@tonic-gate 
35880Sstevel@tonic-gate 	/*
35890Sstevel@tonic-gate 	 * now we need to map the page holding the pagetable for va into
35900Sstevel@tonic-gate 	 * the kernel's address space.
35910Sstevel@tonic-gate 	 */
35920Sstevel@tonic-gate 	hat_devload(kas.a_hat, p, MMU_PAGESIZE, ht->ht_pfn,
35930Sstevel@tonic-gate 	    PROT_READ | PROT_WRITE | HAT_NOSYNC | HAT_UNORDERED_OK,
35940Sstevel@tonic-gate 	    HAT_LOAD | HAT_LOAD_NOCONSIST);
35950Sstevel@tonic-gate 
35960Sstevel@tonic-gate 	/*
35970Sstevel@tonic-gate 	 * return the PTE address to the caller.
35980Sstevel@tonic-gate 	 */
35990Sstevel@tonic-gate 	htable_release(ht);
36000Sstevel@tonic-gate 	p += entry << mmu.pte_size_shift;
36010Sstevel@tonic-gate 	return ((void *)p);
36020Sstevel@tonic-gate }
36030Sstevel@tonic-gate 
36040Sstevel@tonic-gate /*
36050Sstevel@tonic-gate  * Prepare for a CPU private mapping for the given address.
36060Sstevel@tonic-gate  */
36070Sstevel@tonic-gate void *
36080Sstevel@tonic-gate hat_mempte_setup(caddr_t addr)
36090Sstevel@tonic-gate {
36100Sstevel@tonic-gate 	x86pte_t	*p;
36110Sstevel@tonic-gate 
36120Sstevel@tonic-gate 	p = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
36130Sstevel@tonic-gate 	return (hat_mempte_kern_setup(addr, p));
36140Sstevel@tonic-gate }
36150Sstevel@tonic-gate 
36160Sstevel@tonic-gate /*
36170Sstevel@tonic-gate  * Release a CPU private mapping for the given address.
36180Sstevel@tonic-gate  * We decrement the htable valid count so it might be destroyed.
36190Sstevel@tonic-gate  */
36200Sstevel@tonic-gate void
36210Sstevel@tonic-gate hat_mempte_release(caddr_t addr, void *pteptr)
36220Sstevel@tonic-gate {
36230Sstevel@tonic-gate 	htable_t	*ht;
36240Sstevel@tonic-gate 	uintptr_t	va = ALIGN2PAGE(pteptr);
36250Sstevel@tonic-gate 
36260Sstevel@tonic-gate 	/*
36270Sstevel@tonic-gate 	 * first invalidate any left over mapping and decrement the
36280Sstevel@tonic-gate 	 * htable's mapping count
36290Sstevel@tonic-gate 	 */
36300Sstevel@tonic-gate 	if (mmu.pae_hat)
36310Sstevel@tonic-gate 		*(x86pte_t *)pteptr = 0;
36320Sstevel@tonic-gate 	else
36330Sstevel@tonic-gate 		*(x86pte32_t *)pteptr = 0;
36340Sstevel@tonic-gate 	mmu_tlbflush_entry(addr);
36350Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
36360Sstevel@tonic-gate 	if (ht == NULL)
36370Sstevel@tonic-gate 		panic("hat_mempte_release(): invalid address");
36380Sstevel@tonic-gate 	ASSERT(ht->ht_level == 0);
36390Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
36400Sstevel@tonic-gate 	htable_release(ht);
36410Sstevel@tonic-gate 
36420Sstevel@tonic-gate 	/*
36430Sstevel@tonic-gate 	 * now blow away the kernel mapping to the page table page
36440Sstevel@tonic-gate 	 * XX64 -- see comment in hat_mempte_setup()
36450Sstevel@tonic-gate 	 */
36460Sstevel@tonic-gate 	hat_unload_callback(kas.a_hat, (caddr_t)va, MMU_PAGESIZE,
36470Sstevel@tonic-gate 	    HAT_UNLOAD, NULL);
36480Sstevel@tonic-gate }
36490Sstevel@tonic-gate 
36500Sstevel@tonic-gate /*
36510Sstevel@tonic-gate  * Apply a temporary CPU private mapping to a page. We flush the TLB only
36520Sstevel@tonic-gate  * on this CPU, so this ought to have been called with preemption disabled.
36530Sstevel@tonic-gate  */
36540Sstevel@tonic-gate void
36550Sstevel@tonic-gate hat_mempte_remap(
36560Sstevel@tonic-gate 	pfn_t pfn,
36570Sstevel@tonic-gate 	caddr_t addr,
36580Sstevel@tonic-gate 	void *pteptr,
36590Sstevel@tonic-gate 	uint_t attr,
36600Sstevel@tonic-gate 	uint_t flags)
36610Sstevel@tonic-gate {
36620Sstevel@tonic-gate 	uintptr_t	va = (uintptr_t)addr;
36630Sstevel@tonic-gate 	x86pte_t	pte;
36640Sstevel@tonic-gate 
36650Sstevel@tonic-gate 	/*
36660Sstevel@tonic-gate 	 * Remap the given PTE to the new page's PFN. Invalidate only
36670Sstevel@tonic-gate 	 * on this CPU.
36680Sstevel@tonic-gate 	 */
36690Sstevel@tonic-gate #ifdef DEBUG
36700Sstevel@tonic-gate 	htable_t	*ht;
36710Sstevel@tonic-gate 	uint_t		entry;
36720Sstevel@tonic-gate 
36730Sstevel@tonic-gate 	ASSERT(IS_PAGEALIGNED(va));
36740Sstevel@tonic-gate 	ASSERT(!IN_VA_HOLE(va));
36750Sstevel@tonic-gate 	ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
36760Sstevel@tonic-gate 	ASSERT(ht != NULL);
36770Sstevel@tonic-gate 	ASSERT(ht->ht_level == 0);
36780Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
36790Sstevel@tonic-gate 	htable_release(ht);
36800Sstevel@tonic-gate #endif
36810Sstevel@tonic-gate 	pte = hati_mkpte(pfn, attr, 0, flags);
36820Sstevel@tonic-gate 	if (mmu.pae_hat)
36830Sstevel@tonic-gate 		*(x86pte_t *)pteptr = pte;
36840Sstevel@tonic-gate 	else
36850Sstevel@tonic-gate 		*(x86pte32_t *)pteptr = (x86pte32_t)pte;
36860Sstevel@tonic-gate 	mmu_tlbflush_entry(addr);
36870Sstevel@tonic-gate }
36880Sstevel@tonic-gate 
36890Sstevel@tonic-gate 
36900Sstevel@tonic-gate 
36910Sstevel@tonic-gate /*
36920Sstevel@tonic-gate  * Hat locking functions
36930Sstevel@tonic-gate  * XXX - these two functions are currently being used by hatstats
36940Sstevel@tonic-gate  * 	they can be removed by using a per-as mutex for hatstats.
36950Sstevel@tonic-gate  */
36960Sstevel@tonic-gate void
36970Sstevel@tonic-gate hat_enter(hat_t *hat)
36980Sstevel@tonic-gate {
36990Sstevel@tonic-gate 	mutex_enter(&hat->hat_mutex);
37000Sstevel@tonic-gate }
37010Sstevel@tonic-gate 
37020Sstevel@tonic-gate void
37030Sstevel@tonic-gate hat_exit(hat_t *hat)
37040Sstevel@tonic-gate {
37050Sstevel@tonic-gate 	mutex_exit(&hat->hat_mutex);
37060Sstevel@tonic-gate }
37070Sstevel@tonic-gate 
37080Sstevel@tonic-gate 
37090Sstevel@tonic-gate /*
37100Sstevel@tonic-gate  * Used by hat_kern_setup() to create initial kernel HAT mappings from
37110Sstevel@tonic-gate  * the boot loader's mappings.
37120Sstevel@tonic-gate  *
37130Sstevel@tonic-gate  * - size is either PAGESIZE or some multiple of a level one pagesize
37140Sstevel@tonic-gate  * - there may not be page_t's for every pfn. (ie. the nucleus pages)
37150Sstevel@tonic-gate  * - pfn's are continguous for the given va range (va to va + size * cnt)
37160Sstevel@tonic-gate  */
37170Sstevel@tonic-gate void
37180Sstevel@tonic-gate hati_kern_setup_load(
37190Sstevel@tonic-gate 	uintptr_t va,	/* starting va of range to map */
37200Sstevel@tonic-gate 	size_t size,	/* either PAGESIZE or multiple of large page size */
37210Sstevel@tonic-gate 	pfn_t pfn,	/* starting PFN */
37220Sstevel@tonic-gate 	pgcnt_t cnt,	/* number of mappings, (cnt * size) == total size */
37230Sstevel@tonic-gate 	uint_t prot)	/* protections (PROT_READ, PROT_WRITE, PROT_EXEC) */
37240Sstevel@tonic-gate {
37250Sstevel@tonic-gate 	level_t level = (size == MMU_PAGESIZE ? 0 : 1);
37260Sstevel@tonic-gate 	size_t bytes = size * cnt;
37270Sstevel@tonic-gate 	size_t pgsize = LEVEL_SIZE(level);
37280Sstevel@tonic-gate 	page_t *pp;
37290Sstevel@tonic-gate 	uint_t flags = HAT_LOAD;
37300Sstevel@tonic-gate 
37310Sstevel@tonic-gate 	/*
37320Sstevel@tonic-gate 	 * We're only going to throw away mappings below kernelbase or in
37330Sstevel@tonic-gate 	 * boot's special double-mapping region, so set noconsist to avoid
37340Sstevel@tonic-gate 	 * using hments
37350Sstevel@tonic-gate 	 */
37360Sstevel@tonic-gate 	if (BOOT_VA(va))
37370Sstevel@tonic-gate 		flags |= HAT_LOAD_NOCONSIST;
37380Sstevel@tonic-gate 
37390Sstevel@tonic-gate 	prot |= HAT_STORECACHING_OK;
37400Sstevel@tonic-gate 	while (bytes != 0) {
37410Sstevel@tonic-gate 		ASSERT(bytes >= pgsize);
37420Sstevel@tonic-gate 
37430Sstevel@tonic-gate 		pp = NULL;
37440Sstevel@tonic-gate 		if (pf_is_memory(pfn) && !BOOT_VA(va) && level == 0)
37450Sstevel@tonic-gate 			pp = page_numtopp_nolock(pfn);
37460Sstevel@tonic-gate 
37470Sstevel@tonic-gate 		hati_load_common(kas.a_hat, va, pp, prot, flags, level, pfn);
37480Sstevel@tonic-gate 
37490Sstevel@tonic-gate 		va += pgsize;
37500Sstevel@tonic-gate 		pfn += mmu_btop(pgsize);
37510Sstevel@tonic-gate 		bytes -= pgsize;
37520Sstevel@tonic-gate 	}
37530Sstevel@tonic-gate }
37540Sstevel@tonic-gate 
37550Sstevel@tonic-gate /*
37560Sstevel@tonic-gate  * HAT part of cpu intialization.
37570Sstevel@tonic-gate  */
37580Sstevel@tonic-gate void
37590Sstevel@tonic-gate hat_cpu_online(struct cpu *cpup)
37600Sstevel@tonic-gate {
37610Sstevel@tonic-gate 	if (cpup != CPU) {
37620Sstevel@tonic-gate 		x86pte_cpu_init(cpup, NULL);
37630Sstevel@tonic-gate 		hat_vlp_setup(cpup);
37640Sstevel@tonic-gate 	}
37650Sstevel@tonic-gate 	CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
37660Sstevel@tonic-gate }
37670Sstevel@tonic-gate 
37680Sstevel@tonic-gate /*
37690Sstevel@tonic-gate  * Function called after all CPUs are brought online.
37700Sstevel@tonic-gate  * Used to remove low address boot mappings.
37710Sstevel@tonic-gate  */
37720Sstevel@tonic-gate void
37730Sstevel@tonic-gate clear_boot_mappings(uintptr_t low, uintptr_t high)
37740Sstevel@tonic-gate {
37750Sstevel@tonic-gate 	uintptr_t vaddr = low;
37760Sstevel@tonic-gate 	htable_t *ht = NULL;
37770Sstevel@tonic-gate 	level_t level;
37780Sstevel@tonic-gate 	uint_t entry;
37790Sstevel@tonic-gate 	x86pte_t pte;
37800Sstevel@tonic-gate 
37810Sstevel@tonic-gate 	/*
37820Sstevel@tonic-gate 	 * On 1st CPU we can unload the prom mappings, basically we blow away
37830Sstevel@tonic-gate 	 * all virtual mappings under kernelbase.
37840Sstevel@tonic-gate 	 */
37850Sstevel@tonic-gate 	while (vaddr < high) {
37860Sstevel@tonic-gate 		pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
37870Sstevel@tonic-gate 		if (ht == NULL)
37880Sstevel@tonic-gate 			break;
37890Sstevel@tonic-gate 
37900Sstevel@tonic-gate 		level = ht->ht_level;
37910Sstevel@tonic-gate 		entry = htable_va2entry(vaddr, ht);
37920Sstevel@tonic-gate 		ASSERT(level <= mmu.max_page_level);
37930Sstevel@tonic-gate 		ASSERT(PTE_ISPAGE(pte, level));
37940Sstevel@tonic-gate 
37950Sstevel@tonic-gate 		/*
37960Sstevel@tonic-gate 		 * Unload the mapping from the page tables.
37970Sstevel@tonic-gate 		 */
37980Sstevel@tonic-gate 		(void) x86pte_set(ht, entry, 0, NULL);
37990Sstevel@tonic-gate 		ASSERT(ht->ht_valid_cnt > 0);
38000Sstevel@tonic-gate 		HTABLE_DEC(ht->ht_valid_cnt);
38010Sstevel@tonic-gate 		PGCNT_DEC(ht->ht_hat, ht->ht_level);
38020Sstevel@tonic-gate 
38030Sstevel@tonic-gate 		vaddr += LEVEL_SIZE(ht->ht_level);
38040Sstevel@tonic-gate 	}
38050Sstevel@tonic-gate 	if (ht)
38060Sstevel@tonic-gate 		htable_release(ht);
38070Sstevel@tonic-gate 
38080Sstevel@tonic-gate 	/*
38090Sstevel@tonic-gate 	 * cross call for a complete invalidate.
38100Sstevel@tonic-gate 	 */
38110Sstevel@tonic-gate 	hat_demap(kas.a_hat, DEMAP_ALL_ADDR);
38120Sstevel@tonic-gate }
38130Sstevel@tonic-gate 
38140Sstevel@tonic-gate /*
38150Sstevel@tonic-gate  * Initialize a special area in the kernel that always holds some PTEs for
38160Sstevel@tonic-gate  * faster performance. This always holds segmap's PTEs.
38170Sstevel@tonic-gate  * In the 32 bit kernel this maps the kernel heap too.
38180Sstevel@tonic-gate  */
38190Sstevel@tonic-gate void
38200Sstevel@tonic-gate hat_kmap_init(uintptr_t base, size_t len)
38210Sstevel@tonic-gate {
38220Sstevel@tonic-gate 	uintptr_t map_addr;	/* base rounded down to large page size */
38230Sstevel@tonic-gate 	uintptr_t map_eaddr;	/* base + len rounded up */
38240Sstevel@tonic-gate 	size_t map_len;
38250Sstevel@tonic-gate 	caddr_t ptes;		/* mapping area in kernel as for ptes */
38260Sstevel@tonic-gate 	size_t window_size;	/* size of mapping area for ptes */
38270Sstevel@tonic-gate 	ulong_t htable_cnt;	/* # of page tables to cover map_len */
38280Sstevel@tonic-gate 	ulong_t i;
38290Sstevel@tonic-gate 	htable_t *ht;
38300Sstevel@tonic-gate 
38310Sstevel@tonic-gate 	/*
38320Sstevel@tonic-gate 	 * we have to map in an area that matches an entire page table
38330Sstevel@tonic-gate 	 */
38340Sstevel@tonic-gate 	map_addr = base & LEVEL_MASK(1);
38350Sstevel@tonic-gate 	map_eaddr = (base + len + LEVEL_SIZE(1) - 1) & LEVEL_MASK(1);
38360Sstevel@tonic-gate 	map_len = map_eaddr - map_addr;
38370Sstevel@tonic-gate 	window_size = mmu_btop(map_len) * mmu.pte_size;
38380Sstevel@tonic-gate 	htable_cnt = mmu_btop(map_len) / mmu.ptes_per_table;
38390Sstevel@tonic-gate 
38400Sstevel@tonic-gate 	/*
38410Sstevel@tonic-gate 	 * allocate vmem for the kmap_ptes
38420Sstevel@tonic-gate 	 */
38430Sstevel@tonic-gate 	ptes = vmem_xalloc(heap_arena, window_size, MMU_PAGESIZE, 0,
38440Sstevel@tonic-gate 	    0, NULL, NULL, VM_SLEEP);
38450Sstevel@tonic-gate 	mmu.kmap_htables =
38460Sstevel@tonic-gate 	    kmem_alloc(htable_cnt * sizeof (htable_t *), KM_SLEEP);
38470Sstevel@tonic-gate 
38480Sstevel@tonic-gate 	/*
38490Sstevel@tonic-gate 	 * Map the page tables that cover kmap into the allocated range.
38500Sstevel@tonic-gate 	 * Note we don't ever htable_release() the kmap page tables - they
38510Sstevel@tonic-gate 	 * can't ever be stolen, freed, etc.
38520Sstevel@tonic-gate 	 */
38530Sstevel@tonic-gate 	for (i = 0; i < htable_cnt; ++i) {
38540Sstevel@tonic-gate 		ht = htable_create(kas.a_hat, map_addr + i * LEVEL_SIZE(1),
38550Sstevel@tonic-gate 		    0, NULL);
38560Sstevel@tonic-gate 		mmu.kmap_htables[i] = ht;
38570Sstevel@tonic-gate 
38580Sstevel@tonic-gate 		hat_devload(kas.a_hat, ptes + i * MMU_PAGESIZE, MMU_PAGESIZE,
38590Sstevel@tonic-gate 		    ht->ht_pfn,
38600Sstevel@tonic-gate 		    PROT_READ | PROT_WRITE | HAT_NOSYNC | HAT_UNORDERED_OK,
38610Sstevel@tonic-gate 		    HAT_LOAD | HAT_LOAD_NOCONSIST);
38620Sstevel@tonic-gate 
38630Sstevel@tonic-gate 	}
38640Sstevel@tonic-gate 
38650Sstevel@tonic-gate 	/*
38660Sstevel@tonic-gate 	 * set information in mmu to activate handling of kmap
38670Sstevel@tonic-gate 	 */
38680Sstevel@tonic-gate 	mmu.kmap_addr = base;
38690Sstevel@tonic-gate 	mmu.kmap_eaddr = base + len;
38700Sstevel@tonic-gate 	mmu.kmap_ptes =
38710Sstevel@tonic-gate 	    (x86pte_t *)(ptes + mmu.pte_size * mmu_btop(base - map_addr));
38720Sstevel@tonic-gate }
38730Sstevel@tonic-gate 
38740Sstevel@tonic-gate /*
38750Sstevel@tonic-gate  * Atomically update a new translation for a single page.  If the
38760Sstevel@tonic-gate  * currently installed PTE doesn't match the value we expect to find,
38770Sstevel@tonic-gate  * it's not updated and we return the PTE we found.
38780Sstevel@tonic-gate  *
38790Sstevel@tonic-gate  * If activating nosync or NOWRITE and the page was modified we need to sync
38800Sstevel@tonic-gate  * with the page_t. Also sync with page_t if clearing ref/mod bits.
38810Sstevel@tonic-gate  */
38820Sstevel@tonic-gate static x86pte_t
38830Sstevel@tonic-gate hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
38840Sstevel@tonic-gate {
38850Sstevel@tonic-gate 	page_t		*pp;
38860Sstevel@tonic-gate 	uint_t		rm = 0;
38870Sstevel@tonic-gate 	x86pte_t	replaced;
38880Sstevel@tonic-gate 
38890Sstevel@tonic-gate 	if (!PTE_GET(expected, PT_NOSYNC | PT_NOCONSIST) &&
38900Sstevel@tonic-gate 	    PTE_GET(expected, PT_MOD | PT_REF) &&
38910Sstevel@tonic-gate 	    (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
38920Sstevel@tonic-gate 		!PTE_GET(new, PT_MOD | PT_REF))) {
38930Sstevel@tonic-gate 
38940Sstevel@tonic-gate 		pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
38950Sstevel@tonic-gate 		ASSERT(pp != NULL);
38960Sstevel@tonic-gate 		if (PTE_GET(expected, PT_MOD))
38970Sstevel@tonic-gate 			rm |= P_MOD;
38980Sstevel@tonic-gate 		if (PTE_GET(expected, PT_REF))
38990Sstevel@tonic-gate 			rm |= P_REF;
39000Sstevel@tonic-gate 		PTE_CLR(new, PT_MOD | PT_REF);
39010Sstevel@tonic-gate 	}
39020Sstevel@tonic-gate 
39030Sstevel@tonic-gate 	replaced = x86pte_update(ht, entry, expected, new);
39040Sstevel@tonic-gate 	if (replaced != expected)
39050Sstevel@tonic-gate 		return (replaced);
39060Sstevel@tonic-gate 
39070Sstevel@tonic-gate 	if (rm) {
39080Sstevel@tonic-gate 		/*
39090Sstevel@tonic-gate 		 * sync to all constituent pages of a large page
39100Sstevel@tonic-gate 		 */
39110Sstevel@tonic-gate 		pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
39120Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
39130Sstevel@tonic-gate 		while (pgcnt-- > 0) {
39140Sstevel@tonic-gate 			/*
39150Sstevel@tonic-gate 			 * hat_page_demote() can't decrease
39160Sstevel@tonic-gate 			 * pszc below this mapping size
39170Sstevel@tonic-gate 			 * since large mapping existed after we
39180Sstevel@tonic-gate 			 * took mlist lock.
39190Sstevel@tonic-gate 			 */
39200Sstevel@tonic-gate 			ASSERT(pp->p_szc >= ht->ht_level);
39210Sstevel@tonic-gate 			hat_page_setattr(pp, rm);
39220Sstevel@tonic-gate 			++pp;
39230Sstevel@tonic-gate 		}
39240Sstevel@tonic-gate 	}
39250Sstevel@tonic-gate 
39260Sstevel@tonic-gate 	return (0);
39270Sstevel@tonic-gate }
39280Sstevel@tonic-gate 
39290Sstevel@tonic-gate /*
39300Sstevel@tonic-gate  * Kernel Physical Mapping (kpm) facility
39310Sstevel@tonic-gate  *
39320Sstevel@tonic-gate  * Most of the routines needed to support segkpm are almost no-ops on the
39330Sstevel@tonic-gate  * x86 platform.  We map in the entire segment when it is created and leave
39340Sstevel@tonic-gate  * it mapped in, so there is no additional work required to set up and tear
39350Sstevel@tonic-gate  * down individual mappings.  All of these routines were created to support
39360Sstevel@tonic-gate  * SPARC platforms that have to avoid aliasing in their virtually indexed
39370Sstevel@tonic-gate  * caches.
39380Sstevel@tonic-gate  *
39390Sstevel@tonic-gate  * Most of the routines have sanity checks in them (e.g. verifying that the
39400Sstevel@tonic-gate  * passed-in page is locked).  We don't actually care about most of these
39410Sstevel@tonic-gate  * checks on x86, but we leave them in place to identify problems in the
39420Sstevel@tonic-gate  * upper levels.
39430Sstevel@tonic-gate  */
39440Sstevel@tonic-gate 
39450Sstevel@tonic-gate /*
39460Sstevel@tonic-gate  * Map in a locked page and return the vaddr.
39470Sstevel@tonic-gate  */
39480Sstevel@tonic-gate /*ARGSUSED*/
39490Sstevel@tonic-gate caddr_t
39500Sstevel@tonic-gate hat_kpm_mapin(struct page *pp, struct kpme *kpme)
39510Sstevel@tonic-gate {
39520Sstevel@tonic-gate 	caddr_t		vaddr;
39530Sstevel@tonic-gate 
39540Sstevel@tonic-gate #ifdef DEBUG
39550Sstevel@tonic-gate 	if (kpm_enable == 0) {
39560Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
39570Sstevel@tonic-gate 		return ((caddr_t)NULL);
39580Sstevel@tonic-gate 	}
39590Sstevel@tonic-gate 
39600Sstevel@tonic-gate 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
39610Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
39620Sstevel@tonic-gate 		return ((caddr_t)NULL);
39630Sstevel@tonic-gate 	}
39640Sstevel@tonic-gate #endif
39650Sstevel@tonic-gate 
39660Sstevel@tonic-gate 	vaddr = hat_kpm_page2va(pp, 1);
39670Sstevel@tonic-gate 
39680Sstevel@tonic-gate 	return (vaddr);
39690Sstevel@tonic-gate }
39700Sstevel@tonic-gate 
39710Sstevel@tonic-gate /*
39720Sstevel@tonic-gate  * Mapout a locked page.
39730Sstevel@tonic-gate  */
39740Sstevel@tonic-gate /*ARGSUSED*/
39750Sstevel@tonic-gate void
39760Sstevel@tonic-gate hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
39770Sstevel@tonic-gate {
39780Sstevel@tonic-gate #ifdef DEBUG
39790Sstevel@tonic-gate 	if (kpm_enable == 0) {
39800Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
39810Sstevel@tonic-gate 		return;
39820Sstevel@tonic-gate 	}
39830Sstevel@tonic-gate 
39840Sstevel@tonic-gate 	if (IS_KPM_ADDR(vaddr) == 0) {
39850Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
39860Sstevel@tonic-gate 		return;
39870Sstevel@tonic-gate 	}
39880Sstevel@tonic-gate 
39890Sstevel@tonic-gate 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
39900Sstevel@tonic-gate 		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
39910Sstevel@tonic-gate 		return;
39920Sstevel@tonic-gate 	}
39930Sstevel@tonic-gate #endif
39940Sstevel@tonic-gate }
39950Sstevel@tonic-gate 
39960Sstevel@tonic-gate /*
39970Sstevel@tonic-gate  * Return the kpm virtual address for a specific pfn
39980Sstevel@tonic-gate  */
39990Sstevel@tonic-gate caddr_t
40000Sstevel@tonic-gate hat_kpm_pfn2va(pfn_t pfn)
40010Sstevel@tonic-gate {
40020Sstevel@tonic-gate 	uintptr_t vaddr;
40030Sstevel@tonic-gate 
40040Sstevel@tonic-gate 	ASSERT(kpm_enable);
40050Sstevel@tonic-gate 
40060Sstevel@tonic-gate 	vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
40070Sstevel@tonic-gate 
40080Sstevel@tonic-gate 	return ((caddr_t)vaddr);
40090Sstevel@tonic-gate }
40100Sstevel@tonic-gate 
40110Sstevel@tonic-gate /*
40120Sstevel@tonic-gate  * Return the kpm virtual address for the page at pp.
40130Sstevel@tonic-gate  */
40140Sstevel@tonic-gate /*ARGSUSED*/
40150Sstevel@tonic-gate caddr_t
40160Sstevel@tonic-gate hat_kpm_page2va(struct page *pp, int checkswap)
40170Sstevel@tonic-gate {
40180Sstevel@tonic-gate 	return (hat_kpm_pfn2va(pp->p_pagenum));
40190Sstevel@tonic-gate }
40200Sstevel@tonic-gate 
40210Sstevel@tonic-gate /*
40220Sstevel@tonic-gate  * Return the page frame number for the kpm virtual address vaddr.
40230Sstevel@tonic-gate  */
40240Sstevel@tonic-gate pfn_t
40250Sstevel@tonic-gate hat_kpm_va2pfn(caddr_t vaddr)
40260Sstevel@tonic-gate {
40270Sstevel@tonic-gate 	pfn_t		pfn;
40280Sstevel@tonic-gate 
40290Sstevel@tonic-gate 	ASSERT(IS_KPM_ADDR(vaddr));
40300Sstevel@tonic-gate 
40310Sstevel@tonic-gate 	pfn = (pfn_t)btop(vaddr - kpm_vbase);
40320Sstevel@tonic-gate 
40330Sstevel@tonic-gate 	return (pfn);
40340Sstevel@tonic-gate }
40350Sstevel@tonic-gate 
40360Sstevel@tonic-gate 
40370Sstevel@tonic-gate /*
40380Sstevel@tonic-gate  * Return the page for the kpm virtual address vaddr.
40390Sstevel@tonic-gate  */
40400Sstevel@tonic-gate page_t *
40410Sstevel@tonic-gate hat_kpm_vaddr2page(caddr_t vaddr)
40420Sstevel@tonic-gate {
40430Sstevel@tonic-gate 	pfn_t		pfn;
40440Sstevel@tonic-gate 
40450Sstevel@tonic-gate 	ASSERT(IS_KPM_ADDR(vaddr));
40460Sstevel@tonic-gate 
40470Sstevel@tonic-gate 	pfn = hat_kpm_va2pfn(vaddr);
40480Sstevel@tonic-gate 
40490Sstevel@tonic-gate 	return (page_numtopp_nolock(pfn));
40500Sstevel@tonic-gate }
40510Sstevel@tonic-gate 
40520Sstevel@tonic-gate /*
40530Sstevel@tonic-gate  * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
40540Sstevel@tonic-gate  * KPM page.  This should never happen on x86
40550Sstevel@tonic-gate  */
40560Sstevel@tonic-gate int
40570Sstevel@tonic-gate hat_kpm_fault(hat_t *hat, caddr_t vaddr)
40580Sstevel@tonic-gate {
40590Sstevel@tonic-gate 	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p", hat, vaddr);
40600Sstevel@tonic-gate 
40610Sstevel@tonic-gate 	return (0);
40620Sstevel@tonic-gate }
40630Sstevel@tonic-gate 
40640Sstevel@tonic-gate /*ARGSUSED*/
40650Sstevel@tonic-gate void
40660Sstevel@tonic-gate hat_kpm_mseghash_clear(int nentries)
40670Sstevel@tonic-gate {}
40680Sstevel@tonic-gate 
40690Sstevel@tonic-gate /*ARGSUSED*/
40700Sstevel@tonic-gate void
40710Sstevel@tonic-gate hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
40720Sstevel@tonic-gate {}
4073