xref: /onnv-gate/usr/src/uts/i86pc/vm/i86_mmu.c (revision 5159:6cdd421a2458)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/t_lock.h>
#include <sys/memlist.h>
#include <sys/cpuvar.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <sys/vm.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/vm_machparam.h>
#include <sys/tss.h>
#include <sys/vnode.h>
#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>
#include <vm/seg_map.h>
#include <vm/hat_i86.h>
#include <sys/promif.h>
#include <sys/x86_archext.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/sunddi.h>
#include <sys/ddidmareq.h>
#include <sys/controlregs.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/bootinfo.h>
#include <vm/kboot_mmu.h>

#ifdef __xpv
#include <sys/hypervisor.h>
#endif

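/*
 * Map pgcnt physical pages, starting at page frame pf, into kernel virtual
 * address space with the given protections and return the base VA.  Pages
 * backed by a page_t are loaded with hat_memload(); pages without one
 * (e.g. device memory) use hat_devload().  All mappings are created locked
 * (HAT_LOAD_LOCK) and with HAT_NOSYNC set.
 */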
caddr_t
i86devmap(pfn_t pf, pgcnt_t pgcnt, uint_t prot)
{
	caddr_t addr;
	caddr_t addr1;
	page_t *pp;

	addr1 = addr = vmem_alloc(heap_arena, mmu_ptob(pgcnt), VM_SLEEP);

	for (; pgcnt != 0; addr += MMU_PAGESIZE, ++pf, --pgcnt) {
		pp = page_numtopp_nolock(pf);
		if (pp == NULL) {
			hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pf,
			    prot | HAT_NOSYNC, HAT_LOAD_LOCK);
		} else {
			hat_memload(kas.a_hat, addr, pp,
			    prot | HAT_NOSYNC, HAT_LOAD_LOCK);
		}
	}

	return (addr1);
}

/*
 * This routine is like page_numtopp, but accepts only free pages, which
 * it allocates (unfrees) and returns with the exclusive lock held.
 * It is used by machdep.c/dma_init() to find contiguous free pages.
 *
 * XXX this and some others should probably be in vm_machdep.c
 */
page_t *
page_numtopp_alloc(pfn_t pfnum)
{
	page_t *pp;

retry:
	pp = page_numtopp_nolock(pfnum);
	if (pp == NULL) {
		return (NULL);
	}

	if (!page_trylock(pp, SE_EXCL)) {
		return (NULL);
	}

	if (page_pptonum(pp) != pfnum) {
		page_unlock(pp);
		goto retry;
	}

	if (!PP_ISFREE(pp)) {
		page_unlock(pp);
		return (NULL);
	}
	if (pp->p_szc) {
		page_demote_free_pages(pp);
		page_unlock(pp);
		goto retry;
	}

	/* If associated with a vnode, destroy mappings */

	if (pp->p_vnode) {

		page_destroy_free(pp);

		if (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_NO_RECLAIM)) {
			return (NULL);
		}

		if (page_pptonum(pp) != pfnum) {
			page_unlock(pp);
			goto retry;
		}
	}

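	/*
	 * In the p_vnode case above the exclusive lock was dropped and
	 * re-acquired, so re-verify that the page is still free.
	 */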
	if (!PP_ISFREE(pp)) {
		page_unlock(pp);
		return (NULL);
	}

	if (!page_reclaim(pp, (kmutex_t *)NULL))
		return (NULL);

	return (pp);
}

/*
 * This flag is not set early in boot.  Once it is set, we are no longer
 * using the boot loader's page tables.
 */
uint_t khat_running = 0;

/*
 * This procedure is callable only while the boot loader is in charge of the
 * MMU. It assumes that PA == VA for page table pointers.  It doesn't live in
 * kboot_mmu.c since it's used from common code.
 */
pfn_t
va_to_pfn(void *vaddr)
{
	uintptr_t	des_va = ALIGN2PAGE(vaddr);
	uintptr_t	va = des_va;
	size_t		len;
	uint_t		prot;
	pfn_t		pfn;

	if (khat_running)
		panic("va_to_pfn(): called too late\n");

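	/*
	 * kbm_probe() advances va to the start of the boot mapping at or
	 * above des_va.  If it went past des_va there is no mapping for it;
	 * if it stopped below des_va, the address lies inside a larger
	 * mapping and the pfn must be adjusted by the offset.
	 */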
	if (kbm_probe(&va, &len, &pfn, &prot) == 0)
		return (PFN_INVALID);
	if (va > des_va)
		return (PFN_INVALID);
	if (va < des_va)
		pfn += mmu_btop(des_va - va);
	return (pfn);
}

/*
 * Initialize a special area in the kernel that always keeps some PTEs
 * mapped for fast access.  It always holds segmap's PTEs; in the 32 bit
 * kernel it holds the kernel heap's PTEs as well.
 */
void
hat_kmap_init(uintptr_t base, size_t len)
{
	uintptr_t map_addr;	/* base rounded down to large page size */
	uintptr_t map_eaddr;	/* base + len rounded up */
	size_t map_len;
	caddr_t ptes;		/* mapping area in kernel for kmap ptes */
	size_t window_size;	/* size of mapping area for ptes */
	ulong_t htable_cnt;	/* # of page tables to cover map_len */
	ulong_t i;
	htable_t *ht;
	uintptr_t va;

	/*
	 * We have to map in an area that matches an entire page table.
	 * The PTEs are large page aligned to avoid spurious pagefaults
	 * on the hypervisor.
	 */
	map_addr = base & LEVEL_MASK(1);	/* round down to large page */
	map_eaddr = (base + len + LEVEL_SIZE(1) - 1) & LEVEL_MASK(1);
	map_len = map_eaddr - map_addr;
	window_size = mmu_btop(map_len) * mmu.pte_size; /* bytes of PTEs */
	window_size = (window_size + LEVEL_SIZE(1)) & LEVEL_MASK(1);
	htable_cnt = map_len >> LEVEL_SHIFT(1);	/* level 0 tables needed */

	/*
	 * allocate vmem for the kmap_ptes
	 */
	ptes = vmem_xalloc(heap_arena, window_size, LEVEL_SIZE(1), 0,
	    0, NULL, NULL, VM_SLEEP);
	mmu.kmap_htables =
	    kmem_alloc(htable_cnt * sizeof (htable_t *), KM_SLEEP);

	/*
	 * Map the page tables that cover kmap into the allocated range.
	 * Note we don't ever htable_release() the kmap page tables - they
	 * can't ever be stolen, freed, etc.
	 */
	for (va = map_addr, i = 0; i < htable_cnt; va += LEVEL_SIZE(1), ++i) {
		ht = htable_create(kas.a_hat, va, 0, NULL);
		if (ht == NULL)
			panic("hat_kmap_init: ht == NULL");
		mmu.kmap_htables[i] = ht;

		hat_devload(kas.a_hat, ptes + i * MMU_PAGESIZE,
		    MMU_PAGESIZE, ht->ht_pfn,
#ifdef __xpv
		    PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
#else
		    PROT_READ | PROT_WRITE | HAT_NOSYNC | HAT_UNORDERED_OK,
#endif
		    HAT_LOAD | HAT_LOAD_NOCONSIST);
	}

	/*
	 * set information in mmu to activate handling of kmap
	 */
	mmu.kmap_addr = map_addr;
	mmu.kmap_eaddr = map_eaddr;
	mmu.kmap_ptes = (x86pte_t *)ptes;
}

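/*
 * Base virtual address and size of the segkpm mapping of physical memory,
 * established during startup.
 */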
extern caddr_t	kpm_vbase;
extern size_t	kpm_size;

#ifdef __xpv
/*
 * Create the initial segkpm mappings for the hypervisor. To avoid having
 * to deal with page tables being read only, we make all mappings
 * read only at first.
 */
static void
xen_kpm_create(paddr_t paddr, level_t lvl)
{
	ulong_t pg_off;

	for (pg_off = 0; pg_off < LEVEL_SIZE(lvl); pg_off += MMU_PAGESIZE) {
		kbm_map((uintptr_t)kpm_vbase + paddr, (paddr_t)0, 0, 1);
		kbm_read_only((uintptr_t)kpm_vbase + paddr + pg_off,
		    paddr + pg_off);
	}
}

/*
 * Try to make all kpm mappings writable. Failures are ok, as those
 * are just pagetable, GDT, etc. pages.
 */
static void
xen_kpm_finish_init(void)
{
	pfn_t gdtpfn = mmu_btop(CPU->cpu_m.mcpu_gdtpa);
	pfn_t pfn;
	page_t *pp;

	for (pfn = 0; pfn < mfn_count; ++pfn) {
		/*
		 * skip gdt
		 */
		if (pfn == gdtpfn)
			continue;

		/*
		 * p_index is a hint that this is a pagetable
		 */
		pp = page_numtopp_nolock(pfn);
		if (pp && pp->p_index) {
			pp->p_index = 0;
			continue;
		}
		(void) xen_kpm_page(pfn, PT_VALID | PT_WRITABLE);
	}
}
#endif

/*
 * Routine to pre-allocate data structures for hat_kern_setup(). It computes
 * how many pagetables it needs by walking the boot loader's page tables.
 */
/*ARGSUSED*/
void
hat_kern_alloc(
	caddr_t	segmap_base,
	size_t	segmap_size,
	caddr_t	ekernelheap)
{
	uintptr_t	last_va = (uintptr_t)-1;	/* catch 1st time */
	uintptr_t	va = 0;
	size_t		size;
	pfn_t		pfn;
	uint_t		prot;
	uint_t		table_cnt = 1;
	uint_t		mapping_cnt;
	level_t		start_level;
	level_t		l;
	struct memlist	*pmem;
	level_t		lpagel = mmu.max_page_level;
	uint64_t	paddr;
	int64_t		psize;
	int		nwindows;

	if (kpm_size > 0) {
		/*
		 * Create the kpm page tables.  When running on the
		 * hypervisor these are made read-only at first.
		 * Later we'll add write permission where possible.
		 */
		for (pmem = phys_install; pmem; pmem = pmem->next) {
			paddr = pmem->address;
			psize = pmem->size;
			while (psize >= MMU_PAGESIZE) {
				if ((paddr & LEVEL_OFFSET(lpagel)) == 0 &&
				    psize > LEVEL_SIZE(lpagel))
					l = lpagel;
				else
					l = 0;
#if defined(__xpv)
				/*
				 * Create read-only mappings to avoid
				 * conflicting with pagetable usage
				 */
				xen_kpm_create(paddr, l);
#else
				kbm_map((uintptr_t)kpm_vbase + paddr, paddr,
				    l, 1);
#endif
				paddr += LEVEL_SIZE(l);
				psize -= LEVEL_SIZE(l);
			}
		}
	}

	/*
	 * If this machine doesn't have a kpm segment, we need to allocate
	 * a small number of 'windows' which can be used to map pagetables.
	 */
	nwindows = (kpm_size == 0) ? 2 * NCPU : 0;

#if defined(__xpv)
	/*
	 * On a hypervisor, these windows are also used by the xpv_panic
	 * code, where we need one window for each level of the pagetable
	 * hierarchy.
	 */
	nwindows = MAX(nwindows, mmu.max_level);
#endif

	if (nwindows != 0) {
		/*
		 * Create the page windows and 1 page of VA in
		 * which we map the PTEs of those windows.
		 */
		mmu.pwin_base = vmem_xalloc(heap_arena, nwindows * MMU_PAGESIZE,
		    LEVEL_SIZE(1), 0, 0, NULL, NULL, VM_SLEEP);
		ASSERT(nwindows <= MMU_PAGESIZE / mmu.pte_size);
		mmu.pwin_pte_va = vmem_xalloc(heap_arena, MMU_PAGESIZE,
		    MMU_PAGESIZE, 0, 0, NULL, NULL, VM_SLEEP);

		/*
		 * Find/Create the page table window mappings.
		 */
		paddr = 0;
		(void) find_pte((uintptr_t)mmu.pwin_base, &paddr, 0, 0);
		ASSERT(paddr != 0);
		ASSERT((paddr & MMU_PAGEOFFSET) == 0);
		mmu.pwin_pte_pa = paddr;
#ifdef __xpv
		(void) find_pte((uintptr_t)mmu.pwin_pte_va, NULL, 0, 0);
		kbm_read_only((uintptr_t)mmu.pwin_pte_va, mmu.pwin_pte_pa);
#else
		kbm_map((uintptr_t)mmu.pwin_pte_va, mmu.pwin_pte_pa, 0, 1);
#endif
	}

	/*
	 * Walk the boot loader's page tables and figure out
	 * how many tables and page mappings there will be.
	 */
	while (kbm_probe(&va, &size, &pfn, &prot) != 0) {
		/*
		 * At each level, if the last_va falls into a new htable,
		 * increment table_cnt. We can stop at the 1st level where
		 * they are in the same htable.
		 */
		if (size == MMU_PAGESIZE)
			start_level = 0;
		else
			start_level = 1;

		for (l = start_level; l < mmu.max_level; ++l) {
			if (va >> LEVEL_SHIFT(l + 1) ==
			    last_va >> LEVEL_SHIFT(l + 1))
				break;
			++table_cnt;
		}
		last_va = va;
		va = (va & LEVEL_MASK(1)) + LEVEL_SIZE(1);
	}

	/*
	 * Besides the boot loader mappings, we're going to fill in
	 * the entire top level page table for the kernel. Make sure there's
	 * enough reserve for that too.
	 */
	table_cnt += mmu.top_level_count - ((kernelbase >>
	    LEVEL_SHIFT(mmu.max_level)) & (mmu.top_level_count - 1));

#if defined(__i386)
	/*
	 * The 32 bit PAE hat allocates tables one level below the top when
	 * kernelbase isn't 1 Gig aligned. We'll just be sloppy and allocate
	 * a bunch more to the reserve. Any unused will be returned later.
	 * Note we've already counted these mappings, just not the extra
	 * pagetables.
	 */
	if (mmu.pae_hat != 0 && (kernelbase & LEVEL_OFFSET(mmu.max_level)) != 0)
		table_cnt += mmu.ptes_per_table -
		    ((kernelbase & LEVEL_OFFSET(mmu.max_level)) >>
		    LEVEL_SHIFT(mmu.max_level - 1));
#endif

	/*
	 * Add 1/4 more into table_cnt for extra slop.  Any unused slop
	 * is freed back later by htable_adjust_reserve().
	 */
	table_cnt += table_cnt >> 2;

	/*
	 * We only need mapping entries (hments) for shared pages.
	 * This should be far, far fewer than the total possible;
	 * we'll allocate enough for 1/16 of all possible PTEs.
	 */
	mapping_cnt = (table_cnt * mmu.ptes_per_table) >> 4;

	/*
	 * Now create the initial htable/hment reserves
	 */
	htable_initial_reserve(table_cnt);
	hment_reserve(mapping_cnt);
	x86pte_cpu_init(CPU);
}


/*
 * This routine handles the work of creating the kernel's initial mappings
 * by deciphering the mappings in the page tables created by the boot program.
 *
 * We maintain large page mappings, but only to a level 1 pagesize.
 * Once this function starts, the boot loader may only add new mappings.
 * In particular it cannot change the pagesize used for any existing
 * mappings or this code breaks!
 */

void
hat_kern_setup(void)
{
	/*
	 * Attach htables to the existing pagetables
	 */
	/* BEGIN CSTYLED */
	htable_attach(kas.a_hat, 0, mmu.max_level, NULL,
#ifdef __xpv
	    mmu_btop(xen_info->pt_base - ONE_GIG));
#else
	    mmu_btop(getcr3()));
#endif
	/* END CSTYLED */

#if defined(__i386) && !defined(__xpv)
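	/*
	 * Record the kernel's top level page table in the TSS structures
	 * (needed only on the 32 bit, non-hypervisor kernel).
	 */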
	CPU->cpu_tss->tss_cr3 = dftss0.tss_cr3 = getcr3();
#endif /* __i386 */

#if defined(__xpv) && defined(__amd64)
	/*
	 * Try to make the kpm mappings r/w. Failures here are OK, as
	 * it's probably just a pagetable.
	 */
	xen_kpm_finish_init();
#endif

	/*
	 * The kernel HAT is now officially open for business.
	 */
	khat_running = 1;

	CPUSET_ATOMIC_ADD(kas.a_hat->hat_cpus, CPU->cpu_id);
	CPU->cpu_current_hat = kas.a_hat;
}