/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/bootconf.h>
#include <sys/dumphdr.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <vm/hat_i86.h>
#include <sys/cmn_err.h>
#include <sys/panic.h>

#ifdef __xpv
#include <sys/hypervisor.h>
#include <sys/xpv_panic.h>
#endif

#include <sys/bootinfo.h>
#include <vm/kboot_mmu.h>

static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count);

kmem_cache_t *htable_cache;

/*
 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT,
 * is used in order to facilitate testing of the htable_steal() code.
 * By resetting htable_reserve_amount to a lower value, we can force
 * stealing to occur. The reserve amount is a guess to get us through boot.
 */
#define	HTABLE_RESERVE_AMOUNT	(200)
uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT;
kmutex_t htable_reserve_mutex;
uint_t htable_reserve_cnt;
htable_t *htable_reserve_pool;

/*
 * Used to hand test htable_steal().
 */
#ifdef DEBUG
ulong_t force_steal = 0;
ulong_t ptable_cnt = 0;
#endif

/*
 * This variable exists so that htable_steal_passes can be tuned via
 * /etc/system.
 * Any value works, but a power of two <= mmu.ptes_per_table is best.
 */
uint_t htable_steal_passes = 8;

/*
 * mutex stuff for access to htable hash
 */
#define	NUM_HTABLE_MUTEX 128
kmutex_t htable_mutex[NUM_HTABLE_MUTEX];
#define	HTABLE_MUTEX_HASH(h)	((h) & (NUM_HTABLE_MUTEX - 1))

#define	HTABLE_ENTER(h)	mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
#define	HTABLE_EXIT(h)	mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]);

/*
 * forward declarations
 */
static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr);
static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr);
static void htable_free(htable_t *ht);
static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index);
static void x86pte_release_pagetable(htable_t *ht);
static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old,
	x86pte_t new);

/*
 * A counter to track if we are stealing or reaping htables. When non-zero
 * htable_free() will directly free htables (either to the reserve or kmem)
 * instead of putting them in a hat's htable cache.
 */
uint32_t htable_dont_cache = 0;

/*
 * Track the number of active pagetables, so we can know how many to reap
 */
static uint32_t active_ptables = 0;

#ifdef __xpv
/*
 * Deal with hypervisor complications.
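 *
 * These helpers wrap MMUEXT_* hypercalls and HYPERVISOR_update_va_mapping()
 * to flush TLB entries (locally or on other cpus), pin and unpin pagetable
 * pages and adjust kpm mappings. When in an xpv panic we fall back to
 * direct TLB operations instead of hypercalls.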
 */
void
xen_flush_va(caddr_t va)
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		mmu_tlbflush_entry((caddr_t)va);
	} else {
		t.cmd = MMUEXT_INVLPG_LOCAL;
		t.arg1.linear_addr = (uintptr_t)va;
		if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmuext_op() failed");
		ASSERT(count == 1);
	}
}

void
xen_gflush_va(caddr_t va, cpuset_t cpus)
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		mmu_tlbflush_entry((caddr_t)va);
		return;
	}

	t.cmd = MMUEXT_INVLPG_MULTI;
	t.arg1.linear_addr = (uintptr_t)va;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(t.arg2.vcpumask, &cpus);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

void
xen_flush_tlb()
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		xpv_panic_reload_cr3();
	} else {
		t.cmd = MMUEXT_TLB_FLUSH_LOCAL;
		if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmuext_op() failed");
		ASSERT(count == 1);
	}
}

void
xen_gflush_tlb(cpuset_t cpus)
{
	struct mmuext_op t;
	uint_t count;

	ASSERT(!IN_XPV_PANIC());
	t.cmd = MMUEXT_TLB_FLUSH_MULTI;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(t.arg2.vcpumask, &cpus);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

/*
 * Install/Adjust a kpm mapping under the hypervisor.
 * Value of "how" should be:
 *	PT_WRITABLE | PT_VALID - regular kpm mapping
 *	PT_VALID - make mapping read-only
 *	0 - remove mapping
 *
 * returns 0 on success. non-zero for failure.
 */
int
xen_kpm_page(pfn_t pfn, uint_t how)
{
	paddr_t pa = mmu_ptob((paddr_t)pfn);
	x86pte_t pte = PT_NOCONSIST | PT_REF | PT_MOD;

	if (kpm_vbase == NULL)
		return (0);

	if (how)
		pte |= pa_to_ma(pa) | how;
	else
		pte = 0;
	return (HYPERVISOR_update_va_mapping((uintptr_t)kpm_vbase + pa,
	    pte, UVMF_INVLPG | UVMF_ALL));
}

void
xen_pin(pfn_t pfn, level_t lvl)
{
	struct mmuext_op t;
	uint_t count;

	t.cmd = MMUEXT_PIN_L1_TABLE + lvl;
	t.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

void
xen_unpin(pfn_t pfn)
{
	struct mmuext_op t;
	uint_t count;

	t.cmd = MMUEXT_UNPIN_TABLE;
	t.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

static void
xen_map(uint64_t pte, caddr_t va)
{
	if (HYPERVISOR_update_va_mapping((uintptr_t)va, pte,
	    UVMF_INVLPG | UVMF_LOCAL))
		panic("HYPERVISOR_update_va_mapping() failed");
}
#endif /* __xpv */

/*
 * Allocate a memory page for a hardware page table.
 *
 * A wrapper around page_get_physical(), with some extra checks.
 */
static pfn_t
ptable_alloc(uintptr_t seed)
{
	pfn_t pfn;
	page_t *pp;

	pfn = PFN_INVALID;
	atomic_add_32(&active_ptables, 1);

	/*
	 * The first check is to see if there is memory in the system. If we
	 * drop to throttlefree, then fail the ptable_alloc() and let the
	 * stealing code kick in. Note that we have to do this test here,
	 * since the test in page_create_throttle() would let the NOSLEEP
	 * allocation go through and deplete the page reserves.
	 *
	 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check.
	 */
	if (!NOMEMWAIT() && freemem <= throttlefree + 1)
		return (PFN_INVALID);

#ifdef DEBUG
	/*
	 * This code makes htable_steal() easier to test. By setting
	 * force_steal we force pagetable allocations to fall
	 * into the stealing code. Roughly 1 in every "force_steal"
	 * page table allocations will fail.
	 */
	if (proc_pageout != NULL && force_steal > 1 &&
	    ++ptable_cnt > force_steal) {
		ptable_cnt = 0;
		return (PFN_INVALID);
	}
#endif /* DEBUG */

	pp = page_get_physical(seed);
	if (pp == NULL)
		return (PFN_INVALID);
	pfn = pp->p_pagenum;
	page_downgrade(pp);
	ASSERT(PAGE_SHARED(pp));

	if (pfn == PFN_INVALID)
		panic("ptable_alloc(): Invalid PFN!!");
	HATSTAT_INC(hs_ptable_allocs);
	return (pfn);
}

/*
 * Free an htable's associated page table page. See the comments
 * for ptable_alloc().
 */
static void
ptable_free(pfn_t pfn)
{
	page_t *pp = page_numtopp_nolock(pfn);

	/*
	 * need to destroy the page used for the pagetable
	 */
	ASSERT(pfn != PFN_INVALID);
	HATSTAT_INC(hs_ptable_frees);
	atomic_add_32(&active_ptables, -1);
	if (pp == NULL)
		panic("ptable_free(): no page for pfn!");
	ASSERT(PAGE_SHARED(pp));
	ASSERT(pfn == pp->p_pagenum);
	ASSERT(!IN_XPV_PANIC());

	/*
	 * Get an exclusive lock, might have to wait for a kmem reader.
	 */
	if (!page_tryupgrade(pp)) {
		page_unlock(pp);
		/*
		 * RFE: we could change this to not loop forever
		 * George Cameron had some idea on how to do that.
		 * For now looping works - it's just like sfmmu.
		 */
		while (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_RECLAIM))
			continue;
	}
#ifdef __xpv
	if (kpm_vbase && xen_kpm_page(pfn, PT_VALID | PT_WRITABLE) < 0)
		panic("failure making kpm r/w pfn=0x%lx", pfn);
#endif
	page_free(pp, 1);
	page_unresv(1);
}

/*
 * Put one htable on the reserve list.
 */
static void
htable_put_reserve(htable_t *ht)
{
	ht->ht_hat = NULL;		/* no longer tied to a hat */
	ASSERT(ht->ht_pfn == PFN_INVALID);
	HATSTAT_INC(hs_htable_rputs);
	mutex_enter(&htable_reserve_mutex);
	ht->ht_next = htable_reserve_pool;
	htable_reserve_pool = ht;
	++htable_reserve_cnt;
	mutex_exit(&htable_reserve_mutex);
}

/*
 * Take one htable from the reserve.
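 * Returns NULL if the reserve pool is empty.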
 */
static htable_t *
htable_get_reserve(void)
{
	htable_t *ht = NULL;

	mutex_enter(&htable_reserve_mutex);
	if (htable_reserve_cnt != 0) {
		ht = htable_reserve_pool;
		ASSERT(ht != NULL);
		ASSERT(ht->ht_pfn == PFN_INVALID);
		htable_reserve_pool = ht->ht_next;
		--htable_reserve_cnt;
		HATSTAT_INC(hs_htable_rgets);
	}
	mutex_exit(&htable_reserve_mutex);
	return (ht);
}

/*
 * Allocate initial htables and put them on the reserve list
 */
void
htable_initial_reserve(uint_t count)
{
	htable_t *ht;

	count += HTABLE_RESERVE_AMOUNT;
	while (count > 0) {
		ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP);
		ASSERT(ht != NULL);

		ASSERT(use_boot_reserve);
		ht->ht_pfn = PFN_INVALID;
		htable_put_reserve(ht);
		--count;
	}
}

/*
 * Readjust the reserves after a thread finishes using them.
 */
void
htable_adjust_reserve()
{
	htable_t *ht;

	/*
	 * Free any excess htables in the reserve list
	 */
	while (htable_reserve_cnt > htable_reserve_amount &&
	    !USE_HAT_RESERVES()) {
		ht = htable_get_reserve();
		if (ht == NULL)
			return;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		kmem_cache_free(htable_cache, ht);
	}
}


/*
 * This routine steals htables from user processes for htable_alloc() or
 * for htable_reap().
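 *
 * Multiple passes are made over the hat list. Pass 0 only takes htables
 * from each hat's cached free list; later passes also unload and steal
 * in-use pagetables, using a rising ht_valid_cnt threshold so that sparsely
 * populated tables are stolen first.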
 */
static htable_t *
htable_steal(uint_t cnt)
{
	hat_t		*hat = kas.a_hat;	/* list starts with khat */
	htable_t	*list = NULL;
	htable_t	*ht;
	htable_t	*higher;
	uint_t		h;
	uint_t		h_start;
	static uint_t	h_seed = 0;
	uint_t		e;
	uintptr_t	va;
	x86pte_t	pte;
	uint_t		stolen = 0;
	uint_t		pass;
	uint_t		threshold;

	/*
	 * Limit htable_steal_passes to something reasonable
	 */
	if (htable_steal_passes == 0)
		htable_steal_passes = 1;
	if (htable_steal_passes > mmu.ptes_per_table)
		htable_steal_passes = mmu.ptes_per_table;

	/*
	 * Loop through all user hats. The 1st pass takes cached htables that
	 * aren't in use. The later passes steal by removing mappings, too.
	 */
	atomic_add_32(&htable_dont_cache, 1);
	for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) {
		threshold = pass * mmu.ptes_per_table / htable_steal_passes;
		hat = kas.a_hat;
		for (;;) {

			/*
			 * Clear the victim flag and move to next hat
			 */
			mutex_enter(&hat_list_lock);
			if (hat != kas.a_hat) {
				hat->hat_flags &= ~HAT_VICTIM;
				cv_broadcast(&hat_list_cv);
			}
			hat = hat->hat_next;

			/*
			 * Skip any hat that is already being stolen from.
			 *
			 * We skip SHARED hats, as these are dummy
			 * hats that host ISM shared page tables.
			 *
			 * We also skip if HAT_FREEING because hat_pte_unmap()
			 * won't zero out the PTE's. That would lead to hitting
			 * stale PTEs either here or under hat_unload() when we
			 * steal and unload the same page table in competing
			 * threads.
			 */
			while (hat != NULL &&
			    (hat->hat_flags &
			    (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0)
				hat = hat->hat_next;

			if (hat == NULL) {
				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Are we finished?
			 */
			if (stolen == cnt) {
				/*
				 * Try to spread the pain of stealing,
				 * move victim HAT to the end of the HAT list.
				 */
				if (pass >= 1 && cnt == 1 &&
				    kas.a_hat->hat_prev != hat) {

					/* unlink victim hat */
					if (hat->hat_prev)
						hat->hat_prev->hat_next =
						    hat->hat_next;
					else
						kas.a_hat->hat_next =
						    hat->hat_next;
					if (hat->hat_next)
						hat->hat_next->hat_prev =
						    hat->hat_prev;
					else
						kas.a_hat->hat_prev =
						    hat->hat_prev;


					/* relink at end of hat list */
					hat->hat_next = NULL;
					hat->hat_prev = kas.a_hat->hat_prev;
					if (hat->hat_prev)
						hat->hat_prev->hat_next = hat;
					else
						kas.a_hat->hat_next = hat;
					kas.a_hat->hat_prev = hat;

				}

				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Mark the HAT as a stealing victim.
			 */
			hat->hat_flags |= HAT_VICTIM;
			mutex_exit(&hat_list_lock);

			/*
			 * Take any htables from the hat's cached "free" list.
			 */
			hat_enter(hat);
			while ((ht = hat->hat_ht_cached) != NULL &&
			    stolen < cnt) {
				hat->hat_ht_cached = ht->ht_next;
				ht->ht_next = list;
				list = ht;
				++stolen;
			}
			hat_exit(hat);

			/*
			 * Don't steal on first pass.
			 */
			if (pass == 0 || stolen == cnt)
				continue;

			/*
			 * Search the active htables for one to steal.
			 * Start at a different hash bucket every time to
			 * help spread the pain of stealing.
			 */
			h = h_start = h_seed++ % hat->hat_num_hash;
			do {
				higher = NULL;
				HTABLE_ENTER(h);
				for (ht = hat->hat_ht_hash[h]; ht;
				    ht = ht->ht_next) {

					/*
					 * Can we rule out reaping?
					 */
					if (ht->ht_busy != 0 ||
					    (ht->ht_flags & HTABLE_SHARED_PFN) ||
					    ht->ht_level > 0 ||
					    ht->ht_valid_cnt > threshold ||
					    ht->ht_lock_cnt != 0)
						continue;

					/*
					 * Increment busy so the htable can't
					 * disappear. We drop the htable mutex
					 * to avoid deadlocks with
					 * hat_pageunload() and the hment mutex
					 * while we call hat_pte_unmap()
					 */
					++ht->ht_busy;
					HTABLE_EXIT(h);

					/*
					 * Try stealing.
					 * - unload and invalidate all PTEs
					 */
					for (e = 0, va = ht->ht_vaddr;
					    e < HTABLE_NUM_PTES(ht) &&
					    ht->ht_valid_cnt > 0 &&
					    ht->ht_busy == 1 &&
					    ht->ht_lock_cnt == 0;
					    ++e, va += MMU_PAGESIZE) {
						pte = x86pte_get(ht, e);
						if (!PTE_ISVALID(pte))
							continue;
						hat_pte_unmap(ht, e,
						    HAT_UNLOAD, pte, NULL);
					}

					/*
					 * Reacquire htable lock. If we didn't
					 * remove all mappings in the table,
					 * or another thread added a new mapping
					 * behind us, give up on this table.
					 */
					HTABLE_ENTER(h);
					if (ht->ht_busy != 1 ||
					    ht->ht_valid_cnt != 0 ||
					    ht->ht_lock_cnt != 0) {
						--ht->ht_busy;
						continue;
					}

					/*
					 * Steal it and unlink the page table.
					 */
					higher = ht->ht_parent;
					unlink_ptp(higher, ht, ht->ht_vaddr);

					/*
					 * remove from the hash list
					 */
					if (ht->ht_next)
						ht->ht_next->ht_prev =
						    ht->ht_prev;

					if (ht->ht_prev) {
						ht->ht_prev->ht_next =
						    ht->ht_next;
					} else {
						ASSERT(hat->hat_ht_hash[h] ==
						    ht);
						hat->hat_ht_hash[h] =
						    ht->ht_next;
					}

					/*
					 * Break to outer loop to release the
					 * higher (ht_parent) pagetable. This
					 * spreads out the pain caused by
					 * pagefaults.
					 */
					ht->ht_next = list;
					list = ht;
					++stolen;
					break;
				}
				HTABLE_EXIT(h);
				if (higher != NULL)
					htable_release(higher);
				if (++h == hat->hat_num_hash)
					h = 0;
			} while (stolen < cnt && h != h_start);
		}
	}
	atomic_add_32(&htable_dont_cache, -1);
	return (list);
}


/*
 * This is invoked from kmem when the system is low on memory. We try
 * to free hments, htables, and ptables to improve the memory situation.
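 * The amount of work is bounded by reap_cnt below, and nothing is done at
 * all until stealing is allowed (can_steal_post_boot).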
 */
/*ARGSUSED*/
static void
htable_reap(void *handle)
{
	uint_t		reap_cnt;
	htable_t	*list;
	htable_t	*ht;

	HATSTAT_INC(hs_reap_attempts);
	if (!can_steal_post_boot)
		return;

	/*
	 * Try to reap 5% of the page tables bounded by a maximum of
	 * 5% of physmem and a minimum of 10.
	 */
	reap_cnt = MIN(MAX(physmem / 20, active_ptables / 20), 10);

	/*
	 * Let htable_steal() do the work, we just call htable_free()
	 */
	XPV_DISALLOW_MIGRATE();
	list = htable_steal(reap_cnt);
	XPV_ALLOW_MIGRATE();
	while ((ht = list) != NULL) {
		list = ht->ht_next;
		HATSTAT_INC(hs_reaped);
		htable_free(ht);
	}

	/*
	 * Free up excess reserves
	 */
	htable_adjust_reserve();
	hment_adjust_reserve();
}

/*
 * Allocate an htable, stealing one or using the reserve if necessary
 */
static htable_t *
htable_alloc(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	htable_t	*ht = NULL;
	uint_t		is_vlp;
	uint_t		is_bare = 0;
	uint_t		need_to_zero = 1;
	int		kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_alloc(): level %d out of range\n", level);

	is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
	if (is_vlp || shared != NULL)
		is_bare = 1;

	/*
	 * First reuse a cached htable from the hat_ht_cached field, this
	 * avoids unnecessary trips through kmem/page allocators.
	 */
	if (hat->hat_ht_cached != NULL && !is_bare) {
		hat_enter(hat);
		ht = hat->hat_ht_cached;
		if (ht != NULL) {
			hat->hat_ht_cached = ht->ht_next;
			need_to_zero = 0;
			/* XX64 ASSERT() they're all zero somehow */
			ASSERT(ht->ht_pfn != PFN_INVALID);
		}
		hat_exit(hat);
	}

	if (ht == NULL) {
		/*
		 * Allocate an htable, possibly refilling the reserves.
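		 * When not using the hat reserves, successful kmem allocations
		 * are donated to the reserve pool until it holds
		 * htable_reserve_amount entries.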
		 */
		if (USE_HAT_RESERVES()) {
			ht = htable_get_reserve();
		} else {
			/*
			 * Donate successful htable allocations to the reserve.
			 */
			for (;;) {
				ht = kmem_cache_alloc(htable_cache, kmflags);
				if (ht == NULL)
					break;
				ht->ht_pfn = PFN_INVALID;
				if (USE_HAT_RESERVES() ||
				    htable_reserve_cnt >= htable_reserve_amount)
					break;
				htable_put_reserve(ht);
			}
		}

		/*
		 * allocate a page for the hardware page table if needed
		 */
		if (ht != NULL && !is_bare) {
			ht->ht_hat = hat;
			ht->ht_pfn = ptable_alloc((uintptr_t)ht);
			if (ht->ht_pfn == PFN_INVALID) {
				if (USE_HAT_RESERVES())
					htable_put_reserve(ht);
				else
					kmem_cache_free(htable_cache, ht);
				ht = NULL;
			}
		}
	}

	/*
	 * If allocations failed, kick off a kmem_reap() and resort to
	 * htable_steal(). We may spin here if the system is very low on
	 * memory. If the kernel itself has consumed all memory and kmem_reap()
	 * can't free up anything, then we'll really get stuck here.
	 * That should only happen in a system where the administrator has
	 * misconfigured VM parameters via /etc/system.
	 */
	while (ht == NULL && can_steal_post_boot) {
		kmem_reap();
		ht = htable_steal(1);
		HATSTAT_INC(hs_steals);

		/*
		 * If we stole for a bare htable, release the pagetable page.
		 */
		if (ht != NULL) {
			if (is_bare) {
				ptable_free(ht->ht_pfn);
				ht->ht_pfn = PFN_INVALID;
#if defined(__xpv) && defined(__amd64)
			/*
			 * make stolen page table writable again in kpm
			 */
			} else if (kpm_vbase && xen_kpm_page(ht->ht_pfn,
			    PT_VALID | PT_WRITABLE) < 0) {
				panic("failure making kpm r/w pfn=0x%lx",
				    ht->ht_pfn);
#endif
			}
		}
	}

	/*
	 * All attempts to allocate or steal failed. This should only happen
	 * if we run out of memory during boot, due perhaps to a huge
	 * boot_archive. At this point there's no way to continue.
	 */
	if (ht == NULL)
		panic("htable_alloc(): couldn't steal\n");

#if defined(__amd64) && defined(__xpv)
	/*
	 * Under the 64-bit hypervisor, we have 2 top level page tables.
	 * If this allocation fails, we'll resort to stealing.
	 * We use the stolen page indirectly, by freeing the
	 * stolen htable first.
	 */
	if (level == mmu.max_level) {
		for (;;) {
			htable_t *stolen;

			hat->hat_user_ptable = ptable_alloc((uintptr_t)ht + 1);
			if (hat->hat_user_ptable != PFN_INVALID)
				break;
			stolen = htable_steal(1);
			if (stolen == NULL)
				panic("2nd steal ptable failed\n");
			htable_free(stolen);
		}
		block_zero_no_xmm(kpm_vbase + pfn_to_pa(hat->hat_user_ptable),
		    MMU_PAGESIZE);
	}
#endif

	/*
	 * Shared page tables have all entries locked and entries may not
	 * be added or deleted.
	 */
	ht->ht_flags = 0;
	if (shared != NULL) {
		ASSERT(shared->ht_valid_cnt > 0);
		ht->ht_flags |= HTABLE_SHARED_PFN;
		ht->ht_pfn = shared->ht_pfn;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;		/* updated in hat_share() */
		ht->ht_shares = shared;
		need_to_zero = 0;
	} else {
		ht->ht_shares = NULL;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;
	}

	/*
	 * setup flags, etc. for VLP htables
	 */
	if (is_vlp) {
		ht->ht_flags |= HTABLE_VLP;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		need_to_zero = 0;
	}

	/*
	 * fill in the htable
	 */
	ht->ht_hat = hat;
	ht->ht_parent = NULL;
	ht->ht_vaddr = vaddr;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;

	/*
	 * Zero out any freshly allocated page table
	 */
	if (need_to_zero)
		x86pte_zero(ht, 0, mmu.ptes_per_table);

#if defined(__amd64) && defined(__xpv)
	if (!is_bare && kpm_vbase) {
		(void) xen_kpm_page(ht->ht_pfn, PT_VALID);
		if (level == mmu.max_level)
			(void) xen_kpm_page(hat->hat_user_ptable, PT_VALID);
	}
#endif

	return (ht);
}

/*
 * Free up an htable, either to a hat's cached list, the reserves or
 * back to kmem.
 */
static void
htable_free(htable_t *ht)
{
	hat_t	*hat = ht->ht_hat;

	/*
	 * If the process isn't exiting, cache the free htable in the hat
	 * structure.
	 * We always do this for the boot time reserve. We don't
	 * do this if the hat is exiting or we are stealing/reaping htables.
	 */
	if (hat != NULL &&
	    !(ht->ht_flags & HTABLE_SHARED_PFN) &&
	    (use_boot_reserve ||
	    (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
		ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
		ASSERT(ht->ht_pfn != PFN_INVALID);
		hat_enter(hat);
		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		hat_exit(hat);
		return;
	}

	/*
	 * If we have a hardware page table, free it.
	 * We don't free page tables that are accessed by sharing.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht->ht_pfn);
#if defined(__amd64) && defined(__xpv)
		if (ht->ht_level == mmu.max_level) {
			ptable_free(hat->hat_user_ptable);
			hat->hat_user_ptable = PFN_INVALID;
		}
#endif
	}
	ht->ht_pfn = PFN_INVALID;

	/*
	 * Free it or put into reserves.
	 */
	if (USE_HAT_RESERVES() || htable_reserve_cnt < htable_reserve_amount) {
		htable_put_reserve(ht);
	} else {
		kmem_cache_free(htable_cache, ht);
		htable_adjust_reserve();
	}
}


/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all left over
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t	*ht;
	int		h;

	/*
	 * Purge the htable cache if just reaping.
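	 * htable_dont_cache is bumped so that htable_free() sends these
	 * htables to the reserves/kmem instead of recaching them here.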
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_add_32(&htable_dont_cache, 1);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_add_32(&htable_dont_cache, -1);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	expect = MAKEPTP(old->ht_pfn, old->ht_level);
	x86pte_t	found;
	hat_t		*hat = old->ht_hat;

	ASSERT(higher->ht_busy > 0);
	ASSERT(higher->ht_valid_cnt > 0);
	ASSERT(old->ht_valid_cnt == 0);
	found = x86pte_cas(higher, entry, expect, 0);
#ifdef __xpv
	/*
	 * This is weird, but Xen apparently automatically unlinks empty
	 * pagetables from the upper page table. So allow PTP to be 0 already.
	 */
	if (found != expect && found != 0)
#else
	if (found != expect)
#endif
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);

	/*
	 * When a top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors.
	 *
	 * If we don't need to do that, then we still have to INVLPG against
	 * an address covered by the inner page table, as the latest processors
	 * have TLB-like caches for non-leaf page table entries.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		hat_tlb_inval(hat, (higher->ht_flags & HTABLE_VLP) ?
		    DEMAP_ALL_ADDR : old->ht_vaddr);
	}

	HTABLE_DEC(higher->ht_valid_cnt);
}

/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t	found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);

	/*
	 * When any top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors using it.
	 * We also need to do this for the kernel hat on PAE 32 bit kernel.
	 */
	if (
#ifdef __i386
	    (higher->ht_hat == kas.a_hat && higher->ht_level == VLP_LEVEL) ||
#endif
	    (higher->ht_flags & HTABLE_VLP))
		hat_tlb_inval(higher->ht_hat, DEMAP_ALL_ADDR);
}

/*
 * Release a hold on an htable. If this is the last use and the pagetable
 * is empty we may want to free it, then recursively look at the pagetable
 * above it. The recursion is handled by the outer while() loop.
 *
 * On the metal, during process exit, we don't bother unlinking the tables from
 * upper level pagetables. They are instead handled in bulk by hat_free_end().
 * We can't do this on the hypervisor as we need the page table to be
 * implicitly unpinned before it goes to the free page lists. This can't
 * happen unless we fully unlink it from the page table hierarchy.
 */
void
htable_release(htable_t *ht)
{
	uint_t		hashval;
	htable_t	*shared;
	htable_t	*higher;
	hat_t		*hat;
	uintptr_t	va;
	level_t		level;

	while (ht != NULL) {
		shared = NULL;
		for (;;) {
			hat = ht->ht_hat;
			va = ht->ht_vaddr;
			level = ht->ht_level;
			hashval = HTABLE_HASH(hat, va, level);

			/*
			 * The common case is that this isn't the last use of
			 * an htable so we don't want to free the htable.
			 */
			HTABLE_ENTER(hashval);
			ASSERT(ht->ht_valid_cnt >= 0);
			ASSERT(ht->ht_busy > 0);
			if (ht->ht_valid_cnt > 0)
				break;
			if (ht->ht_busy > 1)
				break;
			ASSERT(ht->ht_lock_cnt == 0);

#if !defined(__xpv)
			/*
			 * we always release empty shared htables
			 */
			if (!(ht->ht_flags & HTABLE_SHARED_PFN)) {

				/*
				 * don't release if in address space tear down
				 */
				if (hat->hat_flags & HAT_FREEING)
					break;

				/*
				 * At and above max_page_level, free if it's for
				 * a boot-time kernel mapping below kernelbase.
				 */
				if (level >= mmu.max_page_level &&
				    (hat != kas.a_hat || va >= kernelbase))
					break;
			}
#endif /* __xpv */

			/*
			 * Remember if we destroy an htable that shares its PFN
			 * from elsewhere.
			 */
			if (ht->ht_flags & HTABLE_SHARED_PFN) {
				ASSERT(shared == NULL);
				shared = ht->ht_shares;
				HATSTAT_INC(hs_htable_unshared);
			}

			/*
			 * Handle release of a table and freeing the htable_t.
			 * Unlink it from the table higher (ie. ht_parent).
			 */
			ASSERT(ht->ht_lock_cnt == 0);
			higher = ht->ht_parent;
			ASSERT(higher != NULL);

			/*
			 * Unlink the pagetable.
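			 * unlink_ptp() clears the PTP entry and does any
			 * TLB invalidation needed for it.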
			 */
			unlink_ptp(higher, ht, va);

			/*
			 * remove this htable from its hash list
			 */
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[hashval] == ht);
				hat->hat_ht_hash[hashval] = ht->ht_next;
			}
			HTABLE_EXIT(hashval);
			htable_free(ht);
			ht = higher;
		}

		ASSERT(ht->ht_busy >= 1);
		--ht->ht_busy;
		HTABLE_EXIT(hashval);

		/*
		 * If we released a shared htable, do a release on the htable
		 * from which it shared
		 */
		ht = shared;
	}
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found, acquires a hold that eventually needs to be htable_release()d.
 */
htable_t *
htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level)
{
	uintptr_t	base;
	uint_t		hashval;
	htable_t	*ht = NULL;

	ASSERT(level >= 0);
	ASSERT(level <= TOP_LEVEL(hat));

	if (level == TOP_LEVEL(hat)) {
#if defined(__amd64)
		/*
		 * 32 bit address spaces on 64 bit kernels need to check
		 * for overflow of the 32 bit address space
		 */
		if ((hat->hat_flags & HAT_VLP) && vaddr >= ((uint64_t)1 << 32))
			return (NULL);
#endif
		base = 0;
	} else {
		base = vaddr & LEVEL_MASK(level + 1);
	}

	hashval = HTABLE_HASH(hat, base, level);
	HTABLE_ENTER(hashval);
	for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) {
		if (ht->ht_hat == hat &&
		    ht->ht_vaddr == base &&
		    ht->ht_level == level)
			break;
	}
	if (ht)
		++ht->ht_busy;

	HTABLE_EXIT(hashval);
	return (ht);
}

/*
 * Acquires a hold on a known htable (from a locked hment entry).
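 * The hold is a bump of ht_busy and is dropped via htable_release().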
 */
void
htable_acquire(htable_t *ht)
{
	hat_t		*hat = ht->ht_hat;
	level_t		level = ht->ht_level;
	uintptr_t	base = ht->ht_vaddr;
	uint_t		hashval = HTABLE_HASH(hat, base, level);

	HTABLE_ENTER(hashval);
#ifdef DEBUG
	/*
	 * make sure the htable is there
	 */
	{
		htable_t	*h;

		for (h = hat->hat_ht_hash[hashval];
		    h && h != ht;
		    h = h->ht_next)
			;
		ASSERT(h == ht);
	}
#endif /* DEBUG */
	++ht->ht_busy;
	HTABLE_EXIT(hashval);
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found, acquires a hold that eventually needs to be htable_release()d.
 * If not found, the table is created.
 *
 * Since we can't hold a hash table mutex during allocation, we have to
 * drop it and redo the search on a create. Then we may have to free the newly
 * allocated htable if another thread raced in and created it ahead of us.
 */
htable_t *
htable_create(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	uint_t		h;
	level_t		l;
	uintptr_t	base;
	htable_t	*ht;
	htable_t	*higher = NULL;
	htable_t	*new = NULL;

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_create(): level %d out of range\n", level);

	/*
	 * Create the page tables in top down order.
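	 * At each level we look for an existing htable in the hash. If none
	 * is found, drop the hash mutex, allocate a new htable and redo the
	 * search, linking the new table under its parent if nobody else
	 * created it in the meantime.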
	 */
	for (l = TOP_LEVEL(hat); l >= level; --l) {
		new = NULL;
		if (l == TOP_LEVEL(hat))
			base = 0;
		else
			base = vaddr & LEVEL_MASK(l + 1);

		h = HTABLE_HASH(hat, base, l);
try_again:
		/*
		 * look up the htable at this level
		 */
		HTABLE_ENTER(h);
		if (l == TOP_LEVEL(hat)) {
			ht = hat->hat_htable;
		} else {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				ASSERT(ht->ht_hat == hat);
				if (ht->ht_vaddr == base &&
				    ht->ht_level == l)
					break;
			}
		}

		/*
		 * if we found the htable, increment its busy cnt
		 * and if we had allocated a new htable, free it.
		 */
		if (ht != NULL) {
			/*
			 * If we find a pre-existing shared table, it must
			 * share from the same place.
			 */
			if (l == level && shared && ht->ht_shares &&
			    ht->ht_shares != shared) {
				panic("htable shared from wrong place "
				    "found htable=%p shared=%p",
				    (void *)ht, (void *)shared);
			}
			++ht->ht_busy;
			HTABLE_EXIT(h);
			if (new)
				htable_free(new);
			if (higher != NULL)
				htable_release(higher);
			higher = ht;

			/*
			 * if we didn't find it on the first search
			 * allocate a new one and search again
			 */
		} else if (new == NULL) {
			HTABLE_EXIT(h);
			new = htable_alloc(hat, base, l,
			    l == level ? shared : NULL);
			goto try_again;

			/*
			 * 2nd search and still not there, use "new" table
			 * Link new table into higher, when not at top level.
			 */
		} else {
			ht = new;
			if (higher != NULL) {
				link_ptp(higher, ht, base);
				ht->ht_parent = higher;
			}
			ht->ht_next = hat->hat_ht_hash[h];
			ASSERT(ht->ht_prev == NULL);
			if (hat->hat_ht_hash[h])
				hat->hat_ht_hash[h]->ht_prev = ht;
			hat->hat_ht_hash[h] = ht;
			HTABLE_EXIT(h);

			/*
			 * Note we don't do htable_release(higher).
			 * That happens recursively when "new" is removed by
			 * htable_release() or htable_steal().
14130Sstevel@tonic-gate */ 14140Sstevel@tonic-gate higher = ht; 14150Sstevel@tonic-gate 14160Sstevel@tonic-gate /* 14170Sstevel@tonic-gate * If we just created a new shared page table we 14180Sstevel@tonic-gate * increment the shared htable's busy count, so that 14190Sstevel@tonic-gate * it can't be the victim of a steal even if it's empty. 14200Sstevel@tonic-gate */ 14210Sstevel@tonic-gate if (l == level && shared) { 14220Sstevel@tonic-gate (void) htable_lookup(shared->ht_hat, 14230Sstevel@tonic-gate shared->ht_vaddr, shared->ht_level); 14240Sstevel@tonic-gate HATSTAT_INC(hs_htable_shared); 14250Sstevel@tonic-gate } 14260Sstevel@tonic-gate } 14270Sstevel@tonic-gate } 14280Sstevel@tonic-gate 14290Sstevel@tonic-gate return (ht); 14300Sstevel@tonic-gate } 14310Sstevel@tonic-gate 14320Sstevel@tonic-gate /* 14335084Sjohnlev * Inherit initial pagetables from the boot program. On the 64-bit 14345084Sjohnlev * hypervisor we also temporarily mark the p_index field of page table 14355084Sjohnlev * pages, so we know not to try making them writable in seg_kpm. 14363446Smrj */ 14373446Smrj void 14383446Smrj htable_attach( 14393446Smrj hat_t *hat, 14403446Smrj uintptr_t base, 14413446Smrj level_t level, 14423446Smrj htable_t *parent, 14433446Smrj pfn_t pfn) 14443446Smrj { 14453446Smrj htable_t *ht; 14463446Smrj uint_t h; 14473446Smrj uint_t i; 14483446Smrj x86pte_t pte; 14493446Smrj x86pte_t *ptep; 14503446Smrj page_t *pp; 14513446Smrj extern page_t *boot_claim_page(pfn_t); 14523446Smrj 14533446Smrj ht = htable_get_reserve(); 14543446Smrj if (level == mmu.max_level) 14553446Smrj kas.a_hat->hat_htable = ht; 14563446Smrj ht->ht_hat = hat; 14573446Smrj ht->ht_parent = parent; 14583446Smrj ht->ht_vaddr = base; 14593446Smrj ht->ht_level = level; 14603446Smrj ht->ht_busy = 1; 14613446Smrj ht->ht_next = NULL; 14623446Smrj ht->ht_prev = NULL; 14633446Smrj ht->ht_flags = 0; 14643446Smrj ht->ht_pfn = pfn; 14653446Smrj ht->ht_lock_cnt = 0; 14663446Smrj ht->ht_valid_cnt = 0; 14673446Smrj if (parent != NULL) 14683446Smrj ++parent->ht_busy; 14693446Smrj 14703446Smrj h = HTABLE_HASH(hat, base, level); 14713446Smrj HTABLE_ENTER(h); 14723446Smrj ht->ht_next = hat->hat_ht_hash[h]; 14733446Smrj ASSERT(ht->ht_prev == NULL); 14743446Smrj if (hat->hat_ht_hash[h]) 14753446Smrj hat->hat_ht_hash[h]->ht_prev = ht; 14763446Smrj hat->hat_ht_hash[h] = ht; 14773446Smrj HTABLE_EXIT(h); 14783446Smrj 14793446Smrj /* 14803446Smrj * make sure the page table physical page is not FREE 14813446Smrj */ 14823446Smrj if (page_resv(1, KM_NOSLEEP) == 0) 14833446Smrj panic("page_resv() failed in ptable alloc"); 14843446Smrj 14853446Smrj pp = boot_claim_page(pfn); 14863446Smrj ASSERT(pp != NULL); 14873446Smrj page_downgrade(pp); 14885084Sjohnlev #if defined(__xpv) && defined(__amd64) 14893446Smrj /* 14903446Smrj * Record in the page_t that is a pagetable for segkpm setup. 14913446Smrj */ 14923446Smrj if (kpm_vbase) 14933446Smrj pp->p_index = 1; 14945084Sjohnlev #endif 14953446Smrj 14963446Smrj /* 14973446Smrj * Count valid mappings and recursively attach lower level pagetables. 
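 *
 * In outline, the scan below does the following for each valid entry
 * (descriptive sketch of the code, not extra behavior):
 *
 *         if (PTE_ISPAGE(pte, level))
 *                 the entry maps a page; it is only counted
 *         else
 *                 the entry points to a lower level pagetable, so we
 *                 recurse via htable_attach(..., level - 1, ht,
 *                 PTE2PFN(pte, level)) and then re-establish the boot
 *                 remap window, which the recursion reused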
14983446Smrj */ 14993446Smrj ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 15003446Smrj for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) { 15013446Smrj if (mmu.pae_hat) 15023446Smrj pte = ptep[i]; 15033446Smrj else 15043446Smrj pte = ((x86pte32_t *)ptep)[i]; 15053446Smrj if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) { 15063446Smrj ++ht->ht_valid_cnt; 15073446Smrj if (!PTE_ISPAGE(pte, level)) { 15083446Smrj htable_attach(hat, base, level - 1, 15093446Smrj ht, PTE2PFN(pte, level)); 15103446Smrj ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 15113446Smrj } 15123446Smrj } 15133446Smrj base += LEVEL_SIZE(level); 15143446Smrj if (base == mmu.hole_start) 15153446Smrj base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK; 15163446Smrj } 15173446Smrj 15183446Smrj /* 15193446Smrj * As long as all the mappings we had were below kernel base 15203446Smrj * we can release the htable. 15213446Smrj */ 15223446Smrj if (base < kernelbase) 15233446Smrj htable_release(ht); 15243446Smrj } 15253446Smrj 15263446Smrj /* 15270Sstevel@tonic-gate * Walk through a given htable looking for the first valid entry. This 15280Sstevel@tonic-gate * routine takes both a starting and ending address. The starting address 15290Sstevel@tonic-gate * is required to be within the htable provided by the caller, but there is 15300Sstevel@tonic-gate * no such restriction on the ending address. 15310Sstevel@tonic-gate * 15320Sstevel@tonic-gate * If the routine finds a valid entry in the htable (at or beyond the 15330Sstevel@tonic-gate * starting address), the PTE (and its address) will be returned. 15340Sstevel@tonic-gate * This PTE may correspond to either a page or a pagetable - it is the 15350Sstevel@tonic-gate * caller's responsibility to determine which. If no valid entry is 15360Sstevel@tonic-gate * found, 0 (and invalid PTE) and the next unexamined address will be 15370Sstevel@tonic-gate * returned. 15380Sstevel@tonic-gate * 15390Sstevel@tonic-gate * The loop has been carefully coded for optimization. 15400Sstevel@tonic-gate */ 15410Sstevel@tonic-gate static x86pte_t 15420Sstevel@tonic-gate htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr) 15430Sstevel@tonic-gate { 15440Sstevel@tonic-gate uint_t e; 15450Sstevel@tonic-gate x86pte_t found_pte = (x86pte_t)0; 15463446Smrj caddr_t pte_ptr; 15473446Smrj caddr_t end_pte_ptr; 15480Sstevel@tonic-gate int l = ht->ht_level; 15490Sstevel@tonic-gate uintptr_t va = *vap & LEVEL_MASK(l); 15500Sstevel@tonic-gate size_t pgsize = LEVEL_SIZE(l); 15510Sstevel@tonic-gate 15520Sstevel@tonic-gate ASSERT(va >= ht->ht_vaddr); 15530Sstevel@tonic-gate ASSERT(va <= HTABLE_LAST_PAGE(ht)); 15540Sstevel@tonic-gate 15550Sstevel@tonic-gate /* 15560Sstevel@tonic-gate * Compute the starting index and ending virtual address 15570Sstevel@tonic-gate */ 15580Sstevel@tonic-gate e = htable_va2entry(va, ht); 15590Sstevel@tonic-gate 15600Sstevel@tonic-gate /* 15610Sstevel@tonic-gate * The following page table scan code knows that the valid 15620Sstevel@tonic-gate * bit of a PTE is in the lowest byte AND that x86 is little endian!! 
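 *
 * For example, a valid 64 bit PTE with a value such as
 * 0x8000000012345027 (an illustrative number only) has PT_VALID in
 * bit 0, and on little endian x86 that low order byte (0x27) is the
 * first byte of the entry in memory.  The scan can therefore walk a
 * byte pointer in steps of mmu.pte_size and test just the first byte
 * of each entry for validity, regardless of whether PTEs are 4 or 8
 * bytes wide.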
15630Sstevel@tonic-gate */ 15643446Smrj pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0); 15653446Smrj end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht)); 15663446Smrj pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e); 15672687Skchow while (!PTE_ISVALID(*pte_ptr)) { 15680Sstevel@tonic-gate va += pgsize; 15690Sstevel@tonic-gate if (va >= eaddr) 15700Sstevel@tonic-gate break; 15710Sstevel@tonic-gate pte_ptr += mmu.pte_size; 15720Sstevel@tonic-gate ASSERT(pte_ptr <= end_pte_ptr); 15730Sstevel@tonic-gate if (pte_ptr == end_pte_ptr) 15740Sstevel@tonic-gate break; 15750Sstevel@tonic-gate } 15760Sstevel@tonic-gate 15770Sstevel@tonic-gate /* 15780Sstevel@tonic-gate * if we found a valid PTE, load the entire PTE 15790Sstevel@tonic-gate */ 15803446Smrj if (va < eaddr && pte_ptr != end_pte_ptr) 15813446Smrj found_pte = GET_PTE((x86pte_t *)pte_ptr); 15820Sstevel@tonic-gate x86pte_release_pagetable(ht); 15830Sstevel@tonic-gate 15840Sstevel@tonic-gate #if defined(__amd64) 15850Sstevel@tonic-gate /* 15860Sstevel@tonic-gate * deal with VA hole on amd64 15870Sstevel@tonic-gate */ 15880Sstevel@tonic-gate if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end) 15890Sstevel@tonic-gate va = mmu.hole_end + va - mmu.hole_start; 15900Sstevel@tonic-gate #endif /* __amd64 */ 15910Sstevel@tonic-gate 15920Sstevel@tonic-gate *vap = va; 15930Sstevel@tonic-gate return (found_pte); 15940Sstevel@tonic-gate } 15950Sstevel@tonic-gate 15960Sstevel@tonic-gate /* 15970Sstevel@tonic-gate * Find the address and htable for the first populated translation at or 15980Sstevel@tonic-gate * above the given virtual address. The caller may also specify an upper 15990Sstevel@tonic-gate * limit to the address range to search. Uses level information to quickly 16000Sstevel@tonic-gate * skip unpopulated sections of virtual address spaces. 16010Sstevel@tonic-gate * 16020Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable and virt addr 16030Sstevel@tonic-gate * and has a hold on the htable. 16040Sstevel@tonic-gate */ 16050Sstevel@tonic-gate x86pte_t 16060Sstevel@tonic-gate htable_walk( 16070Sstevel@tonic-gate struct hat *hat, 16080Sstevel@tonic-gate htable_t **htp, 16090Sstevel@tonic-gate uintptr_t *vaddr, 16100Sstevel@tonic-gate uintptr_t eaddr) 16110Sstevel@tonic-gate { 16120Sstevel@tonic-gate uintptr_t va = *vaddr; 16130Sstevel@tonic-gate htable_t *ht; 16140Sstevel@tonic-gate htable_t *prev = *htp; 16150Sstevel@tonic-gate level_t l; 16160Sstevel@tonic-gate level_t max_mapped_level; 16170Sstevel@tonic-gate x86pte_t pte; 16180Sstevel@tonic-gate 16190Sstevel@tonic-gate ASSERT(eaddr > va); 16200Sstevel@tonic-gate 16210Sstevel@tonic-gate /* 16220Sstevel@tonic-gate * If this is a user address, then we know we need not look beyond 16230Sstevel@tonic-gate * kernelbase. 16240Sstevel@tonic-gate */ 16250Sstevel@tonic-gate ASSERT(hat == kas.a_hat || eaddr <= kernelbase || 16260Sstevel@tonic-gate eaddr == HTABLE_WALK_TO_END); 16270Sstevel@tonic-gate if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END) 16280Sstevel@tonic-gate eaddr = kernelbase; 16290Sstevel@tonic-gate 16300Sstevel@tonic-gate /* 16310Sstevel@tonic-gate * If we're coming in with a previous page table, search it first 16320Sstevel@tonic-gate * without doing an htable_lookup(), this should be frequent. 
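 *
 * For reference, a typical caller iterates roughly like this
 * (illustrative sketch only; "start" and "end" are caller supplied):
 *
 *         htable_t *ht = NULL;
 *         uintptr_t va = start;
 *         x86pte_t pte;
 *
 *         while ((pte = htable_walk(hat, &ht, &va, end)) != 0) {
 *                 ... handle the mapping at va, possibly break early ...
 *                 va += LEVEL_SIZE(ht->ht_level);
 *         }
 *         if (ht != NULL)
 *                 htable_release(ht);
 *
 * Since consecutive iterations usually stay within one pagetable,
 * rescanning "prev" first avoids most htable_lookup() calls.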
16330Sstevel@tonic-gate */ 16340Sstevel@tonic-gate if (prev) { 16350Sstevel@tonic-gate ASSERT(prev->ht_busy > 0); 16360Sstevel@tonic-gate ASSERT(prev->ht_vaddr <= va); 16370Sstevel@tonic-gate l = prev->ht_level; 16380Sstevel@tonic-gate if (va <= HTABLE_LAST_PAGE(prev)) { 16390Sstevel@tonic-gate pte = htable_scan(prev, &va, eaddr); 16400Sstevel@tonic-gate 16410Sstevel@tonic-gate if (PTE_ISPAGE(pte, l)) { 16420Sstevel@tonic-gate *vaddr = va; 16430Sstevel@tonic-gate *htp = prev; 16440Sstevel@tonic-gate return (pte); 16450Sstevel@tonic-gate } 16460Sstevel@tonic-gate } 16470Sstevel@tonic-gate 16480Sstevel@tonic-gate /* 16490Sstevel@tonic-gate * We found nothing in the htable provided by the caller, 16500Sstevel@tonic-gate * so fall through and do the full search 16510Sstevel@tonic-gate */ 16520Sstevel@tonic-gate htable_release(prev); 16530Sstevel@tonic-gate } 16540Sstevel@tonic-gate 16550Sstevel@tonic-gate /* 16560Sstevel@tonic-gate * Find the level of the largest pagesize used by this HAT. 16570Sstevel@tonic-gate */ 16584654Sjosephb if (hat->hat_ism_pgcnt > 0) { 16595349Skchow max_mapped_level = mmu.umax_page_level; 16604654Sjosephb } else { 16614654Sjosephb max_mapped_level = 0; 16624654Sjosephb for (l = 1; l <= mmu.max_page_level; ++l) 16634654Sjosephb if (hat->hat_pages_mapped[l] != 0) 16644654Sjosephb max_mapped_level = l; 16654654Sjosephb } 16660Sstevel@tonic-gate 16670Sstevel@tonic-gate while (va < eaddr && va >= *vaddr) { 16680Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va)); 16690Sstevel@tonic-gate 16700Sstevel@tonic-gate /* 16710Sstevel@tonic-gate * Find lowest table with any entry for given address. 16720Sstevel@tonic-gate */ 16730Sstevel@tonic-gate for (l = 0; l <= TOP_LEVEL(hat); ++l) { 16740Sstevel@tonic-gate ht = htable_lookup(hat, va, l); 16750Sstevel@tonic-gate if (ht != NULL) { 16760Sstevel@tonic-gate pte = htable_scan(ht, &va, eaddr); 16770Sstevel@tonic-gate if (PTE_ISPAGE(pte, l)) { 16780Sstevel@tonic-gate *vaddr = va; 16790Sstevel@tonic-gate *htp = ht; 16800Sstevel@tonic-gate return (pte); 16810Sstevel@tonic-gate } 16820Sstevel@tonic-gate htable_release(ht); 16830Sstevel@tonic-gate break; 16840Sstevel@tonic-gate } 16850Sstevel@tonic-gate 16860Sstevel@tonic-gate /* 16874654Sjosephb * No htable at this level for the address. If there 16884654Sjosephb * is no larger page size that could cover it, we can 16894654Sjosephb * skip right to the start of the next page table. 16904575Sdm120769 */ 16914575Sdm120769 ASSERT(l < TOP_LEVEL(hat)); 16924575Sdm120769 if (l >= max_mapped_level) { 16930Sstevel@tonic-gate va = NEXT_ENTRY_VA(va, l + 1); 16944654Sjosephb if (va >= eaddr) 16954654Sjosephb break; 16964575Sdm120769 } 16970Sstevel@tonic-gate } 16980Sstevel@tonic-gate } 16990Sstevel@tonic-gate 17000Sstevel@tonic-gate *vaddr = 0; 17010Sstevel@tonic-gate *htp = NULL; 17020Sstevel@tonic-gate return (0); 17030Sstevel@tonic-gate } 17040Sstevel@tonic-gate 17050Sstevel@tonic-gate /* 17060Sstevel@tonic-gate * Find the htable and page table entry index of the given virtual address 17070Sstevel@tonic-gate * with pagesize at or below given level. 17080Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable, sets 17090Sstevel@tonic-gate * entry, and has a hold on the htable. 
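 *
 * A minimal usage sketch (illustrative only):
 *
 *         uint_t entry;
 *         x86pte_t pte;
 *         htable_t *ht;
 *
 *         ht = htable_getpte(hat, vaddr, &entry, &pte, mmu.max_page_level);
 *         if (ht != NULL) {
 *                 ... pte is the current entry, entry is its index ...
 *                 htable_release(ht);
 *         }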
17100Sstevel@tonic-gate */ 17110Sstevel@tonic-gate htable_t * 17120Sstevel@tonic-gate htable_getpte( 17130Sstevel@tonic-gate struct hat *hat, 17140Sstevel@tonic-gate uintptr_t vaddr, 17150Sstevel@tonic-gate uint_t *entry, 17160Sstevel@tonic-gate x86pte_t *pte, 17170Sstevel@tonic-gate level_t level) 17180Sstevel@tonic-gate { 17190Sstevel@tonic-gate htable_t *ht; 17200Sstevel@tonic-gate level_t l; 17210Sstevel@tonic-gate uint_t e; 17220Sstevel@tonic-gate 17230Sstevel@tonic-gate ASSERT(level <= mmu.max_page_level); 17240Sstevel@tonic-gate 17250Sstevel@tonic-gate for (l = 0; l <= level; ++l) { 17260Sstevel@tonic-gate ht = htable_lookup(hat, vaddr, l); 17270Sstevel@tonic-gate if (ht == NULL) 17280Sstevel@tonic-gate continue; 17290Sstevel@tonic-gate e = htable_va2entry(vaddr, ht); 17300Sstevel@tonic-gate if (entry != NULL) 17310Sstevel@tonic-gate *entry = e; 17320Sstevel@tonic-gate if (pte != NULL) 17330Sstevel@tonic-gate *pte = x86pte_get(ht, e); 17340Sstevel@tonic-gate return (ht); 17350Sstevel@tonic-gate } 17360Sstevel@tonic-gate return (NULL); 17370Sstevel@tonic-gate } 17380Sstevel@tonic-gate 17390Sstevel@tonic-gate /* 17400Sstevel@tonic-gate * Find the htable and page table entry index of the given virtual address. 17410Sstevel@tonic-gate * There must be a valid page mapped at the given address. 17420Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable, sets 17430Sstevel@tonic-gate * entry, and has a hold on the htable. 17440Sstevel@tonic-gate */ 17450Sstevel@tonic-gate htable_t * 17460Sstevel@tonic-gate htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry) 17470Sstevel@tonic-gate { 17480Sstevel@tonic-gate htable_t *ht; 17490Sstevel@tonic-gate uint_t e; 17500Sstevel@tonic-gate x86pte_t pte; 17510Sstevel@tonic-gate 17520Sstevel@tonic-gate ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level); 17530Sstevel@tonic-gate if (ht == NULL) 17540Sstevel@tonic-gate return (NULL); 17550Sstevel@tonic-gate 17560Sstevel@tonic-gate if (entry) 17570Sstevel@tonic-gate *entry = e; 17580Sstevel@tonic-gate 17590Sstevel@tonic-gate if (PTE_ISPAGE(pte, ht->ht_level)) 17600Sstevel@tonic-gate return (ht); 17610Sstevel@tonic-gate htable_release(ht); 17620Sstevel@tonic-gate return (NULL); 17630Sstevel@tonic-gate } 17640Sstevel@tonic-gate 17650Sstevel@tonic-gate 17660Sstevel@tonic-gate void 17670Sstevel@tonic-gate htable_init() 17680Sstevel@tonic-gate { 17690Sstevel@tonic-gate /* 17700Sstevel@tonic-gate * To save on kernel VA usage, we avoid debug information in 32 bit 17710Sstevel@tonic-gate * kernels. 
17720Sstevel@tonic-gate */
17730Sstevel@tonic-gate #if defined(__amd64)
17740Sstevel@tonic-gate int kmem_flags = KMC_NOHASH;
17750Sstevel@tonic-gate #elif defined(__i386)
17760Sstevel@tonic-gate int kmem_flags = KMC_NOHASH | KMC_NODEBUG;
17770Sstevel@tonic-gate #endif
17780Sstevel@tonic-gate
17790Sstevel@tonic-gate /*
17800Sstevel@tonic-gate * initialize kmem caches
17810Sstevel@tonic-gate */
17820Sstevel@tonic-gate htable_cache = kmem_cache_create("htable_t",
17830Sstevel@tonic-gate sizeof (htable_t), 0, NULL, NULL,
17840Sstevel@tonic-gate htable_reap, NULL, hat_memload_arena, kmem_flags);
17850Sstevel@tonic-gate }
17860Sstevel@tonic-gate
17870Sstevel@tonic-gate /*
17880Sstevel@tonic-gate * get the pte index for the virtual address in the given htable's pagetable
17890Sstevel@tonic-gate */
17900Sstevel@tonic-gate uint_t
17910Sstevel@tonic-gate htable_va2entry(uintptr_t va, htable_t *ht)
17920Sstevel@tonic-gate {
17930Sstevel@tonic-gate level_t l = ht->ht_level;
17940Sstevel@tonic-gate
17950Sstevel@tonic-gate ASSERT(va >= ht->ht_vaddr);
17960Sstevel@tonic-gate ASSERT(va <= HTABLE_LAST_PAGE(ht));
17973446Smrj return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1));
17980Sstevel@tonic-gate }
17990Sstevel@tonic-gate
18000Sstevel@tonic-gate /*
18010Sstevel@tonic-gate * Given an htable and the index of a pte in it, return the virtual address
18020Sstevel@tonic-gate * of the page.
18030Sstevel@tonic-gate */
18040Sstevel@tonic-gate uintptr_t
18050Sstevel@tonic-gate htable_e2va(htable_t *ht, uint_t entry)
18060Sstevel@tonic-gate {
18070Sstevel@tonic-gate level_t l = ht->ht_level;
18080Sstevel@tonic-gate uintptr_t va;
18090Sstevel@tonic-gate
18103446Smrj ASSERT(entry < HTABLE_NUM_PTES(ht));
18110Sstevel@tonic-gate va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l));
18120Sstevel@tonic-gate
18130Sstevel@tonic-gate /*
18140Sstevel@tonic-gate * Need to skip over any VA hole in top level table
18150Sstevel@tonic-gate */
18160Sstevel@tonic-gate #if defined(__amd64)
18170Sstevel@tonic-gate if (ht->ht_level == mmu.max_level && va >= mmu.hole_start)
18180Sstevel@tonic-gate va += ((mmu.hole_end - mmu.hole_start) + 1);
18190Sstevel@tonic-gate #endif
18200Sstevel@tonic-gate
18210Sstevel@tonic-gate return (va);
18220Sstevel@tonic-gate }
18230Sstevel@tonic-gate
18240Sstevel@tonic-gate /*
18250Sstevel@tonic-gate * The code uses compare and swap instructions to read/write PTE's to
18260Sstevel@tonic-gate * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems,
18270Sstevel@tonic-gate * whereas on 64 bit kernels such loads and stores will naturally be atomic.
18280Sstevel@tonic-gate *
18290Sstevel@tonic-gate * The combination of using kpreempt_disable()/_enable() and the hci_mutex
18300Sstevel@tonic-gate * is used to ensure that an interrupt won't overwrite a temporary mapping
18310Sstevel@tonic-gate * while it's in use. If an interrupt thread tries to access a PTE, it will
18320Sstevel@tonic-gate * yield briefly back to the pinned thread which holds the cpu's hci_mutex.
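 *
 * In outline, the mapping window code below is bracketed like this
 * (illustrative sketch of x86pte_mapin()/x86pte_mapout()):
 *
 *         kpreempt_disable();
 *         mutex_enter(&CPU->cpu_hat_info->hci_mutex);
 *         ... install the per-CPU window PTE and use the mapping ...
 *         mutex_exit(&CPU->cpu_hat_info->hci_mutex);
 *         kpreempt_enable();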
18330Sstevel@tonic-gate */
18340Sstevel@tonic-gate void
18353446Smrj x86pte_cpu_init(cpu_t *cpu)
18360Sstevel@tonic-gate {
18370Sstevel@tonic-gate struct hat_cpu_info *hci;
18380Sstevel@tonic-gate
18393446Smrj hci = kmem_zalloc(sizeof (*hci), KM_SLEEP);
18400Sstevel@tonic-gate mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL);
18410Sstevel@tonic-gate cpu->cpu_hat_info = hci;
18420Sstevel@tonic-gate }
18430Sstevel@tonic-gate
18443446Smrj void
18453446Smrj x86pte_cpu_fini(cpu_t *cpu)
18463446Smrj {
18473446Smrj struct hat_cpu_info *hci = cpu->cpu_hat_info;
18483446Smrj
18493446Smrj kmem_free(hci, sizeof (*hci));
18503446Smrj cpu->cpu_hat_info = NULL;
18513446Smrj }
18523446Smrj
18533446Smrj #ifdef __i386
18540Sstevel@tonic-gate /*
18553446Smrj * On 32 bit kernels, loading a 64 bit PTE is a little tricky
18560Sstevel@tonic-gate */
18573446Smrj x86pte_t
18583446Smrj get_pte64(x86pte_t *ptr)
18593446Smrj {
18603446Smrj volatile uint32_t *p = (uint32_t *)ptr;
18613446Smrj x86pte_t t;
18623446Smrj
18633446Smrj ASSERT(mmu.pae_hat != 0);
18643446Smrj for (;;) {
18653446Smrj t = p[0];
18663446Smrj t |= (uint64_t)p[1] << 32;
18673446Smrj if ((t & 0xffffffff) == p[0])
18683446Smrj return (t);
18693446Smrj }
18700Sstevel@tonic-gate }
18713446Smrj #endif /* __i386 */
18720Sstevel@tonic-gate
18730Sstevel@tonic-gate /*
18740Sstevel@tonic-gate * Disable preemption and establish a mapping to the pagetable with the
18750Sstevel@tonic-gate * given pfn. This is optimized for the case where it's the same
18760Sstevel@tonic-gate * pfn that we last referenced from this CPU.
18770Sstevel@tonic-gate */
18780Sstevel@tonic-gate static x86pte_t *
18793446Smrj x86pte_access_pagetable(htable_t *ht, uint_t index)
18800Sstevel@tonic-gate {
18810Sstevel@tonic-gate /*
18820Sstevel@tonic-gate * VLP pagetables are contained in the hat_t
18830Sstevel@tonic-gate */
18840Sstevel@tonic-gate if (ht->ht_flags & HTABLE_VLP)
18853446Smrj return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index));
18863446Smrj return (x86pte_mapin(ht->ht_pfn, index, ht));
18873446Smrj }
18880Sstevel@tonic-gate
18893446Smrj /*
18903446Smrj * map the given pfn into the page table window.
18913446Smrj */
18923446Smrj /*ARGSUSED*/
18933446Smrj x86pte_t *
18943446Smrj x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht)
18953446Smrj {
18963446Smrj x86pte_t *pteptr;
18975217Sjosephb x86pte_t pte = 0;
18983446Smrj x86pte_t newpte;
18993446Smrj int x;
19003446Smrj
19010Sstevel@tonic-gate ASSERT(pfn != PFN_INVALID);
19020Sstevel@tonic-gate
19030Sstevel@tonic-gate if (!khat_running) {
19043446Smrj caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1);
19053446Smrj return (PT_INDEX_PTR(va, index));
19060Sstevel@tonic-gate }
19070Sstevel@tonic-gate
19080Sstevel@tonic-gate /*
19093446Smrj * If kpm is available, use it.
19103446Smrj */ 19113446Smrj if (kpm_vbase) 19123446Smrj return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index)); 19133446Smrj 19143446Smrj /* 19153446Smrj * Disable preemption and grab the CPU's hci_mutex 19160Sstevel@tonic-gate */ 19170Sstevel@tonic-gate kpreempt_disable(); 19183446Smrj ASSERT(CPU->cpu_hat_info != NULL); 19193446Smrj mutex_enter(&CPU->cpu_hat_info->hci_mutex); 19203446Smrj x = PWIN_TABLE(CPU->cpu_id); 19213446Smrj pteptr = (x86pte_t *)PWIN_PTE_VA(x); 19225217Sjosephb #ifndef __xpv 19233446Smrj if (mmu.pae_hat) 19243446Smrj pte = *pteptr; 19253446Smrj else 19263446Smrj pte = *(x86pte32_t *)pteptr; 19275217Sjosephb #endif 19283446Smrj 19293446Smrj newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx; 19305084Sjohnlev 19315084Sjohnlev /* 19325084Sjohnlev * For hardware we can use a writable mapping. 19335084Sjohnlev */ 19345084Sjohnlev #ifdef __xpv 19355084Sjohnlev if (IN_XPV_PANIC()) 19365084Sjohnlev #endif 19375084Sjohnlev newpte |= PT_WRITABLE; 19383446Smrj 19393446Smrj if (!PTE_EQUIV(newpte, pte)) { 19405084Sjohnlev 19415084Sjohnlev #ifdef __xpv 19425084Sjohnlev if (!IN_XPV_PANIC()) { 19435084Sjohnlev xen_map(newpte, PWIN_VA(x)); 19445084Sjohnlev } else 19455084Sjohnlev #endif 19465084Sjohnlev { 19475084Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 19485084Sjohnlev if (mmu.pae_hat) 19495084Sjohnlev *pteptr = newpte; 19505084Sjohnlev else 19515084Sjohnlev *(x86pte32_t *)pteptr = newpte; 19525084Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 19535084Sjohnlev mmu_tlbflush_entry((caddr_t)(PWIN_VA(x))); 19545084Sjohnlev } 19550Sstevel@tonic-gate } 19563446Smrj return (PT_INDEX_PTR(PWIN_VA(x), index)); 19570Sstevel@tonic-gate } 19580Sstevel@tonic-gate 19590Sstevel@tonic-gate /* 19600Sstevel@tonic-gate * Release access to a page table. 19610Sstevel@tonic-gate */ 19620Sstevel@tonic-gate static void 19630Sstevel@tonic-gate x86pte_release_pagetable(htable_t *ht) 19640Sstevel@tonic-gate { 19650Sstevel@tonic-gate /* 19660Sstevel@tonic-gate * nothing to do for VLP htables 19670Sstevel@tonic-gate */ 19680Sstevel@tonic-gate if (ht->ht_flags & HTABLE_VLP) 19690Sstevel@tonic-gate return; 19700Sstevel@tonic-gate 19713446Smrj x86pte_mapout(); 19723446Smrj } 19733446Smrj 19743446Smrj void 19753446Smrj x86pte_mapout(void) 19763446Smrj { 19775084Sjohnlev if (kpm_vbase != NULL || !khat_running) 19780Sstevel@tonic-gate return; 19790Sstevel@tonic-gate 19800Sstevel@tonic-gate /* 19813446Smrj * Drop the CPU's hci_mutex and restore preemption. 19820Sstevel@tonic-gate */ 19835217Sjosephb #ifdef __xpv 19845217Sjosephb if (!IN_XPV_PANIC()) { 19855217Sjosephb uintptr_t va; 19865217Sjosephb 19875217Sjosephb /* 19885217Sjosephb * We need to always clear the mapping in case a page 19895217Sjosephb * that was once a page table page is ballooned out. 
19905217Sjosephb */ 19915217Sjosephb va = (uintptr_t)PWIN_VA(PWIN_TABLE(CPU->cpu_id)); 19925217Sjosephb (void) HYPERVISOR_update_va_mapping(va, 0, 19935217Sjosephb UVMF_INVLPG | UVMF_LOCAL); 19945217Sjosephb } 19955217Sjosephb #endif 19963446Smrj mutex_exit(&CPU->cpu_hat_info->hci_mutex); 19970Sstevel@tonic-gate kpreempt_enable(); 19980Sstevel@tonic-gate } 19990Sstevel@tonic-gate 20000Sstevel@tonic-gate /* 20010Sstevel@tonic-gate * Atomic retrieval of a pagetable entry 20020Sstevel@tonic-gate */ 20030Sstevel@tonic-gate x86pte_t 20040Sstevel@tonic-gate x86pte_get(htable_t *ht, uint_t entry) 20050Sstevel@tonic-gate { 20060Sstevel@tonic-gate x86pte_t pte; 200747Sjosephb x86pte_t *ptep; 20080Sstevel@tonic-gate 20090Sstevel@tonic-gate /* 201047Sjosephb * Be careful that loading PAE entries in 32 bit kernel is atomic. 20110Sstevel@tonic-gate */ 20123446Smrj ASSERT(entry < mmu.ptes_per_table); 20133446Smrj ptep = x86pte_access_pagetable(ht, entry); 20143446Smrj pte = GET_PTE(ptep); 20150Sstevel@tonic-gate x86pte_release_pagetable(ht); 20160Sstevel@tonic-gate return (pte); 20170Sstevel@tonic-gate } 20180Sstevel@tonic-gate 20190Sstevel@tonic-gate /* 20200Sstevel@tonic-gate * Atomic unconditional set of a page table entry, it returns the previous 20213446Smrj * value. For pre-existing mappings if the PFN changes, then we don't care 20223446Smrj * about the old pte's REF / MOD bits. If the PFN remains the same, we leave 20233446Smrj * the MOD/REF bits unchanged. 20243446Smrj * 20253446Smrj * If asked to overwrite a link to a lower page table with a large page 20263446Smrj * mapping, this routine returns the special value of LPAGE_ERROR. This 20273446Smrj * allows the upper HAT layers to retry with a smaller mapping size. 20280Sstevel@tonic-gate */ 20290Sstevel@tonic-gate x86pte_t 20300Sstevel@tonic-gate x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr) 20310Sstevel@tonic-gate { 20320Sstevel@tonic-gate x86pte_t old; 20333446Smrj x86pte_t prev; 20340Sstevel@tonic-gate x86pte_t *ptep; 20353446Smrj level_t l = ht->ht_level; 20363446Smrj x86pte_t pfn_mask = (l != 0) ? PT_PADDR_LGPG : PT_PADDR; 20373446Smrj x86pte_t n; 20383446Smrj uintptr_t addr = htable_e2va(ht, entry); 20393446Smrj hat_t *hat = ht->ht_hat; 20400Sstevel@tonic-gate 20413446Smrj ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */ 20420Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 20433446Smrj if (ptr == NULL) 20443446Smrj ptep = x86pte_access_pagetable(ht, entry); 20453446Smrj else 20460Sstevel@tonic-gate ptep = ptr; 20473446Smrj 20483446Smrj /* 20493446Smrj * Install the new PTE. If remapping the same PFN, then 20503446Smrj * copy existing REF/MOD bits to new mapping. 20513446Smrj */ 20523446Smrj do { 20533446Smrj prev = GET_PTE(ptep); 20543446Smrj n = new; 20553446Smrj if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask)) 20563446Smrj n |= prev & (PT_REF | PT_MOD); 20570Sstevel@tonic-gate 20583446Smrj /* 20593446Smrj * Another thread may have installed this mapping already, 20603446Smrj * flush the local TLB and be done. 
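 *
 * (Aside for callers of x86pte_set(): one illustrative way to honor
 * the LPAGE_ERROR contract described in the function comment is
 *
 *         old = x86pte_set(ht, entry, new_pte, NULL);
 *         if (old == LPAGE_ERROR) {
 *                 ... release this htable and retry the operation
 *                     with a smaller page size ...
 *         }
 *
 * where new_pte is whatever mapping the caller was trying to install.)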
20613446Smrj */ 20623446Smrj if (prev == n) { 20633446Smrj old = new; 20645084Sjohnlev #ifdef __xpv 20655084Sjohnlev if (!IN_XPV_PANIC()) 20665084Sjohnlev xen_flush_va((caddr_t)addr); 20675084Sjohnlev else 20685084Sjohnlev #endif 20695084Sjohnlev mmu_tlbflush_entry((caddr_t)addr); 20703446Smrj goto done; 20710Sstevel@tonic-gate } 20723446Smrj 20733446Smrj /* 20743446Smrj * Detect if we have a collision of installing a large 20753446Smrj * page mapping where there already is a lower page table. 20763446Smrj */ 20773543Sjosephb if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE)) { 20783543Sjosephb old = LPAGE_ERROR; 20793543Sjosephb goto done; 20803543Sjosephb } 20813446Smrj 20825084Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 20833446Smrj old = CAS_PTE(ptep, prev, n); 20845084Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 20853446Smrj } while (old != prev); 20863446Smrj 20873446Smrj /* 20883446Smrj * Do a TLB demap if needed, ie. the old pte was valid. 20893446Smrj * 20903446Smrj * Note that a stale TLB writeback to the PTE here either can't happen 20913446Smrj * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST 20923446Smrj * mappings, but they were created with REF and MOD already set, so 20933446Smrj * no stale writeback will happen. 20943446Smrj * 20953446Smrj * Segmap is the only place where remaps happen on the same pfn and for 20963446Smrj * that we want to preserve the stale REF/MOD bits. 20973446Smrj */ 20983446Smrj if (old & PT_REF) 20993446Smrj hat_tlb_inval(hat, addr); 21003446Smrj 21013446Smrj done: 21020Sstevel@tonic-gate if (ptr == NULL) 21030Sstevel@tonic-gate x86pte_release_pagetable(ht); 21040Sstevel@tonic-gate return (old); 21050Sstevel@tonic-gate } 21060Sstevel@tonic-gate 21070Sstevel@tonic-gate /* 21083446Smrj * Atomic compare and swap of a page table entry. No TLB invalidates are done. 21093446Smrj * This is used for links between pagetables of different levels. 21103446Smrj * Note we always create these links with dirty/access set, so they should 21113446Smrj * never change. 21120Sstevel@tonic-gate */ 21133446Smrj x86pte_t 21140Sstevel@tonic-gate x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new) 21150Sstevel@tonic-gate { 21160Sstevel@tonic-gate x86pte_t pte; 21170Sstevel@tonic-gate x86pte_t *ptep; 21185084Sjohnlev #ifdef __xpv 21195084Sjohnlev /* 21205084Sjohnlev * We can't use writable pagetables for upper level tables, so fake it. 21215084Sjohnlev */ 21225084Sjohnlev mmu_update_t t[2]; 21235084Sjohnlev int cnt = 1; 21245084Sjohnlev int count; 21255084Sjohnlev maddr_t ma; 21260Sstevel@tonic-gate 21275084Sjohnlev if (!IN_XPV_PANIC()) { 21285084Sjohnlev ASSERT(!(ht->ht_flags & HTABLE_VLP)); /* no VLP yet */ 21295084Sjohnlev ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry)); 21305084Sjohnlev t[0].ptr = ma | MMU_NORMAL_PT_UPDATE; 21315084Sjohnlev t[0].val = new; 21325084Sjohnlev 21335084Sjohnlev #if defined(__amd64) 21345084Sjohnlev /* 21355084Sjohnlev * On the 64-bit hypervisor we need to maintain the user mode 21365084Sjohnlev * top page table too. 
21375084Sjohnlev */ 21385084Sjohnlev if (ht->ht_level == mmu.max_level && ht->ht_hat != kas.a_hat) { 21395084Sjohnlev ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa( 21405084Sjohnlev ht->ht_hat->hat_user_ptable), entry)); 21415084Sjohnlev t[1].ptr = ma | MMU_NORMAL_PT_UPDATE; 21425084Sjohnlev t[1].val = new; 21435084Sjohnlev ++cnt; 21445084Sjohnlev } 21455084Sjohnlev #endif /* __amd64 */ 21465084Sjohnlev 21475084Sjohnlev if (HYPERVISOR_mmu_update(t, cnt, &count, DOMID_SELF)) 21485084Sjohnlev panic("HYPERVISOR_mmu_update() failed"); 21495084Sjohnlev ASSERT(count == cnt); 21505084Sjohnlev return (old); 21515084Sjohnlev } 21525084Sjohnlev #endif 21533446Smrj ptep = x86pte_access_pagetable(ht, entry); 21545084Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 21553446Smrj pte = CAS_PTE(ptep, old, new); 21565084Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 21570Sstevel@tonic-gate x86pte_release_pagetable(ht); 21580Sstevel@tonic-gate return (pte); 21590Sstevel@tonic-gate } 21600Sstevel@tonic-gate 21610Sstevel@tonic-gate /* 21623446Smrj * Invalidate a page table entry as long as it currently maps something that 21633446Smrj * matches the value determined by expect. 21643446Smrj * 21653446Smrj * Also invalidates any TLB entries and returns the previous value of the PTE. 21660Sstevel@tonic-gate */ 21673446Smrj x86pte_t 21683446Smrj x86pte_inval( 21693446Smrj htable_t *ht, 21703446Smrj uint_t entry, 21713446Smrj x86pte_t expect, 21723446Smrj x86pte_t *pte_ptr) 21730Sstevel@tonic-gate { 21743446Smrj x86pte_t *ptep; 21754191Sjosephb x86pte_t oldpte; 21764191Sjosephb x86pte_t found; 21770Sstevel@tonic-gate 21783446Smrj ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 21795349Skchow ASSERT(ht->ht_level <= mmu.max_page_level); 21803543Sjosephb 21813446Smrj if (pte_ptr != NULL) 21823446Smrj ptep = pte_ptr; 21833446Smrj else 21843446Smrj ptep = x86pte_access_pagetable(ht, entry); 21850Sstevel@tonic-gate 21865084Sjohnlev #if defined(__xpv) 21875084Sjohnlev /* 21885084Sjohnlev * If exit()ing just use HYPERVISOR_mmu_update(), as we can't be racing 21895084Sjohnlev * with anything else. 21905084Sjohnlev */ 21915084Sjohnlev if ((ht->ht_hat->hat_flags & HAT_FREEING) && !IN_XPV_PANIC()) { 21925084Sjohnlev int count; 21935084Sjohnlev mmu_update_t t[1]; 21945084Sjohnlev maddr_t ma; 21955084Sjohnlev 21965084Sjohnlev oldpte = GET_PTE(ptep); 21975084Sjohnlev if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR)) 21985084Sjohnlev goto done; 21995084Sjohnlev ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry)); 22005084Sjohnlev t[0].ptr = ma | MMU_NORMAL_PT_UPDATE; 22015084Sjohnlev t[0].val = 0; 22025084Sjohnlev if (HYPERVISOR_mmu_update(t, 1, &count, DOMID_SELF)) 22035084Sjohnlev panic("HYPERVISOR_mmu_update() failed"); 22045084Sjohnlev ASSERT(count == 1); 22055084Sjohnlev goto done; 22065084Sjohnlev } 22075084Sjohnlev #endif /* __xpv */ 22085084Sjohnlev 22090Sstevel@tonic-gate /* 22103543Sjosephb * Note that the loop is needed to handle changes due to h/w updating 22113543Sjosephb * of PT_MOD/PT_REF. 
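 *
 * For example (illustrative interleaving):
 *
 *         this cpu:   oldpte = GET_PTE(ptep), PT_MOD still clear
 *         other cpu:  a write through its still-valid TLB entry causes
 *                     the hardware to set PT_MOD in the pagetable entry
 *         this cpu:   CAS_PTE(ptep, oldpte, 0) fails because the entry
 *                     changed; the loop re-reads the entry and retries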
22120Sstevel@tonic-gate */
22133446Smrj do {
22144191Sjosephb oldpte = GET_PTE(ptep);
22154191Sjosephb if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
22164191Sjosephb goto done;
22175084Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES();
22184191Sjosephb found = CAS_PTE(ptep, oldpte, 0);
22195084Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES();
22204191Sjosephb } while (found != oldpte);
22214191Sjosephb if (oldpte & (PT_REF | PT_MOD))
22224191Sjosephb hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));
22230Sstevel@tonic-gate
22244191Sjosephb done:
22253446Smrj if (pte_ptr == NULL)
22263446Smrj x86pte_release_pagetable(ht);
22274191Sjosephb return (oldpte);
22280Sstevel@tonic-gate }
22290Sstevel@tonic-gate
22300Sstevel@tonic-gate /*
22313446Smrj * Change a page table entry if it currently matches the value in expect.
22320Sstevel@tonic-gate */
22330Sstevel@tonic-gate x86pte_t
22343446Smrj x86pte_update(
22353446Smrj htable_t *ht,
22363446Smrj uint_t entry,
22373446Smrj x86pte_t expect,
22383446Smrj x86pte_t new)
22390Sstevel@tonic-gate {
22400Sstevel@tonic-gate x86pte_t *ptep;
22413446Smrj x86pte_t found;
22420Sstevel@tonic-gate
22433446Smrj ASSERT(new != 0);
22443446Smrj ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
22455349Skchow ASSERT(ht->ht_level <= mmu.max_page_level);
22460Sstevel@tonic-gate
22473446Smrj ptep = x86pte_access_pagetable(ht, entry);
22485084Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES();
22493446Smrj found = CAS_PTE(ptep, expect, new);
22505084Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES();
22513446Smrj if (found == expect) {
22523446Smrj hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));
22530Sstevel@tonic-gate
22543446Smrj /*
22553446Smrj * When removing write permission *and* clearing the
22563446Smrj * MOD bit, check if a write happened via a stale
22573446Smrj * TLB entry before the TLB shootdown finished.
22583446Smrj *
22593446Smrj * If it did happen, simply re-enable write permission and
22603446Smrj * act like the original CAS failed.
22613446Smrj */
22623446Smrj if ((expect & (PT_WRITABLE | PT_MOD)) == PT_WRITABLE &&
22633446Smrj (new & (PT_WRITABLE | PT_MOD)) == 0 &&
22643446Smrj (GET_PTE(ptep) & PT_MOD) != 0) {
22653446Smrj do {
22663446Smrj found = GET_PTE(ptep);
22675084Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES();
22683446Smrj found =
22693446Smrj CAS_PTE(ptep, found, found | PT_WRITABLE);
22705084Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES();
22713446Smrj } while ((found & PT_WRITABLE) == 0);
22723446Smrj }
22733446Smrj }
22740Sstevel@tonic-gate x86pte_release_pagetable(ht);
22753446Smrj return (found);
22760Sstevel@tonic-gate }
22770Sstevel@tonic-gate
22785084Sjohnlev #ifndef __xpv
22790Sstevel@tonic-gate /*
22800Sstevel@tonic-gate * Copy page tables - this is just a little more complicated than the
22810Sstevel@tonic-gate * previous routines. Note that it's also not atomic! It also is never
22820Sstevel@tonic-gate * used for VLP pagetables.
22830Sstevel@tonic-gate */ 22840Sstevel@tonic-gate void 22850Sstevel@tonic-gate x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count) 22860Sstevel@tonic-gate { 22870Sstevel@tonic-gate caddr_t src_va; 22880Sstevel@tonic-gate caddr_t dst_va; 22890Sstevel@tonic-gate size_t size; 22903446Smrj x86pte_t *pteptr; 22913446Smrj x86pte_t pte; 22920Sstevel@tonic-gate 22930Sstevel@tonic-gate ASSERT(khat_running); 22940Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_VLP)); 22950Sstevel@tonic-gate ASSERT(!(src->ht_flags & HTABLE_VLP)); 22960Sstevel@tonic-gate ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN)); 22970Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN)); 22980Sstevel@tonic-gate 22990Sstevel@tonic-gate /* 23003446Smrj * Acquire access to the CPU pagetable windows for the dest and source. 23010Sstevel@tonic-gate */ 23023446Smrj dst_va = (caddr_t)x86pte_access_pagetable(dest, entry); 23033446Smrj if (kpm_vbase) { 23043446Smrj src_va = (caddr_t) 23053446Smrj PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry); 23060Sstevel@tonic-gate } else { 23073446Smrj uint_t x = PWIN_SRC(CPU->cpu_id); 23080Sstevel@tonic-gate 23090Sstevel@tonic-gate /* 23100Sstevel@tonic-gate * Finish defining the src pagetable mapping 23110Sstevel@tonic-gate */ 23123446Smrj src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry); 23133446Smrj pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx; 23143446Smrj pteptr = (x86pte_t *)PWIN_PTE_VA(x); 23153446Smrj if (mmu.pae_hat) 23163446Smrj *pteptr = pte; 23173446Smrj else 23183446Smrj *(x86pte32_t *)pteptr = pte; 23193446Smrj mmu_tlbflush_entry((caddr_t)(PWIN_VA(x))); 23200Sstevel@tonic-gate } 23210Sstevel@tonic-gate 23220Sstevel@tonic-gate /* 23230Sstevel@tonic-gate * now do the copy 23240Sstevel@tonic-gate */ 23250Sstevel@tonic-gate size = count << mmu.pte_size_shift; 23260Sstevel@tonic-gate bcopy(src_va, dst_va, size); 23270Sstevel@tonic-gate 23280Sstevel@tonic-gate x86pte_release_pagetable(dest); 23290Sstevel@tonic-gate } 23300Sstevel@tonic-gate 23315084Sjohnlev #else /* __xpv */ 23325084Sjohnlev 23335084Sjohnlev /* 23345084Sjohnlev * The hypervisor only supports writable pagetables at level 0, so we have 23355084Sjohnlev * to install these 1 by 1 the slow way. 
23365084Sjohnlev */ 23375084Sjohnlev void 23385084Sjohnlev x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count) 23395084Sjohnlev { 23405084Sjohnlev caddr_t src_va; 23415084Sjohnlev x86pte_t pte; 23425084Sjohnlev 23435084Sjohnlev ASSERT(!IN_XPV_PANIC()); 23445084Sjohnlev src_va = (caddr_t)x86pte_access_pagetable(src, entry); 23455084Sjohnlev while (count) { 23465084Sjohnlev if (mmu.pae_hat) 23475084Sjohnlev pte = *(x86pte_t *)src_va; 23485084Sjohnlev else 23495084Sjohnlev pte = *(x86pte32_t *)src_va; 23505084Sjohnlev if (pte != 0) { 23515084Sjohnlev set_pteval(pfn_to_pa(dest->ht_pfn), entry, 23525084Sjohnlev dest->ht_level, pte); 23535084Sjohnlev #ifdef __amd64 23545084Sjohnlev if (dest->ht_level == mmu.max_level && 23555084Sjohnlev htable_e2va(dest, entry) < HYPERVISOR_VIRT_END) 23565084Sjohnlev set_pteval( 23575084Sjohnlev pfn_to_pa(dest->ht_hat->hat_user_ptable), 23585084Sjohnlev entry, dest->ht_level, pte); 23595084Sjohnlev #endif 23605084Sjohnlev } 23615084Sjohnlev --count; 23625084Sjohnlev ++entry; 23635084Sjohnlev src_va += mmu.pte_size; 23645084Sjohnlev } 23655084Sjohnlev x86pte_release_pagetable(src); 23665084Sjohnlev } 23675084Sjohnlev #endif /* __xpv */ 23685084Sjohnlev 23690Sstevel@tonic-gate /* 23700Sstevel@tonic-gate * Zero page table entries - Note this doesn't use atomic stores! 23710Sstevel@tonic-gate */ 23723446Smrj static void 23730Sstevel@tonic-gate x86pte_zero(htable_t *dest, uint_t entry, uint_t count) 23740Sstevel@tonic-gate { 23750Sstevel@tonic-gate caddr_t dst_va; 23760Sstevel@tonic-gate size_t size; 23775084Sjohnlev #ifdef __xpv 23785084Sjohnlev int x; 23795084Sjohnlev x86pte_t newpte; 23805084Sjohnlev #endif 23810Sstevel@tonic-gate 23820Sstevel@tonic-gate /* 23830Sstevel@tonic-gate * Map in the page table to be zeroed. 23840Sstevel@tonic-gate */ 23850Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN)); 23860Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_VLP)); 23873446Smrj 23885084Sjohnlev /* 23895084Sjohnlev * On the hypervisor we don't use x86pte_access_pagetable() since 23905084Sjohnlev * in this case the page is not pinned yet. 
23915084Sjohnlev */ 23925084Sjohnlev #ifdef __xpv 23935084Sjohnlev if (kpm_vbase == NULL) { 23945084Sjohnlev kpreempt_disable(); 23955084Sjohnlev ASSERT(CPU->cpu_hat_info != NULL); 23965084Sjohnlev mutex_enter(&CPU->cpu_hat_info->hci_mutex); 23975084Sjohnlev x = PWIN_TABLE(CPU->cpu_id); 23985084Sjohnlev newpte = MAKEPTE(dest->ht_pfn, 0) | PT_WRITABLE; 23995084Sjohnlev xen_map(newpte, PWIN_VA(x)); 24005084Sjohnlev dst_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry); 24015084Sjohnlev } else 24025084Sjohnlev #endif 24035084Sjohnlev dst_va = (caddr_t)x86pte_access_pagetable(dest, entry); 24043446Smrj 24050Sstevel@tonic-gate size = count << mmu.pte_size_shift; 24063446Smrj ASSERT(size > BLOCKZEROALIGN); 24073446Smrj #ifdef __i386 24083446Smrj if ((x86_feature & X86_SSE2) == 0) 24090Sstevel@tonic-gate bzero(dst_va, size); 24103446Smrj else 24113446Smrj #endif 24123446Smrj block_zero_no_xmm(dst_va, size); 24133446Smrj 24145084Sjohnlev #ifdef __xpv 24155084Sjohnlev if (kpm_vbase == NULL) { 24165084Sjohnlev xen_map(0, PWIN_VA(x)); 24175084Sjohnlev mutex_exit(&CPU->cpu_hat_info->hci_mutex); 24185084Sjohnlev kpreempt_enable(); 24195084Sjohnlev } else 24205084Sjohnlev #endif 24215084Sjohnlev x86pte_release_pagetable(dest); 24220Sstevel@tonic-gate } 24230Sstevel@tonic-gate 24240Sstevel@tonic-gate /* 24250Sstevel@tonic-gate * Called to ensure that all pagetables are in the system dump 24260Sstevel@tonic-gate */ 24270Sstevel@tonic-gate void 24280Sstevel@tonic-gate hat_dump(void) 24290Sstevel@tonic-gate { 24300Sstevel@tonic-gate hat_t *hat; 24310Sstevel@tonic-gate uint_t h; 24320Sstevel@tonic-gate htable_t *ht; 24330Sstevel@tonic-gate 24340Sstevel@tonic-gate /* 24351747Sjosephb * Dump all page tables 24360Sstevel@tonic-gate */ 24371747Sjosephb for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) { 24380Sstevel@tonic-gate for (h = 0; h < hat->hat_num_hash; ++h) { 24390Sstevel@tonic-gate for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 24401747Sjosephb if ((ht->ht_flags & HTABLE_VLP) == 0) 24410Sstevel@tonic-gate dump_page(ht->ht_pfn); 24420Sstevel@tonic-gate } 24430Sstevel@tonic-gate } 24440Sstevel@tonic-gate } 24450Sstevel@tonic-gate } 2446