/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/disp.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/bootconf.h>
#include <sys/dumphdr.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <vm/hat_i86.h>
#include <sys/cmn_err.h>
#include <sys/panic.h>

#ifdef __xpv
#include <sys/hypervisor.h>
#include <sys/xpv_panic.h>
#endif

#include <sys/bootinfo.h>
#include <vm/kboot_mmu.h>

static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count);

kmem_cache_t *htable_cache;

/*
 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT,
 * is used in order to facilitate testing of the htable_steal() code.
 * By resetting htable_reserve_amount to a lower value, we can force
 * stealing to occur. The reserve amount is a guess to get us through boot.
 */
#define	HTABLE_RESERVE_AMOUNT	(200)
uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT;
kmutex_t htable_reserve_mutex;
uint_t htable_reserve_cnt;
htable_t *htable_reserve_pool;
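
/*
 * For example (illustrative only), stealing can be forced during testing
 * by shrinking the reserve from /etc/system:
 *
 *	set htable_reserve_amount = 10
 */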

/*
 * Used to hand test htable_steal().
 */
#ifdef DEBUG
ulong_t force_steal = 0;
ulong_t ptable_cnt = 0;
#endif

/*
 * This variable exists so that the number of steal passes can be tuned
 * via /etc/system.
 * Any value works, but a power of two <= mmu.ptes_per_table is best.
 */
uint_t htable_steal_passes = 8;

/*
 * mutex stuff for access to htable hash
 */
#define	NUM_HTABLE_MUTEX 128
kmutex_t htable_mutex[NUM_HTABLE_MUTEX];
#define	HTABLE_MUTEX_HASH(h)	((h) & (NUM_HTABLE_MUTEX - 1))

#define	HTABLE_ENTER(h)	mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
#define	HTABLE_EXIT(h)	mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
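
/*
 * Note: since NUM_HTABLE_MUTEX is a power of two, HTABLE_MUTEX_HASH()
 * reduces to a simple mask, so (for example) hash buckets 2, 130, 258, ...
 * all serialize on htable_mutex[2].
 */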

/*
 * forward declarations
 */
static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr);
static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr);
static void htable_free(htable_t *ht);
static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index);
static void x86pte_release_pagetable(htable_t *ht);
static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old,
	x86pte_t new);

/*
 * A counter to track if we are stealing or reaping htables. When non-zero
 * htable_free() will directly free htables (either to the reserve or kmem)
 * instead of putting them in a hat's htable cache.
 */
uint32_t htable_dont_cache = 0;

/*
 * Track the number of active pagetables, so we can know how many to reap
 */
static uint32_t active_ptables = 0;

#ifdef __xpv
/*
 * Deal with hypervisor complications.
 */
void
xen_flush_va(caddr_t va)
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		mmu_tlbflush_entry((caddr_t)va);
	} else {
		t.cmd = MMUEXT_INVLPG_LOCAL;
		t.arg1.linear_addr = (uintptr_t)va;
		if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmuext_op() failed");
		ASSERT(count == 1);
	}
}

void
xen_gflush_va(caddr_t va, cpuset_t cpus)
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		mmu_tlbflush_entry((caddr_t)va);
		return;
	}

	t.cmd = MMUEXT_INVLPG_MULTI;
	t.arg1.linear_addr = (uintptr_t)va;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(t.arg2.vcpumask, &cpus);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

void
xen_flush_tlb()
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		xpv_panic_reload_cr3();
	} else {
		t.cmd = MMUEXT_TLB_FLUSH_LOCAL;
		if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmuext_op() failed");
		ASSERT(count == 1);
	}
}

void
xen_gflush_tlb(cpuset_t cpus)
{
	struct mmuext_op t;
	uint_t count;

	ASSERT(!IN_XPV_PANIC());
	t.cmd = MMUEXT_TLB_FLUSH_MULTI;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(t.arg2.vcpumask, &cpus);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}
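
/*
 * Summary of the wrappers above: xen_flush_va() and xen_flush_tlb()
 * invalidate only on the local vcpu, while the xen_gflush_*() variants
 * hand the hypervisor a cpuset_t so the invalidation is performed on
 * every vcpu in the set.
 */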

/*
 * Install/Adjust a kpm mapping under the hypervisor.
 * Value of "how" should be:
 *	PT_WRITABLE | PT_VALID - regular kpm mapping
 *	PT_VALID - make mapping read-only
 *	0 - remove mapping
 *
 * Returns 0 on success, non-zero on failure.
 */
int
xen_kpm_page(pfn_t pfn, uint_t how)
{
	paddr_t pa = mmu_ptob((paddr_t)pfn);
	x86pte_t pte = PT_NOCONSIST | PT_REF | PT_MOD;

	if (kpm_vbase == NULL)
		return (0);

	if (how)
		pte |= pa_to_ma(pa) | how;
	else
		pte = 0;
	return (HYPERVISOR_update_va_mapping((uintptr_t)kpm_vbase + pa,
	    pte, UVMF_INVLPG | UVMF_ALL));
}

void
xen_pin(pfn_t pfn, level_t lvl)
{
	struct mmuext_op t;
	uint_t count;

	t.cmd = MMUEXT_PIN_L1_TABLE + lvl;
	t.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

void
xen_unpin(pfn_t pfn)
{
	struct mmuext_op t;
	uint_t count;

	t.cmd = MMUEXT_UNPIN_TABLE;
	t.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

static void
xen_map(uint64_t pte, caddr_t va)
{
	if (HYPERVISOR_update_va_mapping((uintptr_t)va, pte,
	    UVMF_INVLPG | UVMF_LOCAL))
		panic("HYPERVISOR_update_va_mapping() failed");
}
#endif /* __xpv */
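
/*
 * Illustrative usage (see ptable_free() and htable_alloc() below): a
 * pagetable page is made read-only in kpm before the hypervisor will
 * accept it as a pagetable, and made writable again when reclaimed,
 * roughly:
 *
 *	(void) xen_kpm_page(pfn, PT_VALID);			- read-only
 *	(void) xen_kpm_page(pfn, PT_VALID | PT_WRITABLE);	- read/write
 */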

/*
 * Allocate a memory page for a hardware page table.
 *
 * A wrapper around page_get_physical(), with some extra checks.
 */
static pfn_t
ptable_alloc(uintptr_t seed)
{
	pfn_t pfn;
	page_t *pp;

	pfn = PFN_INVALID;

	/*
	 * The first check is to see if there is memory in the system. If we
	 * drop to throttlefree, then fail the ptable_alloc() and let the
	 * stealing code kick in. Note that we have to do this test here,
	 * since the test in page_create_throttle() would let the NOSLEEP
	 * allocation go through and deplete the page reserves.
	 *
	 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check.
	 */
	if (!NOMEMWAIT() && freemem <= throttlefree + 1)
		return (PFN_INVALID);

#ifdef DEBUG
	/*
	 * This code makes htable_steal() easier to test. By setting
	 * force_steal we force pagetable allocations to fall
	 * into the stealing code. Roughly 1 in every "force_steal"
	 * page table allocations will fail.
	 */
	if (proc_pageout != NULL && force_steal > 1 &&
	    ++ptable_cnt > force_steal) {
		ptable_cnt = 0;
		return (PFN_INVALID);
	}
#endif /* DEBUG */

	pp = page_get_physical(seed);
	if (pp == NULL)
		return (PFN_INVALID);
	ASSERT(PAGE_SHARED(pp));
	pfn = pp->p_pagenum;
	if (pfn == PFN_INVALID)
		panic("ptable_alloc(): Invalid PFN!!");
	atomic_add_32(&active_ptables, 1);
	HATSTAT_INC(hs_ptable_allocs);
	return (pfn);
}

/*
 * Free an htable's associated page table page. See the comments
 * for ptable_alloc().
 */
static void
ptable_free(pfn_t pfn)
{
	page_t *pp = page_numtopp_nolock(pfn);

	/*
	 * need to destroy the page used for the pagetable
	 */
	ASSERT(pfn != PFN_INVALID);
	HATSTAT_INC(hs_ptable_frees);
	atomic_add_32(&active_ptables, -1);
	if (pp == NULL)
		panic("ptable_free(): no page for pfn!");
	ASSERT(PAGE_SHARED(pp));
	ASSERT(pfn == pp->p_pagenum);
	ASSERT(!IN_XPV_PANIC());

	/*
	 * Get an exclusive lock, might have to wait for a kmem reader.
	 */
	if (!page_tryupgrade(pp)) {
		u_offset_t off = pp->p_offset;
		page_unlock(pp);
		pp = page_lookup(&kvp, off, SE_EXCL);
		if (pp == NULL)
			panic("page not found");
	}
#ifdef __xpv
	if (kpm_vbase && xen_kpm_page(pfn, PT_VALID | PT_WRITABLE) < 0)
		panic("failure making kpm r/w pfn=0x%lx", pfn);
#endif
	page_hashout(pp, NULL);
	page_free(pp, 1);
	page_unresv(1);
}

/*
 * Put one htable on the reserve list.
 */
static void
htable_put_reserve(htable_t *ht)
{
	ht->ht_hat = NULL;		/* no longer tied to a hat */
	ASSERT(ht->ht_pfn == PFN_INVALID);
	HATSTAT_INC(hs_htable_rputs);
	mutex_enter(&htable_reserve_mutex);
	ht->ht_next = htable_reserve_pool;
	htable_reserve_pool = ht;
	++htable_reserve_cnt;
	mutex_exit(&htable_reserve_mutex);
}
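
/*
 * Note: htable_put_reserve() above and htable_get_reserve() below
 * maintain htable_reserve_pool as a simple LIFO list linked through
 * ht_next. An htable on the reserve never owns a pagetable page,
 * hence the ht_pfn == PFN_INVALID assertions.
 */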

/*
 * Take one htable from the reserve.
 */
static htable_t *
htable_get_reserve(void)
{
	htable_t *ht = NULL;

	mutex_enter(&htable_reserve_mutex);
	if (htable_reserve_cnt != 0) {
		ht = htable_reserve_pool;
		ASSERT(ht != NULL);
		ASSERT(ht->ht_pfn == PFN_INVALID);
		htable_reserve_pool = ht->ht_next;
		--htable_reserve_cnt;
		HATSTAT_INC(hs_htable_rgets);
	}
	mutex_exit(&htable_reserve_mutex);
	return (ht);
}

/*
 * Allocate initial htables and put them on the reserve list
 */
void
htable_initial_reserve(uint_t count)
{
	htable_t *ht;

	count += HTABLE_RESERVE_AMOUNT;
	while (count > 0) {
		ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP);
		ASSERT(ht != NULL);

		ASSERT(use_boot_reserve);
		ht->ht_pfn = PFN_INVALID;
		htable_put_reserve(ht);
		--count;
	}
}

/*
 * Readjust the reserves after a thread finishes using them.
 */
void
htable_adjust_reserve()
{
	htable_t *ht;

	/*
	 * Free any excess htables in the reserve list
	 */
	while (htable_reserve_cnt > htable_reserve_amount &&
	    !USE_HAT_RESERVES()) {
		ht = htable_get_reserve();
		if (ht == NULL)
			return;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		kmem_cache_free(htable_cache, ht);
	}
}


/*
 * This routine steals htables from user processes for htable_alloc() or
 * for htable_reap().
 */
static htable_t *
htable_steal(uint_t cnt)
{
	hat_t		*hat = kas.a_hat;	/* list starts with khat */
	htable_t	*list = NULL;
	htable_t	*ht;
	htable_t	*higher;
	uint_t		h;
	uint_t		h_start;
	static uint_t	h_seed = 0;
	uint_t		e;
	uintptr_t	va;
	x86pte_t	pte;
	uint_t		stolen = 0;
	uint_t		pass;
	uint_t		threshold;

	/*
	 * Limit htable_steal_passes to something reasonable
	 */
	if (htable_steal_passes == 0)
		htable_steal_passes = 1;
	if (htable_steal_passes > mmu.ptes_per_table)
		htable_steal_passes = mmu.ptes_per_table;
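
	/*
	 * Worked example: threshold below grows with each pass. With the
	 * default htable_steal_passes of 8 and, say, 512 PTEs per table,
	 * pass 1 considers only tables with at most 64 valid entries,
	 * pass 2 at most 128, and so on. Pass 0 takes nothing but cached,
	 * unused htables.
	 */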

	/*
	 * Loop through all user hats. The 1st pass takes cached htables that
	 * aren't in use. The later passes steal by removing mappings, too.
	 */
	atomic_add_32(&htable_dont_cache, 1);
	for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) {
		threshold = pass * mmu.ptes_per_table / htable_steal_passes;
		hat = kas.a_hat;
		for (;;) {

			/*
			 * Clear the victim flag and move to next hat
			 */
			mutex_enter(&hat_list_lock);
			if (hat != kas.a_hat) {
				hat->hat_flags &= ~HAT_VICTIM;
				cv_broadcast(&hat_list_cv);
			}
			hat = hat->hat_next;

			/*
			 * Skip any hat that is already being stolen from.
			 *
			 * We skip SHARED hats, as these are dummy
			 * hats that host ISM shared page tables.
			 *
			 * We also skip if HAT_FREEING because hat_pte_unmap()
			 * won't zero out the PTE's. That would lead to hitting
			 * stale PTEs either here or under hat_unload() when we
			 * steal and unload the same page table in competing
			 * threads.
			 */
			while (hat != NULL &&
			    (hat->hat_flags &
			    (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0)
				hat = hat->hat_next;

			if (hat == NULL) {
				mutex_exit(&hat_list_lock);
				break;
			}
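
			/*
			 * At this point "hat" is a viable victim: it is
			 * not NULL, not being freed, not hosting ISM
			 * shared pagetables, and not already being stolen
			 * from by another thread.
			 */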

			/*
			 * Are we finished?
			 */
			if (stolen == cnt) {
				/*
				 * Try to spread the pain of stealing,
				 * move victim HAT to the end of the HAT list.
				 */
				if (pass >= 1 && cnt == 1 &&
				    kas.a_hat->hat_prev != hat) {

					/* unlink victim hat */
					if (hat->hat_prev)
						hat->hat_prev->hat_next =
						    hat->hat_next;
					else
						kas.a_hat->hat_next =
						    hat->hat_next;
					if (hat->hat_next)
						hat->hat_next->hat_prev =
						    hat->hat_prev;
					else
						kas.a_hat->hat_prev =
						    hat->hat_prev;


					/* relink at end of hat list */
					hat->hat_next = NULL;
					hat->hat_prev = kas.a_hat->hat_prev;
					if (hat->hat_prev)
						hat->hat_prev->hat_next = hat;
					else
						kas.a_hat->hat_next = hat;
					kas.a_hat->hat_prev = hat;

				}

				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Mark the HAT as a stealing victim.
			 */
			hat->hat_flags |= HAT_VICTIM;
			mutex_exit(&hat_list_lock);

			/*
			 * Take any htables from the hat's cached "free" list.
			 */
			hat_enter(hat);
			while ((ht = hat->hat_ht_cached) != NULL &&
			    stolen < cnt) {
				hat->hat_ht_cached = ht->ht_next;
				ht->ht_next = list;
				list = ht;
				++stolen;
			}
			hat_exit(hat);

			/*
			 * Don't steal on first pass.
			 */
			if (pass == 0 || stolen == cnt)
				continue;

			/*
			 * Search the active htables for one to steal.
			 * Start at a different hash bucket every time to
			 * help spread the pain of stealing.
			 */
			h = h_start = h_seed++ % hat->hat_num_hash;
			do {
				higher = NULL;
				HTABLE_ENTER(h);
				for (ht = hat->hat_ht_hash[h]; ht;
				    ht = ht->ht_next) {

					/*
					 * Can we rule out reaping?
					 */
					if (ht->ht_busy != 0 ||
					    (ht->ht_flags & HTABLE_SHARED_PFN) ||
					    ht->ht_level > 0 ||
					    ht->ht_valid_cnt > threshold ||
					    ht->ht_lock_cnt != 0)
						continue;

					/*
					 * Increment busy so the htable can't
					 * disappear. We drop the htable mutex
					 * to avoid deadlocks with
					 * hat_pageunload() and the hment mutex
					 * while we call hat_pte_unmap()
					 */
					++ht->ht_busy;
					HTABLE_EXIT(h);

					/*
					 * Try stealing.
					 * - unload and invalidate all PTEs
					 */
					for (e = 0, va = ht->ht_vaddr;
					    e < HTABLE_NUM_PTES(ht) &&
					    ht->ht_valid_cnt > 0 &&
					    ht->ht_busy == 1 &&
					    ht->ht_lock_cnt == 0;
					    ++e, va += MMU_PAGESIZE) {
						pte = x86pte_get(ht, e);
						if (!PTE_ISVALID(pte))
							continue;
						hat_pte_unmap(ht, e,
						    HAT_UNLOAD, pte, NULL);
					}

					/*
					 * Reacquire htable lock. If we didn't
					 * remove all mappings in the table,
					 * or another thread added a new mapping
					 * behind us, give up on this table.
					 */
					HTABLE_ENTER(h);
					if (ht->ht_busy != 1 ||
					    ht->ht_valid_cnt != 0 ||
					    ht->ht_lock_cnt != 0) {
						--ht->ht_busy;
						continue;
					}

					/*
					 * Steal it and unlink the page table.
					 */
					higher = ht->ht_parent;
					unlink_ptp(higher, ht, ht->ht_vaddr);

					/*
					 * remove from the hash list
					 */
					if (ht->ht_next)
						ht->ht_next->ht_prev =
						    ht->ht_prev;

					if (ht->ht_prev) {
						ht->ht_prev->ht_next =
						    ht->ht_next;
					} else {
						ASSERT(hat->hat_ht_hash[h] ==
						    ht);
						hat->hat_ht_hash[h] =
						    ht->ht_next;
					}

					/*
					 * Break to outer loop to release the
					 * higher (ht_parent) pagetable. This
					 * spreads out the pain caused by
					 * pagefaults.
					 */
					ht->ht_next = list;
					list = ht;
					++stolen;
					break;
				}
				HTABLE_EXIT(h);
				if (higher != NULL)
					htable_release(higher);
				if (++h == hat->hat_num_hash)
					h = 0;
			} while (stolen < cnt && h != h_start);
		}
	}
	atomic_add_32(&htable_dont_cache, -1);
	return (list);
}

/*
 * This is invoked from kmem when the system is low on memory. We try
 * to free hments, htables, and ptables to improve the memory situation.
 */
/*ARGSUSED*/
static void
htable_reap(void *handle)
{
	uint_t		reap_cnt;
	htable_t	*list;
	htable_t	*ht;

	HATSTAT_INC(hs_reap_attempts);
	if (!can_steal_post_boot)
		return;

	/*
	 * Try to reap 5% of the page tables bounded by a maximum of
	 * 5% of physmem and a minimum of 10.
	 */
	reap_cnt = MAX(MIN(physmem / 20, active_ptables / 20), 10);
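
	/*
	 * For example (illustrative numbers): with physmem of ~2M pages
	 * and 50,000 active pagetables, this is
	 * MAX(MIN(104857, 2500), 10) == 2500. The MAX() keeps small
	 * systems reaping at least 10 tables per attempt.
	 */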

	/*
	 * Let htable_steal() do the work, we just call htable_free()
	 */
	XPV_DISALLOW_MIGRATE();
	list = htable_steal(reap_cnt);
	XPV_ALLOW_MIGRATE();
	while ((ht = list) != NULL) {
		list = ht->ht_next;
		HATSTAT_INC(hs_reaped);
		htable_free(ht);
	}

	/*
	 * Free up excess reserves
	 */
	htable_adjust_reserve();
	hment_adjust_reserve();
}

/*
 * Allocate an htable, stealing one or using the reserve if necessary
 */
static htable_t *
htable_alloc(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	htable_t	*ht = NULL;
	uint_t		is_vlp;
	uint_t		is_bare = 0;
	uint_t		need_to_zero = 1;
	int		kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_alloc(): level %d out of range\n", level);

	is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
	if (is_vlp || shared != NULL)
		is_bare = 1;

	/*
	 * First reuse a cached htable from the hat_ht_cached field, this
	 * avoids unnecessary trips through kmem/page allocators.
	 */
	if (hat->hat_ht_cached != NULL && !is_bare) {
		hat_enter(hat);
		ht = hat->hat_ht_cached;
		if (ht != NULL) {
			hat->hat_ht_cached = ht->ht_next;
			need_to_zero = 0;
			/* XX64 ASSERT() they're all zero somehow */
			ASSERT(ht->ht_pfn != PFN_INVALID);
		}
		hat_exit(hat);
	}

	if (ht == NULL) {
		/*
		 * Allocate an htable, possibly refilling the reserves.
		 */
		if (USE_HAT_RESERVES()) {
			ht = htable_get_reserve();
		} else {
			/*
			 * Donate successful htable allocations to the reserve.
			 */
			for (;;) {
				ht = kmem_cache_alloc(htable_cache, kmflags);
				if (ht == NULL)
					break;
				ht->ht_pfn = PFN_INVALID;
				if (USE_HAT_RESERVES() ||
				    htable_reserve_cnt >= htable_reserve_amount)
					break;
				htable_put_reserve(ht);
			}
		}

		/*
		 * allocate a page for the hardware page table if needed
		 */
		if (ht != NULL && !is_bare) {
			ht->ht_hat = hat;
			ht->ht_pfn = ptable_alloc((uintptr_t)ht);
			if (ht->ht_pfn == PFN_INVALID) {
				if (USE_HAT_RESERVES())
					htable_put_reserve(ht);
				else
					kmem_cache_free(htable_cache, ht);
				ht = NULL;
			}
		}
	}

	/*
	 * If allocations failed, kick off a kmem_reap() and resort to
	 * htable_steal(). We may spin here if the system is very low on
	 * memory. If the kernel itself has consumed all memory and kmem_reap()
	 * can't free up anything, then we'll really get stuck here.
	 * That should only happen in a system where the administrator has
	 * misconfigured VM parameters via /etc/system.
	 */
	while (ht == NULL && can_steal_post_boot) {
		kmem_reap();
		ht = htable_steal(1);
		HATSTAT_INC(hs_steals);

		/*
		 * If we stole for a bare htable, release the pagetable page.
		 */
		if (ht != NULL) {
			if (is_bare) {
				ptable_free(ht->ht_pfn);
				ht->ht_pfn = PFN_INVALID;
#if defined(__xpv) && defined(__amd64)
			/*
			 * make stolen page table writable again in kpm
			 */
			} else if (kpm_vbase && xen_kpm_page(ht->ht_pfn,
			    PT_VALID | PT_WRITABLE) < 0) {
				panic("failure making kpm r/w pfn=0x%lx",
				    ht->ht_pfn);
#endif
			}
		}
	}

	/*
	 * All attempts to allocate or steal failed. This should only happen
	 * if we run out of memory during boot, due perhaps to a huge
	 * boot_archive. At this point there's no way to continue.
	 */
	if (ht == NULL)
		panic("htable_alloc(): couldn't steal\n");

#if defined(__amd64) && defined(__xpv)
	/*
	 * Under the 64-bit hypervisor, we have 2 top level page tables.
	 * If this allocation fails, we'll resort to stealing.
	 * We use the stolen page indirectly, by freeing the
	 * stolen htable first.
	 */
	if (level == mmu.max_level) {
		for (;;) {
			htable_t *stolen;

			hat->hat_user_ptable = ptable_alloc((uintptr_t)ht + 1);
			if (hat->hat_user_ptable != PFN_INVALID)
				break;
			stolen = htable_steal(1);
			if (stolen == NULL)
				panic("2nd steal ptable failed\n");
			htable_free(stolen);
		}
		block_zero_no_xmm(kpm_vbase + pfn_to_pa(hat->hat_user_ptable),
		    MMU_PAGESIZE);
	}
#endif

	/*
	 * Shared page tables have all entries locked and entries may not
	 * be added or deleted.
	 */
	ht->ht_flags = 0;
	if (shared != NULL) {
		ASSERT(shared->ht_valid_cnt > 0);
		ht->ht_flags |= HTABLE_SHARED_PFN;
		ht->ht_pfn = shared->ht_pfn;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;		/* updated in hat_share() */
		ht->ht_shares = shared;
		need_to_zero = 0;
	} else {
		ht->ht_shares = NULL;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;
	}

	/*
	 * setup flags, etc. for VLP htables
	 */
	if (is_vlp) {
		ht->ht_flags |= HTABLE_VLP;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		need_to_zero = 0;
	}

	/*
	 * fill in the htable
	 */
	ht->ht_hat = hat;
	ht->ht_parent = NULL;
	ht->ht_vaddr = vaddr;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;

	/*
	 * Zero out any freshly allocated page table
	 */
	if (need_to_zero)
		x86pte_zero(ht, 0, mmu.ptes_per_table);

#if defined(__amd64) && defined(__xpv)
	if (!is_bare && kpm_vbase) {
		(void) xen_kpm_page(ht->ht_pfn, PT_VALID);
		if (level == mmu.max_level)
			(void) xen_kpm_page(hat->hat_user_ptable, PT_VALID);
	}
#endif

	return (ht);
}

/*
 * Free up an htable, either to a hat's cached list, the reserves or
 * back to kmem.
 */
static void
htable_free(htable_t *ht)
{
	hat_t *hat = ht->ht_hat;

	/*
	 * If the process isn't exiting, cache the free htable in the hat
	 * structure. We always do this for the boot time reserve. We don't
	 * do this if the hat is exiting or we are stealing/reaping htables.
	 */
	if (hat != NULL &&
	    !(ht->ht_flags & HTABLE_SHARED_PFN) &&
	    (use_boot_reserve ||
	    (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
		ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
		ASSERT(ht->ht_pfn != PFN_INVALID);
		hat_enter(hat);
		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		hat_exit(hat);
		return;
	}

	/*
	 * If we have a hardware page table, free it.
	 * We don't free page tables that are accessed by sharing.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht->ht_pfn);
#if defined(__amd64) && defined(__xpv)
		if (ht->ht_level == mmu.max_level) {
			ptable_free(hat->hat_user_ptable);
			hat->hat_user_ptable = PFN_INVALID;
		}
#endif
	}
	ht->ht_pfn = PFN_INVALID;

	/*
	 * Free it or put into reserves.
	 */
	if (USE_HAT_RESERVES() || htable_reserve_cnt < htable_reserve_amount) {
		htable_put_reserve(ht);
	} else {
		kmem_cache_free(htable_cache, ht);
		htable_adjust_reserve();
	}
}


/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all left over
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t *ht;
	int h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_add_32(&htable_dont_cache, 1);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_add_32(&htable_dont_cache, -1);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	expect = MAKEPTP(old->ht_pfn, old->ht_level);
	x86pte_t	found;
	hat_t		*hat = old->ht_hat;

	ASSERT(higher->ht_busy > 0);
	ASSERT(higher->ht_valid_cnt > 0);
	ASSERT(old->ht_valid_cnt == 0);
	found = x86pte_cas(higher, entry, expect, 0);
#ifdef __xpv
	/*
	 * This is weird, but Xen apparently automatically unlinks empty
	 * pagetables from the upper page table. So allow PTP to be 0 already.
	 */
	if (found != expect && found != 0)
#else
	if (found != expect)
#endif
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);

	/*
	 * When a top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors.
	 *
	 * If we don't need to do that, then we still have to INVLPG against
	 * an address covered by the inner page table, as the latest processors
	 * have TLB-like caches for non-leaf page table entries.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		hat_tlb_inval(hat, (higher->ht_flags & HTABLE_VLP) ?
		    DEMAP_ALL_ADDR : old->ht_vaddr);
	}

	HTABLE_DEC(higher->ht_valid_cnt);
}

/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t	found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);

	/*
	 * When any top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors using it.
	 * We also need to do this for the kernel hat on PAE 32 bit kernel.
	 */
	if (
#ifdef __i386
	    (higher->ht_hat == kas.a_hat && higher->ht_level == VLP_LEVEL) ||
#endif
	    (higher->ht_flags & HTABLE_VLP))
		hat_tlb_inval(higher->ht_hat, DEMAP_ALL_ADDR);
}

/*
 * Release a hold on an htable. If this is the last use and the pagetable
 * is empty we may want to free it, then recursively look at the pagetable
 * above it. The recursion is handled by the outer while() loop.
 *
 * On the metal, during process exit, we don't bother unlinking the tables from
 * upper level pagetables. They are instead handled in bulk by hat_free_end().
 * We can't do this on the hypervisor as we need the page table to be
 * implicitly unpinned before it goes to the free page lists. This can't
 * happen unless we fully unlink it from the page table hierarchy.
 */
void
htable_release(htable_t *ht)
{
	uint_t		hashval;
	htable_t	*shared;
	htable_t	*higher;
	hat_t		*hat;
	uintptr_t	va;
	level_t		level;

	while (ht != NULL) {
		shared = NULL;
		for (;;) {
			hat = ht->ht_hat;
			va = ht->ht_vaddr;
			level = ht->ht_level;
			hashval = HTABLE_HASH(hat, va, level);

			/*
			 * The common case is that this isn't the last use of
			 * an htable so we don't want to free the htable.
			 */
			HTABLE_ENTER(hashval);
			ASSERT(ht->ht_valid_cnt >= 0);
			ASSERT(ht->ht_busy > 0);
			if (ht->ht_valid_cnt > 0)
				break;
			if (ht->ht_busy > 1)
				break;
			ASSERT(ht->ht_lock_cnt == 0);

#if !defined(__xpv)
			/*
			 * we always release empty shared htables
			 */
			if (!(ht->ht_flags & HTABLE_SHARED_PFN)) {

				/*
				 * don't release if in address space tear down
				 */
				if (hat->hat_flags & HAT_FREEING)
					break;

				/*
				 * At and above max_page_level, free if it's for
				 * a boot-time kernel mapping below kernelbase.
				 */
				if (level >= mmu.max_page_level &&
				    (hat != kas.a_hat || va >= kernelbase))
					break;
			}
#endif /* __xpv */

			/*
			 * Remember if we destroy an htable that shares its PFN
			 * from elsewhere.
			 */
			if (ht->ht_flags & HTABLE_SHARED_PFN) {
				ASSERT(shared == NULL);
				shared = ht->ht_shares;
				HATSTAT_INC(hs_htable_unshared);
			}

			/*
			 * Handle release of a table and freeing the htable_t.
			 * Unlink it from the table higher (ie. ht_parent).
			 */
			higher = ht->ht_parent;
			ASSERT(higher != NULL);

			/*
			 * Unlink the pagetable.
			 */
			unlink_ptp(higher, ht, va);

			/*
			 * remove this htable from its hash list
			 */
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[hashval] == ht);
				hat->hat_ht_hash[hashval] = ht->ht_next;
			}
			HTABLE_EXIT(hashval);
			htable_free(ht);
			ht = higher;
		}

		ASSERT(ht->ht_busy >= 1);
		--ht->ht_busy;
		HTABLE_EXIT(hashval);

		/*
		 * If we released a shared htable, do a release on the htable
		 * from which it shared
		 */
		ht = shared;
	}
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found acquires a hold that eventually needs to be htable_release()d
 */
htable_t *
htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level)
{
	uintptr_t	base;
	uint_t		hashval;
	htable_t	*ht = NULL;

	ASSERT(level >= 0);
	ASSERT(level <= TOP_LEVEL(hat));

	if (level == TOP_LEVEL(hat)) {
#if defined(__amd64)
		/*
		 * 32 bit address spaces on 64 bit kernels need to check
		 * for overflow of the 32 bit address space
		 */
		if ((hat->hat_flags & HAT_VLP) && vaddr >= ((uint64_t)1 << 32))
			return (NULL);
#endif
		base = 0;
	} else {
		base = vaddr & LEVEL_MASK(level + 1);
	}

	hashval = HTABLE_HASH(hat, base, level);
	HTABLE_ENTER(hashval);
	for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) {
		if (ht->ht_hat == hat &&
		    ht->ht_vaddr == base &&
		    ht->ht_level == level)
			break;
	}
	if (ht)
		++ht->ht_busy;

	HTABLE_EXIT(hashval);
	return (ht);
}
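
/*
 * Illustrative sketch of the hold/release protocol (not a real caller):
 *
 *	ht = htable_lookup(hat, vaddr, 0);
 *	if (ht != NULL) {
 *		... examine entries, e.g. with x86pte_get(ht, entry) ...
 *		htable_release(ht);
 *	}
 *
 * The hold (ht_busy) keeps the htable from being stolen or freed while
 * it is in use.
 */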

/*
 * Acquires a hold on a known htable (from a locked hment entry).
 */
void
htable_acquire(htable_t *ht)
{
	hat_t		*hat = ht->ht_hat;
	level_t		level = ht->ht_level;
	uintptr_t	base = ht->ht_vaddr;
	uint_t		hashval = HTABLE_HASH(hat, base, level);

	HTABLE_ENTER(hashval);
#ifdef DEBUG
	/*
	 * make sure the htable is there
	 */
	{
		htable_t	*h;

		for (h = hat->hat_ht_hash[hashval];
		    h && h != ht;
		    h = h->ht_next)
			;
		ASSERT(h == ht);
	}
#endif /* DEBUG */
	++ht->ht_busy;
	HTABLE_EXIT(hashval);
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found acquires a hold that eventually needs to be htable_release()d
 * If not found the table is created.
 *
 * Since we can't hold a hash table mutex during allocation, we have to
 * drop it and redo the search on a create. Then we may have to free the newly
 * allocated htable if another thread raced in and created it ahead of us.
 */
htable_t *
htable_create(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	uint_t		h;
	level_t		l;
	uintptr_t	base;
	htable_t	*ht;
	htable_t	*higher = NULL;
	htable_t	*new = NULL;

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_create(): level %d out of range\n", level);

	/*
	 * Create the page tables in top down order.
	 */
	for (l = TOP_LEVEL(hat); l >= level; --l) {
		new = NULL;
		if (l == TOP_LEVEL(hat))
			base = 0;
		else
			base = vaddr & LEVEL_MASK(l + 1);

		h = HTABLE_HASH(hat, base, l);
try_again:
		/*
		 * look up the htable at this level
		 */
		HTABLE_ENTER(h);
		if (l == TOP_LEVEL(hat)) {
			ht = hat->hat_htable;
		} else {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				ASSERT(ht->ht_hat == hat);
				if (ht->ht_vaddr == base &&
				    ht->ht_level == l)
					break;
			}
		}

		/*
		 * if we found the htable, increment its busy cnt
		 * and if we had allocated a new htable, free it.
		 */
		if (ht != NULL) {
			/*
			 * If we find a pre-existing shared table, it must
			 * share from the same place.
			 */
			if (l == level && shared && ht->ht_shares &&
			    ht->ht_shares != shared) {
				panic("htable shared from wrong place "
				    "found htable=%p shared=%p",
				    (void *)ht, (void *)shared);
			}
			++ht->ht_busy;
			HTABLE_EXIT(h);
			if (new)
				htable_free(new);
			if (higher != NULL)
				htable_release(higher);
			higher = ht;

			/*
			 * if we didn't find it on the first search
			 * allocate a new one and search again
			 */
		} else if (new == NULL) {
			HTABLE_EXIT(h);
			new = htable_alloc(hat, base, l,
			    l == level ? shared : NULL);
			goto try_again;

			/*
			 * 2nd search and still not there, use "new" table
			 * Link new table into higher, when not at top level.
			 */
		} else {
			ht = new;
			if (higher != NULL) {
				link_ptp(higher, ht, base);
				ht->ht_parent = higher;
			}
			ht->ht_next = hat->hat_ht_hash[h];
			ASSERT(ht->ht_prev == NULL);
			if (hat->hat_ht_hash[h])
				hat->hat_ht_hash[h]->ht_prev = ht;
			hat->hat_ht_hash[h] = ht;
			HTABLE_EXIT(h);
14040Sstevel@tonic-gate */
14050Sstevel@tonic-gate higher = ht;
14060Sstevel@tonic-gate 
14070Sstevel@tonic-gate /*
14080Sstevel@tonic-gate * If we just created a new shared page table we
14090Sstevel@tonic-gate * increment the shared htable's busy count, so that
14100Sstevel@tonic-gate * it can't be the victim of a steal even if it's empty.
14110Sstevel@tonic-gate */
14120Sstevel@tonic-gate if (l == level && shared) {
14130Sstevel@tonic-gate (void) htable_lookup(shared->ht_hat,
14140Sstevel@tonic-gate shared->ht_vaddr, shared->ht_level);
14150Sstevel@tonic-gate HATSTAT_INC(hs_htable_shared);
14160Sstevel@tonic-gate }
14170Sstevel@tonic-gate }
14180Sstevel@tonic-gate }
14190Sstevel@tonic-gate 
14200Sstevel@tonic-gate return (ht);
14210Sstevel@tonic-gate }
14220Sstevel@tonic-gate 
14230Sstevel@tonic-gate /*
14245084Sjohnlev * Inherit initial pagetables from the boot program. On the 64-bit
14255084Sjohnlev * hypervisor we also temporarily mark the p_index field of page table
14265084Sjohnlev * pages, so we know not to try making them writable in seg_kpm.
14273446Smrj */
14283446Smrj void
14293446Smrj htable_attach(
14303446Smrj hat_t *hat,
14313446Smrj uintptr_t base,
14323446Smrj level_t level,
14333446Smrj htable_t *parent,
14343446Smrj pfn_t pfn)
14353446Smrj {
14363446Smrj htable_t *ht;
14373446Smrj uint_t h;
14383446Smrj uint_t i;
14393446Smrj x86pte_t pte;
14403446Smrj x86pte_t *ptep;
14413446Smrj page_t *pp;
14423446Smrj extern page_t *boot_claim_page(pfn_t);
14433446Smrj 
14443446Smrj ht = htable_get_reserve();
14453446Smrj if (level == mmu.max_level)
14463446Smrj kas.a_hat->hat_htable = ht;
14473446Smrj ht->ht_hat = hat;
14483446Smrj ht->ht_parent = parent;
14493446Smrj ht->ht_vaddr = base;
14503446Smrj ht->ht_level = level;
14513446Smrj ht->ht_busy = 1;
14523446Smrj ht->ht_next = NULL;
14533446Smrj ht->ht_prev = NULL;
14543446Smrj ht->ht_flags = 0;
14553446Smrj ht->ht_pfn = pfn;
14563446Smrj ht->ht_lock_cnt = 0;
14573446Smrj ht->ht_valid_cnt = 0;
14583446Smrj if (parent != NULL)
14593446Smrj ++parent->ht_busy;
14603446Smrj 
14613446Smrj h = HTABLE_HASH(hat, base, level);
14623446Smrj HTABLE_ENTER(h);
14633446Smrj ht->ht_next = hat->hat_ht_hash[h];
14643446Smrj ASSERT(ht->ht_prev == NULL);
14653446Smrj if (hat->hat_ht_hash[h])
14663446Smrj hat->hat_ht_hash[h]->ht_prev = ht;
14673446Smrj hat->hat_ht_hash[h] = ht;
14683446Smrj HTABLE_EXIT(h);
14693446Smrj 
14703446Smrj /*
14713446Smrj * make sure the page table physical page is not FREE
14723446Smrj */
14733446Smrj if (page_resv(1, KM_NOSLEEP) == 0)
14743446Smrj panic("page_resv() failed in ptable alloc");
14753446Smrj 
14763446Smrj pp = boot_claim_page(pfn);
14773446Smrj ASSERT(pp != NULL);
14783446Smrj page_downgrade(pp);
14795084Sjohnlev #if defined(__xpv) && defined(__amd64)
14803446Smrj /*
14813446Smrj * Record in the page_t that this is a pagetable, for segkpm setup.
14823446Smrj */
14833446Smrj if (kpm_vbase)
14843446Smrj pp->p_index = 1;
14855084Sjohnlev #endif
14863446Smrj 
14873446Smrj /*
14883446Smrj * Count valid mappings and recursively attach lower level pagetables.
14893446Smrj */
14903446Smrj ptep = kbm_remap_window(pfn_to_pa(pfn), 0);
14913446Smrj for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) {
14923446Smrj if (mmu.pae_hat)
14933446Smrj pte = ptep[i];
14943446Smrj else
14953446Smrj pte = ((x86pte32_t *)ptep)[i];
14963446Smrj if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) {
14973446Smrj ++ht->ht_valid_cnt;
14983446Smrj if (!PTE_ISPAGE(pte, level)) {
14993446Smrj htable_attach(hat, base, level - 1,
15003446Smrj ht, PTE2PFN(pte, level));
15013446Smrj ptep = kbm_remap_window(pfn_to_pa(pfn), 0);
15023446Smrj }
15033446Smrj }
15043446Smrj base += LEVEL_SIZE(level);
15053446Smrj if (base == mmu.hole_start)
15063446Smrj base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK;
15073446Smrj }
15083446Smrj 
15093446Smrj /*
15103446Smrj * As long as all the mappings we had were below kernel base
15113446Smrj * we can release the htable.
15123446Smrj */
15133446Smrj if (base < kernelbase)
15143446Smrj htable_release(ht);
15153446Smrj }
15163446Smrj 
15173446Smrj /*
15180Sstevel@tonic-gate * Walk through a given htable looking for the first valid entry. This
15190Sstevel@tonic-gate * routine takes both a starting and ending address. The starting address
15200Sstevel@tonic-gate * is required to be within the htable provided by the caller, but there is
15210Sstevel@tonic-gate * no such restriction on the ending address.
15220Sstevel@tonic-gate *
15230Sstevel@tonic-gate * If the routine finds a valid entry in the htable (at or beyond the
15240Sstevel@tonic-gate * starting address), the PTE (and its address) will be returned.
15250Sstevel@tonic-gate * This PTE may correspond to either a page or a pagetable - it is the
15260Sstevel@tonic-gate * caller's responsibility to determine which. If no valid entry is
15270Sstevel@tonic-gate * found, 0 (an invalid PTE) and the next unexamined address will be
15280Sstevel@tonic-gate * returned.
15290Sstevel@tonic-gate *
15300Sstevel@tonic-gate * The loop has been carefully coded for optimization.
15310Sstevel@tonic-gate */
15320Sstevel@tonic-gate static x86pte_t
15330Sstevel@tonic-gate htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr)
15340Sstevel@tonic-gate {
15350Sstevel@tonic-gate uint_t e;
15360Sstevel@tonic-gate x86pte_t found_pte = (x86pte_t)0;
15373446Smrj caddr_t pte_ptr;
15383446Smrj caddr_t end_pte_ptr;
15390Sstevel@tonic-gate int l = ht->ht_level;
15400Sstevel@tonic-gate uintptr_t va = *vap & LEVEL_MASK(l);
15410Sstevel@tonic-gate size_t pgsize = LEVEL_SIZE(l);
15420Sstevel@tonic-gate 
15430Sstevel@tonic-gate ASSERT(va >= ht->ht_vaddr);
15440Sstevel@tonic-gate ASSERT(va <= HTABLE_LAST_PAGE(ht));
15450Sstevel@tonic-gate 
15460Sstevel@tonic-gate /*
15470Sstevel@tonic-gate * Compute the starting index and ending virtual address
15480Sstevel@tonic-gate */
15490Sstevel@tonic-gate e = htable_va2entry(va, ht);
15500Sstevel@tonic-gate 
15510Sstevel@tonic-gate /*
15520Sstevel@tonic-gate * The following page table scan code knows that the valid
15530Sstevel@tonic-gate * bit of a PTE is in the lowest byte AND that x86 is little endian!!
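 *
 * To illustrate (assuming the usual PT_VALID == 0x1): a PTE with value
 * 0x12345067 is stored with its low byte 0x67 at the lowest address, so
 * testing the valid bit through a byte-sized load works the same whether
 * each entry is 4 or 8 bytes (mmu.pte_size) wide.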
15540Sstevel@tonic-gate */ 15553446Smrj pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0); 15563446Smrj end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht)); 15573446Smrj pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e); 15582687Skchow while (!PTE_ISVALID(*pte_ptr)) { 15590Sstevel@tonic-gate va += pgsize; 15600Sstevel@tonic-gate if (va >= eaddr) 15610Sstevel@tonic-gate break; 15620Sstevel@tonic-gate pte_ptr += mmu.pte_size; 15630Sstevel@tonic-gate ASSERT(pte_ptr <= end_pte_ptr); 15640Sstevel@tonic-gate if (pte_ptr == end_pte_ptr) 15650Sstevel@tonic-gate break; 15660Sstevel@tonic-gate } 15670Sstevel@tonic-gate 15680Sstevel@tonic-gate /* 15690Sstevel@tonic-gate * if we found a valid PTE, load the entire PTE 15700Sstevel@tonic-gate */ 15713446Smrj if (va < eaddr && pte_ptr != end_pte_ptr) 15723446Smrj found_pte = GET_PTE((x86pte_t *)pte_ptr); 15730Sstevel@tonic-gate x86pte_release_pagetable(ht); 15740Sstevel@tonic-gate 15750Sstevel@tonic-gate #if defined(__amd64) 15760Sstevel@tonic-gate /* 15770Sstevel@tonic-gate * deal with VA hole on amd64 15780Sstevel@tonic-gate */ 15790Sstevel@tonic-gate if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end) 15800Sstevel@tonic-gate va = mmu.hole_end + va - mmu.hole_start; 15810Sstevel@tonic-gate #endif /* __amd64 */ 15820Sstevel@tonic-gate 15830Sstevel@tonic-gate *vap = va; 15840Sstevel@tonic-gate return (found_pte); 15850Sstevel@tonic-gate } 15860Sstevel@tonic-gate 15870Sstevel@tonic-gate /* 15880Sstevel@tonic-gate * Find the address and htable for the first populated translation at or 15890Sstevel@tonic-gate * above the given virtual address. The caller may also specify an upper 15900Sstevel@tonic-gate * limit to the address range to search. Uses level information to quickly 15910Sstevel@tonic-gate * skip unpopulated sections of virtual address spaces. 15920Sstevel@tonic-gate * 15930Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable and virt addr 15940Sstevel@tonic-gate * and has a hold on the htable. 15950Sstevel@tonic-gate */ 15960Sstevel@tonic-gate x86pte_t 15970Sstevel@tonic-gate htable_walk( 15980Sstevel@tonic-gate struct hat *hat, 15990Sstevel@tonic-gate htable_t **htp, 16000Sstevel@tonic-gate uintptr_t *vaddr, 16010Sstevel@tonic-gate uintptr_t eaddr) 16020Sstevel@tonic-gate { 16030Sstevel@tonic-gate uintptr_t va = *vaddr; 16040Sstevel@tonic-gate htable_t *ht; 16050Sstevel@tonic-gate htable_t *prev = *htp; 16060Sstevel@tonic-gate level_t l; 16070Sstevel@tonic-gate level_t max_mapped_level; 16080Sstevel@tonic-gate x86pte_t pte; 16090Sstevel@tonic-gate 16100Sstevel@tonic-gate ASSERT(eaddr > va); 16110Sstevel@tonic-gate 16120Sstevel@tonic-gate /* 16130Sstevel@tonic-gate * If this is a user address, then we know we need not look beyond 16140Sstevel@tonic-gate * kernelbase. 16150Sstevel@tonic-gate */ 16160Sstevel@tonic-gate ASSERT(hat == kas.a_hat || eaddr <= kernelbase || 16170Sstevel@tonic-gate eaddr == HTABLE_WALK_TO_END); 16180Sstevel@tonic-gate if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END) 16190Sstevel@tonic-gate eaddr = kernelbase; 16200Sstevel@tonic-gate 16210Sstevel@tonic-gate /* 16220Sstevel@tonic-gate * If we're coming in with a previous page table, search it first 16230Sstevel@tonic-gate * without doing an htable_lookup(), this should be frequent. 
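 *
 * A sketch of the typical calling convention (illustrative only):
 *
 *	ht = NULL;
 *	for (va = start; va < eaddr; va += LEVEL_SIZE(ht->ht_level)) {
 *		pte = htable_walk(hat, &ht, &va, eaddr);
 *		if (pte == 0)
 *			break;
 *		<process the mapping at va>
 *	}
 *	if (ht != NULL)
 *		htable_release(ht);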
16240Sstevel@tonic-gate */ 16250Sstevel@tonic-gate if (prev) { 16260Sstevel@tonic-gate ASSERT(prev->ht_busy > 0); 16270Sstevel@tonic-gate ASSERT(prev->ht_vaddr <= va); 16280Sstevel@tonic-gate l = prev->ht_level; 16290Sstevel@tonic-gate if (va <= HTABLE_LAST_PAGE(prev)) { 16300Sstevel@tonic-gate pte = htable_scan(prev, &va, eaddr); 16310Sstevel@tonic-gate 16320Sstevel@tonic-gate if (PTE_ISPAGE(pte, l)) { 16330Sstevel@tonic-gate *vaddr = va; 16340Sstevel@tonic-gate *htp = prev; 16350Sstevel@tonic-gate return (pte); 16360Sstevel@tonic-gate } 16370Sstevel@tonic-gate } 16380Sstevel@tonic-gate 16390Sstevel@tonic-gate /* 16400Sstevel@tonic-gate * We found nothing in the htable provided by the caller, 16410Sstevel@tonic-gate * so fall through and do the full search 16420Sstevel@tonic-gate */ 16430Sstevel@tonic-gate htable_release(prev); 16440Sstevel@tonic-gate } 16450Sstevel@tonic-gate 16460Sstevel@tonic-gate /* 16470Sstevel@tonic-gate * Find the level of the largest pagesize used by this HAT. 16480Sstevel@tonic-gate */ 16494654Sjosephb if (hat->hat_ism_pgcnt > 0) { 16505349Skchow max_mapped_level = mmu.umax_page_level; 16514654Sjosephb } else { 16524654Sjosephb max_mapped_level = 0; 16534654Sjosephb for (l = 1; l <= mmu.max_page_level; ++l) 16544654Sjosephb if (hat->hat_pages_mapped[l] != 0) 16554654Sjosephb max_mapped_level = l; 16564654Sjosephb } 16570Sstevel@tonic-gate 16580Sstevel@tonic-gate while (va < eaddr && va >= *vaddr) { 16590Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va)); 16600Sstevel@tonic-gate 16610Sstevel@tonic-gate /* 16620Sstevel@tonic-gate * Find lowest table with any entry for given address. 16630Sstevel@tonic-gate */ 16640Sstevel@tonic-gate for (l = 0; l <= TOP_LEVEL(hat); ++l) { 16650Sstevel@tonic-gate ht = htable_lookup(hat, va, l); 16660Sstevel@tonic-gate if (ht != NULL) { 16670Sstevel@tonic-gate pte = htable_scan(ht, &va, eaddr); 16680Sstevel@tonic-gate if (PTE_ISPAGE(pte, l)) { 16690Sstevel@tonic-gate *vaddr = va; 16700Sstevel@tonic-gate *htp = ht; 16710Sstevel@tonic-gate return (pte); 16720Sstevel@tonic-gate } 16730Sstevel@tonic-gate htable_release(ht); 16740Sstevel@tonic-gate break; 16750Sstevel@tonic-gate } 16760Sstevel@tonic-gate 16770Sstevel@tonic-gate /* 16784654Sjosephb * No htable at this level for the address. If there 16794654Sjosephb * is no larger page size that could cover it, we can 16804654Sjosephb * skip right to the start of the next page table. 16814575Sdm120769 */ 16824575Sdm120769 ASSERT(l < TOP_LEVEL(hat)); 16834575Sdm120769 if (l >= max_mapped_level) { 16840Sstevel@tonic-gate va = NEXT_ENTRY_VA(va, l + 1); 16854654Sjosephb if (va >= eaddr) 16864654Sjosephb break; 16874575Sdm120769 } 16880Sstevel@tonic-gate } 16890Sstevel@tonic-gate } 16900Sstevel@tonic-gate 16910Sstevel@tonic-gate *vaddr = 0; 16920Sstevel@tonic-gate *htp = NULL; 16930Sstevel@tonic-gate return (0); 16940Sstevel@tonic-gate } 16950Sstevel@tonic-gate 16960Sstevel@tonic-gate /* 16970Sstevel@tonic-gate * Find the htable and page table entry index of the given virtual address 16980Sstevel@tonic-gate * with pagesize at or below given level. 16990Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable, sets 17000Sstevel@tonic-gate * entry, and has a hold on the htable. 
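 *
 * Illustrative use (a hypothetical caller probing for a 4K, level 0
 * mapping):
 *
 *	uint_t entry;
 *	x86pte_t pte;
 *	htable_t *ht = htable_getpte(hat, va, &entry, &pte, 0);
 *	if (ht != NULL) {
 *		<examine pte / entry>
 *		htable_release(ht);
 *	}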
17010Sstevel@tonic-gate */ 17020Sstevel@tonic-gate htable_t * 17030Sstevel@tonic-gate htable_getpte( 17040Sstevel@tonic-gate struct hat *hat, 17050Sstevel@tonic-gate uintptr_t vaddr, 17060Sstevel@tonic-gate uint_t *entry, 17070Sstevel@tonic-gate x86pte_t *pte, 17080Sstevel@tonic-gate level_t level) 17090Sstevel@tonic-gate { 17100Sstevel@tonic-gate htable_t *ht; 17110Sstevel@tonic-gate level_t l; 17120Sstevel@tonic-gate uint_t e; 17130Sstevel@tonic-gate 17140Sstevel@tonic-gate ASSERT(level <= mmu.max_page_level); 17150Sstevel@tonic-gate 17160Sstevel@tonic-gate for (l = 0; l <= level; ++l) { 17170Sstevel@tonic-gate ht = htable_lookup(hat, vaddr, l); 17180Sstevel@tonic-gate if (ht == NULL) 17190Sstevel@tonic-gate continue; 17200Sstevel@tonic-gate e = htable_va2entry(vaddr, ht); 17210Sstevel@tonic-gate if (entry != NULL) 17220Sstevel@tonic-gate *entry = e; 17230Sstevel@tonic-gate if (pte != NULL) 17240Sstevel@tonic-gate *pte = x86pte_get(ht, e); 17250Sstevel@tonic-gate return (ht); 17260Sstevel@tonic-gate } 17270Sstevel@tonic-gate return (NULL); 17280Sstevel@tonic-gate } 17290Sstevel@tonic-gate 17300Sstevel@tonic-gate /* 17310Sstevel@tonic-gate * Find the htable and page table entry index of the given virtual address. 17320Sstevel@tonic-gate * There must be a valid page mapped at the given address. 17330Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable, sets 17340Sstevel@tonic-gate * entry, and has a hold on the htable. 17350Sstevel@tonic-gate */ 17360Sstevel@tonic-gate htable_t * 17370Sstevel@tonic-gate htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry) 17380Sstevel@tonic-gate { 17390Sstevel@tonic-gate htable_t *ht; 17400Sstevel@tonic-gate uint_t e; 17410Sstevel@tonic-gate x86pte_t pte; 17420Sstevel@tonic-gate 17430Sstevel@tonic-gate ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level); 17440Sstevel@tonic-gate if (ht == NULL) 17450Sstevel@tonic-gate return (NULL); 17460Sstevel@tonic-gate 17470Sstevel@tonic-gate if (entry) 17480Sstevel@tonic-gate *entry = e; 17490Sstevel@tonic-gate 17500Sstevel@tonic-gate if (PTE_ISPAGE(pte, ht->ht_level)) 17510Sstevel@tonic-gate return (ht); 17520Sstevel@tonic-gate htable_release(ht); 17530Sstevel@tonic-gate return (NULL); 17540Sstevel@tonic-gate } 17550Sstevel@tonic-gate 17560Sstevel@tonic-gate 17570Sstevel@tonic-gate void 17580Sstevel@tonic-gate htable_init() 17590Sstevel@tonic-gate { 17600Sstevel@tonic-gate /* 17610Sstevel@tonic-gate * To save on kernel VA usage, we avoid debug information in 32 bit 17620Sstevel@tonic-gate * kernels. 
17630Sstevel@tonic-gate */
17640Sstevel@tonic-gate #if defined(__amd64)
17650Sstevel@tonic-gate int kmem_flags = KMC_NOHASH;
17660Sstevel@tonic-gate #elif defined(__i386)
17670Sstevel@tonic-gate int kmem_flags = KMC_NOHASH | KMC_NODEBUG;
17680Sstevel@tonic-gate #endif
17690Sstevel@tonic-gate 
17700Sstevel@tonic-gate /*
17710Sstevel@tonic-gate * initialize kmem caches
17720Sstevel@tonic-gate */
17730Sstevel@tonic-gate htable_cache = kmem_cache_create("htable_t",
17740Sstevel@tonic-gate sizeof (htable_t), 0, NULL, NULL,
17750Sstevel@tonic-gate htable_reap, NULL, hat_memload_arena, kmem_flags);
17760Sstevel@tonic-gate }
17770Sstevel@tonic-gate 
17780Sstevel@tonic-gate /*
17790Sstevel@tonic-gate * get the pte index for the virtual address in the given htable's pagetable
17800Sstevel@tonic-gate */
17810Sstevel@tonic-gate uint_t
17820Sstevel@tonic-gate htable_va2entry(uintptr_t va, htable_t *ht)
17830Sstevel@tonic-gate {
17840Sstevel@tonic-gate level_t l = ht->ht_level;
17850Sstevel@tonic-gate 
17860Sstevel@tonic-gate ASSERT(va >= ht->ht_vaddr);
17870Sstevel@tonic-gate ASSERT(va <= HTABLE_LAST_PAGE(ht));
17883446Smrj return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1));
17890Sstevel@tonic-gate }
17900Sstevel@tonic-gate 
17910Sstevel@tonic-gate /*
17920Sstevel@tonic-gate * Given an htable and the index of a pte in it, return the virtual address
17930Sstevel@tonic-gate * of the page.
17940Sstevel@tonic-gate */
17950Sstevel@tonic-gate uintptr_t
17960Sstevel@tonic-gate htable_e2va(htable_t *ht, uint_t entry)
17970Sstevel@tonic-gate {
17980Sstevel@tonic-gate level_t l = ht->ht_level;
17990Sstevel@tonic-gate uintptr_t va;
18000Sstevel@tonic-gate 
18013446Smrj ASSERT(entry < HTABLE_NUM_PTES(ht));
18020Sstevel@tonic-gate va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l));
18030Sstevel@tonic-gate 
18040Sstevel@tonic-gate /*
18050Sstevel@tonic-gate * Need to skip over any VA hole in top level table
18060Sstevel@tonic-gate */
18070Sstevel@tonic-gate #if defined(__amd64)
18080Sstevel@tonic-gate if (ht->ht_level == mmu.max_level && va >= mmu.hole_start)
18090Sstevel@tonic-gate va += ((mmu.hole_end - mmu.hole_start) + 1);
18100Sstevel@tonic-gate #endif
18110Sstevel@tonic-gate 
18120Sstevel@tonic-gate return (va);
18130Sstevel@tonic-gate }
18140Sstevel@tonic-gate 
18150Sstevel@tonic-gate /*
18160Sstevel@tonic-gate * The code uses compare and swap instructions to read/write PTE's to
18170Sstevel@tonic-gate * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems,
18180Sstevel@tonic-gate * while on 64 bit systems an aligned load or store will naturally be atomic.
18190Sstevel@tonic-gate *
18200Sstevel@tonic-gate * The combination of kpreempt_disable()/_enable() and the hci_mutex
18210Sstevel@tonic-gate * is used to ensure that an interrupt won't overwrite a temporary mapping
18220Sstevel@tonic-gate * while it's in use. If an interrupt thread tries to access a PTE, it will
18230Sstevel@tonic-gate * yield briefly back to the pinned thread which holds the cpu's hci_mutex.
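 *
 * The resulting access protocol, in outline:
 *
 *	kpreempt_disable();			<- pin to this CPU
 *	mutex_enter(&CPU->cpu_hat_info->hci_mutex);
 *	<point the per-CPU window PTE at the pagetable's pfn, flush TLB>
 *	... read/modify PTEs through PWIN_VA(x) ...
 *	mutex_exit(&CPU->cpu_hat_info->hci_mutex);
 *	kpreempt_enable();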
18240Sstevel@tonic-gate */
18250Sstevel@tonic-gate void
18263446Smrj x86pte_cpu_init(cpu_t *cpu)
18270Sstevel@tonic-gate {
18280Sstevel@tonic-gate struct hat_cpu_info *hci;
18290Sstevel@tonic-gate 
18303446Smrj hci = kmem_zalloc(sizeof (*hci), KM_SLEEP);
18310Sstevel@tonic-gate mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL);
18320Sstevel@tonic-gate cpu->cpu_hat_info = hci;
18330Sstevel@tonic-gate }
18340Sstevel@tonic-gate 
18353446Smrj void
18363446Smrj x86pte_cpu_fini(cpu_t *cpu)
18373446Smrj {
18383446Smrj struct hat_cpu_info *hci = cpu->cpu_hat_info;
18393446Smrj 
18403446Smrj kmem_free(hci, sizeof (*hci));
18413446Smrj cpu->cpu_hat_info = NULL;
18423446Smrj }
18433446Smrj 
18443446Smrj #ifdef __i386
18450Sstevel@tonic-gate /*
18463446Smrj * On 32 bit kernels, loading a 64 bit PTE is a little tricky
18470Sstevel@tonic-gate */
18483446Smrj x86pte_t
18493446Smrj get_pte64(x86pte_t *ptr)
18503446Smrj {
18513446Smrj volatile uint32_t *p = (uint32_t *)ptr;
18523446Smrj x86pte_t t;
18533446Smrj 
18543446Smrj ASSERT(mmu.pae_hat != 0);
18553446Smrj for (;;) {
18563446Smrj t = p[0];
18573446Smrj t |= (uint64_t)p[1] << 32;
18583446Smrj if ((t & 0xffffffff) == p[0])
18593446Smrj return (t);
18603446Smrj }
18610Sstevel@tonic-gate }
18623446Smrj #endif /* __i386 */
18630Sstevel@tonic-gate 
18640Sstevel@tonic-gate /*
18650Sstevel@tonic-gate * Disable preemption and establish a mapping to the pagetable with the
18660Sstevel@tonic-gate * given pfn. This is optimized for the case where it's the same
18670Sstevel@tonic-gate * pfn as we last referenced from this CPU.
18680Sstevel@tonic-gate */
18690Sstevel@tonic-gate static x86pte_t *
18703446Smrj x86pte_access_pagetable(htable_t *ht, uint_t index)
18710Sstevel@tonic-gate {
18720Sstevel@tonic-gate /*
18730Sstevel@tonic-gate * VLP pagetables are contained in the hat_t
18740Sstevel@tonic-gate */
18750Sstevel@tonic-gate if (ht->ht_flags & HTABLE_VLP)
18763446Smrj return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index));
18773446Smrj return (x86pte_mapin(ht->ht_pfn, index, ht));
18783446Smrj }
18790Sstevel@tonic-gate 
18803446Smrj /*
18813446Smrj * map the given pfn into the page table window.
18823446Smrj */
18833446Smrj /*ARGSUSED*/
18843446Smrj x86pte_t *
18853446Smrj x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht)
18863446Smrj {
18873446Smrj x86pte_t *pteptr;
18885217Sjosephb x86pte_t pte = 0;
18893446Smrj x86pte_t newpte;
18903446Smrj int x;
18913446Smrj 
18920Sstevel@tonic-gate ASSERT(pfn != PFN_INVALID);
18930Sstevel@tonic-gate 
18940Sstevel@tonic-gate if (!khat_running) {
18953446Smrj caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1);
18963446Smrj return (PT_INDEX_PTR(va, index));
18970Sstevel@tonic-gate }
18980Sstevel@tonic-gate 
18990Sstevel@tonic-gate /*
19003446Smrj * If kpm is available, use it.
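 * (kpm keeps a permanent kernel mapping of all physical memory, so the
 * PTE can be read or written directly at hat_kpm_pfn2va(pfn) plus
 * index << mmu.pte_size_shift, which is what PT_INDEX_PTR() computes;
 * no per-CPU window or TLB flush is needed.)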
19013446Smrj */ 19023446Smrj if (kpm_vbase) 19033446Smrj return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index)); 19043446Smrj 19053446Smrj /* 19063446Smrj * Disable preemption and grab the CPU's hci_mutex 19070Sstevel@tonic-gate */ 19080Sstevel@tonic-gate kpreempt_disable(); 19093446Smrj ASSERT(CPU->cpu_hat_info != NULL); 19103446Smrj mutex_enter(&CPU->cpu_hat_info->hci_mutex); 19113446Smrj x = PWIN_TABLE(CPU->cpu_id); 19123446Smrj pteptr = (x86pte_t *)PWIN_PTE_VA(x); 19135217Sjosephb #ifndef __xpv 19143446Smrj if (mmu.pae_hat) 19153446Smrj pte = *pteptr; 19163446Smrj else 19173446Smrj pte = *(x86pte32_t *)pteptr; 19185217Sjosephb #endif 19193446Smrj 19203446Smrj newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx; 19215084Sjohnlev 19225084Sjohnlev /* 19235084Sjohnlev * For hardware we can use a writable mapping. 19245084Sjohnlev */ 19255084Sjohnlev #ifdef __xpv 19265084Sjohnlev if (IN_XPV_PANIC()) 19275084Sjohnlev #endif 19285084Sjohnlev newpte |= PT_WRITABLE; 19293446Smrj 19303446Smrj if (!PTE_EQUIV(newpte, pte)) { 19315084Sjohnlev 19325084Sjohnlev #ifdef __xpv 19335084Sjohnlev if (!IN_XPV_PANIC()) { 19345084Sjohnlev xen_map(newpte, PWIN_VA(x)); 19355084Sjohnlev } else 19365084Sjohnlev #endif 19375084Sjohnlev { 19385084Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 19395084Sjohnlev if (mmu.pae_hat) 19405084Sjohnlev *pteptr = newpte; 19415084Sjohnlev else 19425084Sjohnlev *(x86pte32_t *)pteptr = newpte; 19435084Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 19445084Sjohnlev mmu_tlbflush_entry((caddr_t)(PWIN_VA(x))); 19455084Sjohnlev } 19460Sstevel@tonic-gate } 19473446Smrj return (PT_INDEX_PTR(PWIN_VA(x), index)); 19480Sstevel@tonic-gate } 19490Sstevel@tonic-gate 19500Sstevel@tonic-gate /* 19510Sstevel@tonic-gate * Release access to a page table. 19520Sstevel@tonic-gate */ 19530Sstevel@tonic-gate static void 19540Sstevel@tonic-gate x86pte_release_pagetable(htable_t *ht) 19550Sstevel@tonic-gate { 19560Sstevel@tonic-gate /* 19570Sstevel@tonic-gate * nothing to do for VLP htables 19580Sstevel@tonic-gate */ 19590Sstevel@tonic-gate if (ht->ht_flags & HTABLE_VLP) 19600Sstevel@tonic-gate return; 19610Sstevel@tonic-gate 19623446Smrj x86pte_mapout(); 19633446Smrj } 19643446Smrj 19653446Smrj void 19663446Smrj x86pte_mapout(void) 19673446Smrj { 19685084Sjohnlev if (kpm_vbase != NULL || !khat_running) 19690Sstevel@tonic-gate return; 19700Sstevel@tonic-gate 19710Sstevel@tonic-gate /* 19723446Smrj * Drop the CPU's hci_mutex and restore preemption. 19730Sstevel@tonic-gate */ 19745217Sjosephb #ifdef __xpv 19755217Sjosephb if (!IN_XPV_PANIC()) { 19765217Sjosephb uintptr_t va; 19775217Sjosephb 19785217Sjosephb /* 19795217Sjosephb * We need to always clear the mapping in case a page 19805217Sjosephb * that was once a page table page is ballooned out. 
19815217Sjosephb */ 19825217Sjosephb va = (uintptr_t)PWIN_VA(PWIN_TABLE(CPU->cpu_id)); 19835217Sjosephb (void) HYPERVISOR_update_va_mapping(va, 0, 19845217Sjosephb UVMF_INVLPG | UVMF_LOCAL); 19855217Sjosephb } 19865217Sjosephb #endif 19873446Smrj mutex_exit(&CPU->cpu_hat_info->hci_mutex); 19880Sstevel@tonic-gate kpreempt_enable(); 19890Sstevel@tonic-gate } 19900Sstevel@tonic-gate 19910Sstevel@tonic-gate /* 19920Sstevel@tonic-gate * Atomic retrieval of a pagetable entry 19930Sstevel@tonic-gate */ 19940Sstevel@tonic-gate x86pte_t 19950Sstevel@tonic-gate x86pte_get(htable_t *ht, uint_t entry) 19960Sstevel@tonic-gate { 19970Sstevel@tonic-gate x86pte_t pte; 199847Sjosephb x86pte_t *ptep; 19990Sstevel@tonic-gate 20000Sstevel@tonic-gate /* 200147Sjosephb * Be careful that loading PAE entries in 32 bit kernel is atomic. 20020Sstevel@tonic-gate */ 20033446Smrj ASSERT(entry < mmu.ptes_per_table); 20043446Smrj ptep = x86pte_access_pagetable(ht, entry); 20053446Smrj pte = GET_PTE(ptep); 20060Sstevel@tonic-gate x86pte_release_pagetable(ht); 20070Sstevel@tonic-gate return (pte); 20080Sstevel@tonic-gate } 20090Sstevel@tonic-gate 20100Sstevel@tonic-gate /* 20110Sstevel@tonic-gate * Atomic unconditional set of a page table entry, it returns the previous 20123446Smrj * value. For pre-existing mappings if the PFN changes, then we don't care 20133446Smrj * about the old pte's REF / MOD bits. If the PFN remains the same, we leave 20143446Smrj * the MOD/REF bits unchanged. 20153446Smrj * 20163446Smrj * If asked to overwrite a link to a lower page table with a large page 20173446Smrj * mapping, this routine returns the special value of LPAGE_ERROR. This 20183446Smrj * allows the upper HAT layers to retry with a smaller mapping size. 20190Sstevel@tonic-gate */ 20200Sstevel@tonic-gate x86pte_t 20210Sstevel@tonic-gate x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr) 20220Sstevel@tonic-gate { 20230Sstevel@tonic-gate x86pte_t old; 20243446Smrj x86pte_t prev; 20250Sstevel@tonic-gate x86pte_t *ptep; 20263446Smrj level_t l = ht->ht_level; 20273446Smrj x86pte_t pfn_mask = (l != 0) ? PT_PADDR_LGPG : PT_PADDR; 20283446Smrj x86pte_t n; 20293446Smrj uintptr_t addr = htable_e2va(ht, entry); 20303446Smrj hat_t *hat = ht->ht_hat; 20310Sstevel@tonic-gate 20323446Smrj ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */ 20330Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 20343446Smrj if (ptr == NULL) 20353446Smrj ptep = x86pte_access_pagetable(ht, entry); 20363446Smrj else 20370Sstevel@tonic-gate ptep = ptr; 20383446Smrj 20393446Smrj /* 20403446Smrj * Install the new PTE. If remapping the same PFN, then 20413446Smrj * copy existing REF/MOD bits to new mapping. 20423446Smrj */ 20433446Smrj do { 20443446Smrj prev = GET_PTE(ptep); 20453446Smrj n = new; 20463446Smrj if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask)) 20473446Smrj n |= prev & (PT_REF | PT_MOD); 20480Sstevel@tonic-gate 20493446Smrj /* 20503446Smrj * Another thread may have installed this mapping already, 20513446Smrj * flush the local TLB and be done. 
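 *
 * (prev == n means the table already holds exactly the value we would
 * have installed, so no CAS is needed; only this CPU's possibly stale
 * TLB entry has to be flushed before returning.)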
20523446Smrj */ 20533446Smrj if (prev == n) { 20543446Smrj old = new; 20555084Sjohnlev #ifdef __xpv 20565084Sjohnlev if (!IN_XPV_PANIC()) 20575084Sjohnlev xen_flush_va((caddr_t)addr); 20585084Sjohnlev else 20595084Sjohnlev #endif 20605084Sjohnlev mmu_tlbflush_entry((caddr_t)addr); 20613446Smrj goto done; 20620Sstevel@tonic-gate } 20633446Smrj 20643446Smrj /* 20653446Smrj * Detect if we have a collision of installing a large 20663446Smrj * page mapping where there already is a lower page table. 20673446Smrj */ 20683543Sjosephb if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE)) { 20693543Sjosephb old = LPAGE_ERROR; 20703543Sjosephb goto done; 20713543Sjosephb } 20723446Smrj 20735084Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 20743446Smrj old = CAS_PTE(ptep, prev, n); 20755084Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 20763446Smrj } while (old != prev); 20773446Smrj 20783446Smrj /* 20793446Smrj * Do a TLB demap if needed, ie. the old pte was valid. 20803446Smrj * 20813446Smrj * Note that a stale TLB writeback to the PTE here either can't happen 20823446Smrj * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST 20833446Smrj * mappings, but they were created with REF and MOD already set, so 20843446Smrj * no stale writeback will happen. 20853446Smrj * 20863446Smrj * Segmap is the only place where remaps happen on the same pfn and for 20873446Smrj * that we want to preserve the stale REF/MOD bits. 20883446Smrj */ 20893446Smrj if (old & PT_REF) 20903446Smrj hat_tlb_inval(hat, addr); 20913446Smrj 20923446Smrj done: 20930Sstevel@tonic-gate if (ptr == NULL) 20940Sstevel@tonic-gate x86pte_release_pagetable(ht); 20950Sstevel@tonic-gate return (old); 20960Sstevel@tonic-gate } 20970Sstevel@tonic-gate 20980Sstevel@tonic-gate /* 20993446Smrj * Atomic compare and swap of a page table entry. No TLB invalidates are done. 21003446Smrj * This is used for links between pagetables of different levels. 21013446Smrj * Note we always create these links with dirty/access set, so they should 21023446Smrj * never change. 21030Sstevel@tonic-gate */ 21043446Smrj x86pte_t 21050Sstevel@tonic-gate x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new) 21060Sstevel@tonic-gate { 21070Sstevel@tonic-gate x86pte_t pte; 21080Sstevel@tonic-gate x86pte_t *ptep; 21095084Sjohnlev #ifdef __xpv 21105084Sjohnlev /* 21115084Sjohnlev * We can't use writable pagetables for upper level tables, so fake it. 21125084Sjohnlev */ 21135084Sjohnlev mmu_update_t t[2]; 21145084Sjohnlev int cnt = 1; 21155084Sjohnlev int count; 21165084Sjohnlev maddr_t ma; 21170Sstevel@tonic-gate 21185084Sjohnlev if (!IN_XPV_PANIC()) { 21195084Sjohnlev ASSERT(!(ht->ht_flags & HTABLE_VLP)); /* no VLP yet */ 21205084Sjohnlev ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry)); 21215084Sjohnlev t[0].ptr = ma | MMU_NORMAL_PT_UPDATE; 21225084Sjohnlev t[0].val = new; 21235084Sjohnlev 21245084Sjohnlev #if defined(__amd64) 21255084Sjohnlev /* 21265084Sjohnlev * On the 64-bit hypervisor we need to maintain the user mode 21275084Sjohnlev * top page table too. 
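 *
 * Both entries then go out in the single HYPERVISOR_mmu_update() call
 * below (t[0] for the kernel top table, t[1] for hat_user_ptable),
 * saving a second hypercall.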
21285084Sjohnlev */ 21295084Sjohnlev if (ht->ht_level == mmu.max_level && ht->ht_hat != kas.a_hat) { 21305084Sjohnlev ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa( 21315084Sjohnlev ht->ht_hat->hat_user_ptable), entry)); 21325084Sjohnlev t[1].ptr = ma | MMU_NORMAL_PT_UPDATE; 21335084Sjohnlev t[1].val = new; 21345084Sjohnlev ++cnt; 21355084Sjohnlev } 21365084Sjohnlev #endif /* __amd64 */ 21375084Sjohnlev 21385084Sjohnlev if (HYPERVISOR_mmu_update(t, cnt, &count, DOMID_SELF)) 21395084Sjohnlev panic("HYPERVISOR_mmu_update() failed"); 21405084Sjohnlev ASSERT(count == cnt); 21415084Sjohnlev return (old); 21425084Sjohnlev } 21435084Sjohnlev #endif 21443446Smrj ptep = x86pte_access_pagetable(ht, entry); 21455084Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 21463446Smrj pte = CAS_PTE(ptep, old, new); 21475084Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 21480Sstevel@tonic-gate x86pte_release_pagetable(ht); 21490Sstevel@tonic-gate return (pte); 21500Sstevel@tonic-gate } 21510Sstevel@tonic-gate 21520Sstevel@tonic-gate /* 21533446Smrj * Invalidate a page table entry as long as it currently maps something that 21543446Smrj * matches the value determined by expect. 21553446Smrj * 21563446Smrj * Also invalidates any TLB entries and returns the previous value of the PTE. 21570Sstevel@tonic-gate */ 21583446Smrj x86pte_t 21593446Smrj x86pte_inval( 21603446Smrj htable_t *ht, 21613446Smrj uint_t entry, 21623446Smrj x86pte_t expect, 21633446Smrj x86pte_t *pte_ptr) 21640Sstevel@tonic-gate { 21653446Smrj x86pte_t *ptep; 21664191Sjosephb x86pte_t oldpte; 21674191Sjosephb x86pte_t found; 21680Sstevel@tonic-gate 21693446Smrj ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 21705349Skchow ASSERT(ht->ht_level <= mmu.max_page_level); 21713543Sjosephb 21723446Smrj if (pte_ptr != NULL) 21733446Smrj ptep = pte_ptr; 21743446Smrj else 21753446Smrj ptep = x86pte_access_pagetable(ht, entry); 21760Sstevel@tonic-gate 21775084Sjohnlev #if defined(__xpv) 21785084Sjohnlev /* 21795084Sjohnlev * If exit()ing just use HYPERVISOR_mmu_update(), as we can't be racing 21805084Sjohnlev * with anything else. 21815084Sjohnlev */ 21825084Sjohnlev if ((ht->ht_hat->hat_flags & HAT_FREEING) && !IN_XPV_PANIC()) { 21835084Sjohnlev int count; 21845084Sjohnlev mmu_update_t t[1]; 21855084Sjohnlev maddr_t ma; 21865084Sjohnlev 21875084Sjohnlev oldpte = GET_PTE(ptep); 21885084Sjohnlev if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR)) 21895084Sjohnlev goto done; 21905084Sjohnlev ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry)); 21915084Sjohnlev t[0].ptr = ma | MMU_NORMAL_PT_UPDATE; 21925084Sjohnlev t[0].val = 0; 21935084Sjohnlev if (HYPERVISOR_mmu_update(t, 1, &count, DOMID_SELF)) 21945084Sjohnlev panic("HYPERVISOR_mmu_update() failed"); 21955084Sjohnlev ASSERT(count == 1); 21965084Sjohnlev goto done; 21975084Sjohnlev } 21985084Sjohnlev #endif /* __xpv */ 21995084Sjohnlev 22000Sstevel@tonic-gate /* 22013543Sjosephb * Note that the loop is needed to handle changes due to h/w updating 22023543Sjosephb * of PT_MOD/PT_REF. 
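 *
 * For example: after oldpte is read, the CPU may set PT_MOD via a still
 * cached TLB entry before our CAS_PTE() executes. The CAS then fails
 * (found != oldpte) and we retry, so the freshly set MOD bit is captured
 * in the value we finally clear and return.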
22030Sstevel@tonic-gate */
22043446Smrj do {
22054191Sjosephb oldpte = GET_PTE(ptep);
22064191Sjosephb if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
22074191Sjosephb goto done;
22085084Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES();
22094191Sjosephb found = CAS_PTE(ptep, oldpte, 0);
22105084Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES();
22114191Sjosephb } while (found != oldpte);
22124191Sjosephb if (oldpte & (PT_REF | PT_MOD))
22134191Sjosephb hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));
22140Sstevel@tonic-gate 
22154191Sjosephb done:
22163446Smrj if (pte_ptr == NULL)
22173446Smrj x86pte_release_pagetable(ht);
22184191Sjosephb return (oldpte);
22190Sstevel@tonic-gate }
22200Sstevel@tonic-gate 
22210Sstevel@tonic-gate /*
22223446Smrj * Change a page table entry if it currently matches the value in expect.
22230Sstevel@tonic-gate */
22240Sstevel@tonic-gate x86pte_t
22253446Smrj x86pte_update(
22263446Smrj htable_t *ht,
22273446Smrj uint_t entry,
22283446Smrj x86pte_t expect,
22293446Smrj x86pte_t new)
22300Sstevel@tonic-gate {
22310Sstevel@tonic-gate x86pte_t *ptep;
22323446Smrj x86pte_t found;
22330Sstevel@tonic-gate 
22343446Smrj ASSERT(new != 0);
22353446Smrj ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
22365349Skchow ASSERT(ht->ht_level <= mmu.max_page_level);
22370Sstevel@tonic-gate 
22383446Smrj ptep = x86pte_access_pagetable(ht, entry);
22395084Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES();
22403446Smrj found = CAS_PTE(ptep, expect, new);
22415084Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES();
22423446Smrj if (found == expect) {
22433446Smrj hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));
22440Sstevel@tonic-gate 
22453446Smrj /*
22463446Smrj * When removing write permission *and* clearing the
22473446Smrj * MOD bit, check if a write happened via a stale
22483446Smrj * TLB entry before the TLB shootdown finished.
22493446Smrj *
22503446Smrj * If it did happen, simply re-enable write permission and
22513446Smrj * act like the original CAS failed.
22523446Smrj */
22533446Smrj if ((expect & (PT_WRITABLE | PT_MOD)) == PT_WRITABLE &&
22543446Smrj (new & (PT_WRITABLE | PT_MOD)) == 0 &&
22553446Smrj (GET_PTE(ptep) & PT_MOD) != 0) {
22563446Smrj do {
22573446Smrj found = GET_PTE(ptep);
22585084Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES();
22593446Smrj found =
22603446Smrj CAS_PTE(ptep, found, found | PT_WRITABLE);
22615084Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES();
22623446Smrj } while ((found & PT_WRITABLE) == 0);
22633446Smrj }
22643446Smrj }
22650Sstevel@tonic-gate x86pte_release_pagetable(ht);
22663446Smrj return (found);
22670Sstevel@tonic-gate }
22680Sstevel@tonic-gate 
22695084Sjohnlev #ifndef __xpv
22700Sstevel@tonic-gate /*
22710Sstevel@tonic-gate * Copy page tables - this is just a little more complicated than the
22720Sstevel@tonic-gate * previous routines. Note that it's also not atomic! It is also never
22730Sstevel@tonic-gate * used for VLP pagetables.
22740Sstevel@tonic-gate */ 22750Sstevel@tonic-gate void 22760Sstevel@tonic-gate x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count) 22770Sstevel@tonic-gate { 22780Sstevel@tonic-gate caddr_t src_va; 22790Sstevel@tonic-gate caddr_t dst_va; 22800Sstevel@tonic-gate size_t size; 22813446Smrj x86pte_t *pteptr; 22823446Smrj x86pte_t pte; 22830Sstevel@tonic-gate 22840Sstevel@tonic-gate ASSERT(khat_running); 22850Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_VLP)); 22860Sstevel@tonic-gate ASSERT(!(src->ht_flags & HTABLE_VLP)); 22870Sstevel@tonic-gate ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN)); 22880Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN)); 22890Sstevel@tonic-gate 22900Sstevel@tonic-gate /* 22913446Smrj * Acquire access to the CPU pagetable windows for the dest and source. 22920Sstevel@tonic-gate */ 22933446Smrj dst_va = (caddr_t)x86pte_access_pagetable(dest, entry); 22943446Smrj if (kpm_vbase) { 22953446Smrj src_va = (caddr_t) 22963446Smrj PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry); 22970Sstevel@tonic-gate } else { 22983446Smrj uint_t x = PWIN_SRC(CPU->cpu_id); 22990Sstevel@tonic-gate 23000Sstevel@tonic-gate /* 23010Sstevel@tonic-gate * Finish defining the src pagetable mapping 23020Sstevel@tonic-gate */ 23033446Smrj src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry); 23043446Smrj pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx; 23053446Smrj pteptr = (x86pte_t *)PWIN_PTE_VA(x); 23063446Smrj if (mmu.pae_hat) 23073446Smrj *pteptr = pte; 23083446Smrj else 23093446Smrj *(x86pte32_t *)pteptr = pte; 23103446Smrj mmu_tlbflush_entry((caddr_t)(PWIN_VA(x))); 23110Sstevel@tonic-gate } 23120Sstevel@tonic-gate 23130Sstevel@tonic-gate /* 23140Sstevel@tonic-gate * now do the copy 23150Sstevel@tonic-gate */ 23160Sstevel@tonic-gate size = count << mmu.pte_size_shift; 23170Sstevel@tonic-gate bcopy(src_va, dst_va, size); 23180Sstevel@tonic-gate 23190Sstevel@tonic-gate x86pte_release_pagetable(dest); 23200Sstevel@tonic-gate } 23210Sstevel@tonic-gate 23225084Sjohnlev #else /* __xpv */ 23235084Sjohnlev 23245084Sjohnlev /* 23255084Sjohnlev * The hypervisor only supports writable pagetables at level 0, so we have 23265084Sjohnlev * to install these 1 by 1 the slow way. 
23275084Sjohnlev */ 23285084Sjohnlev void 23295084Sjohnlev x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count) 23305084Sjohnlev { 23315084Sjohnlev caddr_t src_va; 23325084Sjohnlev x86pte_t pte; 23335084Sjohnlev 23345084Sjohnlev ASSERT(!IN_XPV_PANIC()); 23355084Sjohnlev src_va = (caddr_t)x86pte_access_pagetable(src, entry); 23365084Sjohnlev while (count) { 23375084Sjohnlev if (mmu.pae_hat) 23385084Sjohnlev pte = *(x86pte_t *)src_va; 23395084Sjohnlev else 23405084Sjohnlev pte = *(x86pte32_t *)src_va; 23415084Sjohnlev if (pte != 0) { 23425084Sjohnlev set_pteval(pfn_to_pa(dest->ht_pfn), entry, 23435084Sjohnlev dest->ht_level, pte); 23445084Sjohnlev #ifdef __amd64 23455084Sjohnlev if (dest->ht_level == mmu.max_level && 23465084Sjohnlev htable_e2va(dest, entry) < HYPERVISOR_VIRT_END) 23475084Sjohnlev set_pteval( 23485084Sjohnlev pfn_to_pa(dest->ht_hat->hat_user_ptable), 23495084Sjohnlev entry, dest->ht_level, pte); 23505084Sjohnlev #endif 23515084Sjohnlev } 23525084Sjohnlev --count; 23535084Sjohnlev ++entry; 23545084Sjohnlev src_va += mmu.pte_size; 23555084Sjohnlev } 23565084Sjohnlev x86pte_release_pagetable(src); 23575084Sjohnlev } 23585084Sjohnlev #endif /* __xpv */ 23595084Sjohnlev 23600Sstevel@tonic-gate /* 23610Sstevel@tonic-gate * Zero page table entries - Note this doesn't use atomic stores! 23620Sstevel@tonic-gate */ 23633446Smrj static void 23640Sstevel@tonic-gate x86pte_zero(htable_t *dest, uint_t entry, uint_t count) 23650Sstevel@tonic-gate { 23660Sstevel@tonic-gate caddr_t dst_va; 23670Sstevel@tonic-gate size_t size; 23685084Sjohnlev #ifdef __xpv 23695084Sjohnlev int x; 23705084Sjohnlev x86pte_t newpte; 23715084Sjohnlev #endif 23720Sstevel@tonic-gate 23730Sstevel@tonic-gate /* 23740Sstevel@tonic-gate * Map in the page table to be zeroed. 23750Sstevel@tonic-gate */ 23760Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN)); 23770Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_VLP)); 23783446Smrj 23795084Sjohnlev /* 23805084Sjohnlev * On the hypervisor we don't use x86pte_access_pagetable() since 23815084Sjohnlev * in this case the page is not pinned yet. 
23825084Sjohnlev */ 23835084Sjohnlev #ifdef __xpv 23845084Sjohnlev if (kpm_vbase == NULL) { 23855084Sjohnlev kpreempt_disable(); 23865084Sjohnlev ASSERT(CPU->cpu_hat_info != NULL); 23875084Sjohnlev mutex_enter(&CPU->cpu_hat_info->hci_mutex); 23885084Sjohnlev x = PWIN_TABLE(CPU->cpu_id); 23895084Sjohnlev newpte = MAKEPTE(dest->ht_pfn, 0) | PT_WRITABLE; 23905084Sjohnlev xen_map(newpte, PWIN_VA(x)); 23915084Sjohnlev dst_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry); 23925084Sjohnlev } else 23935084Sjohnlev #endif 23945084Sjohnlev dst_va = (caddr_t)x86pte_access_pagetable(dest, entry); 23953446Smrj 23960Sstevel@tonic-gate size = count << mmu.pte_size_shift; 23973446Smrj ASSERT(size > BLOCKZEROALIGN); 23983446Smrj #ifdef __i386 23993446Smrj if ((x86_feature & X86_SSE2) == 0) 24000Sstevel@tonic-gate bzero(dst_va, size); 24013446Smrj else 24023446Smrj #endif 24033446Smrj block_zero_no_xmm(dst_va, size); 24043446Smrj 24055084Sjohnlev #ifdef __xpv 24065084Sjohnlev if (kpm_vbase == NULL) { 24075084Sjohnlev xen_map(0, PWIN_VA(x)); 24085084Sjohnlev mutex_exit(&CPU->cpu_hat_info->hci_mutex); 24095084Sjohnlev kpreempt_enable(); 24105084Sjohnlev } else 24115084Sjohnlev #endif 24125084Sjohnlev x86pte_release_pagetable(dest); 24130Sstevel@tonic-gate } 24140Sstevel@tonic-gate 24150Sstevel@tonic-gate /* 24160Sstevel@tonic-gate * Called to ensure that all pagetables are in the system dump 24170Sstevel@tonic-gate */ 24180Sstevel@tonic-gate void 24190Sstevel@tonic-gate hat_dump(void) 24200Sstevel@tonic-gate { 24210Sstevel@tonic-gate hat_t *hat; 24220Sstevel@tonic-gate uint_t h; 24230Sstevel@tonic-gate htable_t *ht; 24240Sstevel@tonic-gate 24250Sstevel@tonic-gate /* 24261747Sjosephb * Dump all page tables 24270Sstevel@tonic-gate */ 24281747Sjosephb for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) { 24290Sstevel@tonic-gate for (h = 0; h < hat->hat_num_hash; ++h) { 24300Sstevel@tonic-gate for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 24311747Sjosephb if ((ht->ht_flags & HTABLE_VLP) == 0) 24320Sstevel@tonic-gate dump_page(ht->ht_pfn); 24330Sstevel@tonic-gate } 24340Sstevel@tonic-gate } 24350Sstevel@tonic-gate } 24360Sstevel@tonic-gate } 2437