/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/disp.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/bootconf.h>
#include <sys/dumphdr.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <vm/hat_i86.h>
#include <sys/cmn_err.h>

kmem_cache_t *htable_cache;
extern cpuset_t khat_cpuset;

/*
 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT,
 * is used in order to facilitate testing of the htable_steal() code.
 * By resetting htable_reserve_amount to a lower value, we can force
 * stealing to occur.  The reserve amount is a guess to get us through boot.
 */
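/*
 * For example (illustrative only -- any small value works), stealing can
 * be provoked on a test system by shrinking the reserve target via
 * /etc/system:
 *
 *	set htable_reserve_amount = 10
 */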
#define	HTABLE_RESERVE_AMOUNT	(200)
uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT;
kmutex_t htable_reserve_mutex;
uint_t htable_reserve_cnt;
htable_t *htable_reserve_pool;

/*
 * Used to hand test htable_steal().
 */
#ifdef DEBUG
ulong_t force_steal = 0;
ulong_t ptable_cnt = 0;
#endif

/*
 * This variable exists so that we can tune it via /etc/system.
 * Any value works, but a power of two <= mmu.ptes_per_table is best.
 */
uint_t htable_steal_passes = 8;

/*
 * mutex stuff for access to htable hash
 */
#define	NUM_HTABLE_MUTEX 128
kmutex_t htable_mutex[NUM_HTABLE_MUTEX];
#define	HTABLE_MUTEX_HASH(h) ((h) & (NUM_HTABLE_MUTEX - 1))

#define	HTABLE_ENTER(h)	mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
#define	HTABLE_EXIT(h)	mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]);

/*
 * forward declarations
 */
static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr);
static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr);
static void htable_free(htable_t *ht);
static x86pte_t *x86pte_access_pagetable(htable_t *ht);
static void x86pte_release_pagetable(htable_t *ht);
static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old,
	x86pte_t new);

/*
 * Address used for kernel page tables. See ptable_alloc() below.
 */
uintptr_t ptable_va = 0;
size_t ptable_sz = 2 * MMU_PAGESIZE;

/*
 * A counter to track if we are stealing or reaping htables. When non-zero
 * htable_free() will directly free htables (either to the reserve or kmem)
 * instead of putting them in a hat's htable cache.
 */
uint32_t htable_dont_cache = 0;

/*
 * Track the number of active pagetables, so we can know how many to reap
 */
static uint32_t active_ptables = 0;
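/*
 * Note that htable_dont_cache and active_ptables above are counters, not
 * boolean flags: they are always adjusted with atomic_add_32(), so
 * concurrent steal/reap operations nest correctly and htable_dont_cache
 * stays non-zero until the last such operation finishes.
 */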
/*
 * Allocate a memory page for a hardware page table.
 *
 * The pages allocated for page tables are currently gotten in a hacked up
 * way. It works for now, but really needs to be fixed up a bit.
 *
 * During boot: The boot loader controls physical memory allocation via
 * boot_alloc(). To avoid conflict with vmem, we just do boot_alloc()s with
 * addresses less than kernelbase. These addresses are ignored when we take
 * over mappings from the boot loader.
 *
 * Post-boot: we currently use page_create_va() on the kvp with fake offsets,
 * segments and virt address. This is pretty bogus, but was copied from the
 * old hat_i86.c code. A better approach would be to have a custom
 * page_get_physical() interface that can specify either mnode random or
 * mnode local and takes a page from whatever color has the MOST available -
 * this would have a minimal impact on page coloring.
 *
 * For now the htable pointer in ht is only used to compute a unique vnode
 * offset for the page.
 */
static void
ptable_alloc(htable_t *ht)
{
	pfn_t pfn;
	page_t *pp;
	u_offset_t offset;
	static struct seg tmpseg;
	static int first_time = 1;

	/*
	 * Allocating the associated hardware page table is very different
	 * before boot has finished.  We get a physical page from boot
	 * w/o eating up any kernel address space.
	 */
	ht->ht_pfn = PFN_INVALID;
	atomic_add_32(&active_ptables, 1);

	if (use_boot_reserve) {
		ASSERT(ptable_va != 0);

		/*
		 * Allocate, then demap the ptable_va, so that we're
		 * sure there exist page table entries for the addresses
		 */
		if (first_time) {
			first_time = 0;
			if ((uintptr_t)BOP_ALLOC(bootops, (caddr_t)ptable_va,
			    ptable_sz, BO_NO_ALIGN) != ptable_va)
				panic("BOP_ALLOC failed");

			hat_boot_demap(ptable_va);
			hat_boot_demap(ptable_va + MMU_PAGESIZE);
		}

		pfn = ((uintptr_t)BOP_EALLOC(bootops, 0, MMU_PAGESIZE,
		    BO_NO_ALIGN, BOPF_X86_ALLOC_PHYS)) >> MMU_PAGESHIFT;
		if (page_resv(1, KM_NOSLEEP) == 0)
			panic("page_resv() failed in ptable alloc");

		pp = page_numtopp_nolock(pfn);
		ASSERT(pp != NULL);
		if (pp->p_szc != 0)
			page_boot_demote(pp);
		pp = page_numtopp(pfn, SE_EXCL);
		ASSERT(pp != NULL);

	} else {
		/*
		 * Post boot get a page for the table.
		 *
		 * The first check is to see if there is memory in
		 * the system. If we drop to throttlefree, then fail
		 * the ptable_alloc() and let the stealing code kick in.
		 * Note that we have to do this test here, since the test in
		 * page_create_throttle() would let the NOSLEEP allocation
		 * go through and deplete the page reserves.
		 *
		 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check.
		 */
		if (!NOMEMWAIT() && freemem <= throttlefree + 1)
			return;

#ifdef DEBUG
		/*
		 * This code makes htable_steal() easier to test. By setting
		 * force_steal we force pagetable allocations to fall
		 * into the stealing code. Roughly 1 in every "force_steal"
		 * page table allocations will fail.
		 */
		if (ht->ht_hat != kas.a_hat && force_steal > 1 &&
		    ++ptable_cnt > force_steal) {
			ptable_cnt = 0;
			return;
		}
#endif /* DEBUG */

		/*
		 * This code is temporary, so don't review too critically.
		 * I'm awaiting a new phys page allocator from Kit -- Joe
		 *
		 * We need to assign an offset for the page to call
		 * page_create_va. To avoid conflicts with other pages,
		 * we get creative with the offset.
		 * for 32 bits, we pick an offset > 4 Gig
		 * for 64 bits, pick an offset somewhere in the VA hole.
		 */
		offset = (uintptr_t)ht - kernelbase;
		offset <<= MMU_PAGESHIFT;
#if defined(__amd64)
		offset += mmu.hole_start;	/* something in VA hole */
#else
		offset += 1ULL << 40;		/* something > 4 Gig */
#endif

		if (page_resv(1, KM_NOSLEEP) == 0)
			return;

#ifdef DEBUG
		pp = page_exists(&kvp, offset);
		if (pp != NULL)
			panic("ptable already exists %p", pp);
#endif
		pp = page_create_va(&kvp, offset, MMU_PAGESIZE,
		    PG_EXCL | PG_NORELOC, &tmpseg,
		    (void *)((uintptr_t)ht << MMU_PAGESHIFT));
		if (pp == NULL)
			return;
		page_io_unlock(pp);
		page_hashout(pp, NULL);
		pfn = pp->p_pagenum;
	}
	page_downgrade(pp);
	ASSERT(PAGE_SHARED(pp));

	if (pfn == PFN_INVALID)
		panic("ptable_alloc(): Invalid PFN!!");
	ht->ht_pfn = pfn;
	HATSTAT_INC(hs_ptable_allocs);
}
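/*
 * Worked example of the offset trick above (illustrative numbers only): on
 * a 64-bit kernel, an htable_t at kernelbase + 0x123 yields
 * offset = (0x123 << 12) + mmu.hole_start.  Since distinct htable_t's are
 * at least sizeof (htable_t) apart, the shifted values never collide, and
 * placing the offsets in the VA hole (or above 4 Gig on 32-bit) keeps them
 * out of the range used by any real kvp page.
 */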
/*
 * Free an htable's associated page table page. See the comments
 * for ptable_alloc().
 */
static void
ptable_free(htable_t *ht)
{
	pfn_t pfn = ht->ht_pfn;
	page_t *pp;

	/*
	 * need to destroy the page used for the pagetable
	 */
	ASSERT(pfn != PFN_INVALID);
	HATSTAT_INC(hs_ptable_frees);
	atomic_add_32(&active_ptables, -1);
	pp = page_numtopp_nolock(pfn);
	if (pp == NULL)
		panic("ptable_free(): no page for pfn!");
	ASSERT(PAGE_SHARED(pp));
	ASSERT(pfn == pp->p_pagenum);

	/*
	 * Get an exclusive lock, might have to wait for a kmem reader.
	 */
	if (!page_tryupgrade(pp)) {
		page_unlock(pp);
		/*
		 * RFE: we could change this to not loop forever
		 * George Cameron had some idea on how to do that.
		 * For now looping works - it's just like sfmmu.
		 */
		while (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_RECLAIM))
			continue;
	}
	page_free(pp, 1);
	page_unresv(1);
	ht->ht_pfn = PFN_INVALID;
}

/*
 * Put one htable on the reserve list.
 */
static void
htable_put_reserve(htable_t *ht)
{
	ht->ht_hat = NULL;		/* no longer tied to a hat */
	ASSERT(ht->ht_pfn == PFN_INVALID);
	HATSTAT_INC(hs_htable_rputs);
	mutex_enter(&htable_reserve_mutex);
	ht->ht_next = htable_reserve_pool;
	htable_reserve_pool = ht;
	++htable_reserve_cnt;
	mutex_exit(&htable_reserve_mutex);
}

/*
 * Take one htable from the reserve.
 */
static htable_t *
htable_get_reserve(void)
{
	htable_t *ht = NULL;

	mutex_enter(&htable_reserve_mutex);
	if (htable_reserve_cnt != 0) {
		ht = htable_reserve_pool;
		ASSERT(ht != NULL);
		ASSERT(ht->ht_pfn == PFN_INVALID);
		htable_reserve_pool = ht->ht_next;
		--htable_reserve_cnt;
		HATSTAT_INC(hs_htable_rgets);
	}
	mutex_exit(&htable_reserve_mutex);
	return (ht);
}
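/*
 * The reserve pool above is deliberately simple: a LIFO singly linked
 * list threaded through ht_next, guarded by htable_reserve_mutex, with
 * its length mirrored in htable_reserve_cnt.  Entries never have a
 * pagetable page attached (ht_pfn is always PFN_INVALID while pooled).
 */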
/*
 * Allocate initial htables with page tables and put them on the kernel hat's
 * cache list.
 */
void
htable_initial_reserve(uint_t count)
{
	htable_t *ht;
	hat_t *hat = kas.a_hat;

	count += HTABLE_RESERVE_AMOUNT;
	while (count > 0) {
		ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP);
		ASSERT(ht != NULL);

		ASSERT(use_boot_reserve);
		ht->ht_hat = kas.a_hat;	/* so htable_free() works */
		ht->ht_flags = 0;	/* so x86pte_zero works */
		ptable_alloc(ht);
		if (ht->ht_pfn == PFN_INVALID)
			panic("ptable_alloc() failed");

		x86pte_zero(ht, 0, mmu.ptes_per_table);

		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		--count;
	}
}

/*
 * Readjust the reserves after a thread finishes using them.
 *
 * The first time this is called post boot, we'll also clear out the
 * extra boot htables that were put in the kernel hat's cache list.
 */
void
htable_adjust_reserve()
{
	static int first_time = 1;
	htable_t *ht;

	ASSERT(curthread != hat_reserves_thread);

	/*
	 * The first time this is called after we can steal, we free up the
	 * kernel's cache htable list. It has lots of extra htable/page
	 * tables that were allocated for boot up.
	 */
	if (first_time) {
		first_time = 0;
		while ((ht = kas.a_hat->hat_ht_cached) != NULL) {
			kas.a_hat->hat_ht_cached = ht->ht_next;
			ASSERT(ht->ht_hat == kas.a_hat);
			ptable_free(ht);
			htable_put_reserve(ht);
		}
		return;
	}

	/*
	 * Free any excess htables in the reserve list
	 */
	while (htable_reserve_cnt > htable_reserve_amount) {
		ht = htable_get_reserve();
		if (ht == NULL)
			return;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		kmem_cache_free(htable_cache, ht);
	}
}

/*
 * This routine steals htables from user processes for htable_alloc() or
 * for htable_reap().
 */
static htable_t *
htable_steal(uint_t cnt)
{
	hat_t		*hat = kas.a_hat;	/* list starts with khat */
	htable_t	*list = NULL;
	htable_t	*ht;
	htable_t	*higher;
	uint_t		h;
	uint_t		h_start;
	static uint_t	h_seed = 0;
	uint_t		e;
	uintptr_t	va;
	x86pte_t	pte;
	uint_t		stolen = 0;
	uint_t		pass;
	uint_t		threshold;

	/*
	 * Limit htable_steal_passes to something reasonable
	 */
	if (htable_steal_passes == 0)
		htable_steal_passes = 1;
	if (htable_steal_passes > mmu.ptes_per_table)
		htable_steal_passes = mmu.ptes_per_table;

	/*
	 * Loop through all user hats. The 1st pass takes cached htables that
	 * aren't in use. The later passes steal by removing mappings, too.
	 */
	atomic_add_32(&htable_dont_cache, 1);
	for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) {
		threshold = pass * mmu.ptes_per_table / htable_steal_passes;
		hat = kas.a_hat;
		for (;;) {

			/*
			 * Clear the victim flag and move to next hat
			 */
			mutex_enter(&hat_list_lock);
			if (hat != kas.a_hat) {
				hat->hat_flags &= ~HAT_VICTIM;
				cv_broadcast(&hat_list_cv);
			}
			hat = hat->hat_next;

			/*
			 * Skip any hat that is already being stolen from.
			 *
			 * We skip SHARED hats, as these are dummy
			 * hats that host ISM shared page tables.
			 *
			 * We also skip if HAT_FREEING because hat_pte_unmap()
			 * won't zero out the PTE's. That would lead to hitting
			 * stale PTEs either here or under hat_unload() when we
			 * steal and unload the same page table in competing
			 * threads.
			 */
			while (hat != NULL &&
			    (hat->hat_flags &
			    (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0)
				hat = hat->hat_next;

			if (hat == NULL) {
				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Are we finished?
			 */
			if (stolen == cnt) {
				/*
				 * Try to spread the pain of stealing,
				 * move victim HAT to the end of the HAT list.
				 */
				if (pass >= 1 && cnt == 1 &&
				    kas.a_hat->hat_prev != hat) {

					/* unlink victim hat */
					if (hat->hat_prev)
						hat->hat_prev->hat_next =
						    hat->hat_next;
					else
						kas.a_hat->hat_next =
						    hat->hat_next;
					if (hat->hat_next)
						hat->hat_next->hat_prev =
						    hat->hat_prev;
					else
						kas.a_hat->hat_prev =
						    hat->hat_prev;


					/* relink at end of hat list */
					hat->hat_next = NULL;
					hat->hat_prev = kas.a_hat->hat_prev;
					if (hat->hat_prev)
						hat->hat_prev->hat_next = hat;
					else
						kas.a_hat->hat_next = hat;
					kas.a_hat->hat_prev = hat;

				}

				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Mark the HAT as a stealing victim.
			 */
			hat->hat_flags |= HAT_VICTIM;
			mutex_exit(&hat_list_lock);

			/*
			 * Take any htables from the hat's cached "free" list.
			 */
			hat_enter(hat);
			while ((ht = hat->hat_ht_cached) != NULL &&
			    stolen < cnt) {
				hat->hat_ht_cached = ht->ht_next;
				ht->ht_next = list;
				list = ht;
				++stolen;
			}
			hat_exit(hat);

			/*
			 * Don't steal on first pass.
			 */
			if (pass == 0 || stolen == cnt)
				continue;

			/*
			 * Search the active htables for one to steal.
			 * Start at a different hash bucket every time to
			 * help spread the pain of stealing.
			 */
			h = h_start = h_seed++ % hat->hat_num_hash;
			do {
				higher = NULL;
				HTABLE_ENTER(h);
				for (ht = hat->hat_ht_hash[h]; ht;
				    ht = ht->ht_next) {

					/*
					 * Can we rule out reaping?
					 */
					if (ht->ht_busy != 0 ||
					    (ht->ht_flags & HTABLE_SHARED_PFN) ||
					    ht->ht_level > 0 ||
					    ht->ht_valid_cnt > threshold ||
					    ht->ht_lock_cnt != 0)
						continue;

					/*
					 * Increment busy so the htable can't
					 * disappear. We drop the htable mutex
					 * to avoid deadlocks with
					 * hat_pageunload() and the hment mutex
					 * while we call hat_pte_unmap()
					 */
					++ht->ht_busy;
					HTABLE_EXIT(h);

					/*
					 * Try stealing.
					 * - unload and invalidate all PTEs
					 */
					for (e = 0, va = ht->ht_vaddr;
					    e < ht->ht_num_ptes &&
					    ht->ht_valid_cnt > 0 &&
					    ht->ht_busy == 1 &&
					    ht->ht_lock_cnt == 0;
					    ++e, va += MMU_PAGESIZE) {
						pte = x86pte_get(ht, e);
						if (!PTE_ISVALID(pte))
							continue;
						hat_pte_unmap(ht, e,
						    HAT_UNLOAD, pte, NULL);
					}

					/*
					 * Reacquire htable lock. If we didn't
					 * remove all mappings in the table,
					 * or another thread added a new mapping
					 * behind us, give up on this table.
					 */
					HTABLE_ENTER(h);
					if (ht->ht_busy != 1 ||
					    ht->ht_valid_cnt != 0 ||
					    ht->ht_lock_cnt != 0) {
						--ht->ht_busy;
						continue;
					}

					/*
					 * Steal it and unlink the page table.
					 */
					higher = ht->ht_parent;
					unlink_ptp(higher, ht, ht->ht_vaddr);

					/*
					 * remove from the hash list
					 */
					if (ht->ht_next)
						ht->ht_next->ht_prev =
						    ht->ht_prev;

					if (ht->ht_prev) {
						ht->ht_prev->ht_next =
						    ht->ht_next;
					} else {
						ASSERT(hat->hat_ht_hash[h] ==
						    ht);
						hat->hat_ht_hash[h] =
						    ht->ht_next;
					}

					/*
					 * Break to outer loop to release the
					 * higher (ht_parent) pagetable. This
					 * spreads out the pain caused by
					 * pagefaults.
					 */
					ht->ht_next = list;
					list = ht;
					++stolen;
					break;
				}
				HTABLE_EXIT(h);
				if (higher != NULL)
					htable_release(higher);
				if (++h == hat->hat_num_hash)
					h = 0;
			} while (stolen < cnt && h != h_start);
		}
	}
	atomic_add_32(&htable_dont_cache, -1);
	return (list);
}
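/*
 * A note on the pass/threshold scheme above: pass p only victimizes page
 * tables with at most p * mmu.ptes_per_table / htable_steal_passes valid
 * entries.  For example, with the default htable_steal_passes of 8 and
 * 512 PTEs per table (PAE), pass 1 considers only tables with <= 64 valid
 * entries, pass 2 <= 128, and so on, so nearly-empty tables are sacrificed
 * before heavily used ones.
 */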
/*
 * This is invoked from kmem when the system is low on memory.  We try
 * to free hments, htables, and ptables to improve the memory situation.
 */
/*ARGSUSED*/
static void
htable_reap(void *handle)
{
	uint_t		reap_cnt;
	htable_t	*list;
	htable_t	*ht;

	HATSTAT_INC(hs_reap_attempts);
	if (!can_steal_post_boot)
		return;

	/*
	 * Try to reap 5% of the page tables bounded by a maximum of
	 * 5% of physmem and a minimum of 10.
	 */
	reap_cnt = MAX(MIN(active_ptables / 20, physmem / 20), 10);

	/*
	 * Let htable_steal() do the work, we just call htable_free()
	 */
	list = htable_steal(reap_cnt);
	while ((ht = list) != NULL) {
		list = ht->ht_next;
		HATSTAT_INC(hs_reaped);
		htable_free(ht);
	}

	/*
	 * Free up excess reserves
	 */
	htable_adjust_reserve();
	hment_adjust_reserve();
}

/*
 * allocate an htable, stealing one or using the reserve if necessary
 */
static htable_t *
htable_alloc(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	htable_t	*ht = NULL;
	uint_t		is_vlp;
	uint_t		is_bare = 0;
	uint_t		need_to_zero = 1;
	int		kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_alloc(): level %d out of range\n", level);

	is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
	if (is_vlp || shared != NULL)
		is_bare = 1;

	/*
	 * First reuse a cached htable from the hat_ht_cached field, this
	 * avoids unnecessary trips through kmem/page allocators. This is also
	 * what happens during use_boot_reserve.
	 */
	if (hat->hat_ht_cached != NULL && !is_bare) {
		hat_enter(hat);
		ht = hat->hat_ht_cached;
		if (ht != NULL) {
			hat->hat_ht_cached = ht->ht_next;
			need_to_zero = 0;
			/* XX64 ASSERT() they're all zero somehow */
			ASSERT(ht->ht_pfn != PFN_INVALID);
		}
		hat_exit(hat);
	}

	if (ht == NULL) {
		ASSERT(!use_boot_reserve);
		/*
		 * When allocating for hat_memload_arena, we use the reserve.
		 * Also use reserves if we are in a panic().
		 */
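		/*
		 * (hat_reserves_thread identifies the thread replenishing
		 * hat_memload_arena; presumably it must not recurse into
		 * kmem for an htable, so it is satisfied from the
		 * preallocated reserve pool instead.)
		 */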
		if (curthread == hat_reserves_thread || panicstr != NULL) {
			ASSERT(panicstr != NULL || !is_bare);
			ASSERT(panicstr != NULL ||
			    curthread == hat_reserves_thread);
			ht = htable_get_reserve();
		} else {
			/*
			 * Donate successful htable allocations to the reserve.
			 */
			for (;;) {
				ASSERT(curthread != hat_reserves_thread);
				ht = kmem_cache_alloc(htable_cache, kmflags);
				if (ht == NULL)
					break;
				ht->ht_pfn = PFN_INVALID;
				if (curthread == hat_reserves_thread ||
				    panicstr != NULL ||
				    htable_reserve_cnt >= htable_reserve_amount)
					break;
				htable_put_reserve(ht);
			}
		}

		/*
		 * allocate a page for the hardware page table if needed
		 */
		if (ht != NULL && !is_bare) {
			ht->ht_hat = hat;
			ptable_alloc(ht);
			if (ht->ht_pfn == PFN_INVALID) {
				kmem_cache_free(htable_cache, ht);
				ht = NULL;
			}
		}
	}

	/*
	 * If allocations failed, kick off a kmem_reap() and resort to
	 * htable_steal(). We may spin here if the system is very low on
	 * memory. If the kernel itself has consumed all memory and kmem_reap()
	 * can't free up anything, then we'll really get stuck here.
	 * That should only happen in a system where the administrator has
	 * misconfigured VM parameters via /etc/system.
	 */
	while (ht == NULL && can_steal_post_boot) {
		kmem_reap();
		ht = htable_steal(1);
		HATSTAT_INC(hs_steals);

		/*
		 * If we stole for a bare htable, release the pagetable page.
		 */
		if (ht != NULL && is_bare)
			ptable_free(ht);
	}

	/*
	 * All attempts to allocate or steal failed. This should only happen
	 * if we run out of memory during boot, due perhaps to a huge
	 * boot_archive. At this point there's no way to continue.
	 */
	if (ht == NULL)
		panic("htable_alloc(): couldn't steal\n");

	/*
	 * Shared page tables have all entries locked and entries may not
	 * be added or deleted.
	 */
	ht->ht_flags = 0;
	if (shared != NULL) {
		ASSERT(level == 0);
		ASSERT(shared->ht_valid_cnt > 0);
		ht->ht_flags |= HTABLE_SHARED_PFN;
		ht->ht_pfn = shared->ht_pfn;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;		/* updated in hat_share() */
		ht->ht_shares = shared;
		need_to_zero = 0;
	} else {
		ht->ht_shares = NULL;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;
	}

	/*
	 * setup flags, etc. for VLP htables
	 */
	if (is_vlp) {
		ht->ht_flags |= HTABLE_VLP;
		ht->ht_num_ptes = VLP_NUM_PTES;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		need_to_zero = 0;
	} else if (level == mmu.max_level) {
		ht->ht_num_ptes = mmu.top_level_count;
	} else {
		ht->ht_num_ptes = mmu.ptes_per_table;
	}

	/*
	 * fill in the htable
	 */
	ht->ht_hat = hat;
	ht->ht_parent = NULL;
	ht->ht_vaddr = vaddr;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;

	/*
	 * Zero out any freshly allocated page table
	 */
	if (need_to_zero)
		x86pte_zero(ht, 0, mmu.ptes_per_table);
	return (ht);
}

/*
 * Free up an htable, either to a hat's cached list, the reserves or
 * back to kmem.
 */
static void
htable_free(htable_t *ht)
{
	hat_t	*hat = ht->ht_hat;

	/*
	 * If the process isn't exiting, cache the free htable in the hat
	 * structure. We always do this for the boot reserve. We don't
	 * do this if the hat is exiting or we are stealing/reaping htables.
	 */
	if (hat != NULL &&
	    !(ht->ht_flags & HTABLE_SHARED_PFN) &&
	    (use_boot_reserve ||
	    (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
		ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
		ASSERT(ht->ht_pfn != PFN_INVALID);
		hat_enter(hat);
		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		hat_exit(hat);
		return;
	}

	/*
	 * If we have a hardware page table, free it.
	 * We don't free page tables that are accessed by sharing with
	 * someone else.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
		ht->ht_pfn = PFN_INVALID;
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht);
	}

	/*
	 * If we are the thread using the reserves, put free htables
	 * into reserves.
	 */
	if (curthread == hat_reserves_thread ||
	    htable_reserve_cnt < htable_reserve_amount)
		htable_put_reserve(ht);
	else
		kmem_cache_free(htable_cache, ht);
}


/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all left over
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t	*ht;
	int		h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_add_32(&htable_dont_cache, 1);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_add_32(&htable_dont_cache, -1);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. The hash bucket lock (HTABLE_ENTER()) is always held
 * when doing this.
 */
static void
unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	expect = MAKEPTP(old->ht_pfn, old->ht_level);
	x86pte_t	found;

	ASSERT(higher->ht_busy > 0);
	ASSERT(higher->ht_valid_cnt > 0);
	ASSERT(old->ht_valid_cnt == 0);
	found = x86pte_cas(higher, entry, expect, 0);
	if (found != expect)
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);
	HTABLE_DEC(higher->ht_valid_cnt);
}

/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. The hash bucket lock (HTABLE_ENTER()) is always held
 * when doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t	found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);
}
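/*
 * Both link_ptp() and unlink_ptp() update the parent entry with
 * x86pte_cas(), so an unexpected concurrent change panics rather than
 * being silently overwritten.  The & ~PT_REF in link_ptp() tolerates a
 * processor having set the Reference bit in the old PTP entry, which
 * appears to be the one legitimate way the entry can differ from zero.
 */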
/*
 * Release of an htable.
 *
 * During process exit, some empty page tables are not unlinked - hat_free_end()
 * cleans them up. Upper level pagetables (mmu.max_page_level and higher) are
 * only released during hat_free_end() or by htable_steal(). We always
 * release SHARED page tables.
 */
void
htable_release(htable_t *ht)
{
	uint_t		hashval;
	htable_t	*shared;
	htable_t	*higher;
	hat_t		*hat;
	uintptr_t	va;
	level_t		level;

	while (ht != NULL) {
		shared = NULL;
		for (;;) {
			hat = ht->ht_hat;
			va = ht->ht_vaddr;
			level = ht->ht_level;
			hashval = HTABLE_HASH(hat, va, level);

			/*
			 * The common case is that this isn't the last use of
			 * an htable so we don't want to free the htable.
			 */
			HTABLE_ENTER(hashval);
			ASSERT(ht->ht_lock_cnt == 0 || ht->ht_valid_cnt > 0);
			ASSERT(ht->ht_valid_cnt >= 0);
			ASSERT(ht->ht_busy > 0);
			if (ht->ht_valid_cnt > 0)
				break;
			if (ht->ht_busy > 1)
				break;

			/*
			 * we always release empty shared htables
			 */
			if (!(ht->ht_flags & HTABLE_SHARED_PFN)) {

				/*
				 * don't release if in address space tear down
				 */
				if (hat->hat_flags & HAT_FREEING)
					break;

				/*
				 * At and above max_page_level, free if it's for
				 * a boot-time kernel mapping below kernelbase.
				 */
				if (level >= mmu.max_page_level &&
				    (hat != kas.a_hat || va >= kernelbase))
					break;
			}

			/*
			 * remember if we destroy an htable that shares its PFN
			 * from elsewhere
			 */
			if (ht->ht_flags & HTABLE_SHARED_PFN) {
				ASSERT(ht->ht_level == 0);
				ASSERT(shared == NULL);
				shared = ht->ht_shares;
				HATSTAT_INC(hs_htable_unshared);
			}

			/*
			 * Handle release of a table and freeing the htable_t.
			 * Unlink it from the table higher (i.e. ht_parent).
			 */
			ASSERT(ht->ht_lock_cnt == 0);
			higher = ht->ht_parent;
			ASSERT(higher != NULL);

			/*
			 * Unlink the pagetable.
			 */
			unlink_ptp(higher, ht, va);

			/*
			 * When any top level VLP page table entry changes, we
			 * must issue a reload of cr3 on all processors.
			 */
			if ((hat->hat_flags & HAT_VLP) &&
			    level == VLP_LEVEL - 1)
				hat_demap(hat, DEMAP_ALL_ADDR);

			/*
			 * remove this htable from its hash list
			 */
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[hashval] == ht);
				hat->hat_ht_hash[hashval] = ht->ht_next;
			}
			HTABLE_EXIT(hashval);
			htable_free(ht);
			ht = higher;
		}

		ASSERT(ht->ht_busy >= 1);
		--ht->ht_busy;
		HTABLE_EXIT(hashval);

		/*
		 * If we released a shared htable, do a release on the htable
		 * from which it shared
		 */
		ht = shared;
	}
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found acquires a hold that eventually needs to be htable_release()d
 */
htable_t *
htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level)
{
	uintptr_t	base;
	uint_t		hashval;
	htable_t	*ht = NULL;

	ASSERT(level >= 0);
	ASSERT(level <= TOP_LEVEL(hat));

	if (level == TOP_LEVEL(hat))
		base = 0;
	else
		base = vaddr & LEVEL_MASK(level + 1);

	hashval = HTABLE_HASH(hat, base, level);
	HTABLE_ENTER(hashval);
	for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) {
		if (ht->ht_hat == hat &&
		    ht->ht_vaddr == base &&
		    ht->ht_level == level)
			break;
	}
	if (ht)
		++ht->ht_busy;

	HTABLE_EXIT(hashval);
	return (ht);
}
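/*
 * A minimal usage sketch (illustrative, not taken from a real caller):
 *
 *	ht = htable_lookup(hat, vaddr, 0);
 *	if (ht != NULL) {
 *		pte = x86pte_get(ht, htable_va2entry(vaddr, ht));
 *		... examine pte ...
 *		htable_release(ht);	(drops the busy hold)
 *	}
 */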
/*
 * Acquires a hold on a known htable (from a locked hment entry).
 */
void
htable_acquire(htable_t *ht)
{
	hat_t		*hat = ht->ht_hat;
	level_t		level = ht->ht_level;
	uintptr_t	base = ht->ht_vaddr;
	uint_t		hashval = HTABLE_HASH(hat, base, level);

	HTABLE_ENTER(hashval);
#ifdef DEBUG
	/*
	 * make sure the htable is there
	 */
	{
		htable_t	*h;

		for (h = hat->hat_ht_hash[hashval];
		    h && h != ht;
		    h = h->ht_next)
			;
		ASSERT(h == ht);
	}
#endif /* DEBUG */
	++ht->ht_busy;
	HTABLE_EXIT(hashval);
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found acquires a hold that eventually needs to be htable_release()d
 * If not found the table is created.
 *
 * Since we can't hold a hash table mutex during allocation, we have to
 * drop it and redo the search on a create. Then we may have to free the newly
 * allocated htable if another thread raced in and created it ahead of us.
 */
htable_t *
htable_create(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	uint_t		h;
	level_t		l;
	uintptr_t	base;
	htable_t	*ht;
	htable_t	*higher = NULL;
	htable_t	*new = NULL;

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_create(): level %d out of range\n", level);

	/*
	 * Create the page tables in top down order.
	 */
	for (l = TOP_LEVEL(hat); l >= level; --l) {
		new = NULL;
		if (l == TOP_LEVEL(hat))
			base = 0;
		else
			base = vaddr & LEVEL_MASK(l + 1);

		h = HTABLE_HASH(hat, base, l);
try_again:
		/*
		 * look up the htable at this level
		 */
		HTABLE_ENTER(h);
		if (l == TOP_LEVEL(hat)) {
			ht = hat->hat_htable;
		} else {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				ASSERT(ht->ht_hat == hat);
				if (ht->ht_vaddr == base &&
				    ht->ht_level == l)
					break;
			}
		}

		/*
		 * if we found the htable, increment its busy cnt
		 * and if we had allocated a new htable, free it.
		 */
		if (ht != NULL) {
			/*
			 * If we find a pre-existing shared table, it must
			 * share from the same place.
			 */
			if (l == level && shared && ht->ht_shares &&
			    ht->ht_shares != shared) {
				panic("htable shared from wrong place "
				    "found htable=%p shared=%p", ht, shared);
			}
			++ht->ht_busy;
			HTABLE_EXIT(h);
			if (new)
				htable_free(new);
			if (higher != NULL)
				htable_release(higher);
			higher = ht;

		/*
		 * if we didn't find it on the first search
		 * allocate a new one and search again
		 */
		} else if (new == NULL) {
			HTABLE_EXIT(h);
			new = htable_alloc(hat, base, l,
			    l == level ? shared : NULL);
			goto try_again;

		/*
		 * 2nd search and still not there, use "new" table
		 * Link new table into higher, when not at top level.
		 */
		} else {
			ht = new;
			if (higher != NULL) {
				link_ptp(higher, ht, base);
				ht->ht_parent = higher;

				/*
				 * When any top level VLP page table changes,
				 * we must reload cr3 on all processors.
				 */
#ifdef __i386
				if (mmu.pae_hat &&
#else /* !__i386 */
				if ((hat->hat_flags & HAT_VLP) &&
#endif /* __i386 */
				    l == VLP_LEVEL - 1)
					hat_demap(hat, DEMAP_ALL_ADDR);
			}
			ht->ht_next = hat->hat_ht_hash[h];
			ASSERT(ht->ht_prev == NULL);
			if (hat->hat_ht_hash[h])
				hat->hat_ht_hash[h]->ht_prev = ht;
			hat->hat_ht_hash[h] = ht;
			HTABLE_EXIT(h);

			/*
			 * Note we don't do htable_release(higher).
			 * That happens recursively when "new" is removed by
			 * htable_release() or htable_steal().
			 */
			higher = ht;

			/*
			 * If we just created a new shared page table we
			 * increment the shared htable's busy count, so that
			 * it can't be the victim of a steal even if it's empty.
			 */
			if (l == level && shared) {
				(void) htable_lookup(shared->ht_hat,
				    shared->ht_vaddr, shared->ht_level);
				HATSTAT_INC(hs_htable_shared);
			}
		}
	}

	return (ht);
}
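
/*
 * The allocate/retry dance in htable_create() is a common pattern for
 * populating a hash without holding its mutex across a (possibly
 * sleeping) allocation. A minimal sketch of the same idea; lookup(),
 * insert(), alloc_entry() and free_entry() are illustrative names,
 * not functions in this file:
 *
 *	entry_t *e, *new = NULL;
 *
 *	for (;;) {
 *		mutex_enter(&hash_mutex);
 *		if ((e = lookup(key)) != NULL) {
 *			mutex_exit(&hash_mutex);
 *			if (new != NULL)
 *				free_entry(new);
 *			return (e);
 *		}
 *		if (new != NULL) {
 *			insert(new);
 *			mutex_exit(&hash_mutex);
 *			return (new);
 *		}
 *		mutex_exit(&hash_mutex);
 *		new = alloc_entry(key);
 *	}
 */
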
/*
 * Walk through a given htable looking for the first valid entry. This
 * routine takes both a starting and ending address. The starting address
 * is required to be within the htable provided by the caller, but there is
 * no such restriction on the ending address.
 *
 * If the routine finds a valid entry in the htable (at or beyond the
 * starting address), the PTE (and its address) will be returned.
 * This PTE may correspond to either a page or a pagetable - it is the
 * caller's responsibility to determine which. If no valid entry is
 * found, 0 (an invalid PTE) and the next unexamined address will be
 * returned.
 *
 * The loop has been carefully coded for optimization.
 */
static x86pte_t
htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr)
{
	uint_t e;
	x86pte_t found_pte = (x86pte_t)0;
	char *pte_ptr;
	char *end_pte_ptr;
	int l = ht->ht_level;
	uintptr_t va = *vap & LEVEL_MASK(l);
	size_t pgsize = LEVEL_SIZE(l);

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));

	/*
	 * Compute the starting index and ending virtual address
	 */
	e = htable_va2entry(va, ht);

	/*
	 * The following page table scan code knows that the valid
	 * bit of a PTE is in the lowest byte AND that x86 is little endian!!
	 */
	pte_ptr = (char *)x86pte_access_pagetable(ht);
	end_pte_ptr = pte_ptr + (ht->ht_num_ptes << mmu.pte_size_shift);
	pte_ptr += e << mmu.pte_size_shift;
	while (!PTE_ISVALID(*pte_ptr)) {
		va += pgsize;
		if (va >= eaddr)
			break;
		pte_ptr += mmu.pte_size;
		ASSERT(pte_ptr <= end_pte_ptr);
		if (pte_ptr == end_pte_ptr)
			break;
	}

	/*
	 * if we found a valid PTE, load the entire PTE
	 */
	if (va < eaddr && pte_ptr != end_pte_ptr) {
		if (mmu.pae_hat) {
			ATOMIC_LOAD64((x86pte_t *)pte_ptr, found_pte);
		} else {
			found_pte = *(x86pte32_t *)pte_ptr;
		}
	}
	x86pte_release_pagetable(ht);

#if defined(__amd64)
	/*
	 * deal with VA hole on amd64
	 */
	if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end)
		va = mmu.hole_end + va - mmu.hole_start;
#endif /* __amd64 */

	*vap = va;
	return (found_pte);
}
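
/*
 * Why the byte-wise scan above works (illustrative): the valid bit is
 * bit 0 of a PTE and x86 is little endian, so byte 0 of either a 4 or
 * 8 byte PTE holds it. A sketch of the equivalence the loop relies on,
 * assuming PTE_ISVALID() tests only that low bit:
 *
 *	x86pte_t pte = ...;
 *	char *p = (char *)&pte;
 *
 *	ASSERT(!!PTE_ISVALID(*p) == !!PTE_ISVALID(pte));
 */
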
/*
 * Find the address and htable for the first populated translation at or
 * above the given virtual address. The caller may also specify an upper
 * limit to the address range to search. Uses level information to quickly
 * skip unpopulated sections of virtual address spaces.
 *
 * If not found returns NULL. When found, returns the htable and virt addr
 * and has a hold on the htable.
 */
x86pte_t
htable_walk(
	struct hat *hat,
	htable_t **htp,
	uintptr_t *vaddr,
	uintptr_t eaddr)
{
	uintptr_t va = *vaddr;
	htable_t *ht;
	htable_t *prev = *htp;
	level_t l;
	level_t max_mapped_level;
	x86pte_t pte;

	ASSERT(eaddr > va);

	/*
	 * If this is a user address, then we know we need not look beyond
	 * kernelbase.
	 */
	ASSERT(hat == kas.a_hat || eaddr <= kernelbase ||
	    eaddr == HTABLE_WALK_TO_END);
	if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END)
		eaddr = kernelbase;

	/*
	 * If we're coming in with a previous page table, search it first
	 * without doing an htable_lookup(); this should be frequent.
	 */
	if (prev) {
		ASSERT(prev->ht_busy > 0);
		ASSERT(prev->ht_vaddr <= va);
		l = prev->ht_level;
		if (va <= HTABLE_LAST_PAGE(prev)) {
			pte = htable_scan(prev, &va, eaddr);

			if (PTE_ISPAGE(pte, l)) {
				*vaddr = va;
				*htp = prev;
				return (pte);
			}
		}

		/*
		 * We found nothing in the htable provided by the caller,
		 * so fall through and do the full search
		 */
		htable_release(prev);
	}

	/*
	 * Find the level of the largest pagesize used by this HAT.
	 */
	max_mapped_level = 0;
	for (l = 1; l <= mmu.max_page_level; ++l)
		if (hat->hat_pages_mapped[l] != 0)
			max_mapped_level = l;

	while (va < eaddr && va >= *vaddr) {
		ASSERT(!IN_VA_HOLE(va));

		/*
		 * Find lowest table with any entry for given address.
		 */
		for (l = 0; l <= TOP_LEVEL(hat); ++l) {
			ht = htable_lookup(hat, va, l);
			if (ht != NULL) {
				pte = htable_scan(ht, &va, eaddr);
				if (PTE_ISPAGE(pte, l)) {
					*vaddr = va;
					*htp = ht;
					return (pte);
				}
				htable_release(ht);
				break;
			}

			/*
			 * The ht is never NULL at the top level since
			 * the top level htable is created in hat_alloc().
			 */
			ASSERT(l < TOP_LEVEL(hat));

			/*
			 * No htable covers the address. If there is no
			 * larger page size that could cover it, we
			 * skip to the start of the next page table.
			 */
			if (l >= max_mapped_level) {
				va = NEXT_ENTRY_VA(va, l + 1);
				break;
			}
		}
	}

	*vaddr = 0;
	*htp = NULL;
	return (0);
}
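
/*
 * Typical use of htable_walk() (a sketch only; the real consumers are
 * the hat_*() routines in hat_i86.c). Each successful return leaves a
 * hold on *htp, which the next call releases; after the final return
 * of 0, *htp is NULL, so an explicit release is only needed when the
 * loop is exited early.
 *
 *	htable_t *ht = NULL;
 *	uintptr_t va = vaddr;
 *	x86pte_t pte;
 *
 *	while ((pte = htable_walk(hat, &ht, &va, eaddr)) != 0) {
 *		...examine pte, whose size is LEVEL_SIZE(ht->ht_level)...
 *		va += LEVEL_SIZE(ht->ht_level);
 *	}
 *	if (ht != NULL)
 *		htable_release(ht);
 */
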
/*
 * Find the htable and page table entry index of the given virtual address
 * with pagesize at or below given level.
 * If not found returns NULL. When found, returns the htable, sets
 * entry, and has a hold on the htable.
 */
htable_t *
htable_getpte(
	struct hat *hat,
	uintptr_t vaddr,
	uint_t *entry,
	x86pte_t *pte,
	level_t level)
{
	htable_t *ht;
	level_t l;
	uint_t e;

	ASSERT(level <= mmu.max_page_level);

	for (l = 0; l <= level; ++l) {
		ht = htable_lookup(hat, vaddr, l);
		if (ht == NULL)
			continue;
		e = htable_va2entry(vaddr, ht);
		if (entry != NULL)
			*entry = e;
		if (pte != NULL)
			*pte = x86pte_get(ht, e);
		return (ht);
	}
	return (NULL);
}
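
/*
 * Example use of htable_getpte() (a sketch): fetch the PTE mapping
 * vaddr at any supported page size. A NULL return means no pagetable
 * covers vaddr; on success the caller owns a hold to release.
 *
 *	htable_t *ht;
 *	uint_t entry;
 *	x86pte_t pte;
 *
 *	ht = htable_getpte(hat, vaddr, &entry, &pte, mmu.max_page_level);
 *	if (ht != NULL) {
 *		...use pte, entry and ht->ht_level...
 *		htable_release(ht);
 *	}
 */
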
/*
 * Find the htable and page table entry index of the given virtual address.
 * There must be a valid page mapped at the given address.
 * If not found returns NULL. When found, returns the htable, sets
 * entry, and has a hold on the htable.
 */
htable_t *
htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry)
{
	htable_t *ht;
	uint_t e;
	x86pte_t pte;

	ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level);
	if (ht == NULL)
		return (NULL);

	if (entry)
		*entry = e;

	if (PTE_ISPAGE(pte, ht->ht_level))
		return (ht);
	htable_release(ht);
	return (NULL);
}

void
htable_init()
{
	/*
	 * To save on kernel VA usage, we avoid debug information in 32 bit
	 * kernels.
	 */
#if defined(__amd64)
	int kmem_flags = KMC_NOHASH;
#elif defined(__i386)
	int kmem_flags = KMC_NOHASH | KMC_NODEBUG;
#endif

	/*
	 * initialize kmem caches
	 */
	htable_cache = kmem_cache_create("htable_t",
	    sizeof (htable_t), 0, NULL, NULL,
	    htable_reap, NULL, hat_memload_arena, kmem_flags);
}

/*
 * get the pte index for the virtual address in the given htable's pagetable
 */
uint_t
htable_va2entry(uintptr_t va, htable_t *ht)
{
	level_t l = ht->ht_level;

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));
	return ((va >> LEVEL_SHIFT(l)) & (ht->ht_num_ptes - 1));
}
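
/*
 * For example, with the amd64 values LEVEL_SHIFT(0) == 12 and
 * ht_num_ptes == 512 (assumed here for illustration), a level 0
 * htable computes:
 *
 *	entry = (va >> 12) & 511;
 *
 * i.e. bits 12 through 20 of the virtual address select one of the
 * 512 PTEs in the pagetable.
 */
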
/*
 * Given an htable and the index of a pte in it, return the virtual address
 * of the page.
 */
uintptr_t
htable_e2va(htable_t *ht, uint_t entry)
{
	level_t l = ht->ht_level;
	uintptr_t va;

	ASSERT(entry < ht->ht_num_ptes);
	va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l));

	/*
	 * Need to skip over any VA hole in top level table
	 */
#if defined(__amd64)
	if (ht->ht_level == mmu.max_level && va >= mmu.hole_start)
		va += ((mmu.hole_end - mmu.hole_start) + 1);
#endif

	return (va);
}
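
/*
 * htable_e2va() is the inverse of htable_va2entry() within a single
 * htable. A sketch of the round trip property (ignoring the amd64 VA
 * hole adjustment above):
 *
 *	uint_t e = htable_va2entry(va, ht);
 *
 *	ASSERT(htable_e2va(ht, e) == (va & LEVEL_MASK(ht->ht_level)));
 */
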
/*
 * The code uses compare and swap instructions to read/write PTEs to
 * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems.
 * Again this can be optimized on 64 bit systems, since aligned load/store
 * will naturally be atomic.
 *
 * The combination of using kpreempt_disable()/_enable() and the hci_mutex
 * is used to ensure that an interrupt won't overwrite a temporary mapping
 * while it's in use. If an interrupt thread tries to access a PTE, it will
 * yield briefly back to the pinned thread which holds the cpu's hci_mutex.
 */

static struct hat_cpu_info init_hci;	/* used for cpu 0 */

/*
 * Initialize a CPU private window for mapping page tables.
 * There will be 3 total pages of addressing needed:
 *
 *	1 for r/w access to pagetables
 *	1 for r access when copying pagetables (hat_alloc)
 *	1 that will map the PTEs for the 1st 2, so we can access them quickly
 *
 * We use vmem_xalloc() to get a correct alignment so that only one
 * hat_mempte_setup() is needed.
 */
void
x86pte_cpu_init(cpu_t *cpu, void *pages)
{
	struct hat_cpu_info *hci;
	caddr_t va;

	/*
	 * We can't use kmem_alloc/vmem_alloc for the 1st CPU, as this is
	 * called before we've activated our own HAT
	 */
	if (pages != NULL) {
		hci = &init_hci;
		va = pages;
	} else {
		hci = kmem_alloc(sizeof (struct hat_cpu_info), KM_SLEEP);
		va = vmem_xalloc(heap_arena, 3 * MMU_PAGESIZE, MMU_PAGESIZE, 0,
		    LEVEL_SIZE(1), NULL, NULL, VM_SLEEP);
	}
	mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * If we are using segkpm, then there is no need for any of the
	 * mempte support. We can access the desired memory through a kpm
	 * mapping rather than setting up a temporary mempte mapping.
	 */
	if (kpm_enable == 0) {
		hci->hci_mapped_pfn = PFN_INVALID;

		hci->hci_kernel_pte =
		    hat_mempte_kern_setup(va, va + (2 * MMU_PAGESIZE));
		hci->hci_pagetable_va = (void *)va;
	}

	cpu->cpu_hat_info = hci;
}

/*
 * Macro to establish temporary mappings for x86pte_XXX routines.
 */
#define	X86PTE_REMAP(addr, pte, index, perm, pfn)	{		\
	x86pte_t t;							\
									\
	t = MAKEPTE((pfn), 0) | (perm) | mmu.pt_global | mmu.pt_nx;	\
	if (mmu.pae_hat)						\
		pte[index] = t;						\
	else								\
		((x86pte32_t *)(pte))[index] = t;			\
	mmu_tlbflush_entry((caddr_t)(addr));				\
}

/*
 * Disable preemption and establish a mapping to the pagetable with the
 * given pfn. This is optimized for the case where it's the same
 * pfn as we last used from this CPU.
 */
static x86pte_t *
x86pte_access_pagetable(htable_t *ht)
{
	pfn_t pfn;
	struct hat_cpu_info *hci;

	/*
	 * VLP pagetables are contained in the hat_t
	 */
	if (ht->ht_flags & HTABLE_VLP)
		return (ht->ht_hat->hat_vlp_ptes);

	/*
	 * During early boot, use hat_boot_remap() of a page table address.
	 */
	pfn = ht->ht_pfn;
	ASSERT(pfn != PFN_INVALID);
	if (kpm_enable)
		return ((x86pte_t *)hat_kpm_pfn2va(pfn));

	if (!khat_running) {
		(void) hat_boot_remap(ptable_va, pfn);
		return ((x86pte_t *)ptable_va);
	}

	/*
	 * Normally, disable preemption and grab the CPU's hci_mutex
	 */
	kpreempt_disable();
	hci = CPU->cpu_hat_info;
	ASSERT(hci != NULL);
	mutex_enter(&hci->hci_mutex);
	if (hci->hci_mapped_pfn != pfn) {
		/*
		 * The current mapping doesn't already point to this page.
		 * Update the CPU specific pagetable mapping to map the pfn.
		 */
		X86PTE_REMAP(hci->hci_pagetable_va, hci->hci_kernel_pte, 0,
		    PT_WRITABLE, pfn);
		hci->hci_mapped_pfn = pfn;
	}
	return (hci->hci_pagetable_va);
}

/*
 * Release access to a page table.
 */
static void
x86pte_release_pagetable(htable_t *ht)
{
	struct hat_cpu_info *hci;

	if (kpm_enable)
		return;

	/*
	 * nothing to do for VLP htables
	 */
	if (ht->ht_flags & HTABLE_VLP)
		return;

	/*
	 * During boot-up hat_kern_setup(), erase the boot loader remapping.
	 */
	if (!khat_running) {
		hat_boot_demap(ptable_va);
		return;
	}

	/*
	 * Normal Operation: drop the CPU's hci_mutex and restore preemption
	 */
	hci = CPU->cpu_hat_info;
	ASSERT(hci != NULL);
	mutex_exit(&hci->hci_mutex);
	kpreempt_enable();
}
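
/*
 * Note the bracketing contract: every x86pte_access_pagetable() must be
 * paired with an x86pte_release_pagetable() on the same htable on all
 * paths, since the access may return holding the CPU's hci_mutex with
 * preemption disabled. Sketch:
 *
 *	x86pte_t *ptep = x86pte_access_pagetable(ht);
 *
 *	...read or write PTEs through ptep...
 *
 *	x86pte_release_pagetable(ht);
 */
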
/*
 * Atomic retrieval of a pagetable entry
 */
x86pte_t
x86pte_get(htable_t *ht, uint_t entry)
{
	x86pte_t pte;
	x86pte32_t *pte32p;
	x86pte_t *ptep;

	/*
	 * Be careful that loading PAE entries in 32 bit kernel is atomic.
	 */
	ptep = x86pte_access_pagetable(ht);
	if (mmu.pae_hat) {
		ATOMIC_LOAD64(ptep + entry, pte);
	} else {
		pte32p = (x86pte32_t *)ptep;
		pte = pte32p[entry];
	}
	x86pte_release_pagetable(ht);
	return (pte);
}

/*
 * Atomic unconditional set of a page table entry; it returns the previous
 * value.
 */
x86pte_t
x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr)
{
	x86pte_t old;
	x86pte_t prev, n;
	x86pte_t *ptep;
	x86pte32_t *pte32p;
	x86pte32_t n32, p32;

	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	if (ptr == NULL) {
		ptep = x86pte_access_pagetable(ht);
		ptep = (void *)((caddr_t)ptep + (entry << mmu.pte_size_shift));
	} else {
		ptep = ptr;
	}

	if (mmu.pae_hat) {
		for (;;) {
			prev = *ptep;
			n = new;
			/*
			 * prevent potential data loss by preserving the MOD
			 * bit if set in the current PTE and the pfns are the
			 * same. For example, segmap can reissue a read-only
			 * hat_memload on top of a dirty page.
			 */
			if (PTE_ISVALID(prev) && PTE2PFN(prev, ht->ht_level) ==
			    PTE2PFN(n, ht->ht_level)) {
				n |= prev & (PT_REF | PT_MOD);
			}
			if (prev == n) {
				old = new;
				break;
			}
			old = cas64(ptep, prev, n);
			if (old == prev)
				break;
		}
	} else {
		pte32p = (x86pte32_t *)ptep;
		for (;;) {
			p32 = *pte32p;
			n32 = new;
			if (PTE_ISVALID(p32) && PTE2PFN(p32, ht->ht_level) ==
			    PTE2PFN(n32, ht->ht_level)) {
				n32 |= p32 & (PT_REF | PT_MOD);
			}
			if (p32 == n32) {
				old = new;
				break;
			}
			old = cas32(pte32p, p32, n32);
			if (old == p32)
				break;
		}
	}
	if (ptr == NULL)
		x86pte_release_pagetable(ht);
	return (old);
}
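
/*
 * A worked example of the REF/MOD preservation above (the segmap case
 * mentioned in the comment): without the merge, reloading a read-only
 * mapping over a dirty page would silently lose the dirty state.
 *
 *	current PTE:	pfn X, PT_WRITABLE | PT_MOD	(page is dirty)
 *	caller's new:	pfn X, read-only		(segmap reload)
 *	PTE stored:	pfn X, read-only | PT_MOD	(dirty bit kept)
 */
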
/*
 * Atomic compare and swap of a page table entry.
 */
static x86pte_t
x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new)
{
	x86pte_t pte;
	x86pte_t *ptep;
	x86pte32_t pte32, o32, n32;
	x86pte32_t *pte32p;

	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ptep = x86pte_access_pagetable(ht);
	if (mmu.pae_hat) {
		pte = cas64(&ptep[entry], old, new);
	} else {
		o32 = old;
		n32 = new;
		pte32p = (x86pte32_t *)ptep;
		pte32 = cas32(&pte32p[entry], o32, n32);
		pte = pte32;
	}
	x86pte_release_pagetable(ht);

	return (pte);
}
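
/*
 * x86pte_cas() returns the value observed before the swap, so callers
 * use the standard compare and swap retry idiom. Sketch (modify_pte()
 * is an illustrative helper, not a function in this file):
 *
 *	x86pte_t old, prev, new;
 *
 *	do {
 *		old = x86pte_get(ht, entry);
 *		new = modify_pte(old);
 *		prev = x86pte_cas(ht, entry, old, new);
 *	} while (prev != old);
 */
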
/*
 * data structure for cross call information
 */
typedef struct xcall_info {
	x86pte_t	xi_pte;
	x86pte_t	xi_old;
	x86pte_t	*xi_pteptr;
	pfn_t		xi_pfn;
	processorid_t	xi_cpuid;
	level_t		xi_level;
	xc_func_t	xi_func;
} xcall_info_t;

/*
 * Cross call service function to atomically invalidate a PTE and flush TLBs
 */
/*ARGSUSED*/
static int
x86pte_inval_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
{
	xcall_info_t *xi = (xcall_info_t *)a1;
	caddr_t addr = (caddr_t)a2;

	/*
	 * Only the initiating cpu invalidates the page table entry.
	 * It returns the previous PTE value to the caller.
	 */
	if (CPU->cpu_id == xi->xi_cpuid) {
		x86pte_t *ptep = xi->xi_pteptr;
		pfn_t pfn = xi->xi_pfn;
		level_t level = xi->xi_level;
		x86pte_t old;
		x86pte_t prev;
		x86pte32_t *pte32p;
		x86pte32_t p32;

		if (mmu.pae_hat) {
			for (;;) {
				prev = *ptep;
				if (PTE2PFN(prev, level) != pfn)
					break;
				old = cas64(ptep, prev, 0);
				if (old == prev)
					break;
			}
		} else {
			pte32p = (x86pte32_t *)ptep;
			for (;;) {
				p32 = *pte32p;
				if (PTE2PFN(p32, level) != pfn)
					break;
				old = cas32(pte32p, p32, 0);
				if (old == p32)
					break;
			}
			prev = p32;
		}
		xi->xi_pte = prev;
	}

	/*
	 * For a normal address, we just flush one page mapping.
	 * Otherwise reload cr3 to effect a complete TLB flush.
	 *
	 * Note we don't reload VLP pte's -- this assumes we never have a
	 * large page size at VLP_LEVEL for VLP processes.
	 */
	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
		mmu_tlbflush_entry(addr);
	} else {
		reload_cr3();
	}
	return (0);
}

/*
 * Cross call service function to atomically change a PTE and flush TLBs
 */
/*ARGSUSED*/
static int
x86pte_update_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
{
	xcall_info_t *xi = (xcall_info_t *)a1;
	caddr_t addr = (caddr_t)a2;

	/*
	 * Only the initiating cpu changes the page table entry.
	 * It returns the previous PTE value to the caller.
	 */
	if (CPU->cpu_id == xi->xi_cpuid) {
		x86pte_t *ptep = xi->xi_pteptr;
		x86pte_t new = xi->xi_pte;
		x86pte_t old = xi->xi_old;
		x86pte_t prev;

		if (mmu.pae_hat) {
			prev = cas64(ptep, old, new);
		} else {
			x86pte32_t o32 = old;
			x86pte32_t n32 = new;
			x86pte32_t *pte32p = (x86pte32_t *)ptep;
			prev = cas32(pte32p, o32, n32);
		}

		xi->xi_pte = prev;
	}

	/*
	 * Flush the TLB entry
	 */
	if ((uintptr_t)addr != DEMAP_ALL_ADDR)
		mmu_tlbflush_entry(addr);
	else
		reload_cr3();
	return (0);
}

/*
 * Use cross calls to change a page table entry and invalidate TLBs.
 */
void
x86pte_xcall(hat_t *hat, xcall_info_t *xi, uintptr_t addr)
{
	cpuset_t cpus;

	/*
	 * Given the current implementation of hat_share(), doing a
	 * hat_pageunload() on a shared page table requires invalidating
	 * all user TLB entries on all CPUs.
	 */
	if (hat->hat_flags & HAT_SHARED) {
		hat = kas.a_hat;
		addr = DEMAP_ALL_ADDR;
	}

	/*
	 * Use a cross call to do the invalidations.
	 * Note the current CPU always has to be in the cross call CPU set.
	 */
	kpreempt_disable();
	xi->xi_cpuid = CPU->cpu_id;
	CPUSET_ZERO(cpus);
	if (hat == kas.a_hat) {
		CPUSET_OR(cpus, khat_cpuset);
	} else {
		mutex_enter(&hat->hat_switch_mutex);
		CPUSET_OR(cpus, hat->hat_cpus);
		CPUSET_ADD(cpus, CPU->cpu_id);
	}

	/*
	 * Use a cross call to modify the page table entry and invalidate TLBs.
	 * If we're panic'ing, don't bother with the cross call.
	 * Note the panicstr check isn't bullet proof and the panic system
	 * ought to be made tighter.
	 */
	if (panicstr == NULL)
		xc_wait_sync((xc_arg_t)xi, addr, NULL, X_CALL_HIPRI,
		    cpus, xi->xi_func);
	else
		(void) xi->xi_func((xc_arg_t)xi, (xc_arg_t)addr, NULL);
	if (hat != kas.a_hat)
		mutex_exit(&hat->hat_switch_mutex);
	kpreempt_enable();
}

/*
 * Invalidate a page table entry if it currently maps the given pfn.
 * This returns the previous value of the PTE.
 */
x86pte_t
x86pte_invalidate_pfn(htable_t *ht, uint_t entry, pfn_t pfn, void *pte_ptr)
{
	xcall_info_t xi;
	x86pte_t *ptep;
	hat_t *hat;
	uintptr_t addr;

	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	if (pte_ptr != NULL) {
		ptep = pte_ptr;
	} else {
		ptep = x86pte_access_pagetable(ht);
		ptep = (void *)((caddr_t)ptep + (entry << mmu.pte_size_shift));
	}

	/*
	 * Fill in the structure used by the cross call function to do the
	 * invalidation.
	 */
	xi.xi_pte = 0;
	xi.xi_pteptr = ptep;
	xi.xi_pfn = pfn;
	xi.xi_level = ht->ht_level;
	xi.xi_func = x86pte_inval_func;
	ASSERT(xi.xi_level != VLP_LEVEL);

	hat = ht->ht_hat;
	addr = htable_e2va(ht, entry);

	x86pte_xcall(hat, &xi, addr);

	if (pte_ptr == NULL)
		x86pte_release_pagetable(ht);
	return (xi.xi_pte);
}
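
/*
 * Sketch of a caller in the style of hat_pageunload() (illustrative
 * only; the real callers live in hat_i86.c): invalidate the mapping
 * only if it still refers to the page being unloaded, then harvest the
 * old REF/MOD bits from the returned PTE.
 *
 *	x86pte_t old;
 *
 *	old = x86pte_invalidate_pfn(ht, entry, pfn, NULL);
 *	if (PTE2PFN(old, ht->ht_level) == pfn) {
 *		...fold old & (PT_REF | PT_MOD) into the page_t...
 *	}
 */
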
/*
 * update a PTE and invalidate any stale TLB entries.
 */
x86pte_t
x86pte_update(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
{
	xcall_info_t xi;
	x86pte_t *ptep;
	hat_t *hat;
	uintptr_t addr;

	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ptep = x86pte_access_pagetable(ht);
	ptep = (void *)((caddr_t)ptep + (entry << mmu.pte_size_shift));

	/*
	 * Fill in the structure used by the cross call function to do the
	 * invalidation.
	 */
	xi.xi_pte = new;
	xi.xi_old = expected;
	xi.xi_pteptr = ptep;
	xi.xi_func = x86pte_update_func;

	hat = ht->ht_hat;
	addr = htable_e2va(ht, entry);

	x86pte_xcall(hat, &xi, addr);

	x86pte_release_pagetable(ht);
	return (xi.xi_pte);
}

/*
 * Copy page tables - this is just a little more complicated than the
 * previous routines. Note that it's also not atomic! It is also never
 * used for VLP pagetables.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
	struct hat_cpu_info *hci;
	caddr_t src_va;
	caddr_t dst_va;
	size_t size;

	ASSERT(khat_running);
	ASSERT(!(dest->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));

	/*
	 * Acquire access to the CPU pagetable window for the destination.
	 */
	dst_va = (caddr_t)x86pte_access_pagetable(dest);
	if (kpm_enable) {
		src_va = (caddr_t)x86pte_access_pagetable(src);
	} else {
		hci = CPU->cpu_hat_info;

		/*
		 * Finish defining the src pagetable mapping
		 */
		src_va = dst_va + MMU_PAGESIZE;
		X86PTE_REMAP(src_va, hci->hci_kernel_pte, 1, 0, src->ht_pfn);
	}

	/*
	 * now do the copy
	 */
	dst_va += entry << mmu.pte_size_shift;
	src_va += entry << mmu.pte_size_shift;
	size = count << mmu.pte_size_shift;
	bcopy(src_va, dst_va, size);

	x86pte_release_pagetable(dest);
}
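
/*
 * The main consumer of x86pte_copy() is hat_alloc(), which copies the
 * kernel's entries into each new top level pagetable (see the "1 for r
 * access when copying pagetables" comment above x86pte_cpu_init()). A
 * sketch, where khat_start and khat_entries are assumed names bounding
 * the kernel portion of the top level table:
 *
 *	x86pte_copy(kas.a_hat->hat_htable, new_top,
 *	    khat_start, khat_entries);
 */
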
/*
 * Zero page table entries - Note this doesn't use atomic stores!
 */
void
x86pte_zero(htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t dst_va;
	x86pte_t *p;
	x86pte32_t *p32;
	size_t size;
	extern void hat_pte_zero(void *, size_t);

	/*
	 * Map in the page table to be zeroed.
	 */
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_VLP));
	dst_va = (caddr_t)x86pte_access_pagetable(dest);
	dst_va += entry << mmu.pte_size_shift;
	size = count << mmu.pte_size_shift;
	if (x86_feature & X86_SSE2) {
		hat_pte_zero(dst_va, size);
	} else if (khat_running) {
		bzero(dst_va, size);
	} else {
		/*
		 * Can't just use bzero during boot because it checks the
		 * address against kernelbase. Instead just use a zero loop.
		 */
		if (mmu.pae_hat) {
			p = (x86pte_t *)dst_va;
			while (count-- > 0)
				*p++ = 0;
		} else {
			p32 = (x86pte32_t *)dst_va;
			while (count-- > 0)
				*p32++ = 0;
		}
	}
	x86pte_release_pagetable(dest);
}

/*
 * Called to ensure that all pagetables are in the system dump
 */
void
hat_dump(void)
{
	hat_t *hat;
	uint_t h;
	htable_t *ht;

	/*
	 * Dump all page tables
	 */
	for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) {
		for (h = 0; h < hat->hat_num_hash; ++h) {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				if ((ht->ht_flags & HTABLE_VLP) == 0)
					dump_page(ht->ht_pfn);
			}
		}
	}
}