/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/disp.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/bootconf.h>
#include <sys/dumphdr.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <vm/hat_i86.h>
#include <sys/cmn_err.h>

#include <sys/bootinfo.h>
#include <vm/kboot_mmu.h>

static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count);

kmem_cache_t *htable_cache;

/*
 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT,
 * is used in order to facilitate testing of the htable_steal() code.
 * By resetting htable_reserve_amount to a lower value, we can force
 * stealing to occur.  The reserve amount is a guess to get us through boot.
 */
#define	HTABLE_RESERVE_AMOUNT	(200)
uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT;
kmutex_t htable_reserve_mutex;
uint_t htable_reserve_cnt;
htable_t *htable_reserve_pool;

/*
 * Used to hand test htable_steal().
 */
#ifdef DEBUG
ulong_t force_steal = 0;
ulong_t ptable_cnt = 0;
#endif

/*
 * This variable exists so that htable_steal_passes can be tuned via
 * /etc/system. Any value works, but a power of two <= mmu.ptes_per_table
 * is best.
 */
uint_t htable_steal_passes = 8;
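/*
 * For example, to make stealing kick in more aggressively on a test
 * machine, one might add lines like the following (illustrative values,
 * not recommendations) to /etc/system and reboot:
 *
 *	set htable_steal_passes = 4
 *	set htable_reserve_amount = 10
 */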
/*
 * mutex stuff for access to htable hash
 */
#define	NUM_HTABLE_MUTEX 128
kmutex_t htable_mutex[NUM_HTABLE_MUTEX];
#define	HTABLE_MUTEX_HASH(h)	((h) & (NUM_HTABLE_MUTEX - 1))

#define	HTABLE_ENTER(h)	mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
#define	HTABLE_EXIT(h)	mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]);

/*
 * forward declarations
 */
static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr);
static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr);
static void htable_free(htable_t *ht);
static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index);
static void x86pte_release_pagetable(htable_t *ht);
static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old,
	x86pte_t new);

/*
 * A counter to track if we are stealing or reaping htables. When non-zero
 * htable_free() will directly free htables (either to the reserve or kmem)
 * instead of putting them in a hat's htable cache.
 */
uint32_t htable_dont_cache = 0;

/*
 * Track the number of active pagetables, so we can know how many to reap
 */
static uint32_t active_ptables = 0;

/*
 * Allocate a memory page for a hardware page table.
 *
 * A wrapper around page_get_physical(), with some extra checks.
 */
static pfn_t
ptable_alloc(uintptr_t seed)
{
	pfn_t pfn;
	page_t *pp;

	pfn = PFN_INVALID;
	atomic_add_32(&active_ptables, 1);

	/*
	 * The first check is to see if there is memory in the system. If we
	 * drop to throttlefree, then fail the ptable_alloc() and let the
	 * stealing code kick in. Note that we have to do this test here,
	 * since the test in page_create_throttle() would let the NOSLEEP
	 * allocation go through and deplete the page reserves.
	 *
	 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check.
	 */
	if (!NOMEMWAIT() && freemem <= throttlefree + 1)
		return (PFN_INVALID);

#ifdef DEBUG
	/*
	 * This code makes htable_steal() easier to test. By setting
	 * force_steal we force pagetable allocations to fall
	 * into the stealing code. Roughly 1 in every "force_steal"
	 * page table allocations will fail.
	 */
	if (proc_pageout != NULL && force_steal > 1 &&
	    ++ptable_cnt > force_steal) {
		ptable_cnt = 0;
		return (PFN_INVALID);
	}
#endif /* DEBUG */

	pp = page_get_physical(seed);
	if (pp == NULL)
		return (PFN_INVALID);
	pfn = pp->p_pagenum;
	page_downgrade(pp);
	ASSERT(PAGE_SHARED(pp));

	if (pfn == PFN_INVALID)
		panic("ptable_alloc(): Invalid PFN!!");
	HATSTAT_INC(hs_ptable_allocs);
	return (pfn);
}

/*
 * Free an htable's associated page table page. See the comments
 * for ptable_alloc().
 */
static void
ptable_free(pfn_t pfn)
{
	page_t *pp = page_numtopp_nolock(pfn);

	/*
	 * need to destroy the page used for the pagetable
	 */
	ASSERT(pfn != PFN_INVALID);
	HATSTAT_INC(hs_ptable_frees);
	atomic_add_32(&active_ptables, -1);
	if (pp == NULL)
		panic("ptable_free(): no page for pfn!");
	ASSERT(PAGE_SHARED(pp));
	ASSERT(pfn == pp->p_pagenum);

	/*
	 * Get an exclusive lock, might have to wait for a kmem reader.
	 */
	if (!page_tryupgrade(pp)) {
		page_unlock(pp);
		/*
		 * RFE: we could change this to not loop forever
		 * George Cameron had some idea on how to do that.
		 * For now looping works - it's just like sfmmu.
		 */
		while (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_RECLAIM))
			continue;
	}
	page_free(pp, 1);
	page_unresv(1);
}
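/*
 * The reserve pool managed below is a simple LIFO free list of htable_t's,
 * threaded through ht_next and guarded by htable_reserve_mutex:
 * htable_put_reserve() pushes at the head and htable_get_reserve() pops
 * the head. It exists so that early boot (and other contexts that can't
 * go to the kmem allocator) always have htable structures available.
 */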
/*
 * Put one htable on the reserve list.
 */
static void
htable_put_reserve(htable_t *ht)
{
	ht->ht_hat = NULL;		/* no longer tied to a hat */
	ASSERT(ht->ht_pfn == PFN_INVALID);
	HATSTAT_INC(hs_htable_rputs);
	mutex_enter(&htable_reserve_mutex);
	ht->ht_next = htable_reserve_pool;
	htable_reserve_pool = ht;
	++htable_reserve_cnt;
	mutex_exit(&htable_reserve_mutex);
}

/*
 * Take one htable from the reserve.
 */
static htable_t *
htable_get_reserve(void)
{
	htable_t *ht = NULL;

	mutex_enter(&htable_reserve_mutex);
	if (htable_reserve_cnt != 0) {
		ht = htable_reserve_pool;
		ASSERT(ht != NULL);
		ASSERT(ht->ht_pfn == PFN_INVALID);
		htable_reserve_pool = ht->ht_next;
		--htable_reserve_cnt;
		HATSTAT_INC(hs_htable_rgets);
	}
	mutex_exit(&htable_reserve_mutex);
	return (ht);
}

/*
 * Allocate initial htables and put them on the reserve list
 */
void
htable_initial_reserve(uint_t count)
{
	htable_t *ht;

	count += HTABLE_RESERVE_AMOUNT;
	while (count > 0) {
		ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP);
		ASSERT(ht != NULL);

		ASSERT(use_boot_reserve);
		ht->ht_pfn = PFN_INVALID;
		htable_put_reserve(ht);
		--count;
	}
}

/*
 * Readjust the reserves after a thread finishes using them.
 */
void
htable_adjust_reserve()
{
	htable_t *ht;

	/*
	 * Free any excess htables in the reserve list
	 */
	while (htable_reserve_cnt > htable_reserve_amount &&
	    !USE_HAT_RESERVES()) {
		ht = htable_get_reserve();
		if (ht == NULL)
			return;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		kmem_cache_free(htable_cache, ht);
	}
}


/*
 * This routine steals htables from user processes for htable_alloc() or
 * for htable_reap().
 */
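/*
 * A rough sketch of how the passes scale (assuming the default
 * htable_steal_passes of 8 and, say, 512 PTEs per table): pass 0 only
 * takes htables off the per-hat free caches; pass 1 steals page tables
 * with at most 512 * 1/8 = 64 valid entries; pass 2 at most 128; and
 * so on, until the last passes will steal almost any page table.
 */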
static htable_t *
htable_steal(uint_t cnt)
{
	hat_t		*hat = kas.a_hat;	/* list starts with khat */
	htable_t	*list = NULL;
	htable_t	*ht;
	htable_t	*higher;
	uint_t		h;
	uint_t		h_start;
	static uint_t	h_seed = 0;
	uint_t		e;
	uintptr_t	va;
	x86pte_t	pte;
	uint_t		stolen = 0;
	uint_t		pass;
	uint_t		threshold;

	/*
	 * Limit htable_steal_passes to something reasonable
	 */
	if (htable_steal_passes == 0)
		htable_steal_passes = 1;
	if (htable_steal_passes > mmu.ptes_per_table)
		htable_steal_passes = mmu.ptes_per_table;

	/*
	 * Loop through all user hats. The 1st pass takes cached htables that
	 * aren't in use. The later passes steal by removing mappings, too.
	 */
	atomic_add_32(&htable_dont_cache, 1);
	for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) {
		threshold = pass * mmu.ptes_per_table / htable_steal_passes;
		hat = kas.a_hat;
		for (;;) {

			/*
			 * Clear the victim flag and move to next hat
			 */
			mutex_enter(&hat_list_lock);
			if (hat != kas.a_hat) {
				hat->hat_flags &= ~HAT_VICTIM;
				cv_broadcast(&hat_list_cv);
			}
			hat = hat->hat_next;

			/*
			 * Skip any hat that is already being stolen from.
			 *
			 * We skip SHARED hats, as these are dummy
			 * hats that host ISM shared page tables.
			 *
			 * We also skip if HAT_FREEING because hat_pte_unmap()
			 * won't zero out the PTE's. That would lead to hitting
			 * stale PTEs either here or under hat_unload() when we
			 * steal and unload the same page table in competing
			 * threads.
			 */
			while (hat != NULL &&
			    (hat->hat_flags &
			    (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0)
				hat = hat->hat_next;

			if (hat == NULL) {
				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Are we finished?
			 */
			if (stolen == cnt) {
				/*
				 * Try to spread the pain of stealing,
				 * move victim HAT to the end of the HAT list.
				 */
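				/*
				 * (kas.a_hat->hat_prev always points at the
				 * tail of this list, so the relink below is
				 * constant time.)
				 */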
				if (pass >= 1 && cnt == 1 &&
				    kas.a_hat->hat_prev != hat) {

					/* unlink victim hat */
					if (hat->hat_prev)
						hat->hat_prev->hat_next =
						    hat->hat_next;
					else
						kas.a_hat->hat_next =
						    hat->hat_next;
					if (hat->hat_next)
						hat->hat_next->hat_prev =
						    hat->hat_prev;
					else
						kas.a_hat->hat_prev =
						    hat->hat_prev;


					/* relink at end of hat list */
					hat->hat_next = NULL;
					hat->hat_prev = kas.a_hat->hat_prev;
					if (hat->hat_prev)
						hat->hat_prev->hat_next = hat;
					else
						kas.a_hat->hat_next = hat;
					kas.a_hat->hat_prev = hat;

				}

				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Mark the HAT as a stealing victim.
			 */
			hat->hat_flags |= HAT_VICTIM;
			mutex_exit(&hat_list_lock);

			/*
			 * Take any htables from the hat's cached "free" list.
			 */
			hat_enter(hat);
			while ((ht = hat->hat_ht_cached) != NULL &&
			    stolen < cnt) {
				hat->hat_ht_cached = ht->ht_next;
				ht->ht_next = list;
				list = ht;
				++stolen;
			}
			hat_exit(hat);

			/*
			 * Don't steal on first pass.
			 */
			if (pass == 0 || stolen == cnt)
				continue;

			/*
			 * Search the active htables for one to steal.
			 * Start at a different hash bucket every time to
			 * help spread the pain of stealing.
			 */
			h = h_start = h_seed++ % hat->hat_num_hash;
			do {
				higher = NULL;
				HTABLE_ENTER(h);
				for (ht = hat->hat_ht_hash[h]; ht;
				    ht = ht->ht_next) {

					/*
					 * Can we rule out reaping?
					 */
					if (ht->ht_busy != 0 ||
					    (ht->ht_flags &
					    HTABLE_SHARED_PFN) ||
					    ht->ht_level > 0 ||
					    ht->ht_valid_cnt > threshold ||
					    ht->ht_lock_cnt != 0)
						continue;

					/*
					 * Increment busy so the htable can't
					 * disappear. We drop the htable mutex
					 * to avoid deadlocks with
					 * hat_pageunload() and the hment mutex
					 * while we call hat_pte_unmap()
					 */
					++ht->ht_busy;
					HTABLE_EXIT(h);

					/*
					 * Try stealing.
					 * - unload and invalidate all PTEs
					 */
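					/*
					 * (Note that the loop below re-checks
					 * ht_busy and ht_lock_cnt on every
					 * iteration: the hash mutex has been
					 * dropped, so another thread may start
					 * using this htable while we unload
					 * it.)
					 */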
					for (e = 0, va = ht->ht_vaddr;
					    e < HTABLE_NUM_PTES(ht) &&
					    ht->ht_valid_cnt > 0 &&
					    ht->ht_busy == 1 &&
					    ht->ht_lock_cnt == 0;
					    ++e, va += MMU_PAGESIZE) {
						pte = x86pte_get(ht, e);
						if (!PTE_ISVALID(pte))
							continue;
						hat_pte_unmap(ht, e,
						    HAT_UNLOAD, pte, NULL);
					}

					/*
					 * Reacquire htable lock. If we didn't
					 * remove all mappings in the table,
					 * or another thread added a new mapping
					 * behind us, give up on this table.
					 */
					HTABLE_ENTER(h);
					if (ht->ht_busy != 1 ||
					    ht->ht_valid_cnt != 0 ||
					    ht->ht_lock_cnt != 0) {
						--ht->ht_busy;
						continue;
					}

					/*
					 * Steal it and unlink the page table.
					 */
					higher = ht->ht_parent;
					unlink_ptp(higher, ht, ht->ht_vaddr);

					/*
					 * remove from the hash list
					 */
					if (ht->ht_next)
						ht->ht_next->ht_prev =
						    ht->ht_prev;

					if (ht->ht_prev) {
						ht->ht_prev->ht_next =
						    ht->ht_next;
					} else {
						ASSERT(hat->hat_ht_hash[h] ==
						    ht);
						hat->hat_ht_hash[h] =
						    ht->ht_next;
					}

					/*
					 * Break to outer loop to release the
					 * higher (ht_parent) pagetable. This
					 * spreads out the pain caused by
					 * pagefaults.
					 */
					ht->ht_next = list;
					list = ht;
					++stolen;
					break;
				}
				HTABLE_EXIT(h);
				if (higher != NULL)
					htable_release(higher);
				if (++h == hat->hat_num_hash)
					h = 0;
			} while (stolen < cnt && h != h_start);
		}
	}
	atomic_add_32(&htable_dont_cache, -1);
	return (list);
}


/*
 * This is invoked from kmem when the system is low on memory.  We try
 * to free hments, htables, and ptables to improve the memory situation.
 */
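/*
 * To make the sizing below concrete: on a system with, say, 4000 active
 * pagetables the target is 4000 / 20 = 200 reaps, capped above by
 * physmem / 20 pages and floored at 10.
 */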
/*ARGSUSED*/
static void
htable_reap(void *handle)
{
	uint_t		reap_cnt;
	htable_t	*list;
	htable_t	*ht;

	HATSTAT_INC(hs_reap_attempts);
	if (!can_steal_post_boot)
		return;

	/*
	 * Try to reap 5% of the page tables bounded by a maximum of
	 * 5% of physmem and a minimum of 10.
	 */
	reap_cnt = MAX(MIN(physmem / 20, active_ptables / 20), 10);

	/*
	 * Let htable_steal() do the work, we just call htable_free()
	 */
	list = htable_steal(reap_cnt);
	while ((ht = list) != NULL) {
		list = ht->ht_next;
		HATSTAT_INC(hs_reaped);
		htable_free(ht);
	}

	/*
	 * Free up excess reserves
	 */
	htable_adjust_reserve();
	hment_adjust_reserve();
}

/*
 * Allocate an htable, stealing one or using the reserve if necessary
 */
static htable_t *
htable_alloc(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	htable_t	*ht = NULL;
	uint_t		is_vlp;
	uint_t		is_bare = 0;
	uint_t		need_to_zero = 1;
	int		kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_alloc(): level %d out of range\n", level);

	is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
	if (is_vlp || shared != NULL)
		is_bare = 1;

	/*
	 * First reuse a cached htable from the hat_ht_cached field, this
	 * avoids unnecessary trips through kmem/page allocators.
	 */
	if (hat->hat_ht_cached != NULL && !is_bare) {
		hat_enter(hat);
		ht = hat->hat_ht_cached;
		if (ht != NULL) {
			hat->hat_ht_cached = ht->ht_next;
			need_to_zero = 0;
			/* XX64 ASSERT() they're all zero somehow */
			ASSERT(ht->ht_pfn != PFN_INVALID);
		}
		hat_exit(hat);
	}

	if (ht == NULL) {
		/*
		 * Allocate an htable, possibly refilling the reserves.
		 */
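		/*
		 * (Overall fallback order for htable_alloc(): the hat's
		 * cached free list above, then the reserve pool or the
		 * kmem cache here, and finally htable_steal() below.)
		 */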
		if (USE_HAT_RESERVES()) {
			ht = htable_get_reserve();
		} else {
			/*
			 * Donate successful htable allocations to the reserve.
			 */
			for (;;) {
				ht = kmem_cache_alloc(htable_cache, kmflags);
				if (ht == NULL)
					break;
				ht->ht_pfn = PFN_INVALID;
				if (USE_HAT_RESERVES() ||
				    htable_reserve_cnt >= htable_reserve_amount)
					break;
				htable_put_reserve(ht);
			}
		}

		/*
		 * allocate a page for the hardware page table if needed
		 */
		if (ht != NULL && !is_bare) {
			ht->ht_hat = hat;
			ht->ht_pfn = ptable_alloc((uintptr_t)ht);
			if (ht->ht_pfn == PFN_INVALID) {
				if (USE_HAT_RESERVES())
					htable_put_reserve(ht);
				else
					kmem_cache_free(htable_cache, ht);
				ht = NULL;
			}
		}
	}

	/*
	 * If allocations failed, kick off a kmem_reap() and resort to
	 * htable_steal(). We may spin here if the system is very low on
	 * memory. If the kernel itself has consumed all memory and kmem_reap()
	 * can't free up anything, then we'll really get stuck here.
	 * That should only happen in a system where the administrator has
	 * misconfigured VM parameters via /etc/system.
	 */
	while (ht == NULL && can_steal_post_boot) {
		kmem_reap();
		ht = htable_steal(1);
		HATSTAT_INC(hs_steals);

		/*
		 * If we stole for a bare htable, release the pagetable page.
		 */
		if (ht != NULL) {
			if (is_bare) {
				ptable_free(ht->ht_pfn);
				ht->ht_pfn = PFN_INVALID;
			}
		}
	}

	/*
	 * All attempts to allocate or steal failed. This should only happen
	 * if we run out of memory during boot, due perhaps to a huge
	 * boot_archive. At this point there's no way to continue.
	 */
	if (ht == NULL)
		panic("htable_alloc(): couldn't steal\n");

	/*
	 * Shared page tables have all entries locked and entries may not
	 * be added or deleted.
	 */
	ht->ht_flags = 0;
	if (shared != NULL) {
		ASSERT(level == 0);
		ASSERT(shared->ht_valid_cnt > 0);
		ht->ht_flags |= HTABLE_SHARED_PFN;
		ht->ht_pfn = shared->ht_pfn;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;		/* updated in hat_share() */
		ht->ht_shares = shared;
		need_to_zero = 0;
	} else {
		ht->ht_shares = NULL;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;
	}

	/*
	 * setup flags, etc. for VLP htables
	 */
	if (is_vlp) {
		ht->ht_flags |= HTABLE_VLP;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		need_to_zero = 0;
	}

	/*
	 * fill in the htable
	 */
	ht->ht_hat = hat;
	ht->ht_parent = NULL;
	ht->ht_vaddr = vaddr;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;

	/*
	 * Zero out any freshly allocated page table
	 */
	if (need_to_zero)
		x86pte_zero(ht, 0, mmu.ptes_per_table);

	return (ht);
}

/*
 * Free up an htable, either to a hat's cached list, the reserves or
 * back to kmem.
 */
static void
htable_free(htable_t *ht)
{
	hat_t *hat = ht->ht_hat;

	/*
	 * If the process isn't exiting, cache the free htable in the hat
	 * structure. We always do this for the boot reserve. We don't
	 * do this if the hat is exiting or we are stealing/reaping htables.
	 */
	if (hat != NULL &&
	    !(ht->ht_flags & HTABLE_SHARED_PFN) &&
	    (use_boot_reserve ||
	    (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
		ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
		ASSERT(ht->ht_pfn != PFN_INVALID);
		hat_enter(hat);
		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		hat_exit(hat);
		return;
	}

	/*
	 * If we have a hardware page table, free it.
	 * We don't free page tables that are accessed by sharing.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht->ht_pfn);
	}
	ht->ht_pfn = PFN_INVALID;

	/*
	 * Free htables or put into reserves.
	 */
	if (USE_HAT_RESERVES() || htable_reserve_cnt < htable_reserve_amount) {
		htable_put_reserve(ht);
	} else {
		kmem_cache_free(htable_cache, ht);
		htable_adjust_reserve();
	}
}


/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all left over
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t *ht;
	int h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_add_32(&htable_dont_cache, 1);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_add_32(&htable_dont_cache, -1);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
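/*
 * (A PTP here is a non-leaf page table entry, i.e. one that points at a
 * lower-level page table rather than at a page.)
 */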
static void
unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
{
	uint_t entry = htable_va2entry(vaddr, higher);
	x86pte_t expect = MAKEPTP(old->ht_pfn, old->ht_level);
	x86pte_t found;
	hat_t *hat = old->ht_hat;

	ASSERT(higher->ht_busy > 0);
	ASSERT(higher->ht_valid_cnt > 0);
	ASSERT(old->ht_valid_cnt == 0);
	found = x86pte_cas(higher, entry, expect, 0);
	if (found != expect)
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);

	/*
	 * When any top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors. Also some CPU types require
	 * invalidating when inner table entries are invalidated.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		if (higher->ht_flags & HTABLE_VLP)
			hat_tlb_inval(hat, DEMAP_ALL_ADDR);
		else if (mmu.inval_nonleaf)
			hat_tlb_inval(hat, old->ht_vaddr);
	}

	HTABLE_DEC(higher->ht_valid_cnt);
}

/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t entry = htable_va2entry(vaddr, higher);
	x86pte_t newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);

	/*
	 * When any top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors using it.
	 */
	if (higher->ht_flags & HTABLE_VLP)
		hat_tlb_inval(higher->ht_hat, DEMAP_ALL_ADDR);
}

/*
 * Release of hold on an htable. If this is the last use and the pagetable
 * is empty we may want to free it, then recursively look at the pagetable
 * above it. The recursion is handled by the outer while() loop.
 */
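/*
 * For example, unmapping the last 4K page under a level 0 table frees
 * that table, which decrements the valid count of its level 1 parent;
 * if that count in turn drops to zero, the parent may be freed as well,
 * and so on up toward the top level.
 */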
void
htable_release(htable_t *ht)
{
	uint_t		hashval;
	htable_t	*shared;
	htable_t	*higher;
	hat_t		*hat;
	uintptr_t	va;
	level_t		level;

	while (ht != NULL) {
		shared = NULL;
		for (;;) {
			hat = ht->ht_hat;
			va = ht->ht_vaddr;
			level = ht->ht_level;
			hashval = HTABLE_HASH(hat, va, level);

			/*
			 * The common case is that this isn't the last use of
			 * an htable so we don't want to free the htable.
			 */
			HTABLE_ENTER(hashval);
			ASSERT(ht->ht_lock_cnt == 0 || ht->ht_valid_cnt > 0);
			ASSERT(ht->ht_valid_cnt >= 0);
			ASSERT(ht->ht_busy > 0);
			if (ht->ht_valid_cnt > 0)
				break;
			if (ht->ht_busy > 1)
				break;

			/*
			 * we always release empty shared htables
			 */
			if (!(ht->ht_flags & HTABLE_SHARED_PFN)) {

				/*
				 * don't release if in address space tear down
				 */
				if (hat->hat_flags & HAT_FREEING)
					break;

				/*
				 * At and above max_page_level, free if it's for
				 * a boot-time kernel mapping below kernelbase.
				 */
				if (level >= mmu.max_page_level &&
				    (hat != kas.a_hat || va >= kernelbase))
					break;
			}

			/*
			 * Remember if we destroy an htable that shares its PFN
			 * from elsewhere.
			 */
			if (ht->ht_flags & HTABLE_SHARED_PFN) {
				ASSERT(ht->ht_level == 0);
				ASSERT(shared == NULL);
				shared = ht->ht_shares;
				HATSTAT_INC(hs_htable_unshared);
			}

			/*
			 * Handle release of a table and freeing the htable_t.
			 * Unlink it from the table higher (ie. ht_parent).
			 */
			ASSERT(ht->ht_lock_cnt == 0);
			higher = ht->ht_parent;
			ASSERT(higher != NULL);

			/*
			 * Unlink the pagetable.
			 */
			unlink_ptp(higher, ht, va);

			/*
			 * remove this htable from its hash list
			 */
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[hashval] == ht);
				hat->hat_ht_hash[hashval] = ht->ht_next;
			}
			HTABLE_EXIT(hashval);
			htable_free(ht);
			ht = higher;
		}

		ASSERT(ht->ht_busy >= 1);
		--ht->ht_busy;
		HTABLE_EXIT(hashval);

		/*
		 * If we released a shared htable, do a release on the htable
		 * from which it shared
		 */
		ht = shared;
	}
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found acquires a hold that eventually needs to be htable_release()d
 */
htable_t *
htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level)
{
	uintptr_t	base;
	uint_t		hashval;
	htable_t	*ht = NULL;

	ASSERT(level >= 0);
	ASSERT(level <= TOP_LEVEL(hat));

	if (level == TOP_LEVEL(hat))
		base = 0;
	else
		base = vaddr & LEVEL_MASK(level + 1);

	hashval = HTABLE_HASH(hat, base, level);
	HTABLE_ENTER(hashval);
	for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) {
		if (ht->ht_hat == hat &&
		    ht->ht_vaddr == base &&
		    ht->ht_level == level)
			break;
	}
	if (ht)
		++ht->ht_busy;

	HTABLE_EXIT(hashval);
	return (ht);
}

/*
 * Acquires a hold on a known htable (from a locked hment entry).
 */
void
htable_acquire(htable_t *ht)
{
	hat_t		*hat = ht->ht_hat;
	level_t		level = ht->ht_level;
	uintptr_t	base = ht->ht_vaddr;
	uint_t		hashval = HTABLE_HASH(hat, base, level);

	HTABLE_ENTER(hashval);
#ifdef DEBUG
	/*
	 * make sure the htable is there
	 */
	{
		htable_t	*h;

		for (h = hat->hat_ht_hash[hashval];
		    h && h != ht;
		    h = h->ht_next)
			;
		ASSERT(h == ht);
	}
#endif /* DEBUG */
	++ht->ht_busy;
	HTABLE_EXIT(hashval);
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found acquires a hold that eventually needs to be htable_release()d
 * If not found the table is created.
 *
 * Since we can't hold a hash table mutex during allocation, we have to
 * drop it and redo the search on a create. Then we may have to free the newly
 * allocated htable if another thread raced in and created it ahead of us.
 */
htable_t *
htable_create(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	uint_t		h;
	level_t		l;
	uintptr_t	base;
	htable_t	*ht;
	htable_t	*higher = NULL;
	htable_t	*new = NULL;

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_create(): level %d out of range\n", level);

	/*
	 * Create the page tables in top down order.
	 */
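	/*
	 * (For example, creating a level 0 table in a 4-level hat visits
	 * l = 3, 2, 1, 0 in turn, looking up or allocating the table that
	 * covers vaddr at each level, so all intermediate tables exist by
	 * the time the level 0 table is linked in.)
	 */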
	for (l = TOP_LEVEL(hat); l >= level; --l) {
		new = NULL;
		if (l == TOP_LEVEL(hat))
			base = 0;
		else
			base = vaddr & LEVEL_MASK(l + 1);

		h = HTABLE_HASH(hat, base, l);
try_again:
		/*
		 * look up the htable at this level
		 */
		HTABLE_ENTER(h);
		if (l == TOP_LEVEL(hat)) {
			ht = hat->hat_htable;
		} else {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				ASSERT(ht->ht_hat == hat);
				if (ht->ht_vaddr == base &&
				    ht->ht_level == l)
					break;
			}
		}

		/*
		 * if we found the htable, increment its busy cnt
		 * and if we had allocated a new htable, free it.
		 */
		if (ht != NULL) {
			/*
			 * If we find a pre-existing shared table, it must
			 * share from the same place.
			 */
			if (l == level && shared && ht->ht_shares &&
			    ht->ht_shares != shared) {
				panic("htable shared from wrong place "
				    "found htable=%p shared=%p", ht, shared);
			}
			++ht->ht_busy;
			HTABLE_EXIT(h);
			if (new)
				htable_free(new);
			if (higher != NULL)
				htable_release(higher);
			higher = ht;

			/*
			 * if we didn't find it on the first search
			 * allocate a new one and search again
			 */
		} else if (new == NULL) {
			HTABLE_EXIT(h);
			new = htable_alloc(hat, base, l,
			    l == level ? shared : NULL);
			goto try_again;

			/*
			 * 2nd search and still not there, use "new" table
			 * Link new table into higher, when not at top level.
			 */
		} else {
			ht = new;
			if (higher != NULL) {
				link_ptp(higher, ht, base);
				ht->ht_parent = higher;
			}
			ht->ht_next = hat->hat_ht_hash[h];
			ASSERT(ht->ht_prev == NULL);
			if (hat->hat_ht_hash[h])
				hat->hat_ht_hash[h]->ht_prev = ht;
			hat->hat_ht_hash[h] = ht;
			HTABLE_EXIT(h);

			/*
			 * Note we don't do htable_release(higher).
			 * That happens recursively when "new" is removed by
			 * htable_release() or htable_steal().
			 */
			higher = ht;

			/*
			 * If we just created a new shared page table we
			 * increment the shared htable's busy count, so that
			 * it can't be the victim of a steal even if it's empty.
			 */
			if (l == level && shared) {
				(void) htable_lookup(shared->ht_hat,
				    shared->ht_vaddr, shared->ht_level);
				HATSTAT_INC(hs_htable_shared);
			}
		}
	}

	return (ht);
}

/*
 * Inherit initial pagetables from the boot program.
 */
void
htable_attach(
	hat_t	*hat,
	uintptr_t base,
	level_t	level,
	htable_t *parent,
	pfn_t	pfn)
{
	htable_t	*ht;
	uint_t		h;
	uint_t		i;
	x86pte_t	pte;
	x86pte_t	*ptep;
	page_t		*pp;
	extern page_t	*boot_claim_page(pfn_t);

	ht = htable_get_reserve();
	if (level == mmu.max_level)
		kas.a_hat->hat_htable = ht;
	ht->ht_hat = hat;
	ht->ht_parent = parent;
	ht->ht_vaddr = base;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;
	ht->ht_flags = 0;
	ht->ht_pfn = pfn;
	ht->ht_lock_cnt = 0;
	ht->ht_valid_cnt = 0;
	if (parent != NULL)
		++parent->ht_busy;

	h = HTABLE_HASH(hat, base, level);
	HTABLE_ENTER(h);
	ht->ht_next = hat->hat_ht_hash[h];
	ASSERT(ht->ht_prev == NULL);
	if (hat->hat_ht_hash[h])
		hat->hat_ht_hash[h]->ht_prev = ht;
	hat->hat_ht_hash[h] = ht;
	HTABLE_EXIT(h);

	/*
	 * make sure the page table physical page is not FREE
	 */
	if (page_resv(1, KM_NOSLEEP) == 0)
		panic("page_resv() failed in ptable alloc");

	pp = boot_claim_page(pfn);
	ASSERT(pp != NULL);
	page_downgrade(pp);
	/*
	 * Record in the page_t that it is a pagetable, for segkpm setup.
	 */
	if (kpm_vbase)
		pp->p_index = 1;

	/*
	 * Count valid mappings and recursively attach lower level pagetables.
	 */
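	/*
	 * (The loop below goes back through kbm_remap_window() after each
	 * recursive htable_attach() call because, presumably, the recursion
	 * reuses the single boot-time remap window and leaves it pointing
	 * at a different page table.)
	 */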
	ptep = kbm_remap_window(pfn_to_pa(pfn), 0);
	for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) {
		if (mmu.pae_hat)
			pte = ptep[i];
		else
			pte = ((x86pte32_t *)ptep)[i];
		if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) {
			++ht->ht_valid_cnt;
			if (!PTE_ISPAGE(pte, level)) {
				htable_attach(hat, base, level - 1,
				    ht, PTE2PFN(pte, level));
				ptep = kbm_remap_window(pfn_to_pa(pfn), 0);
			}
		}
		base += LEVEL_SIZE(level);
		if (base == mmu.hole_start)
			base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK;
	}

	/*
	 * As long as all the mappings we had were below kernel base
	 * we can release the htable.
	 */
	if (base < kernelbase)
		htable_release(ht);
}

/*
 * Walk through a given htable looking for the first valid entry. This
 * routine takes both a starting and ending address. The starting address
 * is required to be within the htable provided by the caller, but there is
 * no such restriction on the ending address.
 *
 * If the routine finds a valid entry in the htable (at or beyond the
 * starting address), the PTE (and its address) will be returned.
 * This PTE may correspond to either a page or a pagetable - it is the
 * caller's responsibility to determine which. If no valid entry is
 * found, 0 (and invalid PTE) and the next unexamined address will be
 * returned.
 *
 * The loop has been carefully coded for optimization.
 */
static x86pte_t
htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr)
{
	uint_t e;
	x86pte_t found_pte = (x86pte_t)0;
	caddr_t pte_ptr;
	caddr_t end_pte_ptr;
	int l = ht->ht_level;
	uintptr_t va = *vap & LEVEL_MASK(l);
	size_t pgsize = LEVEL_SIZE(l);

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));

	/*
	 * Compute the starting index and ending virtual address
	 */
	e = htable_va2entry(va, ht);

	/*
	 * The following page table scan code knows that the valid
	 * bit of a PTE is in the lowest byte AND that x86 is little endian!!
	 */
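	/*
	 * (Concretely: the present bit is bit 0 of the PTE, and on a
	 * little-endian machine the least significant byte of the 8-byte
	 * (or 4-byte) PTE sits at the lowest address, so scanning the
	 * table through a char pointer and testing only that first byte
	 * per entry is enough to find valid entries.)
	 */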
/*
 * Walk through a given htable looking for the first valid entry.  This
 * routine takes both a starting and ending address.  The starting address
 * is required to be within the htable provided by the caller, but there is
 * no such restriction on the ending address.
 *
 * If the routine finds a valid entry in the htable (at or beyond the
 * starting address), the PTE (and its address) will be returned.
 * This PTE may correspond to either a page or a pagetable - it is the
 * caller's responsibility to determine which.  If no valid entry is
 * found, 0 (and invalid PTE) and the next unexamined address will be
 * returned.
 *
 * The loop has been carefully coded for optimization.
 */
static x86pte_t
htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr)
{
	uint_t e;
	x86pte_t found_pte = (x86pte_t)0;
	caddr_t pte_ptr;
	caddr_t end_pte_ptr;
	int l = ht->ht_level;
	uintptr_t va = *vap & LEVEL_MASK(l);
	size_t pgsize = LEVEL_SIZE(l);

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));

	/*
	 * Compute the starting index and ending virtual address
	 */
	e = htable_va2entry(va, ht);

	/*
	 * The following page table scan code knows that the valid
	 * bit of a PTE is in the lowest byte AND that x86 is little endian!!
	 */
	pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0);
	end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht));
	pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e);
	while (!PTE_ISVALID(*pte_ptr)) {
		va += pgsize;
		if (va >= eaddr)
			break;
		pte_ptr += mmu.pte_size;
		ASSERT(pte_ptr <= end_pte_ptr);
		if (pte_ptr == end_pte_ptr)
			break;
	}

	/*
	 * if we found a valid PTE, load the entire PTE
	 */
	if (va < eaddr && pte_ptr != end_pte_ptr)
		found_pte = GET_PTE((x86pte_t *)pte_ptr);
	x86pte_release_pagetable(ht);

#if defined(__amd64)
	/*
	 * deal with VA hole on amd64
	 */
	if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end)
		va = mmu.hole_end + va - mmu.hole_start;
#endif /* __amd64 */

	*vap = va;
	return (found_pte);
}
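
/*
 * A note on the byte-wise scan above: PT_VALID is bit 0 of every PTE,
 * and x86 is little endian, so the low byte of a PTE - whether the PTE
 * is 4 or 8 bytes wide - sits at the PTE's own address. Stepping a
 * caddr_t by mmu.pte_size and testing just that byte with
 * PTE_ISVALID(*pte_ptr) is therefore a cheap filter; the full atomic
 * load via GET_PTE() is only done once a valid entry has been found.
 */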
/*
 * Find the address and htable for the first populated translation at or
 * above the given virtual address.  The caller may also specify an upper
 * limit to the address range to search.  Uses level information to quickly
 * skip unpopulated sections of virtual address spaces.
 *
 * If not found returns NULL.  When found, returns the htable and virt addr
 * and has a hold on the htable.
 */
x86pte_t
htable_walk(
	struct hat *hat,
	htable_t **htp,
	uintptr_t *vaddr,
	uintptr_t eaddr)
{
	uintptr_t va = *vaddr;
	htable_t *ht;
	htable_t *prev = *htp;
	level_t l;
	level_t max_mapped_level;
	x86pte_t pte;

	ASSERT(eaddr > va);

	/*
	 * If this is a user address, then we know we need not look beyond
	 * kernelbase.
	 */
	ASSERT(hat == kas.a_hat || eaddr <= kernelbase ||
	    eaddr == HTABLE_WALK_TO_END);
	if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END)
		eaddr = kernelbase;

	/*
	 * If we're coming in with a previous page table, search it first
	 * without doing an htable_lookup(); this should be frequent.
	 */
	if (prev) {
		ASSERT(prev->ht_busy > 0);
		ASSERT(prev->ht_vaddr <= va);
		l = prev->ht_level;
		if (va <= HTABLE_LAST_PAGE(prev)) {
			pte = htable_scan(prev, &va, eaddr);

			if (PTE_ISPAGE(pte, l)) {
				*vaddr = va;
				*htp = prev;
				return (pte);
			}
		}

		/*
		 * We found nothing in the htable provided by the caller,
		 * so fall through and do the full search
		 */
		htable_release(prev);
	}

	/*
	 * Find the level of the largest pagesize used by this HAT.
	 */
	max_mapped_level = 0;
	for (l = 1; l <= mmu.max_page_level; ++l)
		if (hat->hat_pages_mapped[l] != 0)
			max_mapped_level = l;

	while (va < eaddr && va >= *vaddr) {
		ASSERT(!IN_VA_HOLE(va));

		/*
		 * Find lowest table with any entry for given address.
		 */
		for (l = 0; l <= TOP_LEVEL(hat); ++l) {
			ht = htable_lookup(hat, va, l);
			if (ht != NULL) {
				pte = htable_scan(ht, &va, eaddr);
				if (PTE_ISPAGE(pte, l)) {
					*vaddr = va;
					*htp = ht;
					return (pte);
				}
				htable_release(ht);
				break;
			}

			/*
			 * The ht is never NULL at the top level since
			 * the top level htable is created in hat_alloc().
			 */
			ASSERT(l < TOP_LEVEL(hat));

			/*
			 * No htable covers the address. If there is no
			 * larger page size that could cover it, we
			 * skip to the start of the next page table.
			 */
			if (l >= max_mapped_level) {
				va = NEXT_ENTRY_VA(va, l + 1);
				break;
			}
		}
	}

	*vaddr = 0;
	*htp = NULL;
	return (0);
}
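
/*
 * Example (an illustrative sketch): a typical caller iterates over a VA
 * range like this; process_mapping is a hypothetical callback. Note that
 * htable_walk() keeps a hold on the returned htable, and that on the
 * not-found return it has already set the htable pointer to NULL.
 *
 *	htable_t *ht = NULL;
 *	uintptr_t va = start;
 *	x86pte_t pte;
 *
 *	while ((pte = htable_walk(hat, &ht, &va, end)) != 0) {
 *		process_mapping(va, pte, ht->ht_level);
 *		va += LEVEL_SIZE(ht->ht_level);
 *	}
 *	if (ht != NULL)
 *		htable_release(ht);
 */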
/*
 * Find the htable and page table entry index of the given virtual address
 * with pagesize at or below given level.
 * If not found returns NULL.  When found, returns the htable, sets
 * entry, and has a hold on the htable.
 */
htable_t *
htable_getpte(
	struct hat *hat,
	uintptr_t vaddr,
	uint_t *entry,
	x86pte_t *pte,
	level_t level)
{
	htable_t *ht;
	level_t l;
	uint_t e;

	ASSERT(level <= mmu.max_page_level);

	for (l = 0; l <= level; ++l) {
		ht = htable_lookup(hat, vaddr, l);
		if (ht == NULL)
			continue;
		e = htable_va2entry(vaddr, ht);
		if (entry != NULL)
			*entry = e;
		if (pte != NULL)
			*pte = x86pte_get(ht, e);
		return (ht);
	}
	return (NULL);
}

/*
 * Find the htable and page table entry index of the given virtual address.
 * There must be a valid page mapped at the given address.
 * If not found returns NULL.  When found, returns the htable, sets
 * entry, and has a hold on the htable.
 */
htable_t *
htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry)
{
	htable_t *ht;
	uint_t e;
	x86pte_t pte;

	ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level);
	if (ht == NULL)
		return (NULL);

	if (entry)
		*entry = e;

	if (PTE_ISPAGE(pte, ht->ht_level))
		return (ht);
	htable_release(ht);
	return (NULL);
}
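
/*
 * Example (an illustrative sketch): probing one address for a mapping of
 * any supported pagesize, then dropping the hold:
 *
 *	uint_t entry;
 *	x86pte_t pte;
 *	htable_t *ht;
 *
 *	ht = htable_getpte(hat, va, &entry, &pte, mmu.max_page_level);
 *	if (ht != NULL) {
 *		(examine pte, e.g. via PTE2PFN(pte, ht->ht_level))
 *		htable_release(ht);
 *	}
 *
 * htable_getpage() is the stricter variant: it succeeds only when the
 * PTE found actually maps a page rather than a lower level pagetable.
 */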

void
htable_init()
{
	/*
	 * To save on kernel VA usage, we avoid debug information in 32 bit
	 * kernels.
	 */
#if defined(__amd64)
	int kmem_flags = KMC_NOHASH;
#elif defined(__i386)
	int kmem_flags = KMC_NOHASH | KMC_NODEBUG;
#endif

	/*
	 * initialize kmem caches
	 */
	htable_cache = kmem_cache_create("htable_t",
	    sizeof (htable_t), 0, NULL, NULL,
	    htable_reap, NULL, hat_memload_arena, kmem_flags);
}

/*
 * get the pte index for the virtual address in the given htable's pagetable
 */
uint_t
htable_va2entry(uintptr_t va, htable_t *ht)
{
	level_t l = ht->ht_level;

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));
	return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1));
}

/*
 * Given an htable and the index of a pte in it, return the virtual address
 * of the page.
 */
uintptr_t
htable_e2va(htable_t *ht, uint_t entry)
{
	level_t l = ht->ht_level;
	uintptr_t va;

	ASSERT(entry < HTABLE_NUM_PTES(ht));
	va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l));

	/*
	 * Need to skip over any VA hole in top level table
	 */
#if defined(__amd64)
	if (ht->ht_level == mmu.max_level && va >= mmu.hole_start)
		va += ((mmu.hole_end - mmu.hole_start) + 1);
#endif

	return (va);
}
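
/*
 * Worked example for the two routines above: in a level 0 htable with
 * 512 entries, a va that is 0x21000 bytes past ht_vaddr yields entry
 * index 0x21 ((va >> 12) & 511), and htable_e2va(ht, 0x21) maps straight
 * back to that va. In general, for entry < HTABLE_NUM_PTES(ht):
 *
 *	htable_va2entry(htable_e2va(ht, entry), ht) == entry
 *
 * This holds even across the amd64 VA hole, since the adjustment in
 * htable_e2va() only changes the sign extension bits above the index
 * field of the top level table.
 */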

/*
 * The code uses compare and swap instructions to read/write PTE's to
 * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems,
 * whereas on 64 bit systems an aligned load or store of a PTE will
 * naturally be atomic.
 *
 * The combination of using kpreempt_disable()/_enable() and the hci_mutex
 * is used to ensure that an interrupt won't overwrite a temporary mapping
 * while it's in use. If an interrupt thread tries to access a PTE, it will
 * yield briefly back to the pinned thread which holds the cpu's hci_mutex.
 */
void
x86pte_cpu_init(cpu_t *cpu)
{
	struct hat_cpu_info *hci;

	hci = kmem_zalloc(sizeof (*hci), KM_SLEEP);
	mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL);
	cpu->cpu_hat_info = hci;
}

void
x86pte_cpu_fini(cpu_t *cpu)
{
	struct hat_cpu_info *hci = cpu->cpu_hat_info;

	kmem_free(hci, sizeof (*hci));
	cpu->cpu_hat_info = NULL;
}

#ifdef __i386
/*
 * On 32 bit kernels, loading a 64 bit PTE is a little tricky
 */
x86pte_t
get_pte64(x86pte_t *ptr)
{
	volatile uint32_t *p = (uint32_t *)ptr;
	x86pte_t t;

	ASSERT(mmu.pae_hat != 0);
	for (;;) {
		t = p[0];
		t |= (uint64_t)p[1] << 32;
		if ((t & 0xffffffff) == p[0])
			return (t);
	}
}
#endif /* __i386 */
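
/*
 * Why the retry loop in get_pte64() is sufficient (a clarifying note):
 * the only fields hardware changes asynchronously are PT_REF/PT_MOD,
 * and both live in the low 32 bits. A hypothetical interleaving:
 *
 *	t = p[0];			(capture low half)
 *	(hardware sets PT_MOD, changing the low half)
 *	t |= (uint64_t)p[1] << 32;	(capture high half)
 *	(t & 0xffffffff) == p[0]	fails, so we retry
 *
 * A torn value pairing a stale low half with a newer high half is thus
 * never returned, and the loop terminates quickly in practice.
 */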

/*
 * Disable preemption and establish a mapping to the pagetable with the
 * given pfn. This is optimized for the case where it's the same
 * pfn as the one we last referenced from this CPU.
 */
static x86pte_t *
x86pte_access_pagetable(htable_t *ht, uint_t index)
{
	/*
	 * VLP pagetables are contained in the hat_t
	 */
	if (ht->ht_flags & HTABLE_VLP)
		return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index));
	return (x86pte_mapin(ht->ht_pfn, index, ht));
}

/*
 * map the given pfn into the page table window.
 */
/*ARGSUSED*/
x86pte_t *
x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht)
{
	x86pte_t *pteptr;
	x86pte_t pte;
	x86pte_t newpte;
	int x;

	ASSERT(pfn != PFN_INVALID);

	if (!khat_running) {
		caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1);
		return (PT_INDEX_PTR(va, index));
	}

	/*
	 * If kpm is available, use it.
	 */
	if (kpm_vbase)
		return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index));

	/*
	 * Disable preemption and grab the CPU's hci_mutex
	 */
	kpreempt_disable();
	ASSERT(CPU->cpu_hat_info != NULL);
	mutex_enter(&CPU->cpu_hat_info->hci_mutex);
	x = PWIN_TABLE(CPU->cpu_id);
	pteptr = (x86pte_t *)PWIN_PTE_VA(x);
	if (mmu.pae_hat)
		pte = *pteptr;
	else
		pte = *(x86pte32_t *)pteptr;

	newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx;
	newpte |= PT_WRITABLE;

	if (!PTE_EQUIV(newpte, pte)) {
		if (mmu.pae_hat)
			*pteptr = newpte;
		else
			*(x86pte32_t *)pteptr = newpte;
		mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
	}
	return (PT_INDEX_PTR(PWIN_VA(x), index));
}

/*
 * Release access to a page table.
 */
static void
x86pte_release_pagetable(htable_t *ht)
{
	/*
	 * nothing to do for VLP htables
	 */
	if (ht->ht_flags & HTABLE_VLP)
		return;

	x86pte_mapout();
}

void
x86pte_mapout(void)
{
	if (mmu.pwin_base == NULL || !khat_running)
		return;

	/*
	 * Drop the CPU's hci_mutex and restore preemption.
	 */
	mutex_exit(&CPU->cpu_hat_info->hci_mutex);
	kpreempt_enable();
}

/*
 * Atomic retrieval of a pagetable entry
 */
x86pte_t
x86pte_get(htable_t *ht, uint_t entry)
{
	x86pte_t pte;
	x86pte_t *ptep;

	/*
	 * Be careful that loading PAE entries in 32 bit kernel is atomic.
	 */
	ASSERT(entry < mmu.ptes_per_table);
	ptep = x86pte_access_pagetable(ht, entry);
	pte = GET_PTE(ptep);
	x86pte_release_pagetable(ht);
	return (pte);
}
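
/*
 * Example (an illustrative sketch) of the pairing the window code above
 * requires of a caller using the raw interface: when a pagetable window
 * (rather than kpm) is in use, the mapping holds the CPU's hci_mutex
 * with preemption disabled until mapped out.
 *
 *	x86pte_t *ptep = x86pte_mapin(pfn, 0, NULL);
 *	x86pte_t pte = GET_PTE(ptep);
 *	x86pte_mapout();
 *
 * Within this file the x86pte_access_pagetable()/x86pte_release_pagetable()
 * pair wraps the same protocol and additionally handles VLP pagetables.
 */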

/*
 * Atomic unconditional set of a page table entry. It returns the previous
 * value. For pre-existing mappings if the PFN changes, then we don't care
 * about the old pte's REF / MOD bits. If the PFN remains the same, we leave
 * the MOD/REF bits unchanged.
 *
 * If asked to overwrite a link to a lower page table with a large page
 * mapping, this routine returns the special value of LPAGE_ERROR. This
 * allows the upper HAT layers to retry with a smaller mapping size.
 */
x86pte_t
x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr)
{
	x86pte_t old;
	x86pte_t prev;
	x86pte_t *ptep;
	level_t l = ht->ht_level;
	x86pte_t pfn_mask = (l != 0) ? PT_PADDR_LGPG : PT_PADDR;
	x86pte_t n;
	uintptr_t addr = htable_e2va(ht, entry);
	hat_t *hat = ht->ht_hat;

	ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */
	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	if (ptr == NULL)
		ptep = x86pte_access_pagetable(ht, entry);
	else
		ptep = ptr;

	/*
	 * Install the new PTE. If remapping the same PFN, then
	 * copy existing REF/MOD bits to new mapping.
	 */
	do {
		prev = GET_PTE(ptep);
		n = new;
		if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask))
			n |= prev & (PT_REF | PT_MOD);

		/*
		 * Another thread may have installed this mapping already;
		 * flush the local TLB and be done.
		 */
		if (prev == n) {
			old = new;
			mmu_tlbflush_entry((caddr_t)addr);
			goto done;
		}

		/*
		 * Detect if we have a collision of installing a large
		 * page mapping where there already is a lower page table.
		 */
		if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE)) {
			old = LPAGE_ERROR;
			goto done;
		}

		old = CAS_PTE(ptep, prev, n);
	} while (old != prev);

	/*
	 * Do a TLB demap if needed, i.e. the old pte was valid.
	 *
	 * Note that a stale TLB writeback to the PTE here either can't happen
	 * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST
	 * mappings, but they were created with REF and MOD already set, so
	 * no stale writeback will happen.
	 *
	 * Segmap is the only place where remaps happen on the same pfn and
	 * for that we want to preserve the stale REF/MOD bits.
	 */
	if (old & PT_REF)
		hat_tlb_inval(hat, addr);

done:
	if (ptr == NULL)
		x86pte_release_pagetable(ht);
	return (old);
}
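
/*
 * Example of the contract above (an illustrative sketch): remapping an
 * entry to the same pfn with different permissions silently preserves
 * the hardware REF/MOD state, while a large page collision must be
 * retried by the caller at a smaller pagesize:
 *
 *	old = x86pte_set(ht, entry, MAKEPTE(pfn, l) | PT_WRITABLE, NULL);
 *	if (old == LPAGE_ERROR) {
 *		(drop to a level l - 1 htable and try again)
 *	}
 */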

/*
 * Atomic compare and swap of a page table entry. No TLB invalidates are done.
 * This is used for links between pagetables of different levels.
 * Note we always create these links with dirty/access set, so they should
 * never change.
 */
x86pte_t
x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new)
{
	x86pte_t pte;
	x86pte_t *ptep;

	ptep = x86pte_access_pagetable(ht, entry);
	pte = CAS_PTE(ptep, old, new);
	x86pte_release_pagetable(ht);
	return (pte);
}

/*
 * data structure for cross call information
 */
typedef struct xcall_inval {
	caddr_t		xi_addr;
	x86pte_t	xi_found;
	x86pte_t	xi_oldpte;
	x86pte_t	*xi_pteptr;
	processorid_t	xi_initiator;
} xcall_inval_t;

/*
 * Cross call service routine to invalidate TLBs. On the
 * initiating CPU, this first clears the PTE in memory.
 */
/*ARGSUSED*/
static int
x86pte_inval_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
{
	xcall_inval_t *xi = (xcall_inval_t *)a1;

	if (CPU->cpu_id == xi->xi_initiator)
		xi->xi_found = CAS_PTE(xi->xi_pteptr, xi->xi_oldpte, 0);

	mmu_tlbflush_entry(xi->xi_addr);
	return (0);
}

/*
 * Invalidate a page table entry as long as it currently maps something that
 * matches the value determined by expect.
 *
 * Also invalidates any TLB entries and returns the previous value of the PTE.
 */
x86pte_t
x86pte_inval(
	htable_t *ht,
	uint_t entry,
	x86pte_t expect,
	x86pte_t *pte_ptr)
{
	hat_t *hat = ht->ht_hat;
	x86pte_t *ptep;
	xcall_inval_t xi;
	cpuset_t cpus;

	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(ht->ht_level != VLP_LEVEL);

	if (pte_ptr != NULL)
		ptep = pte_ptr;
	else
		ptep = x86pte_access_pagetable(ht, entry);
	xi.xi_pteptr = ptep;
	xi.xi_addr = (caddr_t)htable_e2va(ht, entry);

	/*
	 * Set up a cross call to any CPUs using this HAT
	 */
	kpreempt_disable();
	xi.xi_initiator = CPU->cpu_id;
	CPUSET_ZERO(cpus);
	if (hat == kas.a_hat) {
		CPUSET_OR(cpus, khat_cpuset);
	} else {
		mutex_enter(&hat->hat_switch_mutex);
		CPUSET_OR(cpus, hat->hat_cpus);
		CPUSET_ADD(cpus, CPU->cpu_id);
	}

	/*
	 * Do the cross call to invalidate the PTE and flush TLBs.
	 * Note that the loop is needed to handle changes due to h/w updating
	 * of PT_MOD/PT_REF.
	 */
	do {
		xi.xi_oldpte = GET_PTE(ptep);
		if (expect != 0 &&
		    (xi.xi_oldpte & PT_PADDR) != (expect & PT_PADDR))
			break;
		if (panicstr == NULL)
			xc_wait_sync((xc_arg_t)&xi, NULL, NULL, X_CALL_HIPRI,
			    cpus, x86pte_inval_func);
		else
			(void) x86pte_inval_func((xc_arg_t)&xi, NULL, NULL);
	} while (xi.xi_found != xi.xi_oldpte);

	if (hat != kas.a_hat)
		mutex_exit(&hat->hat_switch_mutex);
	kpreempt_enable();

	if (pte_ptr == NULL)
		x86pte_release_pagetable(ht);

	return (xi.xi_oldpte);
}

/*
 * Change a page table entry if it currently matches the value in expect.
 */
x86pte_t
x86pte_update(
	htable_t *ht,
	uint_t entry,
	x86pte_t expect,
	x86pte_t new)
{
	x86pte_t *ptep;
	x86pte_t found;

	ASSERT(new != 0);
	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(ht->ht_level != VLP_LEVEL);

	ptep = x86pte_access_pagetable(ht, entry);
	found = CAS_PTE(ptep, expect, new);
	if (found == expect) {
		hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

		/*
		 * When removing write permission *and* clearing the
		 * MOD bit, check if a write happened via a stale
		 * TLB entry before the TLB shootdown finished.
		 *
		 * If it did happen, simply re-enable write permission and
		 * act like the original CAS failed.
		 */
		if ((expect & (PT_WRITABLE | PT_MOD)) == PT_WRITABLE &&
		    (new & (PT_WRITABLE | PT_MOD)) == 0 &&
		    (GET_PTE(ptep) & PT_MOD) != 0) {
			do {
				found = GET_PTE(ptep);
				found =
				    CAS_PTE(ptep, found, found | PT_WRITABLE);
			} while ((found & PT_WRITABLE) == 0);
		}
	}
	x86pte_release_pagetable(ht);
	return (found);
}
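
/*
 * Example (an illustrative sketch): higher HAT layers would typically
 * remove write permission with a compare-and-retry loop, since hardware
 * REF/MOD updates can change the PTE underneath us at any time:
 *
 *	x86pte_t expect, found;
 *
 *	do {
 *		expect = x86pte_get(ht, entry);
 *		if (!PTE_ISVALID(expect))
 *			break;
 *		found = x86pte_update(ht, entry, expect,
 *		    expect & ~PT_WRITABLE);
 *	} while (found != expect);
 *
 * The stale-MOD recovery inside x86pte_update() keeps this pattern safe
 * even when a write slips through a stale TLB entry during the shootdown.
 */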

/*
 * Copy page tables - this is just a little more complicated than the
 * previous routines. Note that it's also not atomic! It also is never
 * used for VLP pagetables.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t src_va;
	caddr_t dst_va;
	size_t size;
	x86pte_t *pteptr;
	x86pte_t pte;

	ASSERT(khat_running);
	ASSERT(!(dest->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));

	/*
	 * Acquire access to the CPU pagetable windows for the dest and source.
	 */
	dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);
	if (kpm_vbase) {
		src_va = (caddr_t)
		    PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry);
	} else {
		uint_t x = PWIN_SRC(CPU->cpu_id);

		/*
		 * Finish defining the src pagetable mapping
		 */
		src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
		pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx;
		pteptr = (x86pte_t *)PWIN_PTE_VA(x);
		if (mmu.pae_hat)
			*pteptr = pte;
		else
			*(x86pte32_t *)pteptr = pte;
		mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
	}

	/*
	 * now do the copy
	 */
	size = count << mmu.pte_size_shift;
	bcopy(src_va, dst_va, size);

	x86pte_release_pagetable(dest);
}
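
/*
 * Design note on the copy above: the destination already occupies this
 * CPU's PWIN_TABLE window (via x86pte_access_pagetable()), so in the
 * non-kpm case the source has to come in through the separate PWIN_SRC
 * window - that is why the source PTE is installed by hand here instead
 * of through x86pte_mapin(). The source window also omits PT_WRITABLE,
 * since the source pagetable is only ever read.
 */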

/*
 * Zero page table entries - Note this doesn't use atomic stores!
 */
static void
x86pte_zero(htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t dst_va;
	size_t size;

	/*
	 * Map in the page table to be zeroed.
	 */
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_VLP));

	dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);

	size = count << mmu.pte_size_shift;
	ASSERT(size > BLOCKZEROALIGN);
#ifdef __i386
	if ((x86_feature & X86_SSE2) == 0)
		bzero(dst_va, size);
	else
#endif
		block_zero_no_xmm(dst_va, size);

	x86pte_release_pagetable(dest);
}

/*
 * Called to ensure that all pagetables are in the system dump
 */
void
hat_dump(void)
{
	hat_t *hat;
	uint_t h;
	htable_t *ht;

	/*
	 * Dump all page tables
	 */
	for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) {
		for (h = 0; h < hat->hat_num_hash; ++h) {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				if ((ht->ht_flags & HTABLE_VLP) == 0)
					dump_page(ht->ht_pfn);
			}
		}
	}
}