/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/bootconf.h>
#include <sys/dumphdr.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <vm/hat_i86.h>
#include <sys/cmn_err.h>

kmem_cache_t *htable_cache;
extern cpuset_t khat_cpuset;

/*
 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT,
 * is used in order to facilitate testing of the htable_steal() code.
 * By resetting htable_reserve_amount to a lower value, we can force
 * stealing to occur.  The reserve amount is a guess to get us through boot.
 */
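/*
 * For example, a test kernel can force the stealing path early by adding
 * a line such as "set htable_reserve_amount = 10" to /etc/system (the
 * value 10 is illustrative; any value well below the default works).
 */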
#define	HTABLE_RESERVE_AMOUNT	(200)
uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT;
kmutex_t htable_reserve_mutex;
uint_t htable_reserve_cnt;
htable_t *htable_reserve_pool;

/*
 * This variable exists so that htable_steal_passes can be tuned
 * via /etc/system.
 */
uint_t htable_steal_passes = 10;

/*
 * mutex stuff for access to htable hash
 */
#define	NUM_HTABLE_MUTEX 128
kmutex_t htable_mutex[NUM_HTABLE_MUTEX];
#define	HTABLE_MUTEX_HASH(h) ((h) & (NUM_HTABLE_MUTEX - 1))

#define	HTABLE_ENTER(h)	mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
#define	HTABLE_EXIT(h)	mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]);

/*
 * forward declarations
 */
static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr);
static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr);
static void htable_free(htable_t *ht);
static x86pte_t *x86pte_access_pagetable(htable_t *ht);
static void x86pte_release_pagetable(htable_t *ht);
static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old,
	x86pte_t new);

/*
 * Address used for kernel page tables. See ptable_alloc() below.
 */
uintptr_t ptable_va = 0;
size_t	ptable_sz = 2 * MMU_PAGESIZE;

/*
 * A counter to track if we are stealing or reaping htables. When non-zero
 * htable_free() will directly free htables (either to the reserve or kmem)
 * instead of putting them in a hat's htable cache.
 */
uint32_t htable_dont_cache = 0;

/*
 * Track the number of active pagetables, so we can know how many to reap
 */
static uint32_t active_ptables = 0;

/*
 * Allocate a memory page for a hardware page table.
 *
 * The pages allocated for page tables are currently gotten in a hacked up
 * way. It works for now, but really needs to be fixed up a bit.
 *
 * During boot: The boot loader controls physical memory allocation via
 * boot_alloc(). To avoid conflict with vmem, we just do boot_alloc()s with
 * addresses less than kernelbase. These addresses are ignored when we take
 * over mappings from the boot loader.
 *
 * Post-boot: we currently use page_create_va() on the kvp with fake offsets,
 * segments and virt address.
 * This is pretty bogus, but was copied from the
 * old hat_i86.c code. A better approach would be to have a custom
 * page_get_physical() interface that can specify either mnode random or
 * mnode local and takes a page from whatever color has the MOST available -
 * this would have a minimal impact on page coloring.
 *
 * For now the htable pointer in ht is only used to compute a unique vnode
 * offset for the page.
 */
static void
ptable_alloc(htable_t *ht)
{
	pfn_t pfn;
	page_t *pp;
	u_offset_t offset;
	static struct seg tmpseg;
	static int first_time = 1;

	/*
	 * Allocating the associated hardware page table is very different
	 * before boot has finished.  We get a physical page from boot
	 * w/o eating up any kernel address space.
	 */
	ht->ht_pfn = PFN_INVALID;
	HATSTAT_INC(hs_ptable_allocs);
	atomic_add_32(&active_ptables, 1);

	if (use_boot_reserve) {
		ASSERT(ptable_va != 0);

		/*
		 * Allocate, then demap the ptable_va, so that we're
		 * sure there exist page table entries for the addresses
		 */
		if (first_time) {
			first_time = 0;
			if ((uintptr_t)BOP_ALLOC(bootops, (caddr_t)ptable_va,
			    ptable_sz, BO_NO_ALIGN) != ptable_va)
				panic("BOP_ALLOC failed");

			hat_boot_demap(ptable_va);
			hat_boot_demap(ptable_va + MMU_PAGESIZE);
		}

		pfn = ((uintptr_t)BOP_EALLOC(bootops, 0, MMU_PAGESIZE,
		    BO_NO_ALIGN, BOPF_X86_ALLOC_PHYS)) >> MMU_PAGESHIFT;
		if (page_resv(1, KM_NOSLEEP) == 0)
			panic("page_resv() failed in ptable alloc");

		pp = page_numtopp_nolock(pfn);
		ASSERT(pp != NULL);
		if (pp->p_szc != 0)
			page_boot_demote(pp);
		pp = page_numtopp(pfn, SE_EXCL);
		ASSERT(pp != NULL);
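
		/*
		 * At this point the page came straight from the boot
		 * loader's physical allocator (BOP_EALLOC with
		 * BOPF_X86_ALLOC_PHYS), so no kernel VA was consumed;
		 * the locking dance above just leaves pp held SE_EXCL.
		 */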
	} else {
		/*
		 * Post boot get a page for the table.
		 *
		 * The first check is to see if there is memory in
		 * the system. If we drop to throttlefree, then fail
		 * the ptable_alloc() and let the stealing code kick in.
		 * Note that we have to do this test here, since the test in
		 * page_create_throttle() would let the NOSLEEP allocation
		 * go through and deplete the page reserves.
		 */
		if (freemem <= throttlefree + 1)
			return;

		/*
		 * This code is temporary, so don't review too critically.
		 * I'm awaiting a new phys page allocator from Kit -- Joe
		 *
		 * We need to assign an offset for the page to call
		 * page_create_va. To avoid conflicts with other pages,
		 * we get creative with the offset.
		 * for 32 bits, we pick an offset > 4Gig
		 * for 64 bits, pick an offset somewhere in the VA hole.
		 */
		offset = (uintptr_t)ht - kernelbase;
		offset <<= MMU_PAGESHIFT;
#if defined(__amd64)
		offset += mmu.hole_start;	/* something in VA hole */
#else
		offset += 1ULL << 40;		/* something > 4 Gig */
#endif

		if (page_resv(1, KM_NOSLEEP) == 0)
			return;

#ifdef DEBUG
		pp = page_exists(&kvp, offset);
		if (pp != NULL)
			panic("ptable already exists %p", pp);
#endif
		pp = page_create_va(&kvp, offset, MMU_PAGESIZE,
		    PG_EXCL | PG_NORELOC, &tmpseg,
		    (void *)((uintptr_t)ht << MMU_PAGESHIFT));
		if (pp == NULL)
			return;
		page_io_unlock(pp);
		page_hashout(pp, NULL);
		pfn = pp->p_pagenum;
	}
	page_downgrade(pp);
	ASSERT(PAGE_SHARED(pp));

	if (pfn == PFN_INVALID)
		panic("ptable_alloc(): Invalid PFN!!");
	ht->ht_pfn = pfn;
}
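
/*
 * A note on the fake offset used above: it only needs to be unique per
 * page table page. Since each htable_t address is unique,
 * ((uintptr_t)ht - kernelbase) << MMU_PAGESHIFT cannot collide with
 * another table's offset, and biasing the result into the VA hole
 * (64-bit) or above 4G (32-bit) keeps it away from offsets used by
 * regular kvp pages.
 */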

/*
 * Free an htable's associated page table page. See the comments
 * for ptable_alloc().
 */
static void
ptable_free(htable_t *ht)
{
	pfn_t pfn = ht->ht_pfn;
	page_t *pp;

	/*
	 * need to destroy the page used for the pagetable
	 */
	ASSERT(pfn != PFN_INVALID);
	HATSTAT_INC(hs_ptable_frees);
	atomic_add_32(&active_ptables, -1);
	pp = page_numtopp_nolock(pfn);
	if (pp == NULL)
		panic("ptable_free(): no page for pfn!");
	ASSERT(PAGE_SHARED(pp));
	ASSERT(pfn == pp->p_pagenum);

	/*
	 * Get an exclusive lock, might have to wait for a kmem reader.
	 */
	if (!page_tryupgrade(pp)) {
		page_unlock(pp);
		/*
		 * RFE: we could change this to not loop forever
		 * George Cameron had some idea on how to do that.
		 * For now looping works - it's just like sfmmu.
		 */
		while (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_RECLAIM))
			continue;
	}
	page_free(pp, 1);
	page_unresv(1);
	ht->ht_pfn = PFN_INVALID;
}

/*
 * Put one htable on the reserve list.
 */
static void
htable_put_reserve(htable_t *ht)
{
	ht->ht_hat = NULL;		/* no longer tied to a hat */
	ASSERT(ht->ht_pfn == PFN_INVALID);
	HATSTAT_INC(hs_htable_rputs);
	mutex_enter(&htable_reserve_mutex);
	ht->ht_next = htable_reserve_pool;
	htable_reserve_pool = ht;
	++htable_reserve_cnt;
	mutex_exit(&htable_reserve_mutex);
}

/*
 * Take one htable from the reserve.
 */
static htable_t *
htable_get_reserve(void)
{
	htable_t *ht = NULL;

	mutex_enter(&htable_reserve_mutex);
	if (htable_reserve_cnt != 0) {
		ht = htable_reserve_pool;
		ASSERT(ht != NULL);
		ASSERT(ht->ht_pfn == PFN_INVALID);
		htable_reserve_pool = ht->ht_next;
		--htable_reserve_cnt;
		HATSTAT_INC(hs_htable_rgets);
	}
	mutex_exit(&htable_reserve_mutex);
	return (ht);
}

/*
 * Allocate initial htables with page tables and put them on the kernel hat's
 * cache list.
 */
void
htable_initial_reserve(uint_t count)
{
	htable_t *ht;
	hat_t *hat = kas.a_hat;

	count += HTABLE_RESERVE_AMOUNT;
	while (count > 0) {
		ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP);
		ASSERT(ht != NULL);

		ASSERT(use_boot_reserve);
		ht->ht_hat = kas.a_hat;	/* so htable_free() works */
		ht->ht_flags = 0;	/* so x86pte_zero works */
		ptable_alloc(ht);
		if (ht->ht_pfn == PFN_INVALID)
			panic("ptable_alloc() failed");

		x86pte_zero(ht, 0, mmu.ptes_per_table);

		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		--count;
	}
}
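
/*
 * The reserve pool above is a simple LIFO free list threaded through
 * ht_next and protected by htable_reserve_mutex. The pattern the rest
 * of this file relies on (illustrative, not a new interface):
 *
 *	htable_put_reserve(ht);		-- push one bare htable
 *	ht = htable_get_reserve();	-- pop; returns NULL when empty
 *
 * Callers of htable_get_reserve() must handle a NULL return.
 */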

/*
 * Readjust the reserves after a thread finishes using them.
 *
 * The first time this is called post boot, we'll also clear out the
 * extra boot htables that were put in the kernel hat's cache list.
 */
void
htable_adjust_reserve()
{
	static int first_time = 1;
	htable_t *ht;

	ASSERT(curthread != hat_reserves_thread);

	/*
	 * The first time this is called after we can steal, we free up the
	 * kernel's cache htable list. It has lots of extra htable/page
	 * tables that were allocated for boot up.
	 */
	if (first_time) {
		first_time = 0;
		while ((ht = kas.a_hat->hat_ht_cached) != NULL) {
			kas.a_hat->hat_ht_cached = ht->ht_next;
			ASSERT(ht->ht_hat == kas.a_hat);
			ptable_free(ht);
			htable_put_reserve(ht);
		}
		return;
	}

	/*
	 * Free any excess htables in the reserve list
	 */
	while (htable_reserve_cnt > htable_reserve_amount) {
		ht = htable_get_reserve();
		if (ht == NULL)
			return;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		kmem_cache_free(htable_cache, ht);
	}
}


/*
 * This routine steals htables from user processes for htable_alloc() or
 * for htable_reap().
 */
static htable_t *
htable_steal(uint_t cnt)
{
	hat_t		*hat = kas.a_hat;	/* list starts with khat */
	htable_t	*list = NULL;
	htable_t	*ht;
	htable_t	*higher;
	uint_t		h;
	uint_t		e;
	uintptr_t	va;
	x86pte_t	pte;
	uint_t		stolen = 0;
	uint_t		pass;
	uint_t		threshhold;

	/*
	 * Limit htable_steal_passes to something reasonable
	 */
	if (htable_steal_passes == 0)
		htable_steal_passes = 1;
	if (htable_steal_passes > mmu.ptes_per_table)
		htable_steal_passes = mmu.ptes_per_table;

	/*
	 * Loop through all hats. The 1st pass takes cached htables that
	 * aren't in use. The later passes steal by removing mappings, too.
	 */
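	/*
	 * threshhold grows with each pass; the victim test below skips
	 * any page table whose ht_valid_cnt is below it, so later passes
	 * consider progressively fuller page tables for stealing.
	 */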
	atomic_add_32(&htable_dont_cache, 1);
	for (pass = 1; pass <= htable_steal_passes && stolen < cnt; ++pass) {
		threshhold = pass * mmu.ptes_per_table / htable_steal_passes;
		hat = kas.a_hat->hat_next;
		for (;;) {

			/*
			 * move to next hat
			 */
			mutex_enter(&hat_list_lock);
			hat->hat_flags &= ~HAT_VICTIM;
			cv_broadcast(&hat_list_cv);
			do {
				hat = hat->hat_prev;
			} while (hat->hat_flags & HAT_VICTIM);
			if (stolen == cnt || hat == kas.a_hat->hat_next) {
				mutex_exit(&hat_list_lock);
				break;
			}
			hat->hat_flags |= HAT_VICTIM;
			mutex_exit(&hat_list_lock);

			/*
			 * Take any htables from the hat's cached "free" list.
			 */
			hat_enter(hat);
			while ((ht = hat->hat_ht_cached) != NULL &&
			    stolen < cnt) {
				hat->hat_ht_cached = ht->ht_next;
				ht->ht_next = list;
				list = ht;
				++stolen;
			}
			hat_exit(hat);

			/*
			 * Don't steal on first pass.
			 */
			if (pass == 1 || stolen == cnt)
				continue;

			/*
			 * search the active htables for one to steal
			 */
			for (h = 0; h < hat->hat_num_hash && stolen < cnt;
			    ++h) {
				higher = NULL;
				HTABLE_ENTER(h);
				for (ht = hat->hat_ht_hash[h]; ht;
				    ht = ht->ht_next) {

					/*
					 * Can we rule out reaping?
					 */
					if (ht->ht_busy != 0 ||
					    (ht->ht_flags & HTABLE_SHARED_PFN) ||
					    ht->ht_level == TOP_LEVEL(hat) ||
					    (ht->ht_level >=
					    mmu.max_page_level &&
					    ht->ht_valid_cnt > 0) ||
					    ht->ht_valid_cnt < threshhold ||
					    ht->ht_lock_cnt != 0)
						continue;

					/*
					 * Increment busy so the htable can't
					 * disappear. We drop the htable mutex
					 * to avoid deadlocks with
					 * hat_pageunload() and the hment mutex
					 * while we call hat_pte_unmap()
					 */
					++ht->ht_busy;
					HTABLE_EXIT(h);

					/*
					 * Try stealing.
					 * - unload and invalidate all PTEs
					 */
					for (e = 0, va = ht->ht_vaddr;
					    e < ht->ht_num_ptes &&
					    ht->ht_valid_cnt > 0 &&
					    ht->ht_busy == 1 &&
					    ht->ht_lock_cnt == 0;
					    ++e, va += MMU_PAGESIZE) {
						pte = x86pte_get(ht, e);
						if (!PTE_ISVALID(pte))
							continue;
						hat_pte_unmap(ht, e,
						    HAT_UNLOAD, pte, NULL);
					}

					/*
					 * Reacquire htable lock. If we didn't
					 * remove all mappings in the table,
					 * or another thread added a new mapping
					 * behind us, give up on this table.
					 */
					HTABLE_ENTER(h);
					if (ht->ht_busy != 1 ||
					    ht->ht_valid_cnt != 0 ||
					    ht->ht_lock_cnt != 0) {
						--ht->ht_busy;
						continue;
					}

					/*
					 * Steal it and unlink the page table.
					 */
					higher = ht->ht_parent;
					unlink_ptp(higher, ht, ht->ht_vaddr);

					/*
					 * remove from the hash list
					 */
					if (ht->ht_next)
						ht->ht_next->ht_prev =
						    ht->ht_prev;

					if (ht->ht_prev) {
						ht->ht_prev->ht_next =
						    ht->ht_next;
					} else {
						ASSERT(hat->hat_ht_hash[h] ==
						    ht);
						hat->hat_ht_hash[h] =
						    ht->ht_next;
					}

					/*
					 * Break to outer loop to release the
					 * higher (ht_parent) pagetable. This
					 * spreads out the pain caused by
					 * pagefaults.
					 */
					ht->ht_next = list;
					list = ht;
					++stolen;

					/*
					 * If this is the last steal, then move
					 * the hat list head, so that we start
					 * here next time.
					 */
					if (stolen == cnt) {
						mutex_enter(&hat_list_lock);
						kas.a_hat->hat_next = hat;
						mutex_exit(&hat_list_lock);
					}
					break;
				}
				HTABLE_EXIT(h);
				if (higher != NULL)
					htable_release(higher);
			}
		}
	}
	atomic_add_32(&htable_dont_cache, -1);
	return (list);
}
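
/*
 * Note that htable_steal() returns its victims as a singly linked list
 * threaded through ht_next; the callers (htable_reap() below and
 * htable_alloc()) consume that list one entry at a time.
 */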

/*
 * This is invoked from kmem when the system is low on memory.  We try
 * to free hments, htables, and ptables to improve the memory situation.
 */
/*ARGSUSED*/
static void
htable_reap(void *handle)
{
	uint_t		reap_cnt;
	htable_t	*list;
	htable_t	*ht;

	HATSTAT_INC(hs_reap_attempts);
	if (!can_steal_post_boot)
		return;

	/*
	 * Try to reap 5% of the page tables bounded by a maximum of
	 * 5% of physmem and a minimum of 10.
	 */
	reap_cnt = MAX(MIN(physmem / 20, active_ptables / 20), 10);

	/*
	 * Let htable_steal() do the work, we just call htable_free()
	 */
	list = htable_steal(reap_cnt);
	while ((ht = list) != NULL) {
		list = ht->ht_next;
		HATSTAT_INC(hs_reaped);
		htable_free(ht);
	}

	/*
	 * Free up excess reserves
	 */
	htable_adjust_reserve();
	hment_adjust_reserve();
}

/*
 * allocate an htable, stealing one or using the reserve if necessary
 */
static htable_t *
htable_alloc(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	htable_t	*ht = NULL;
	uint_t		is_vlp;
	uint_t		is_bare = 0;
	uint_t		need_to_zero = 1;
	int		kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_alloc(): level %d out of range\n", level);

	is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
	if (is_vlp || shared != NULL)
		is_bare = 1;
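
	/*
	 * A "bare" htable has no page table page of its own: a VLP
	 * htable's ht_pfn stays PFN_INVALID, and a shared htable reuses
	 * the pfn of the htable it shares from (see below).
	 */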
	/*
	 * First reuse a cached htable from the hat_ht_cached field, this
	 * avoids unnecessary trips through kmem/page allocators. This is also
	 * what happens during use_boot_reserve.
	 */
	if (hat->hat_ht_cached != NULL && !is_bare) {
		hat_enter(hat);
		ht = hat->hat_ht_cached;
		if (ht != NULL) {
			hat->hat_ht_cached = ht->ht_next;
			need_to_zero = 0;
			/* XX64 ASSERT() they're all zero somehow */
			ASSERT(ht->ht_pfn != PFN_INVALID);
		}
		hat_exit(hat);
	}

	if (ht == NULL) {
		ASSERT(!use_boot_reserve);
		/*
		 * When allocating for hat_memload_arena, we use the reserve.
		 * Also use reserves if we are in a panic().
		 */
		if (curthread == hat_reserves_thread || panicstr != NULL) {
			ASSERT(panicstr != NULL || !is_bare);
			ASSERT(panicstr != NULL ||
			    curthread == hat_reserves_thread);
			ht = htable_get_reserve();
		} else {
			/*
			 * Donate successful htable allocations to the reserve.
			 */
			for (;;) {
				ASSERT(curthread != hat_reserves_thread);
				ht = kmem_cache_alloc(htable_cache, kmflags);
				if (ht == NULL)
					break;
				ht->ht_pfn = PFN_INVALID;
				if (curthread == hat_reserves_thread ||
				    panicstr != NULL ||
				    htable_reserve_cnt >= htable_reserve_amount)
					break;
				htable_put_reserve(ht);
			}
		}

		/*
		 * allocate a page for the hardware page table if needed
		 */
		if (ht != NULL && !is_bare) {
			ptable_alloc(ht);
			if (ht->ht_pfn == PFN_INVALID) {
				kmem_cache_free(htable_cache, ht);
				ht = NULL;
			}
		}
	}

	/*
	 * if allocations failed resort to stealing
	 */
	if (ht == NULL && can_steal_post_boot) {
		ht = htable_steal(1);
		HATSTAT_INC(hs_steals);

		/*
		 * if we had to steal for a bare htable, release the
		 * page for the pagetable
		 */
		if (ht != NULL && is_bare)
			ptable_free(ht);
	}

	/*
	 * All attempts to allocate or steal failed...
	 */
	if (ht == NULL)
		panic("htable_alloc(): couldn't steal\n");
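
	/*
	 * To recap the fallback order above: the hat's cached list; the
	 * reserve pool (when called by the reserves thread or in panic);
	 * otherwise kmem, donating extras to the reserve; and finally
	 * stealing from another process.
	 */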

	/*
	 * Shared page tables have all entries locked and entries may not
	 * be added or deleted.
	 */
	ht->ht_flags = 0;
	if (shared != NULL) {
		ASSERT(level == 0);
		ASSERT(shared->ht_valid_cnt > 0);
		ht->ht_flags |= HTABLE_SHARED_PFN;
		ht->ht_pfn = shared->ht_pfn;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;		/* updated in hat_share() */
		ht->ht_shares = shared;
		need_to_zero = 0;
	} else {
		ht->ht_shares = NULL;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;
	}

	/*
	 * setup flags, etc. for VLP htables
	 */
	if (is_vlp) {
		ht->ht_flags |= HTABLE_VLP;
		ht->ht_num_ptes = VLP_NUM_PTES;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		need_to_zero = 0;
	} else if (level == mmu.max_level) {
		ht->ht_num_ptes = mmu.top_level_count;
	} else {
		ht->ht_num_ptes = mmu.ptes_per_table;
	}

	/*
	 * fill in the htable
	 */
	ht->ht_hat = hat;
	ht->ht_parent = NULL;
	ht->ht_vaddr = vaddr;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;

	/*
	 * Zero out any freshly allocated page table
	 */
	if (need_to_zero)
		x86pte_zero(ht, 0, mmu.ptes_per_table);
	return (ht);
}

/*
 * Free up an htable, either to a hat's cached list, the reserves or
 * back to kmem.
 */
static void
htable_free(htable_t *ht)
{
	hat_t	*hat = ht->ht_hat;

	/*
	 * If the process isn't exiting, cache the free htable in the hat
	 * structure. We always do this for the boot reserve. We don't
	 * do this if the hat is exiting or we are stealing/reaping htables.
	 */
	if (hat != NULL &&
	    !(ht->ht_flags & HTABLE_SHARED_PFN) &&
	    (use_boot_reserve ||
	    (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
		ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
		ASSERT(ht->ht_pfn != PFN_INVALID);
		hat_enter(hat);
		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		hat_exit(hat);
		return;
	}

	/*
	 * If we have a hardware page table, free it.
	 * We don't free page tables that are accessed by sharing someone
	 * else's page tables.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
		ht->ht_pfn = PFN_INVALID;
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht);
	}

	/*
	 * If we are the thread using the reserves, put free htables
	 * into reserves.
	 */
	if (curthread == hat_reserves_thread ||
	    htable_reserve_cnt < htable_reserve_amount)
		htable_put_reserve(ht);
	else
		kmem_cache_free(htable_cache, ht);
}


/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all left over
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t *ht;
	int h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_add_32(&htable_dont_cache, 1);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_add_32(&htable_dont_cache, -1);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}
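
	/*
	 * In both paths htable_free() will not put these htables back on
	 * the hat's cache list: the reaping case above holds
	 * htable_dont_cache elevated, and the freeing case has
	 * HAT_FREEING set.
	 */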

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
{
	uint_t entry = htable_va2entry(vaddr, higher);
	x86pte_t expect = MAKEPTP(old->ht_pfn, old->ht_level);
	x86pte_t found;

	ASSERT(higher->ht_busy > 0);
	ASSERT(higher->ht_valid_cnt > 0);
	ASSERT(old->ht_valid_cnt == 0);
	found = x86pte_cas(higher, entry, expect, 0);
	if (found != expect)
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);
	HTABLE_DEC(higher->ht_valid_cnt);
}

/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t entry = htable_va2entry(vaddr, higher);
	x86pte_t newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);
}
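
/*
 * link_ptp()/unlink_ptp() above update the parent's PTP entry with a
 * compare-and-swap rather than a plain store, so a racing software
 * update is detected (and panics) instead of being silently
 * overwritten; link_ptp() tolerates only a stray PT_REF bit that the
 * hardware may have set.
 */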

/*
 * Release of an htable.
 *
 * During process exit, some empty page tables are not unlinked - hat_free_end()
 * cleans them up. Upper level pagetables (mmu.max_page_level and higher) are
 * only released during hat_free_end() or by htable_steal(). We always
 * release SHARED page tables.
 */
void
htable_release(htable_t *ht)
{
	uint_t		hashval;
	htable_t	*shared;
	htable_t	*higher;
	hat_t		*hat;
	uintptr_t	va;
	level_t		level;

	while (ht != NULL) {
		shared = NULL;
		for (;;) {
			hat = ht->ht_hat;
			va = ht->ht_vaddr;
			level = ht->ht_level;
			hashval = HTABLE_HASH(hat, va, level);

			/*
			 * The common case is that this isn't the last use of
			 * an htable so we don't want to free the htable.
			 */
			HTABLE_ENTER(hashval);
			ASSERT(ht->ht_lock_cnt == 0 || ht->ht_valid_cnt > 0);
			ASSERT(ht->ht_valid_cnt >= 0);
			ASSERT(ht->ht_busy > 0);
			if (ht->ht_valid_cnt > 0)
				break;
			if (ht->ht_busy > 1)
				break;

			/*
			 * we always release empty shared htables
			 */
			if (!(ht->ht_flags & HTABLE_SHARED_PFN)) {

				/*
				 * don't release if in address space tear down
				 */
				if (hat->hat_flags & HAT_FREEING)
					break;

				/*
				 * At and above max_page_level, free if it's for
				 * a boot-time kernel mapping below kernelbase.
				 */
				if (level >= mmu.max_page_level &&
				    (hat != kas.a_hat || va >= kernelbase))
					break;
			}

			/*
			 * remember if we destroy an htable that shares its PFN
			 * from elsewhere
			 */
			if (ht->ht_flags & HTABLE_SHARED_PFN) {
				ASSERT(ht->ht_level == 0);
				ASSERT(shared == NULL);
				shared = ht->ht_shares;
				HATSTAT_INC(hs_htable_unshared);
			}

			/*
			 * Handle release of a table and freeing the htable_t.
			 * Unlink it from the table higher (ie. ht_parent).
			 */
			ASSERT(ht->ht_lock_cnt == 0);
			higher = ht->ht_parent;
			ASSERT(higher != NULL);

			/*
			 * Unlink the pagetable.
			 */
			unlink_ptp(higher, ht, va);

			/*
			 * When any top level VLP page table entry changes, we
			 * must issue a reload of cr3 on all processors.
			 */
			if ((hat->hat_flags & HAT_VLP) &&
			    level == VLP_LEVEL - 1)
				hat_demap(hat, DEMAP_ALL_ADDR);

			/*
			 * remove this htable from its hash list
			 */
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[hashval] == ht);
				hat->hat_ht_hash[hashval] = ht->ht_next;
			}
			HTABLE_EXIT(hashval);
			htable_free(ht);
			ht = higher;
		}

		ASSERT(ht->ht_busy >= 1);
		--ht->ht_busy;
		HTABLE_EXIT(hashval);

		/*
		 * If we released a shared htable, do a release on the htable
		 * from which it shared
		 */
		ht = shared;
	}
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found acquires a hold that eventually needs to be htable_release()d
 */
htable_t *
htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level)
{
	uintptr_t	base;
	uint_t		hashval;
	htable_t	*ht = NULL;

	ASSERT(level >= 0);
	ASSERT(level <= TOP_LEVEL(hat));

	if (level == TOP_LEVEL(hat))
		base = 0;
	else
		base = vaddr & LEVEL_MASK(level + 1);

	hashval = HTABLE_HASH(hat, base, level);
	HTABLE_ENTER(hashval);
	for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) {
		if (ht->ht_hat == hat &&
		    ht->ht_vaddr == base &&
		    ht->ht_level == level)
			break;
	}
	if (ht)
		++ht->ht_busy;

	HTABLE_EXIT(hashval);
	return (ht);
}
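
/*
 * ht_busy acts as a hold count: htable_lookup() and htable_acquire()
 * increment it under the hash mutex and htable_release() decrements it;
 * an htable can only be freed or stolen once the last hold is dropped.
 */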

/*
 * Acquires a hold on a known htable (from a locked hment entry).
 */
void
htable_acquire(htable_t *ht)
{
	hat_t		*hat = ht->ht_hat;
	level_t		level = ht->ht_level;
	uintptr_t	base = ht->ht_vaddr;
	uint_t		hashval = HTABLE_HASH(hat, base, level);

	HTABLE_ENTER(hashval);
#ifdef DEBUG
	/*
	 * make sure the htable is there
	 */
	{
		htable_t	*h;

		for (h = hat->hat_ht_hash[hashval];
		    h && h != ht;
		    h = h->ht_next)
			;
		ASSERT(h == ht);
	}
#endif /* DEBUG */
	++ht->ht_busy;
	HTABLE_EXIT(hashval);
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found acquires a hold that eventually needs to be htable_release()d
 * If not found the table is created.
 *
 * Since we can't hold a hash table mutex during allocation, we have to
 * drop it and redo the search on a create. Then we may have to free the newly
 * allocated htable if another thread raced in and created it ahead of us.
 */
htable_t *
htable_create(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	uint_t		h;
	level_t		l;
	uintptr_t	base;
	htable_t	*ht;
	htable_t	*higher = NULL;
	htable_t	*new = NULL;

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_create(): level %d out of range\n", level);

	/*
	 * Create the page tables in top down order.
	 */
	for (l = TOP_LEVEL(hat); l >= level; --l) {
		new = NULL;
		if (l == TOP_LEVEL(hat))
			base = 0;
		else
			base = vaddr & LEVEL_MASK(l + 1);

		h = HTABLE_HASH(hat, base, l);
try_again:
		/*
		 * look up the htable at this level
		 */
		HTABLE_ENTER(h);
		if (l == TOP_LEVEL(hat)) {
			ht = hat->hat_htable;
		} else {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				ASSERT(ht->ht_hat == hat);
				if (ht->ht_vaddr == base &&
				    ht->ht_level == l)
					break;
			}
		}

		/*
		 * if we found the htable, increment its busy cnt
		 * and if we had allocated a new htable, free it.
		 */
		if (ht != NULL) {
			/*
			 * If we find a pre-existing shared table, it must
			 * share from the same place.
			 */
			if (l == level && shared && ht->ht_shares &&
			    ht->ht_shares != shared) {
				panic("htable shared from wrong place "
				    "found htable=%p shared=%p", ht, shared);
			}
			++ht->ht_busy;
			HTABLE_EXIT(h);
			if (new)
				htable_free(new);
			if (higher != NULL)
				htable_release(higher);
			higher = ht;

		/*
		 * if we didn't find it on the first search
		 * allocate a new one and search again
		 */
		} else if (new == NULL) {
			HTABLE_EXIT(h);
			new = htable_alloc(hat, base, l,
			    l == level ? shared : NULL);
			goto try_again;

		/*
		 * 2nd search and still not there, use "new" table
		 * Link new table into higher, when not at top level.
		 */
		} else {
			ht = new;
			if (higher != NULL) {
				link_ptp(higher, ht, base);
				ht->ht_parent = higher;

				/*
				 * When any top level VLP page table changes,
				 * we must reload cr3 on all processors.
				 */
#ifdef __i386
				if (mmu.pae_hat &&
#else /* !__i386 */
				if ((hat->hat_flags & HAT_VLP) &&
#endif /* __i386 */
				    l == VLP_LEVEL - 1)
					hat_demap(hat, DEMAP_ALL_ADDR);
			}
			ht->ht_next = hat->hat_ht_hash[h];
			ASSERT(ht->ht_prev == NULL);
			if (hat->hat_ht_hash[h])
				hat->hat_ht_hash[h]->ht_prev = ht;
			hat->hat_ht_hash[h] = ht;
			HTABLE_EXIT(h);

			/*
			 * Note we don't do htable_release(higher).
			 * That happens recursively when "new" is removed by
			 * htable_release() or htable_steal().
			 */
			higher = ht;

			/*
			 * If we just created a new shared page table we
			 * increment the shared htable's busy count, so that
			 * it can't be the victim of a steal even if it's empty.
			 */
			if (l == level && shared) {
				(void) htable_lookup(shared->ht_hat,
				    shared->ht_vaddr, shared->ht_level);
				HATSTAT_INC(hs_htable_shared);
			}
		}
	}

	return (ht);
}
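
/*
 * Typical use of htable_create() (illustrative, from a fault handler's
 * perspective): to install a mapping for `va' a caller does roughly
 *
 *	ht = htable_create(hat, va, 0, NULL);	-- level 0, no sharing
 *	... fill in the PTE for va ...
 *	htable_release(ht);
 *
 * which builds any missing intermediate page tables top-down along
 * the way.
 */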
12650Sstevel@tonic-gate */ 12660Sstevel@tonic-gate static x86pte_t 12670Sstevel@tonic-gate htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr) 12680Sstevel@tonic-gate { 12690Sstevel@tonic-gate uint_t e; 12700Sstevel@tonic-gate x86pte_t found_pte = (x86pte_t)0; 12710Sstevel@tonic-gate char *pte_ptr; 12720Sstevel@tonic-gate char *end_pte_ptr; 12730Sstevel@tonic-gate int l = ht->ht_level; 12740Sstevel@tonic-gate uintptr_t va = *vap & LEVEL_MASK(l); 12750Sstevel@tonic-gate size_t pgsize = LEVEL_SIZE(l); 12760Sstevel@tonic-gate 12770Sstevel@tonic-gate ASSERT(va >= ht->ht_vaddr); 12780Sstevel@tonic-gate ASSERT(va <= HTABLE_LAST_PAGE(ht)); 12790Sstevel@tonic-gate 12800Sstevel@tonic-gate /* 12810Sstevel@tonic-gate * Compute the starting index and ending virtual address 12820Sstevel@tonic-gate */ 12830Sstevel@tonic-gate e = htable_va2entry(va, ht); 12840Sstevel@tonic-gate 12850Sstevel@tonic-gate /* 12860Sstevel@tonic-gate * The following page table scan code knows that the valid 12870Sstevel@tonic-gate * bit of a PTE is in the lowest byte AND that x86 is little endian!! 12880Sstevel@tonic-gate */ 12890Sstevel@tonic-gate pte_ptr = (char *)x86pte_access_pagetable(ht); 12900Sstevel@tonic-gate end_pte_ptr = pte_ptr + (ht->ht_num_ptes << mmu.pte_size_shift); 12910Sstevel@tonic-gate pte_ptr += e << mmu.pte_size_shift; 12920Sstevel@tonic-gate while (*pte_ptr == 0) { 12930Sstevel@tonic-gate va += pgsize; 12940Sstevel@tonic-gate if (va >= eaddr) 12950Sstevel@tonic-gate break; 12960Sstevel@tonic-gate pte_ptr += mmu.pte_size; 12970Sstevel@tonic-gate ASSERT(pte_ptr <= end_pte_ptr); 12980Sstevel@tonic-gate if (pte_ptr == end_pte_ptr) 12990Sstevel@tonic-gate break; 13000Sstevel@tonic-gate } 13010Sstevel@tonic-gate 13020Sstevel@tonic-gate /* 13030Sstevel@tonic-gate * if we found a valid PTE, load the entire PTE 13040Sstevel@tonic-gate */ 13050Sstevel@tonic-gate if (va < eaddr && pte_ptr != end_pte_ptr) { 13060Sstevel@tonic-gate if (mmu.pae_hat) { 130747Sjosephb ATOMIC_LOAD64((x86pte_t *)pte_ptr, found_pte); 13080Sstevel@tonic-gate } else { 13090Sstevel@tonic-gate found_pte = *(x86pte32_t *)pte_ptr; 13100Sstevel@tonic-gate } 13110Sstevel@tonic-gate } 13120Sstevel@tonic-gate x86pte_release_pagetable(ht); 13130Sstevel@tonic-gate 13140Sstevel@tonic-gate #if defined(__amd64) 13150Sstevel@tonic-gate /* 13160Sstevel@tonic-gate * deal with VA hole on amd64 13170Sstevel@tonic-gate */ 13180Sstevel@tonic-gate if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end) 13190Sstevel@tonic-gate va = mmu.hole_end + va - mmu.hole_start; 13200Sstevel@tonic-gate #endif /* __amd64 */ 13210Sstevel@tonic-gate 13220Sstevel@tonic-gate *vap = va; 13230Sstevel@tonic-gate return (found_pte); 13240Sstevel@tonic-gate } 13250Sstevel@tonic-gate 13260Sstevel@tonic-gate /* 13270Sstevel@tonic-gate * Find the address and htable for the first populated translation at or 13280Sstevel@tonic-gate * above the given virtual address. The caller may also specify an upper 13290Sstevel@tonic-gate * limit to the address range to search. Uses level information to quickly 13300Sstevel@tonic-gate * skip unpopulated sections of virtual address spaces. 13310Sstevel@tonic-gate * 13320Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable and virt addr 13330Sstevel@tonic-gate * and has a hold on the htable. 
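 *
 * A typical caller iterates over a range roughly as follows (an
 * illustrative sketch only; do_pte() stands in for whatever
 * per-mapping work the caller performs and is not a routine in this
 * file):
 *
 *	ht = NULL;
 *	for (va = start; va < end; va += LEVEL_SIZE(ht->ht_level)) {
 *		pte = htable_walk(hat, &ht, &va, end);
 *		if (ht == NULL)
 *			break;
 *		do_pte(ht, va, pte);
 *	}
 *	if (ht != NULL)
 *		htable_release(ht);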
13340Sstevel@tonic-gate */ 13350Sstevel@tonic-gate x86pte_t 13360Sstevel@tonic-gate htable_walk( 13370Sstevel@tonic-gate struct hat *hat, 13380Sstevel@tonic-gate htable_t **htp, 13390Sstevel@tonic-gate uintptr_t *vaddr, 13400Sstevel@tonic-gate uintptr_t eaddr) 13410Sstevel@tonic-gate { 13420Sstevel@tonic-gate uintptr_t va = *vaddr; 13430Sstevel@tonic-gate htable_t *ht; 13440Sstevel@tonic-gate htable_t *prev = *htp; 13450Sstevel@tonic-gate level_t l; 13460Sstevel@tonic-gate level_t max_mapped_level; 13470Sstevel@tonic-gate x86pte_t pte; 13480Sstevel@tonic-gate 13490Sstevel@tonic-gate ASSERT(eaddr > va); 13500Sstevel@tonic-gate 13510Sstevel@tonic-gate /* 13520Sstevel@tonic-gate * If this is a user address, then we know we need not look beyond 13530Sstevel@tonic-gate * kernelbase. 13540Sstevel@tonic-gate */ 13550Sstevel@tonic-gate ASSERT(hat == kas.a_hat || eaddr <= kernelbase || 13560Sstevel@tonic-gate eaddr == HTABLE_WALK_TO_END); 13570Sstevel@tonic-gate if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END) 13580Sstevel@tonic-gate eaddr = kernelbase; 13590Sstevel@tonic-gate 13600Sstevel@tonic-gate /* 13610Sstevel@tonic-gate * If we're coming in with a previous page table, search it first 13620Sstevel@tonic-gate * without doing an htable_lookup(); this should be frequent. 13630Sstevel@tonic-gate */ 13640Sstevel@tonic-gate if (prev) { 13650Sstevel@tonic-gate ASSERT(prev->ht_busy > 0); 13660Sstevel@tonic-gate ASSERT(prev->ht_vaddr <= va); 13670Sstevel@tonic-gate l = prev->ht_level; 13680Sstevel@tonic-gate if (va <= HTABLE_LAST_PAGE(prev)) { 13690Sstevel@tonic-gate pte = htable_scan(prev, &va, eaddr); 13700Sstevel@tonic-gate 13710Sstevel@tonic-gate if (PTE_ISPAGE(pte, l)) { 13720Sstevel@tonic-gate *vaddr = va; 13730Sstevel@tonic-gate *htp = prev; 13740Sstevel@tonic-gate return (pte); 13750Sstevel@tonic-gate } 13760Sstevel@tonic-gate } 13770Sstevel@tonic-gate 13780Sstevel@tonic-gate /* 13790Sstevel@tonic-gate * We found nothing in the htable provided by the caller, 13800Sstevel@tonic-gate * so fall through and do the full search 13810Sstevel@tonic-gate */ 13820Sstevel@tonic-gate htable_release(prev); 13830Sstevel@tonic-gate } 13840Sstevel@tonic-gate 13850Sstevel@tonic-gate /* 13860Sstevel@tonic-gate * Find the level of the largest pagesize used by this HAT. 13870Sstevel@tonic-gate */ 13880Sstevel@tonic-gate max_mapped_level = 0; 13890Sstevel@tonic-gate for (l = 1; l <= mmu.max_page_level; ++l) 13900Sstevel@tonic-gate if (hat->hat_pages_mapped[l] != 0) 13910Sstevel@tonic-gate max_mapped_level = l; 13920Sstevel@tonic-gate 13930Sstevel@tonic-gate while (va < eaddr && va >= *vaddr) { 13940Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va)); 13950Sstevel@tonic-gate 13960Sstevel@tonic-gate /* 13970Sstevel@tonic-gate * Find lowest table with any entry for given address. 13980Sstevel@tonic-gate */ 13990Sstevel@tonic-gate for (l = 0; l <= TOP_LEVEL(hat); ++l) { 14000Sstevel@tonic-gate ht = htable_lookup(hat, va, l); 14010Sstevel@tonic-gate if (ht != NULL) { 14020Sstevel@tonic-gate pte = htable_scan(ht, &va, eaddr); 14030Sstevel@tonic-gate if (PTE_ISPAGE(pte, l)) { 14040Sstevel@tonic-gate *vaddr = va; 14050Sstevel@tonic-gate *htp = ht; 14060Sstevel@tonic-gate return (pte); 14070Sstevel@tonic-gate } 14080Sstevel@tonic-gate htable_release(ht); 14090Sstevel@tonic-gate break; 14100Sstevel@tonic-gate } 14110Sstevel@tonic-gate 14120Sstevel@tonic-gate /* 14130Sstevel@tonic-gate * The ht is never NULL at the top level since 14140Sstevel@tonic-gate * the top level htable is created in hat_alloc().
14150Sstevel@tonic-gate */ 14160Sstevel@tonic-gate ASSERT(l < TOP_LEVEL(hat)); 14170Sstevel@tonic-gate 14180Sstevel@tonic-gate /* 14190Sstevel@tonic-gate * No htable covers the address. If there is no 14200Sstevel@tonic-gate * larger page size that could cover it, we 14210Sstevel@tonic-gate * skip to the start of the next page table. 14220Sstevel@tonic-gate */ 14230Sstevel@tonic-gate if (l >= max_mapped_level) { 14240Sstevel@tonic-gate va = NEXT_ENTRY_VA(va, l + 1); 14250Sstevel@tonic-gate break; 14260Sstevel@tonic-gate } 14270Sstevel@tonic-gate } 14280Sstevel@tonic-gate } 14290Sstevel@tonic-gate 14300Sstevel@tonic-gate *vaddr = 0; 14310Sstevel@tonic-gate *htp = NULL; 14320Sstevel@tonic-gate return (0); 14330Sstevel@tonic-gate } 14340Sstevel@tonic-gate 14350Sstevel@tonic-gate /* 14360Sstevel@tonic-gate * Find the htable and page table entry index of the given virtual address 14370Sstevel@tonic-gate * with pagesize at or below given level. 14380Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable, sets 14390Sstevel@tonic-gate * entry, and has a hold on the htable. 14400Sstevel@tonic-gate */ 14410Sstevel@tonic-gate htable_t * 14420Sstevel@tonic-gate htable_getpte( 14430Sstevel@tonic-gate struct hat *hat, 14440Sstevel@tonic-gate uintptr_t vaddr, 14450Sstevel@tonic-gate uint_t *entry, 14460Sstevel@tonic-gate x86pte_t *pte, 14470Sstevel@tonic-gate level_t level) 14480Sstevel@tonic-gate { 14490Sstevel@tonic-gate htable_t *ht; 14500Sstevel@tonic-gate level_t l; 14510Sstevel@tonic-gate uint_t e; 14520Sstevel@tonic-gate 14530Sstevel@tonic-gate ASSERT(level <= mmu.max_page_level); 14540Sstevel@tonic-gate 14550Sstevel@tonic-gate for (l = 0; l <= level; ++l) { 14560Sstevel@tonic-gate ht = htable_lookup(hat, vaddr, l); 14570Sstevel@tonic-gate if (ht == NULL) 14580Sstevel@tonic-gate continue; 14590Sstevel@tonic-gate e = htable_va2entry(vaddr, ht); 14600Sstevel@tonic-gate if (entry != NULL) 14610Sstevel@tonic-gate *entry = e; 14620Sstevel@tonic-gate if (pte != NULL) 14630Sstevel@tonic-gate *pte = x86pte_get(ht, e); 14640Sstevel@tonic-gate return (ht); 14650Sstevel@tonic-gate } 14660Sstevel@tonic-gate return (NULL); 14670Sstevel@tonic-gate } 14680Sstevel@tonic-gate 14690Sstevel@tonic-gate /* 14700Sstevel@tonic-gate * Find the htable and page table entry index of the given virtual address. 14710Sstevel@tonic-gate * There must be a valid page mapped at the given address. 14720Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable, sets 14730Sstevel@tonic-gate * entry, and has a hold on the htable. 
14740Sstevel@tonic-gate */ 14750Sstevel@tonic-gate htable_t * 14760Sstevel@tonic-gate htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry) 14770Sstevel@tonic-gate { 14780Sstevel@tonic-gate htable_t *ht; 14790Sstevel@tonic-gate uint_t e; 14800Sstevel@tonic-gate x86pte_t pte; 14810Sstevel@tonic-gate 14820Sstevel@tonic-gate ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level); 14830Sstevel@tonic-gate if (ht == NULL) 14840Sstevel@tonic-gate return (NULL); 14850Sstevel@tonic-gate 14860Sstevel@tonic-gate if (entry) 14870Sstevel@tonic-gate *entry = e; 14880Sstevel@tonic-gate 14890Sstevel@tonic-gate if (PTE_ISPAGE(pte, ht->ht_level)) 14900Sstevel@tonic-gate return (ht); 14910Sstevel@tonic-gate htable_release(ht); 14920Sstevel@tonic-gate return (NULL); 14930Sstevel@tonic-gate } 14940Sstevel@tonic-gate 14950Sstevel@tonic-gate 14960Sstevel@tonic-gate void 14970Sstevel@tonic-gate htable_init() 14980Sstevel@tonic-gate { 14990Sstevel@tonic-gate /* 15000Sstevel@tonic-gate * To save on kernel VA usage, we avoid debug information in 32 bit 15010Sstevel@tonic-gate * kernels. 15020Sstevel@tonic-gate */ 15030Sstevel@tonic-gate #if defined(__amd64) 15040Sstevel@tonic-gate int kmem_flags = KMC_NOHASH; 15050Sstevel@tonic-gate #elif defined(__i386) 15060Sstevel@tonic-gate int kmem_flags = KMC_NOHASH | KMC_NODEBUG; 15070Sstevel@tonic-gate #endif 15080Sstevel@tonic-gate 15090Sstevel@tonic-gate /* 15100Sstevel@tonic-gate * initialize kmem caches 15110Sstevel@tonic-gate */ 15120Sstevel@tonic-gate htable_cache = kmem_cache_create("htable_t", 15130Sstevel@tonic-gate sizeof (htable_t), 0, NULL, NULL, 15140Sstevel@tonic-gate htable_reap, NULL, hat_memload_arena, kmem_flags); 15150Sstevel@tonic-gate } 15160Sstevel@tonic-gate 15170Sstevel@tonic-gate /* 15180Sstevel@tonic-gate * get the pte index for the virtual address in the given htable's pagetable 15190Sstevel@tonic-gate */ 15200Sstevel@tonic-gate uint_t 15210Sstevel@tonic-gate htable_va2entry(uintptr_t va, htable_t *ht) 15220Sstevel@tonic-gate { 15230Sstevel@tonic-gate level_t l = ht->ht_level; 15240Sstevel@tonic-gate 15250Sstevel@tonic-gate ASSERT(va >= ht->ht_vaddr); 15260Sstevel@tonic-gate ASSERT(va <= HTABLE_LAST_PAGE(ht)); 15270Sstevel@tonic-gate return ((va >> LEVEL_SHIFT(l)) & (ht->ht_num_ptes - 1)); 15280Sstevel@tonic-gate } 15290Sstevel@tonic-gate 15300Sstevel@tonic-gate /* 15310Sstevel@tonic-gate * Given an htable and the index of a pte in it, return the virtual address 15320Sstevel@tonic-gate * of the page. 
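 *
 * For example (a sketch assuming 4K pages, so LEVEL_SHIFT(0) == 12):
 * a level 0 htable with ht_vaddr == 0x400000 maps entry 5 at
 * va == 0x400000 + (5 << 12) == 0x405000.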
15330Sstevel@tonic-gate */ 15340Sstevel@tonic-gate uintptr_t 15350Sstevel@tonic-gate htable_e2va(htable_t *ht, uint_t entry) 15360Sstevel@tonic-gate { 15370Sstevel@tonic-gate level_t l = ht->ht_level; 15380Sstevel@tonic-gate uintptr_t va; 15390Sstevel@tonic-gate 15400Sstevel@tonic-gate ASSERT(entry < ht->ht_num_ptes); 15410Sstevel@tonic-gate va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l)); 15420Sstevel@tonic-gate 15430Sstevel@tonic-gate /* 15440Sstevel@tonic-gate * Need to skip over any VA hole in top level table 15450Sstevel@tonic-gate */ 15460Sstevel@tonic-gate #if defined(__amd64) 15470Sstevel@tonic-gate if (ht->ht_level == mmu.max_level && va >= mmu.hole_start) 15480Sstevel@tonic-gate va += ((mmu.hole_end - mmu.hole_start) + 1); 15490Sstevel@tonic-gate #endif 15500Sstevel@tonic-gate 15510Sstevel@tonic-gate return (va); 15520Sstevel@tonic-gate } 15530Sstevel@tonic-gate 15540Sstevel@tonic-gate /* 15550Sstevel@tonic-gate * The code uses compare and swap instructions to read/write PTEs to 15560Sstevel@tonic-gate * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems. 15570Sstevel@tonic-gate * Again this can be optimized on 64 bit systems, since aligned load/store 15580Sstevel@tonic-gate * will naturally be atomic. 15590Sstevel@tonic-gate * 15600Sstevel@tonic-gate * The combination of using kpreempt_disable()/_enable() and the hci_mutex 15610Sstevel@tonic-gate * is used to ensure that an interrupt won't overwrite a temporary mapping 15620Sstevel@tonic-gate * while it's in use. If an interrupt thread tries to access a PTE, it will 15630Sstevel@tonic-gate * yield briefly back to the pinned thread which holds the cpu's hci_mutex. 15640Sstevel@tonic-gate */ 15650Sstevel@tonic-gate 15660Sstevel@tonic-gate static struct hat_cpu_info init_hci; /* used for cpu 0 */ 15670Sstevel@tonic-gate 15680Sstevel@tonic-gate /* 15690Sstevel@tonic-gate * Initialize a CPU private window for mapping page tables. 15700Sstevel@tonic-gate * There will be 3 total pages of addressing needed: 15710Sstevel@tonic-gate * 15720Sstevel@tonic-gate * 1 for r/w access to pagetables 15730Sstevel@tonic-gate * 1 for r access when copying pagetables (hat_alloc) 15740Sstevel@tonic-gate * 1 that will map the PTEs for the 1st 2, so we can access them quickly 15750Sstevel@tonic-gate * 15760Sstevel@tonic-gate * We use vmem_xalloc() to get a correct alignment so that only one 15770Sstevel@tonic-gate * hat_mempte_setup() is needed.
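 *
 * A sketch of the resulting window layout (va is the base address
 * passed in, or returned by vmem_xalloc() below):
 *
 *	va + 0 * MMU_PAGESIZE	r/w mapping of the pagetable in use
 *	va + 1 * MMU_PAGESIZE	read-only source window for x86pte_copy()
 *	va + 2 * MMU_PAGESIZE	maps the PTEs of the first two pages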
15780Sstevel@tonic-gate */ 15790Sstevel@tonic-gate void 15800Sstevel@tonic-gate x86pte_cpu_init(cpu_t *cpu, void *pages) 15810Sstevel@tonic-gate { 15820Sstevel@tonic-gate struct hat_cpu_info *hci; 15830Sstevel@tonic-gate caddr_t va; 15840Sstevel@tonic-gate 15850Sstevel@tonic-gate /* 15860Sstevel@tonic-gate * We can't use kmem_alloc/vmem_alloc for the 1st CPU, as this is 15870Sstevel@tonic-gate * called before we've activated our own HAT 15880Sstevel@tonic-gate */ 15890Sstevel@tonic-gate if (pages != NULL) { 15900Sstevel@tonic-gate hci = &init_hci; 15910Sstevel@tonic-gate va = pages; 15920Sstevel@tonic-gate } else { 15930Sstevel@tonic-gate hci = kmem_alloc(sizeof (struct hat_cpu_info), KM_SLEEP); 15940Sstevel@tonic-gate va = vmem_xalloc(heap_arena, 3 * MMU_PAGESIZE, MMU_PAGESIZE, 0, 15950Sstevel@tonic-gate LEVEL_SIZE(1), NULL, NULL, VM_SLEEP); 15960Sstevel@tonic-gate } 15970Sstevel@tonic-gate mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL); 15980Sstevel@tonic-gate 15990Sstevel@tonic-gate /* 16000Sstevel@tonic-gate * If we are using segkpm, then there is no need for any of the 16010Sstevel@tonic-gate * mempte support. We can access the desired memory through a kpm 16020Sstevel@tonic-gate * mapping rather than setting up a temporary mempte mapping. 16030Sstevel@tonic-gate */ 16040Sstevel@tonic-gate if (kpm_enable == 0) { 16050Sstevel@tonic-gate hci->hci_mapped_pfn = PFN_INVALID; 16060Sstevel@tonic-gate 16070Sstevel@tonic-gate hci->hci_kernel_pte = 16080Sstevel@tonic-gate hat_mempte_kern_setup(va, va + (2 * MMU_PAGESIZE)); 16090Sstevel@tonic-gate hci->hci_pagetable_va = (void *)va; 16100Sstevel@tonic-gate } 16110Sstevel@tonic-gate 16120Sstevel@tonic-gate cpu->cpu_hat_info = hci; 16130Sstevel@tonic-gate } 16140Sstevel@tonic-gate 16150Sstevel@tonic-gate /* 16160Sstevel@tonic-gate * Macro to establish temporary mappings for x86pte_XXX routines. 16170Sstevel@tonic-gate */ 16180Sstevel@tonic-gate #define X86PTE_REMAP(addr, pte, index, perm, pfn) { \ 16190Sstevel@tonic-gate x86pte_t t; \ 16200Sstevel@tonic-gate \ 16210Sstevel@tonic-gate t = MAKEPTE((pfn), 0) | (perm) | mmu.pt_global | mmu.pt_nx;\ 16220Sstevel@tonic-gate if (mmu.pae_hat) \ 16230Sstevel@tonic-gate pte[index] = t; \ 16240Sstevel@tonic-gate else \ 16250Sstevel@tonic-gate ((x86pte32_t *)(pte))[index] = t; \ 16260Sstevel@tonic-gate mmu_tlbflush_entry((caddr_t)(addr)); \ 16270Sstevel@tonic-gate } 16280Sstevel@tonic-gate 16290Sstevel@tonic-gate /* 16300Sstevel@tonic-gate * Disable preemption and establish a mapping to the pagetable with the 16310Sstevel@tonic-gate * given pfn. This is optimized for the case where it's the same 16320Sstevel@tonic-gate * pfn that we last referenced from this CPU. 16330Sstevel@tonic-gate */ 16340Sstevel@tonic-gate static x86pte_t * 16350Sstevel@tonic-gate x86pte_access_pagetable(htable_t *ht) 16360Sstevel@tonic-gate { 16370Sstevel@tonic-gate pfn_t pfn; 16380Sstevel@tonic-gate struct hat_cpu_info *hci; 16390Sstevel@tonic-gate 16400Sstevel@tonic-gate /* 16410Sstevel@tonic-gate * VLP pagetables are contained in the hat_t 16420Sstevel@tonic-gate */ 16430Sstevel@tonic-gate if (ht->ht_flags & HTABLE_VLP) 16440Sstevel@tonic-gate return (ht->ht_hat->hat_vlp_ptes); 16450Sstevel@tonic-gate 16460Sstevel@tonic-gate /* 16470Sstevel@tonic-gate * During early boot, use hat_boot_remap() of a page table address.
16480Sstevel@tonic-gate */ 16490Sstevel@tonic-gate pfn = ht->ht_pfn; 16500Sstevel@tonic-gate ASSERT(pfn != PFN_INVALID); 16510Sstevel@tonic-gate if (kpm_enable) 16520Sstevel@tonic-gate return ((x86pte_t *)hat_kpm_pfn2va(pfn)); 16530Sstevel@tonic-gate 16540Sstevel@tonic-gate if (!khat_running) { 16550Sstevel@tonic-gate (void) hat_boot_remap(ptable_va, pfn); 16560Sstevel@tonic-gate return ((x86pte_t *)ptable_va); 16570Sstevel@tonic-gate } 16580Sstevel@tonic-gate 16590Sstevel@tonic-gate /* 16600Sstevel@tonic-gate * Normally, disable preemption and grab the CPU's hci_mutex 16610Sstevel@tonic-gate */ 16620Sstevel@tonic-gate kpreempt_disable(); 16630Sstevel@tonic-gate hci = CPU->cpu_hat_info; 16640Sstevel@tonic-gate ASSERT(hci != NULL); 16650Sstevel@tonic-gate mutex_enter(&hci->hci_mutex); 16660Sstevel@tonic-gate if (hci->hci_mapped_pfn != pfn) { 16670Sstevel@tonic-gate /* 16680Sstevel@tonic-gate * The current mapping doesn't already point to this page. 16690Sstevel@tonic-gate * Update the CPU specific pagetable mapping to map the pfn. 16700Sstevel@tonic-gate */ 16710Sstevel@tonic-gate X86PTE_REMAP(hci->hci_pagetable_va, hci->hci_kernel_pte, 0, 16720Sstevel@tonic-gate PT_WRITABLE, pfn); 16730Sstevel@tonic-gate hci->hci_mapped_pfn = pfn; 16740Sstevel@tonic-gate } 16750Sstevel@tonic-gate return (hci->hci_pagetable_va); 16760Sstevel@tonic-gate } 16770Sstevel@tonic-gate 16780Sstevel@tonic-gate /* 16790Sstevel@tonic-gate * Release access to a page table. 16800Sstevel@tonic-gate */ 16810Sstevel@tonic-gate static void 16820Sstevel@tonic-gate x86pte_release_pagetable(htable_t *ht) 16830Sstevel@tonic-gate { 16840Sstevel@tonic-gate struct hat_cpu_info *hci; 16850Sstevel@tonic-gate 16860Sstevel@tonic-gate if (kpm_enable) 16870Sstevel@tonic-gate return; 16880Sstevel@tonic-gate 16890Sstevel@tonic-gate /* 16900Sstevel@tonic-gate * nothing to do for VLP htables 16910Sstevel@tonic-gate */ 16920Sstevel@tonic-gate if (ht->ht_flags & HTABLE_VLP) 16930Sstevel@tonic-gate return; 16940Sstevel@tonic-gate 16950Sstevel@tonic-gate /* 16960Sstevel@tonic-gate * During boot-up (hat_kern_setup()), erase the boot loader remapping. 16970Sstevel@tonic-gate */ 16980Sstevel@tonic-gate if (!khat_running) { 16990Sstevel@tonic-gate hat_boot_demap(ptable_va); 17000Sstevel@tonic-gate return; 17010Sstevel@tonic-gate } 17020Sstevel@tonic-gate 17030Sstevel@tonic-gate /* 17040Sstevel@tonic-gate * Normal Operation: drop the CPU's hci_mutex and restore preemption 17050Sstevel@tonic-gate */ 17060Sstevel@tonic-gate hci = CPU->cpu_hat_info; 17070Sstevel@tonic-gate ASSERT(hci != NULL); 17080Sstevel@tonic-gate mutex_exit(&hci->hci_mutex); 17090Sstevel@tonic-gate kpreempt_enable(); 17100Sstevel@tonic-gate } 17110Sstevel@tonic-gate 17120Sstevel@tonic-gate /* 17130Sstevel@tonic-gate * Atomic retrieval of a pagetable entry 17140Sstevel@tonic-gate */ 17150Sstevel@tonic-gate x86pte_t 17160Sstevel@tonic-gate x86pte_get(htable_t *ht, uint_t entry) 17170Sstevel@tonic-gate { 17180Sstevel@tonic-gate x86pte_t pte; 17190Sstevel@tonic-gate x86pte32_t *pte32p; 172047Sjosephb x86pte_t *ptep; 17210Sstevel@tonic-gate 17220Sstevel@tonic-gate /* 172347Sjosephb * Be careful that loading PAE entries in a 32 bit kernel is atomic.
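 * With 8 byte PTEs, a pair of plain 32 bit loads could observe a torn
 * value if another CPU rewrites the entry between the two loads;
 * ATOMIC_LOAD64() reads the whole entry in a single operation to
 * avoid that.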
17240Sstevel@tonic-gate */ 17250Sstevel@tonic-gate ptep = x86pte_access_pagetable(ht); 17260Sstevel@tonic-gate if (mmu.pae_hat) { 172747Sjosephb ATOMIC_LOAD64(ptep + entry, pte); 17280Sstevel@tonic-gate } else { 17290Sstevel@tonic-gate pte32p = (x86pte32_t *)ptep; 17300Sstevel@tonic-gate pte = pte32p[entry]; 17310Sstevel@tonic-gate } 17320Sstevel@tonic-gate x86pte_release_pagetable(ht); 17330Sstevel@tonic-gate return (pte); 17340Sstevel@tonic-gate } 17350Sstevel@tonic-gate 17360Sstevel@tonic-gate /* 17370Sstevel@tonic-gate * Atomic unconditional set of a page table entry; it returns the previous 17380Sstevel@tonic-gate * value. 17390Sstevel@tonic-gate */ 17400Sstevel@tonic-gate x86pte_t 17410Sstevel@tonic-gate x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr) 17420Sstevel@tonic-gate { 17430Sstevel@tonic-gate x86pte_t old; 1744510Skchow x86pte_t prev, n; 17450Sstevel@tonic-gate x86pte_t *ptep; 17460Sstevel@tonic-gate x86pte32_t *pte32p; 17470Sstevel@tonic-gate x86pte32_t n32, p32; 17480Sstevel@tonic-gate 17490Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 17500Sstevel@tonic-gate if (ptr == NULL) { 17510Sstevel@tonic-gate ptep = x86pte_access_pagetable(ht); 17520Sstevel@tonic-gate ptep = (void *)((caddr_t)ptep + (entry << mmu.pte_size_shift)); 17530Sstevel@tonic-gate } else { 17540Sstevel@tonic-gate ptep = ptr; 17550Sstevel@tonic-gate } 17560Sstevel@tonic-gate 17570Sstevel@tonic-gate if (mmu.pae_hat) { 17580Sstevel@tonic-gate for (;;) { 17590Sstevel@tonic-gate prev = *ptep; 1760510Skchow n = new; 1761510Skchow /* 1762510Skchow * Prevent potential data loss by preserving the REF and 1763510Skchow * MOD bits when the current PTE is valid and maps the 1764510Skchow * same pfn. For example, segmap can reissue a read-only 1765510Skchow * hat_memload on top of a dirty page. 1766510Skchow */ 1767510Skchow if (PTE_ISVALID(prev) && PTE2PFN(prev, ht->ht_level) == 1768510Skchow PTE2PFN(n, ht->ht_level)) { 1769510Skchow n |= prev & (PT_REF | PT_MOD); 1770510Skchow } 1771510Skchow if (prev == n) { 17720Sstevel@tonic-gate old = new; 17730Sstevel@tonic-gate break; 17740Sstevel@tonic-gate } 1775510Skchow old = cas64(ptep, prev, n); 17760Sstevel@tonic-gate if (old == prev) 17770Sstevel@tonic-gate break; 17780Sstevel@tonic-gate } 17790Sstevel@tonic-gate } else { 17800Sstevel@tonic-gate pte32p = (x86pte32_t *)ptep; 17810Sstevel@tonic-gate for (;;) { 17820Sstevel@tonic-gate p32 = *pte32p; 1783510Skchow n32 = new; 1784510Skchow if (PTE_ISVALID(p32) && PTE2PFN(p32, ht->ht_level) == 1785510Skchow PTE2PFN(n32, ht->ht_level)) { 1786510Skchow n32 |= p32 & (PT_REF | PT_MOD); 1787510Skchow } 17880Sstevel@tonic-gate if (p32 == n32) { 17890Sstevel@tonic-gate old = new; 17900Sstevel@tonic-gate break; 17910Sstevel@tonic-gate } 17920Sstevel@tonic-gate old = cas32(pte32p, p32, n32); 17930Sstevel@tonic-gate if (old == p32) 17940Sstevel@tonic-gate break; 17950Sstevel@tonic-gate } 17960Sstevel@tonic-gate } 17970Sstevel@tonic-gate if (ptr == NULL) 17980Sstevel@tonic-gate x86pte_release_pagetable(ht); 17990Sstevel@tonic-gate return (old); 18000Sstevel@tonic-gate } 18010Sstevel@tonic-gate 18020Sstevel@tonic-gate /* 18030Sstevel@tonic-gate * Atomic compare and swap of a page table entry.
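 * The return value is whatever the entry held at the time of the cas;
 * a caller can tell the swap succeeded when the returned value equals
 * "old". An illustrative retry sketch (not a routine in this file;
 * compute_new_pte() is a hypothetical helper):
 *
 *	do {
 *		prev = x86pte_get(ht, entry);
 *		new = compute_new_pte(prev);
 *	} while (x86pte_cas(ht, entry, prev, new) != prev);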
18040Sstevel@tonic-gate */ 18050Sstevel@tonic-gate static x86pte_t 18060Sstevel@tonic-gate x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new) 18070Sstevel@tonic-gate { 18080Sstevel@tonic-gate x86pte_t pte; 18090Sstevel@tonic-gate x86pte_t *ptep; 18100Sstevel@tonic-gate x86pte32_t pte32, o32, n32; 18110Sstevel@tonic-gate x86pte32_t *pte32p; 18120Sstevel@tonic-gate 18130Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 18140Sstevel@tonic-gate ptep = x86pte_access_pagetable(ht); 18150Sstevel@tonic-gate if (mmu.pae_hat) { 18160Sstevel@tonic-gate pte = cas64(&ptep[entry], old, new); 18170Sstevel@tonic-gate } else { 18180Sstevel@tonic-gate o32 = old; 18190Sstevel@tonic-gate n32 = new; 18200Sstevel@tonic-gate pte32p = (x86pte32_t *)ptep; 18210Sstevel@tonic-gate pte32 = cas32(&pte32p[entry], o32, n32); 18220Sstevel@tonic-gate pte = pte32; 18230Sstevel@tonic-gate } 18240Sstevel@tonic-gate x86pte_release_pagetable(ht); 18250Sstevel@tonic-gate 18260Sstevel@tonic-gate return (pte); 18270Sstevel@tonic-gate } 18280Sstevel@tonic-gate 18290Sstevel@tonic-gate /* 18300Sstevel@tonic-gate * data structure for cross call information 18310Sstevel@tonic-gate */ 18320Sstevel@tonic-gate typedef struct xcall_info { 18330Sstevel@tonic-gate x86pte_t xi_pte; 18340Sstevel@tonic-gate x86pte_t xi_old; 18350Sstevel@tonic-gate x86pte_t *xi_pteptr; 18360Sstevel@tonic-gate pfn_t xi_pfn; 18370Sstevel@tonic-gate processorid_t xi_cpuid; 18380Sstevel@tonic-gate level_t xi_level; 18390Sstevel@tonic-gate xc_func_t xi_func; 18400Sstevel@tonic-gate } xcall_info_t; 18410Sstevel@tonic-gate 18420Sstevel@tonic-gate /* 18430Sstevel@tonic-gate * Cross call service function to atomically invalidate a PTE and flush TLBs 18440Sstevel@tonic-gate */ 18450Sstevel@tonic-gate /*ARGSUSED*/ 18460Sstevel@tonic-gate static int 18470Sstevel@tonic-gate x86pte_inval_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3) 18480Sstevel@tonic-gate { 18490Sstevel@tonic-gate xcall_info_t *xi = (xcall_info_t *)a1; 18500Sstevel@tonic-gate caddr_t addr = (caddr_t)a2; 18510Sstevel@tonic-gate 18520Sstevel@tonic-gate /* 18530Sstevel@tonic-gate * Only the initiating cpu invalidates the page table entry. 18540Sstevel@tonic-gate * It returns the previous PTE value to the caller. 
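 * The cas loops below only clear the entry while it still maps
 * xi_pfn; if another CPU has already switched the PTE to a different
 * pfn, the entry is left alone and the loop exits.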
18550Sstevel@tonic-gate */ 18560Sstevel@tonic-gate if (CPU->cpu_id == xi->xi_cpuid) { 18570Sstevel@tonic-gate x86pte_t *ptep = xi->xi_pteptr; 18580Sstevel@tonic-gate pfn_t pfn = xi->xi_pfn; 18590Sstevel@tonic-gate level_t level = xi->xi_level; 18600Sstevel@tonic-gate x86pte_t old; 18610Sstevel@tonic-gate x86pte_t prev; 18620Sstevel@tonic-gate x86pte32_t *pte32p; 18630Sstevel@tonic-gate x86pte32_t p32; 18640Sstevel@tonic-gate 18650Sstevel@tonic-gate if (mmu.pae_hat) { 18660Sstevel@tonic-gate for (;;) { 18670Sstevel@tonic-gate prev = *ptep; 18680Sstevel@tonic-gate if (PTE2PFN(prev, level) != pfn) 18690Sstevel@tonic-gate break; 18700Sstevel@tonic-gate old = cas64(ptep, prev, 0); 18710Sstevel@tonic-gate if (old == prev) 18720Sstevel@tonic-gate break; 18730Sstevel@tonic-gate } 18740Sstevel@tonic-gate } else { 18750Sstevel@tonic-gate pte32p = (x86pte32_t *)ptep; 18760Sstevel@tonic-gate for (;;) { 18770Sstevel@tonic-gate p32 = *pte32p; 18780Sstevel@tonic-gate if (PTE2PFN(p32, level) != pfn) 18790Sstevel@tonic-gate break; 18800Sstevel@tonic-gate old = cas32(pte32p, p32, 0); 18810Sstevel@tonic-gate if (old == p32) 18820Sstevel@tonic-gate break; 18830Sstevel@tonic-gate } 18840Sstevel@tonic-gate prev = p32; 18850Sstevel@tonic-gate } 18860Sstevel@tonic-gate xi->xi_pte = prev; 18870Sstevel@tonic-gate } 18880Sstevel@tonic-gate 18890Sstevel@tonic-gate /* 18900Sstevel@tonic-gate * For a normal address, we just flush one page mapping. 18910Sstevel@tonic-gate * Otherwise reload cr3 to effect a complete TLB flush. 18920Sstevel@tonic-gate * 18930Sstevel@tonic-gate * Note we don't reload VLP PTEs -- this assumes we never have a 18940Sstevel@tonic-gate * large page size at VLP_LEVEL for VLP processes. 18950Sstevel@tonic-gate */ 18960Sstevel@tonic-gate if ((uintptr_t)addr != DEMAP_ALL_ADDR) { 18970Sstevel@tonic-gate mmu_tlbflush_entry(addr); 18980Sstevel@tonic-gate } else { 18990Sstevel@tonic-gate reload_cr3(); 19000Sstevel@tonic-gate } 19010Sstevel@tonic-gate return (0); 19020Sstevel@tonic-gate } 19030Sstevel@tonic-gate 19040Sstevel@tonic-gate /* 19050Sstevel@tonic-gate * Cross call service function to atomically change a PTE and flush TLBs 19060Sstevel@tonic-gate */ 19070Sstevel@tonic-gate /*ARGSUSED*/ 19080Sstevel@tonic-gate static int 19090Sstevel@tonic-gate x86pte_update_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3) 19100Sstevel@tonic-gate { 19110Sstevel@tonic-gate xcall_info_t *xi = (xcall_info_t *)a1; 19120Sstevel@tonic-gate caddr_t addr = (caddr_t)a2; 19130Sstevel@tonic-gate 19140Sstevel@tonic-gate /* 19150Sstevel@tonic-gate * Only the initiating cpu changes the page table entry. 19160Sstevel@tonic-gate * It returns the previous PTE value to the caller.
19170Sstevel@tonic-gate */ 19180Sstevel@tonic-gate if (CPU->cpu_id == xi->xi_cpuid) { 19190Sstevel@tonic-gate x86pte_t *ptep = xi->xi_pteptr; 19200Sstevel@tonic-gate x86pte_t new = xi->xi_pte; 19210Sstevel@tonic-gate x86pte_t old = xi->xi_old; 19220Sstevel@tonic-gate x86pte_t prev; 19230Sstevel@tonic-gate 19240Sstevel@tonic-gate if (mmu.pae_hat) { 19250Sstevel@tonic-gate prev = cas64(ptep, old, new); 19260Sstevel@tonic-gate } else { 19270Sstevel@tonic-gate x86pte32_t o32 = old; 19280Sstevel@tonic-gate x86pte32_t n32 = new; 19290Sstevel@tonic-gate x86pte32_t *pte32p = (x86pte32_t *)ptep; 19300Sstevel@tonic-gate prev = cas32(pte32p, o32, n32); 19310Sstevel@tonic-gate } 19320Sstevel@tonic-gate 19330Sstevel@tonic-gate xi->xi_pte = prev; 19340Sstevel@tonic-gate } 19350Sstevel@tonic-gate 19360Sstevel@tonic-gate /* 19370Sstevel@tonic-gate * Flush the TLB entry 19380Sstevel@tonic-gate */ 19390Sstevel@tonic-gate if ((uintptr_t)addr != DEMAP_ALL_ADDR) 19400Sstevel@tonic-gate mmu_tlbflush_entry(addr); 19410Sstevel@tonic-gate else 19420Sstevel@tonic-gate reload_cr3(); 19430Sstevel@tonic-gate return (0); 19440Sstevel@tonic-gate } 19450Sstevel@tonic-gate 19460Sstevel@tonic-gate /* 19470Sstevel@tonic-gate * Use cross calls to change a page table entry and invalidate TLBs. 19480Sstevel@tonic-gate */ 19490Sstevel@tonic-gate void 19500Sstevel@tonic-gate x86pte_xcall(hat_t *hat, xcall_info_t *xi, uintptr_t addr) 19510Sstevel@tonic-gate { 19520Sstevel@tonic-gate cpuset_t cpus; 19530Sstevel@tonic-gate 19540Sstevel@tonic-gate /* 19550Sstevel@tonic-gate * Given the current implementation of hat_share(), doing a 19560Sstevel@tonic-gate * hat_pageunload() on a shared page table requires invalidating 19570Sstevel@tonic-gate * all user TLB entries on all CPUs. 19580Sstevel@tonic-gate */ 19590Sstevel@tonic-gate if (hat->hat_flags & HAT_SHARED) { 19600Sstevel@tonic-gate hat = kas.a_hat; 19610Sstevel@tonic-gate addr = DEMAP_ALL_ADDR; 19620Sstevel@tonic-gate } 19630Sstevel@tonic-gate 19640Sstevel@tonic-gate /* 19650Sstevel@tonic-gate * Use a cross call to do the invalidations. 19660Sstevel@tonic-gate * Note the current CPU always has to be in the cross call CPU set. 19670Sstevel@tonic-gate */ 19680Sstevel@tonic-gate kpreempt_disable(); 19690Sstevel@tonic-gate xi->xi_cpuid = CPU->cpu_id; 19700Sstevel@tonic-gate CPUSET_ZERO(cpus); 19710Sstevel@tonic-gate if (hat == kas.a_hat) { 19720Sstevel@tonic-gate CPUSET_OR(cpus, khat_cpuset); 19730Sstevel@tonic-gate } else { 19740Sstevel@tonic-gate mutex_enter(&hat->hat_switch_mutex); 19750Sstevel@tonic-gate CPUSET_OR(cpus, hat->hat_cpus); 19760Sstevel@tonic-gate CPUSET_ADD(cpus, CPU->cpu_id); 19770Sstevel@tonic-gate } 19780Sstevel@tonic-gate 19790Sstevel@tonic-gate /* 19800Sstevel@tonic-gate * Use a cross call to modify the page table entry and invalidate TLBs. 19810Sstevel@tonic-gate * If we're panic'ing, don't bother with the cross call. 19820Sstevel@tonic-gate * Note the panicstr check isn't bullet proof and the panic system 19830Sstevel@tonic-gate * ought to be made tighter.
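 *
 * Every CPU in the "cpus" set runs xi_func; only the initiating CPU
 * (matched by xi_cpuid) actually modifies the PTE, while all of them
 * flush the affected TLB entry, or reload cr3 for DEMAP_ALL_ADDR.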
19840Sstevel@tonic-gate */ 19850Sstevel@tonic-gate if (panicstr == NULL) 19860Sstevel@tonic-gate xc_wait_sync((xc_arg_t)xi, addr, NULL, X_CALL_HIPRI, 19870Sstevel@tonic-gate cpus, xi->xi_func); 19880Sstevel@tonic-gate else 19890Sstevel@tonic-gate (void) xi->xi_func((xc_arg_t)xi, (xc_arg_t)addr, NULL); 19900Sstevel@tonic-gate if (hat != kas.a_hat) 19910Sstevel@tonic-gate mutex_exit(&hat->hat_switch_mutex); 19920Sstevel@tonic-gate kpreempt_enable(); 19930Sstevel@tonic-gate } 19940Sstevel@tonic-gate 19950Sstevel@tonic-gate /* 19960Sstevel@tonic-gate * Invalidate a page table entry if it currently maps the given pfn. 19970Sstevel@tonic-gate * This returns the previous value of the PTE. 19980Sstevel@tonic-gate */ 19990Sstevel@tonic-gate x86pte_t 20000Sstevel@tonic-gate x86pte_invalidate_pfn(htable_t *ht, uint_t entry, pfn_t pfn, void *pte_ptr) 20010Sstevel@tonic-gate { 20020Sstevel@tonic-gate xcall_info_t xi; 20030Sstevel@tonic-gate x86pte_t *ptep; 20040Sstevel@tonic-gate hat_t *hat; 20050Sstevel@tonic-gate uintptr_t addr; 20060Sstevel@tonic-gate 20070Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 20080Sstevel@tonic-gate if (pte_ptr != NULL) { 20090Sstevel@tonic-gate ptep = pte_ptr; 20100Sstevel@tonic-gate } else { 20110Sstevel@tonic-gate ptep = x86pte_access_pagetable(ht); 20120Sstevel@tonic-gate ptep = (void *)((caddr_t)ptep + (entry << mmu.pte_size_shift)); 20130Sstevel@tonic-gate } 20140Sstevel@tonic-gate 20150Sstevel@tonic-gate /* 20160Sstevel@tonic-gate * Fill in the structure used by the cross call function to do the 20170Sstevel@tonic-gate * invalidation. 20180Sstevel@tonic-gate */ 20190Sstevel@tonic-gate xi.xi_pte = 0; 20200Sstevel@tonic-gate xi.xi_pteptr = ptep; 20210Sstevel@tonic-gate xi.xi_pfn = pfn; 20220Sstevel@tonic-gate xi.xi_level = ht->ht_level; 20230Sstevel@tonic-gate xi.xi_func = x86pte_inval_func; 20240Sstevel@tonic-gate ASSERT(xi.xi_level != VLP_LEVEL); 20250Sstevel@tonic-gate 20260Sstevel@tonic-gate hat = ht->ht_hat; 20270Sstevel@tonic-gate addr = htable_e2va(ht, entry); 20280Sstevel@tonic-gate 20290Sstevel@tonic-gate x86pte_xcall(hat, &xi, addr); 20300Sstevel@tonic-gate 20310Sstevel@tonic-gate if (pte_ptr == NULL) 20320Sstevel@tonic-gate x86pte_release_pagetable(ht); 20330Sstevel@tonic-gate return (xi.xi_pte); 20340Sstevel@tonic-gate } 20350Sstevel@tonic-gate 20360Sstevel@tonic-gate /* 20370Sstevel@tonic-gate * update a PTE and invalidate any stale TLB entries. 20380Sstevel@tonic-gate */ 20390Sstevel@tonic-gate x86pte_t 20400Sstevel@tonic-gate x86pte_update(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new) 20410Sstevel@tonic-gate { 20420Sstevel@tonic-gate xcall_info_t xi; 20430Sstevel@tonic-gate x86pte_t *ptep; 20440Sstevel@tonic-gate hat_t *hat; 20450Sstevel@tonic-gate uintptr_t addr; 20460Sstevel@tonic-gate 20470Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 20480Sstevel@tonic-gate ptep = x86pte_access_pagetable(ht); 20490Sstevel@tonic-gate ptep = (void *)((caddr_t)ptep + (entry << mmu.pte_size_shift)); 20500Sstevel@tonic-gate 20510Sstevel@tonic-gate /* 20520Sstevel@tonic-gate * Fill in the structure used by the cross call function to do the 20530Sstevel@tonic-gate * invalidation. 
20540Sstevel@tonic-gate */ 20550Sstevel@tonic-gate xi.xi_pte = new; 20560Sstevel@tonic-gate xi.xi_old = expected; 20570Sstevel@tonic-gate xi.xi_pteptr = ptep; 20580Sstevel@tonic-gate xi.xi_func = x86pte_update_func; 20590Sstevel@tonic-gate 20600Sstevel@tonic-gate hat = ht->ht_hat; 20610Sstevel@tonic-gate addr = htable_e2va(ht, entry); 20620Sstevel@tonic-gate 20630Sstevel@tonic-gate x86pte_xcall(hat, &xi, addr); 20640Sstevel@tonic-gate 20650Sstevel@tonic-gate x86pte_release_pagetable(ht); 20660Sstevel@tonic-gate return (xi.xi_pte); 20670Sstevel@tonic-gate } 20680Sstevel@tonic-gate 20690Sstevel@tonic-gate /* 20700Sstevel@tonic-gate * Copy page tables - this is just a little more complicated than the 20710Sstevel@tonic-gate * previous routines. Note that it's also not atomic! It also is never 20720Sstevel@tonic-gate * used for VLP pagetables. 20730Sstevel@tonic-gate */ 20740Sstevel@tonic-gate void 20750Sstevel@tonic-gate x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count) 20760Sstevel@tonic-gate { 20770Sstevel@tonic-gate struct hat_cpu_info *hci; 20780Sstevel@tonic-gate caddr_t src_va; 20790Sstevel@tonic-gate caddr_t dst_va; 20800Sstevel@tonic-gate size_t size; 20810Sstevel@tonic-gate 20820Sstevel@tonic-gate ASSERT(khat_running); 20830Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_VLP)); 20840Sstevel@tonic-gate ASSERT(!(src->ht_flags & HTABLE_VLP)); 20850Sstevel@tonic-gate ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN)); 20860Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN)); 20870Sstevel@tonic-gate 20880Sstevel@tonic-gate /* 20890Sstevel@tonic-gate * Acquire access to the CPU pagetable window for the destination. 20900Sstevel@tonic-gate */ 20910Sstevel@tonic-gate dst_va = (caddr_t)x86pte_access_pagetable(dest); 20920Sstevel@tonic-gate if (kpm_enable) { 20930Sstevel@tonic-gate src_va = (caddr_t)x86pte_access_pagetable(src); 20940Sstevel@tonic-gate } else { 20950Sstevel@tonic-gate hci = CPU->cpu_hat_info; 20960Sstevel@tonic-gate 20970Sstevel@tonic-gate /* 20980Sstevel@tonic-gate * Finish defining the src pagetable mapping 20990Sstevel@tonic-gate */ 21000Sstevel@tonic-gate src_va = dst_va + MMU_PAGESIZE; 21010Sstevel@tonic-gate X86PTE_REMAP(src_va, hci->hci_kernel_pte, 1, 0, src->ht_pfn); 21020Sstevel@tonic-gate } 21030Sstevel@tonic-gate 21040Sstevel@tonic-gate /* 21050Sstevel@tonic-gate * now do the copy 21060Sstevel@tonic-gate */ 21070Sstevel@tonic-gate 21080Sstevel@tonic-gate dst_va += entry << mmu.pte_size_shift; 21090Sstevel@tonic-gate src_va += entry << mmu.pte_size_shift; 21100Sstevel@tonic-gate size = count << mmu.pte_size_shift; 21110Sstevel@tonic-gate bcopy(src_va, dst_va, size); 21120Sstevel@tonic-gate 21130Sstevel@tonic-gate x86pte_release_pagetable(dest); 21140Sstevel@tonic-gate } 21150Sstevel@tonic-gate 21160Sstevel@tonic-gate /* 21170Sstevel@tonic-gate * Zero page table entries - Note this doesn't use atomic stores! 21180Sstevel@tonic-gate */ 21190Sstevel@tonic-gate void 21200Sstevel@tonic-gate x86pte_zero(htable_t *dest, uint_t entry, uint_t count) 21210Sstevel@tonic-gate { 21220Sstevel@tonic-gate caddr_t dst_va; 21230Sstevel@tonic-gate x86pte_t *p; 21240Sstevel@tonic-gate x86pte32_t *p32; 21250Sstevel@tonic-gate size_t size; 21260Sstevel@tonic-gate extern void hat_pte_zero(void *, size_t); 21270Sstevel@tonic-gate 21280Sstevel@tonic-gate /* 21290Sstevel@tonic-gate * Map in the page table to be zeroed. 
21300Sstevel@tonic-gate */ 21310Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN)); 21320Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_VLP)); 21330Sstevel@tonic-gate dst_va = (caddr_t)x86pte_access_pagetable(dest); 21340Sstevel@tonic-gate dst_va += entry << mmu.pte_size_shift; 21350Sstevel@tonic-gate size = count << mmu.pte_size_shift; 21360Sstevel@tonic-gate if (x86_feature & X86_SSE2) { 21370Sstevel@tonic-gate hat_pte_zero(dst_va, size); 21380Sstevel@tonic-gate } else if (khat_running) { 21390Sstevel@tonic-gate bzero(dst_va, size); 21400Sstevel@tonic-gate } else { 21410Sstevel@tonic-gate /* 21420Sstevel@tonic-gate * Can't just use bzero during boot because it checks the 21430Sstevel@tonic-gate * address against kernelbase. Instead just use a zero loop. 21440Sstevel@tonic-gate */ 21450Sstevel@tonic-gate if (mmu.pae_hat) { 21460Sstevel@tonic-gate p = (x86pte_t *)dst_va; 21470Sstevel@tonic-gate while (count-- > 0) 21480Sstevel@tonic-gate *p++ = 0; 21490Sstevel@tonic-gate } else { 21500Sstevel@tonic-gate p32 = (x86pte32_t *)dst_va; 21510Sstevel@tonic-gate while (count-- > 0) 21520Sstevel@tonic-gate *p32++ = 0; 21530Sstevel@tonic-gate } 21540Sstevel@tonic-gate } 21550Sstevel@tonic-gate x86pte_release_pagetable(dest); 21560Sstevel@tonic-gate } 21570Sstevel@tonic-gate 21580Sstevel@tonic-gate /* 21590Sstevel@tonic-gate * Called to ensure that all pagetables are in the system dump 21600Sstevel@tonic-gate */ 21610Sstevel@tonic-gate void 21620Sstevel@tonic-gate hat_dump(void) 21630Sstevel@tonic-gate { 21640Sstevel@tonic-gate hat_t *hat; 21650Sstevel@tonic-gate uint_t h; 21660Sstevel@tonic-gate htable_t *ht; 21670Sstevel@tonic-gate int count; 21680Sstevel@tonic-gate 21690Sstevel@tonic-gate /* 21700Sstevel@tonic-gate * kas.a_hat is the head of the circular list, but not an element of 21710Sstevel@tonic-gate * the list. Once we pass kas.a_hat->hat_next a second time, we 21720Sstevel@tonic-gate * know we've iterated through every hat structure. 21730Sstevel@tonic-gate */ 21740Sstevel@tonic-gate for (hat = kas.a_hat, count = 0; hat != kas.a_hat->hat_next || 21750Sstevel@tonic-gate count++ == 0; hat = hat->hat_next) { 21760Sstevel@tonic-gate for (h = 0; h < hat->hat_num_hash; ++h) { 21770Sstevel@tonic-gate for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 21780Sstevel@tonic-gate if ((ht->ht_flags & HTABLE_VLP) == 0) { 21790Sstevel@tonic-gate dump_page(ht->ht_pfn); 21800Sstevel@tonic-gate } 21810Sstevel@tonic-gate } 21820Sstevel@tonic-gate } 21830Sstevel@tonic-gate } 21840Sstevel@tonic-gate }