/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - Hardware Address Translation management for i386 and amd64
 *
 * Implementation of the interfaces described in <common/vm/hat.h>
 *
 * Nearly all the details of how the hardware is managed should not be
 * visible outside this layer except for misc. machine specific functions
 * that work in conjunction with this code.
 *
 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
 */

#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/shm.h>
#include <sys/sysmacros.h>
#include <sys/machparam.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/controlregs.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/bootinfo.h>
#include <sys/archsystm.h>

#include <vm/seg_kmem.h>
#include <vm/hat_i86.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/page.h>
#include <vm/seg_kp.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <vm/kboot_mmu.h>
#include <vm/seg_spt.h>

#include <sys/cmn_err.h>

/*
 * Basic parameters for hat operation.
 */
struct hat_mmu_info mmu;

/*
 * The page that is the kernel's top level pagetable.
 *
 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
 * on this 4K page for its top level page table. The remaining groups of
 * 4 entries are used for per processor copies of user VLP pagetables for
 * running threads. See hat_switch() and reload_pae32() for details.
 *
 * vlp_page[0..3]  - level==2 PTEs for kernel HAT
 * vlp_page[4..7]  - level==2 PTEs for user thread on cpu 0
 * vlp_page[8..11] - level==2 PTEs for user thread on cpu 1
 * etc...
 */
static x86pte_t *vlp_page;
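
/*
 * For reference, a sketch of the per-CPU slot arithmetic the layout above
 * implies (reload_pae32() below is the authoritative user of vlp_page):
 *
 *	x86pte_t *slot = vlp_page + (cpu_id + 1) * VLP_NUM_PTES;
 *
 * i.e. group 0 belongs to the kernel hat and cpu N owns group N + 1.
 */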

/*
 * forward declaration of internal utility routines
 */
static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
	x86pte_t new);

/*
 * The kernel address space exists in all HATs. To implement this the
 * kernel reserves a fixed number of entries in the topmost level(s) of page
 * tables. The values are set up during startup and then copied to every user
 * hat created by hat_alloc(). This means that kernelbase must be:
 *
 *	  4Meg aligned for 32 bit kernels
 *	512Gig aligned for x86_64 64 bit kernel
 *
 * The hat_kernel_range_ts describe what needs to be copied from the kernel
 * hat to each user hat.
 */
typedef struct hat_kernel_range {
	level_t		hkr_level;
	uintptr_t	hkr_start_va;
	uintptr_t	hkr_end_va;	/* zero means to end of memory */
} hat_kernel_range_t;
#define	NUM_KERNEL_RANGE	2
static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
static int num_kernel_ranges;

uint_t use_boot_reserve = 1;	/* cleared after early boot process */
uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */

/*
 * enable_1gpg: controls 1g page support for user applications.
 * By default, 1g pages are exported to user applications. enable_1gpg can
 * be set to 0 to not export.
 */
int	enable_1gpg = 1;

/*
 * AMD shanghai processors provide better management of 1gb ptes in their
 * TLBs. By default, 1g page support will be disabled for pre-shanghai AMD
 * processors that don't have optimal tlb support for the 1g page size.
 * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
 * processors.
 */
int	chk_optimal_1gtlb = 1;


#ifdef DEBUG
uint_t	map1gcnt;
#endif


/*
 * A cpuset for all cpus. This is used for kernel address cross calls, since
 * the kernel addresses apply to all cpus.
 */
cpuset_t khat_cpuset;

/*
 * management stuff for hat structures
 */
kmutex_t	hat_list_lock;
kcondvar_t	hat_list_cv;
kmem_cache_t	*hat_cache;
kmem_cache_t	*hat_hash_cache;
kmem_cache_t	*vlp_hash_cache;

/*
 * Simple statistics
 */
struct hatstats hatstat;

/*
 * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
 * correctly. For such hypervisors we must set PT_USER for kernel
 * entries ourselves (normally the emulation would set PT_USER for
 * kernel entries and PT_USER|PT_GLOBAL for user entries). pt_kern is
 * thus set appropriately. Note that dboot/kbm is OK, as only the full
 * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
 * incorrect.
 */
int pt_kern;

/*
 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
 */
extern void atomic_orb(uchar_t *addr, uchar_t val);
extern void atomic_andb(uchar_t *addr, uchar_t val);

#define	PP_GETRM(pp, rmmask)	(pp->p_nrm & rmmask)
#define	PP_ISMOD(pp)		PP_GETRM(pp, P_MOD)
#define	PP_ISREF(pp)		PP_GETRM(pp, P_REF)
#define	PP_ISRO(pp)		PP_GETRM(pp, P_RO)

#define	PP_SETRM(pp, rm)	atomic_orb(&(pp->p_nrm), rm)
#define	PP_SETMOD(pp)		PP_SETRM(pp, P_MOD)
#define	PP_SETREF(pp)		PP_SETRM(pp, P_REF)
#define	PP_SETRO(pp)		PP_SETRM(pp, P_RO)

#define	PP_CLRRM(pp, rm)	atomic_andb(&(pp->p_nrm), ~(rm))
#define	PP_CLRMOD(pp)		PP_CLRRM(pp, P_MOD)
#define	PP_CLRREF(pp)		PP_CLRRM(pp, P_REF)
#define	PP_CLRRO(pp)		PP_CLRRM(pp, P_RO)
#define	PP_CLRALL(pp)		PP_CLRRM(pp, P_MOD | P_REF | P_RO)

/*
 * kmem cache constructor for struct hat
 */
/*ARGSUSED*/
static int
hati_constructor(void *buf, void *handle, int kmflags)
{
	hat_t	*hat = buf;

	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	bzero(hat->hat_pages_mapped,
	    sizeof (pgcnt_t) * (mmu.max_page_level + 1));
	hat->hat_ism_pgcnt = 0;
	hat->hat_stats = 0;
	hat->hat_flags = 0;
	CPUSET_ZERO(hat->hat_cpus);
	hat->hat_htable = NULL;
	hat->hat_ht_hash = NULL;
	return (0);
}

/*
 * Allocate a hat structure for as. We also create the top level
 * htable and initialize it to contain the kernel hat entries.
 */
hat_t *
hat_alloc(struct as *as)
{
	hat_t			*hat;
	htable_t		*ht;	/* top level htable */
	uint_t			use_vlp;
	uint_t			r;
	hat_kernel_range_t	*rp;
	uintptr_t		va;
	uintptr_t		eva;
	uint_t			start;
	uint_t			cnt;
	htable_t		*src;

	/*
	 * Once we start creating user process HATs we can enable
	 * the htable_steal() code.
	 */
	if (can_steal_post_boot == 0)
		can_steal_post_boot = 1;

	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
	hat->hat_as = as;
	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	ASSERT(hat->hat_flags == 0);

#if defined(__xpv)
	/*
	 * No VLP stuff on the hypervisor due to the 64-bit split top level
	 * page tables.
	 * On 32-bit it's not needed as the hypervisor takes care of
	 * copying the top level PTEs to a below 4Gig page.
	 */
	use_vlp = 0;
#else	/* __xpv */
	/* 32 bit processes use a VLP style hat when running with PAE */
#if defined(__amd64)
	use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
#elif defined(__i386)
	use_vlp = mmu.pae_hat;
#endif
#endif	/* __xpv */
	if (use_vlp) {
		hat->hat_flags = HAT_VLP;
		bzero(hat->hat_vlp_ptes, VLP_SIZE);
	}

	/*
	 * Allocate the htable hash
	 */
	if ((hat->hat_flags & HAT_VLP)) {
		hat->hat_num_hash = mmu.vlp_hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
	} else {
		hat->hat_num_hash = mmu.hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
	}
	bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));

	/*
	 * Initialize Kernel HAT entries at the top of the top level page
	 * tables for the new hat.
	 */
	hat->hat_htable = NULL;
	hat->hat_ht_cached = NULL;
	XPV_DISALLOW_MIGRATE();
	ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
	hat->hat_htable = ht;

#if defined(__amd64)
	if (hat->hat_flags & HAT_VLP)
		goto init_done;
#endif

	for (r = 0; r < num_kernel_ranges; ++r) {
		rp = &kernel_ranges[r];
		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
		    va += cnt * LEVEL_SIZE(rp->hkr_level)) {

			if (rp->hkr_level == TOP_LEVEL(hat))
				ht = hat->hat_htable;
			else
				ht = htable_create(hat, va, rp->hkr_level,
				    NULL);

			start = htable_va2entry(va, ht);
			cnt = HTABLE_NUM_PTES(ht) - start;
			eva = va +
			    ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
			if (rp->hkr_end_va != 0 &&
			    (eva > rp->hkr_end_va || eva == 0))
				cnt = htable_va2entry(rp->hkr_end_va, ht) -
				    start;

#if defined(__i386) && !defined(__xpv)
			if (ht->ht_flags & HTABLE_VLP) {
				bcopy(&vlp_page[start],
				    &hat->hat_vlp_ptes[start],
				    cnt * sizeof (x86pte_t));
				continue;
			}
#endif
			src = htable_lookup(kas.a_hat, va, rp->hkr_level);
			ASSERT(src != NULL);
			x86pte_copy(src, ht, start, cnt);
			htable_release(src);
		}
	}

init_done:

#if defined(__xpv)
	/*
	 * Pin top level page tables after initializing them
	 */
	xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
#if defined(__amd64)
	xen_pin(hat->hat_user_ptable, mmu.max_level);
#endif
#endif
	XPV_ALLOW_MIGRATE();

	/*
	 * Put it at the start of the global list of all hats (used by stealing)
	 *
	 * kas.a_hat is not in the list but is instead used to find the
	 * first and last items in the list.
	 *
	 * - kas.a_hat->hat_next points to the start of the user hats.
	 *   The list ends where hat->hat_next == NULL
	 *
	 * - kas.a_hat->hat_prev points to the last of the user hats.
	 *   The list begins where hat->hat_prev == NULL
	 */
	mutex_enter(&hat_list_lock);
	hat->hat_prev = NULL;
	hat->hat_next = kas.a_hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat;
	else
		kas.a_hat->hat_prev = hat;
	kas.a_hat->hat_next = hat;
	mutex_exit(&hat_list_lock);

	return (hat);
}
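
/*
 * For reference, the list shape the insertion above maintains (kas.a_hat
 * anchors the list but is never on it):
 *
 *	kas.a_hat->hat_next -> newest hat -> ... -> oldest hat -> NULL
 *	kas.a_hat->hat_prev -> oldest hat, and each hat's hat_prev links
 *	back toward the newest hat, whose hat_prev is NULL.
 */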

/*
 * process has finished executing but as has not been cleaned up yet.
 */
/*ARGSUSED*/
void
hat_free_start(hat_t *hat)
{
	ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));

	/*
	 * If the hat is currently a stealing victim, wait for the stealing
	 * to finish.  Once we mark it as HAT_FREEING, htable_steal()
	 * won't look at its pagetables anymore.
	 */
	mutex_enter(&hat_list_lock);
	while (hat->hat_flags & HAT_VICTIM)
		cv_wait(&hat_list_cv, &hat_list_lock);
	hat->hat_flags |= HAT_FREEING;
	mutex_exit(&hat_list_lock);
}

/*
 * An address space is being destroyed, so we destroy the associated hat.
 */
void
hat_free_end(hat_t *hat)
{
	kmem_cache_t *cache;

	ASSERT(hat->hat_flags & HAT_FREEING);

	/*
	 * must not be running on the given hat
	 */
	ASSERT(CPU->cpu_current_hat != hat);

	/*
	 * Remove it from the list of HATs
	 */
	mutex_enter(&hat_list_lock);
	if (hat->hat_prev)
		hat->hat_prev->hat_next = hat->hat_next;
	else
		kas.a_hat->hat_next = hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat->hat_prev;
	else
		kas.a_hat->hat_prev = hat->hat_prev;
	mutex_exit(&hat_list_lock);
	hat->hat_next = hat->hat_prev = NULL;

#if defined(__xpv)
	/*
	 * On the hypervisor, unpin top level page table(s)
	 */
	xen_unpin(hat->hat_htable->ht_pfn);
#if defined(__amd64)
	xen_unpin(hat->hat_user_ptable);
#endif
#endif

	/*
	 * Make a pass through the htables freeing them all up.
	 */
	htable_purge_hat(hat);

	/*
	 * Decide which kmem cache the hash table came from, then free it.
	 */
	if (hat->hat_flags & HAT_VLP)
		cache = vlp_hash_cache;
	else
		cache = hat_hash_cache;
	kmem_cache_free(cache, hat->hat_ht_hash);
	hat->hat_ht_hash = NULL;

	hat->hat_flags = 0;
	kmem_cache_free(hat_cache, hat);
}

/*
 * round kernelbase down to a supported value to use for _userlimit
 *
 * userlimit must be aligned down to an entry in the top level htable.
 * The one exception is for 32 bit HAT's running PAE.
 */
uintptr_t
hat_kernelbase(uintptr_t va)
{
#if defined(__i386)
	va &= LEVEL_MASK(1);
#endif
	if (IN_VA_HOLE(va))
		panic("_userlimit %p will fall in VA hole\n", (void *)va);
	return (va);
}

/*
 * Determine the largest page size level the hat may use, honoring the
 * enable_1gpg and chk_optimal_1gtlb overrides described above.
 */
static void
set_max_page_level()
{
	level_t		lvl;

	if (!kbm_largepage_support) {
		lvl = 0;
	} else {
		if (x86_feature & X86_1GPG) {
			lvl = 2;
			if (chk_optimal_1gtlb &&
			    cpuid_opteron_erratum(CPU, 6671130)) {
				lvl = 1;
			}
			if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
			    LEVEL_SHIFT(0))) {
				lvl = 1;
			}
		} else {
			lvl = 1;
		}
	}
	mmu.max_page_level = lvl;

	if ((lvl == 2) && (enable_1gpg == 0))
		mmu.umax_page_level = 1;
	else
		mmu.umax_page_level = lvl;
}

/*
 * Initialize hat data structures based on processor MMU information.
 */
void
mmu_init(void)
{
	uint_t max_htables;
	uint_t pa_bits;
	uint_t va_bits;
	int i;

	/*
	 * If the CPU enabled the page table global bit, use it for the kernel.
	 * This is bit 7 in CR4 (PGE - Page Global Enable).
	 */
	if ((x86_feature & X86_PGE) != 0 && (getcr4() & CR4_PGE) != 0)
		mmu.pt_global = PT_GLOBAL;

	/*
	 * Detect NX and PAE usage.
	 */
	mmu.pae_hat = kbm_pae_support;
	if (kbm_nx_support)
		mmu.pt_nx = PT_NX;
	else
		mmu.pt_nx = 0;

	/*
	 * Use CPU info to set various MMU parameters
	 */
	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);

	if (va_bits < sizeof (void *) * NBBY) {
		mmu.hole_start = (1ul << (va_bits - 1));
		mmu.hole_end = 0ul - mmu.hole_start - 1;
	} else {
		mmu.hole_end = 0;
		mmu.hole_start = mmu.hole_end - 1;
	}
#if defined(OPTERON_ERRATUM_121)
	/*
	 * If erratum 121 has already been detected at this time, hole_start
	 * contains the value to be subtracted from mmu.hole_start.
	 */
	ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
	hole_start = mmu.hole_start - hole_start;
#else
	hole_start = mmu.hole_start;
#endif
	hole_end = mmu.hole_end;

	mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
	if (mmu.pae_hat == 0 && pa_bits > 32)
		mmu.highest_pfn = PFN_4G - 1;

	if (mmu.pae_hat) {
		mmu.pte_size = 8;	/* 8 byte PTEs */
		mmu.pte_size_shift = 3;
	} else {
		mmu.pte_size = 4;	/* 4 byte PTEs */
		mmu.pte_size_shift = 2;
	}

	if (mmu.pae_hat && (x86_feature & X86_PAE) == 0)
		panic("Processor does not support PAE");

	if ((x86_feature & X86_CX8) == 0)
		panic("Processor does not support cmpxchg8b instruction");

#if defined(__amd64)

	mmu.num_level = 4;
	mmu.max_level = 3;
	mmu.ptes_per_table = 512;
	mmu.top_level_count = 512;

	mmu.level_shift[0] = 12;
	mmu.level_shift[1] = 21;
	mmu.level_shift[2] = 30;
	mmu.level_shift[3] = 39;

#elif defined(__i386)

	if (mmu.pae_hat) {
		mmu.num_level = 3;
		mmu.max_level = 2;
		mmu.ptes_per_table = 512;
		mmu.top_level_count = 4;

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 21;
		mmu.level_shift[2] = 30;

	} else {
		mmu.num_level = 2;
		mmu.max_level = 1;
		mmu.ptes_per_table = 1024;
		mmu.top_level_count = 1024;

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 22;
	}

#endif	/* __i386 */

	for (i = 0; i < mmu.num_level; ++i) {
		mmu.level_size[i] = 1UL << mmu.level_shift[i];
		mmu.level_offset[i] = mmu.level_size[i] - 1;
		mmu.level_mask[i] = ~mmu.level_offset[i];
	}
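
	/*
	 * For reference, the spans the level shifts above work out to:
	 *
	 *	level 0: 1 << 12 = 4K pages
	 *	level 1: 1 << 21 = 2M (PAE) or 1 << 22 = 4M (non-PAE i386)
	 *	level 2: 1 << 30 = 1G
	 *	level 3: 1 << 39 = 512G (amd64 top level)
	 */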

	set_max_page_level();

	mmu_page_sizes = mmu.max_page_level + 1;
	mmu_exported_page_sizes = mmu.umax_page_level + 1;

	/* restrict legacy applications from using pagesizes 1g and above */
	mmu_legacy_page_sizes =
	    (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;

	for (i = 0; i <= mmu.max_page_level; ++i) {
		mmu.pte_bits[i] = PT_VALID | pt_kern;
		if (i > 0)
			mmu.pte_bits[i] |= PT_PAGESIZE;
	}

	/*
	 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
	 */
	for (i = 1; i < mmu.num_level; ++i)
		mmu.ptp_bits[i] = PT_PTPBITS;

#if defined(__i386)
	mmu.ptp_bits[2] = PT_VALID;
#endif

	/*
	 * Compute how many hash table entries to have per process for htables.
	 * We start with 1 page's worth of entries.
	 *
	 * If physical memory is small, reduce the amount needed to cover it.
	 */
	max_htables = physmax / mmu.ptes_per_table;
	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
	while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
		mmu.hash_cnt >>= 1;
	mmu.vlp_hash_cnt = mmu.hash_cnt;

#if defined(__amd64)
	/*
	 * If running in 64 bits and physical memory is large,
	 * increase the size of the cache to cover all of memory for
	 * a 64 bit process.
	 */
#define	HASH_MAX_LENGTH 4
	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
		mmu.hash_cnt <<= 1;
#endif
}
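
/*
 * A worked example of the hash sizing above, assuming a 4K MMU_PAGESIZE
 * and 8-byte pointers: mmu.hash_cnt starts at 4096 / 8 = 512 buckets,
 * is halved (but never below 16) while it still meets or exceeds the
 * htable count needed to cover physmax, and on amd64 is doubled until
 * average chains for a fully mapped process stay under HASH_MAX_LENGTH.
 */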

/*
 * initialize hat data structures
 */
void
hat_init()
{
#if defined(__i386)
	/*
	 * _userlimit must be aligned correctly
	 */
	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
		halt("hat_init(): Unable to continue");
	}
#endif

	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * initialize kmem caches
	 */
	htable_init();
	hment_init();

	hat_cache = kmem_cache_create("hat_t",
	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
	    NULL, 0, 0);

	hat_hash_cache = kmem_cache_create("HatHash",
	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
	    NULL, 0, 0);

	/*
	 * VLP hats can use a smaller hash table size on large memory machines
	 */
	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
		vlp_hash_cache = hat_hash_cache;
	} else {
		vlp_hash_cache = kmem_cache_create("HatVlpHash",
		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
		    NULL, 0, 0);
	}

	/*
	 * Set up the kernel's hat
	 */
	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	kas.a_hat->hat_as = &kas;
	kas.a_hat->hat_flags = 0;
	AS_LOCK_EXIT(&kas, &kas.a_lock);

	CPUSET_ZERO(khat_cpuset);
	CPUSET_ADD(khat_cpuset, CPU->cpu_id);

	/*
	 * The kernel hat's next pointer serves as the head of the hat list.
	 * The kernel hat's prev pointer tracks the last hat on the list for
	 * htable_steal() to use.
	 */
	kas.a_hat->hat_next = NULL;
	kas.a_hat->hat_prev = NULL;

	/*
	 * Allocate an htable hash bucket for the kernel
	 * XX64 - tune for 64 bit procs
	 */
	kas.a_hat->hat_num_hash = mmu.hash_cnt;
	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));

	/*
	 * zero out the top level and cached htable pointers
	 */
	kas.a_hat->hat_ht_cached = NULL;
	kas.a_hat->hat_htable = NULL;

	/*
	 * Pre-allocate hrm_hashtab before enabling the collection of
	 * refmod statistics.  Allocating on the fly would mean running
	 * the risk of suffering recursive mutex enters or deadlocks.
	 */
	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
	    KM_SLEEP);
}

/*
 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 *
 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 */
/*ARGSUSED*/
static void
hat_vlp_setup(struct cpu *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci = cpu->cpu_hat_info;
	pfn_t pfn;

	/*
	 * allocate the level==2 page table for the bottom most
	 * 512Gig of address space (this is where 32 bit apps live)
	 */
	ASSERT(hci != NULL);
	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);

	/*
	 * Allocate a top level pagetable and copy the kernel's
	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
	 */
	hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
	hci->hci_vlp_pfn =
	    hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
	ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
	bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);

	pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
	ASSERT(pfn != PFN_INVALID);
	hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
#endif /* __amd64 && !__xpv */
}

/*ARGSUSED*/
static void
hat_vlp_teardown(cpu_t *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci;

	if ((hci = cpu->cpu_hat_info) == NULL)
		return;
	if (hci->hci_vlp_l2ptes)
		kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
	if (hci->hci_vlp_l3ptes)
		kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
#endif
}

#define	NEXT_HKR(r, l, s, e) {			\
	kernel_ranges[r].hkr_level = l;		\
	kernel_ranges[r].hkr_start_va = s;	\
	kernel_ranges[r].hkr_end_va = e;	\
	++r;					\
}
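
/*
 * Example expansion, from the amd64 use in hat_init_finish() below:
 * NEXT_HKR(r, 3, kernelbase, 0) records a level 3 range running from
 * kernelbase to the end of memory (hkr_end_va == 0) and advances r.
 */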

/*
 * Finish filling in the kernel hat.
 * Pre-fill all top level kernel page table entries for the kernel's
 * part of the address range. From this point on we can't use any new
 * kernel large pages if they need PTE's at max_level.
 *
 * create the kmap mappings.
 */
void
hat_init_finish(void)
{
	size_t		size;
	uint_t		r = 0;
	uintptr_t	va;
	hat_kernel_range_t *rp;


	/*
	 * We are now effectively running on the kernel hat.
	 * Clearing use_boot_reserve shuts off using the pre-allocated boot
	 * reserve for all HAT allocations.  From here on, the reserves are
	 * only used when avoiding recursion in kmem_alloc().
	 */
	use_boot_reserve = 0;
	htable_adjust_reserve();

	/*
	 * User HATs are initialized with copies of all kernel mappings in
	 * higher level page tables. Ensure that those entries exist.
	 */
#if defined(__amd64)

	NEXT_HKR(r, 3, kernelbase, 0);
#if defined(__xpv)
	NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
#endif

#elif defined(__i386)

#if !defined(__xpv)
	if (mmu.pae_hat) {
		va = kernelbase;
		if ((va & LEVEL_MASK(2)) != va) {
			va = P2ROUNDUP(va, LEVEL_SIZE(2));
			NEXT_HKR(r, 1, kernelbase, va);
		}
		if (va != 0)
			NEXT_HKR(r, 2, va, 0);
	} else
#endif /* __xpv */
		NEXT_HKR(r, 1, kernelbase, 0);

#endif /* __i386 */

	num_kernel_ranges = r;

	/*
	 * Create all the kernel pagetables that will have entries
	 * shared to user HATs.
	 */
	for (r = 0; r < num_kernel_ranges; ++r) {
		rp = &kernel_ranges[r];
		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
		    va += LEVEL_SIZE(rp->hkr_level)) {
			htable_t *ht;

			if (IN_HYPERVISOR_VA(va))
				continue;

			/* can/must skip if a page mapping already exists */
			if (rp->hkr_level <= mmu.max_page_level &&
			    (ht = htable_getpage(kas.a_hat, va, NULL)) !=
			    NULL) {
				htable_release(ht);
				continue;
			}

			(void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
			    NULL);
		}
	}

	/*
	 * 32 bit PAE metal kernels use only 4 of the 512 entries in the
	 * page holding the top level pagetable. We use the remainder for
	 * the "per CPU" page tables for VLP processes.
	 * Map the top level kernel pagetable into the kernel to make
	 * it easy to use bcopy to access these tables.
	 */
	if (mmu.pae_hat) {
		vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
		hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
		    kas.a_hat->hat_htable->ht_pfn,
#if !defined(__xpv)
		    PROT_WRITE |
#endif
		    PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
		    HAT_LOAD | HAT_LOAD_NOCONSIST);
	}
	hat_vlp_setup(CPU);

	/*
	 * Create kmap (cached mappings of kernel PTEs)
	 * for 32 bit we map from segmap_start .. ekernelheap
	 * for 64 bit we map from segmap_start ..
	 * segmap_start + segmapsize;
	 */
#if defined(__i386)
	size = (uintptr_t)ekernelheap - segmap_start;
#elif defined(__amd64)
	size = segmapsize;
#endif
	hat_kmap_init((uintptr_t)segmap_start, size);
}

/*
 * In 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
 * are 32 bit, so for safety we must use cas64() to install these.
 */
#ifdef __i386
static void
reload_pae32(hat_t *hat, cpu_t *cpu)
{
	x86pte_t *src;
	x86pte_t *dest;
	x86pte_t pte;
	int i;

	/*
	 * Load the 4 entries of the level 2 page table into this
	 * cpu's range of the vlp_page and point cr3 at them.
	 */
	ASSERT(mmu.pae_hat);
	src = hat->hat_vlp_ptes;
	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
	for (i = 0; i < VLP_NUM_PTES; ++i) {
		for (;;) {
			pte = dest[i];
			if (pte == src[i])
				break;
			if (cas64(dest + i, pte, src[i]) != src[i])
				break;
		}
	}
}
#endif
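
/*
 * Note on the cas64() use above: a plain 64-bit store on a 32-bit CPU is
 * performed as two 32-bit stores, so a concurrent hardware table walk
 * could observe a torn PTE. cas64() installs the whole 8-byte entry in
 * one atomic operation, which is the only property relied on here.
 */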

/*
 * Switch to a new active hat, maintaining bit masks to track active CPUs.
 *
 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
 * remains a 32-bit value.
 */
void
hat_switch(hat_t *hat)
{
	uint64_t	newcr3;
	cpu_t		*cpu = CPU;
	hat_t		*old = cpu->cpu_current_hat;

	/*
	 * set up this information first, so we don't miss any cross calls
	 */
	if (old != NULL) {
		if (old == hat)
			return;
		if (old != kas.a_hat)
			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
	}

	/*
	 * Add this CPU to the active set for this HAT.
	 */
	if (hat != kas.a_hat) {
		CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
	}
	cpu->cpu_current_hat = hat;

	/*
	 * now go ahead and load cr3
	 */
	if (hat->hat_flags & HAT_VLP) {
#if defined(__amd64)
		x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;

		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
		newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
#elif defined(__i386)
		reload_pae32(hat, cpu);
		newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
		    (cpu->cpu_id + 1) * VLP_SIZE;
#endif
	} else {
		newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
	}
#ifdef __xpv
	{
		struct mmuext_op t[2];
		uint_t retcnt;
		uint_t opcnt = 1;

		t[0].cmd = MMUEXT_NEW_BASEPTR;
		t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
#if defined(__amd64)
		/*
		 * There's an interesting problem here, as to what to
		 * actually specify when switching to the kernel hat.
		 * For now we'll reuse the kernel hat again.
		 */
		t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
		if (hat == kas.a_hat)
			t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
		else
			t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
		++opcnt;
#endif	/* __amd64 */
		if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmuext_op() failed");
		ASSERT(retcnt == opcnt);
	}
#else
	setcr3(newcr3);
#endif
	ASSERT(cpu == CPU);
}

/*
 * Utility to return a valid x86pte_t from protections, pfn, and level number
 */
static x86pte_t
hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
{
	x86pte_t	pte;
	uint_t		cache_attr = attr & HAT_ORDER_MASK;

	pte = MAKEPTE(pfn, level);

	if (attr & PROT_WRITE)
		PTE_SET(pte, PT_WRITABLE);

	if (attr & PROT_USER)
		PTE_SET(pte, PT_USER);

	if (!(attr & PROT_EXEC))
		PTE_SET(pte, mmu.pt_nx);

	/*
	 * Set the software bits used to track ref/mod sync's and hments.
	 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
	 */
	if (flags & HAT_LOAD_NOCONSIST)
		PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
	else if (attr & HAT_NOSYNC)
		PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);

	/*
	 * Set the caching attributes in the PTE. The combination
	 * of attributes is poorly defined, so we pay attention
	 * to them in the given order.
	 *
	 * The test for HAT_STRICTORDER is different because it's defined
	 * as "0" - which was a stupid thing to do, but is too late to change!
	 */
	if (cache_attr == HAT_STRICTORDER) {
		PTE_SET(pte, PT_NOCACHE);
	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
		/* nothing to set */;
	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
		PTE_SET(pte, PT_NOCACHE);
		if (x86_feature & X86_PAT)
			PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
		else
			PTE_SET(pte, PT_WRITETHRU);
	} else {
		panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
	}

	return (pte);
}

/*
 * Duplicate address translations of the parent to the child.
 * This function really isn't used anymore.
 */
/*ARGSUSED*/
int
hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
{
	ASSERT((uintptr_t)addr < kernelbase);
	ASSERT(new != kas.a_hat);
	ASSERT(old != kas.a_hat);
	return (0);
}

/*
 * Allocate any hat resources required for a process being swapped in.
 */
/*ARGSUSED*/
void
hat_swapin(hat_t *hat)
{
	/* do nothing - we let everything fault back in */
}

/*
 * Unload all translations associated with an address space of a process
 * that is being swapped out.
 */
void
hat_swapout(hat_t *hat)
{
	uintptr_t	vaddr = (uintptr_t)0;
	uintptr_t	eaddr = _userlimit;
	htable_t	*ht = NULL;
	level_t		l;

	XPV_DISALLOW_MIGRATE();
	/*
	 * We can't just call hat_unload(hat, 0, _userlimit...) here, because
	 * seg_spt and shared pagetables can't be swapped out.
	 * Take a look at segspt_shmswapout() - it's a big no-op.
	 *
	 * Instead we'll walk through all the address space and unload
	 * any mappings which we are sure are not shared, not locked.
	 */
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
		eaddr = (uintptr_t)hat->hat_as->a_userlimit;

	while (vaddr < eaddr) {
		(void) htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;

		ASSERT(!IN_VA_HOLE(vaddr));

		/*
		 * If the page table is shared skip its entire range.
		 */
		l = ht->ht_level;
		if (ht->ht_flags & HTABLE_SHARED_PFN) {
			vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
			htable_release(ht);
			ht = NULL;
			continue;
		}

		/*
		 * If the page table has no locked entries, unload this one.
		 */
		if (ht->ht_lock_cnt == 0)
			hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
			    HAT_UNLOAD_UNMAP);

		/*
		 * If we have a level 0 page table with locked entries,
		 * skip the entire page table, otherwise skip just one entry.
		 */
		if (ht->ht_lock_cnt > 0 && l == 0)
			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
		else
			vaddr += LEVEL_SIZE(l);
	}
	if (ht)
		htable_release(ht);

	/*
	 * We're in swapout because the system is low on memory, so
	 * go back and flush all the htables off the cached list.
	 */
	htable_purge_hat(hat);
	XPV_ALLOW_MIGRATE();
}

/*
 * returns number of bytes that have valid mappings in hat.
 */
size_t
hat_get_mapped_size(hat_t *hat)
{
	size_t total = 0;
	int l;

	for (l = 0; l <= mmu.max_page_level; l++)
		total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
	total += hat->hat_ism_pgcnt;

	return (total);
}

/*
 * enable/disable collection of stats for hat.
 */
int
hat_stats_enable(hat_t *hat)
{
	atomic_add_32(&hat->hat_stats, 1);
	return (1);
}

void
hat_stats_disable(hat_t *hat)
{
	atomic_add_32(&hat->hat_stats, -1);
}

/*
 * Utility to sync the ref/mod bits from a page table entry to the page_t.
 * We must be holding the mapping list lock when this is called.
 */
static void
hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
{
	uint_t	rm = 0;
	pgcnt_t	pgcnt;

	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
		return;

	if (PTE_GET(pte, PT_REF))
		rm |= P_REF;

	if (PTE_GET(pte, PT_MOD))
		rm |= P_MOD;

	if (rm == 0)
		return;

	/*
	 * sync to all constituent pages of a large page
	 */
	ASSERT(x86_hm_held(pp));
	pgcnt = page_get_pagecnt(level);
	ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
	for (; pgcnt > 0; --pgcnt) {
		/*
		 * hat_page_demote() can't decrease
		 * pszc below this mapping size
		 * since this large mapping existed after we
		 * took mlist lock.
		 */
		ASSERT(pp->p_szc >= level);
		hat_page_setattr(pp, rm);
		++pp;
	}
}

/*
 * This is the set of PTE bits for PFN, permissions and caching
 * that are allowed to change on a HAT_LOAD_REMAP
 */
#define	PT_REMAP_BITS							\
	(PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |		\
	PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)

#define	REMAPASSERT(EX)	if (!(EX)) panic("hati_pte_map: " #EX)
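
/*
 * Note that REMAPASSERT fires even in non-DEBUG kernels, and the "#"
 * stringizes the failed expression; for example
 * REMAPASSERT(flags & HAT_LOAD_REMAP) would panic with the message
 * "hati_pte_map: flags & HAT_LOAD_REMAP".
 */
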
/*
 * Do the low-level work to get a mapping entered into a HAT's pagetables
 * and in the mapping list of the associated page_t.
 */
static int
hati_pte_map(
	htable_t	*ht,
	uint_t		entry,
	page_t		*pp,
	x86pte_t	pte,
	int		flags,
	void		*pte_ptr)
{
	hat_t		*hat = ht->ht_hat;
	x86pte_t	old_pte;
	level_t		l = ht->ht_level;
	hment_t		*hm;
	uint_t		is_consist;
	int		rv = 0;

	/*
	 * Is this a consistent (ie. need mapping list lock) mapping?
	 */
	is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);

	/*
	 * Track locked mapping count in the htable.  Do this first,
	 * as we track locking even if there already is a mapping present.
	 */
	if ((flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat)
		HTABLE_LOCK_INC(ht);

	/*
	 * Acquire the page's mapping list lock and get an hment to use.
	 * Note that hment_prepare() might return NULL.
	 */
	if (is_consist) {
		x86_hm_enter(pp);
		hm = hment_prepare(ht, entry, pp);
	}

	/*
	 * Set the new pte, retrieving the old one at the same time.
	 */
	old_pte = x86pte_set(ht, entry, pte, pte_ptr);

	/*
	 * did we get a large page / page table collision?
	 */
	if (old_pte == LPAGE_ERROR) {
		rv = -1;
		goto done;
	}

	/*
	 * If the mapping didn't change there is nothing more to do.
	 */
	if (PTE_EQUIV(pte, old_pte))
		goto done;

	/*
	 * Install a new mapping in the page's mapping list
	 */
	if (!PTE_ISVALID(old_pte)) {
		if (is_consist) {
			hment_assign(ht, entry, pp, hm);
			x86_hm_exit(pp);
		} else {
			ASSERT(flags & HAT_LOAD_NOCONSIST);
		}
#if defined(__amd64)
		if (ht->ht_flags & HTABLE_VLP) {
			cpu_t *cpu = CPU;
			x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
			VLP_COPY(hat->hat_vlp_ptes, vlpptep);
		}
#endif
		HTABLE_INC(ht->ht_valid_cnt);
		PGCNT_INC(hat, l);
		return (rv);
	}

	/*
	 * Remaps are more complicated:
	 *	- HAT_LOAD_REMAP must be specified if changing the pfn.
	 *	  We also require that NOCONSIST be specified.
	 *	- Otherwise only permission or caching bits may change.
	 */
	if (!PTE_ISPAGE(old_pte, l))
		panic("non-null/page mapping pte=" FMT_PTE, old_pte);

	if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
		REMAPASSERT(flags & HAT_LOAD_REMAP);
		REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
		REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
		REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
		    pf_is_memory(PTE2PFN(pte, l)));
		REMAPASSERT(!is_consist);
	}

	/*
	 * We only let remaps change certain bits in the PTE.
	 */
	if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
		panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
		    old_pte, pte);

	/*
	 * We don't create any mapping list entries on a remap, so release
	 * any allocated hment after we drop the mapping list lock.
	 */
done:
	if (is_consist) {
		x86_hm_exit(pp);
		if (hm != NULL)
			hment_free(hm);
	}
	return (rv);
}

/*
 * Internal routine to load a single page table entry. This only fails if
 * we attempt to overwrite a page table link with a large page.
 */
static int
hati_load_common(
	hat_t		*hat,
	uintptr_t	va,
	page_t		*pp,
	uint_t		attr,
	uint_t		flags,
	level_t		level,
	pfn_t		pfn)
{
	htable_t	*ht;
	uint_t		entry;
	x86pte_t	pte;
	int		rv = 0;

	/*
	 * The number 16 is arbitrary and here to catch a recursion problem
	 * early before we blow out the kernel stack.
	 */
	++curthread->t_hatdepth;
	ASSERT(curthread->t_hatdepth < 16);

	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));

	if (flags & HAT_LOAD_SHARE)
		hat->hat_flags |= HAT_SHARED;

	/*
	 * Find the page table that maps this page if it already exists.
	 */
	ht = htable_lookup(hat, va, level);

	/*
	 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
	 */
	if (pp == NULL)
		flags |= HAT_LOAD_NOCONSIST;

	if (ht == NULL) {
		ht = htable_create(hat, va, level, NULL);
		ASSERT(ht != NULL);
	}
	entry = htable_va2entry(va, ht);

	/*
	 * a bunch of paranoid error checking
	 */
	ASSERT(ht->ht_busy > 0);
	if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
		panic("hati_load_common: bad htable %p, va %p", ht, (void *)va);
	ASSERT(ht->ht_level == level);

	/*
	 * construct the new PTE
	 */
	if (hat == kas.a_hat)
		attr &= ~PROT_USER;
	pte = hati_mkpte(pfn, attr, level, flags);
	if (hat == kas.a_hat && va >= kernelbase)
		PTE_SET(pte, mmu.pt_global);

	/*
	 * establish the mapping
	 */
	rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);

	/*
	 * release the htable and any reserves
	 */
	htable_release(ht);
	--curthread->t_hatdepth;
	return (rv);
}

/*
 * special case of hat_memload to deal with some kernel addrs for performance
 */
static void
hat_kmap_load(
	caddr_t		addr,
	page_t		*pp,
	uint_t		attr,
	uint_t		flags)
{
	uintptr_t	va = (uintptr_t)addr;
	x86pte_t	pte;
	pfn_t		pfn = page_pptonum(pp);
	pgcnt_t		pg_off = mmu_btop(va - mmu.kmap_addr);
	htable_t	*ht;
	uint_t		entry;
	void		*pte_ptr;

	/*
	 * construct the requested PTE
	 */
	attr &= ~PROT_USER;
	attr |= HAT_STORECACHING_OK;
	pte = hati_mkpte(pfn, attr, 0, flags);
	PTE_SET(pte, mmu.pt_global);

	/*
	 * Figure out the pte_ptr and htable and use common code to finish up
	 */
	if (mmu.pae_hat)
		pte_ptr = mmu.kmap_ptes + pg_off;
	else
		pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
	ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
	    LEVEL_SHIFT(1)];
	entry = htable_va2entry(va, ht);
	++curthread->t_hatdepth;
	ASSERT(curthread->t_hatdepth < 16);
	(void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
	--curthread->t_hatdepth;
}
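
/*
 * A worked example of the kmap index arithmetic above, with hypothetical
 * numbers (4K pages; assuming the first kmap htable starts at
 * mmu.kmap_addr):
 *
 *	va     = mmu.kmap_addr + 0x5000		five pages into the range
 *	pg_off = mmu_btop(0x5000) = 5		5th slot of kmap_ptes
 *	ht     = kmap_htables[0x5000 >> LEVEL_SHIFT(1)] = kmap_htables[0]
 */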

/*
 * hat_memload() - load a translation to the given page struct
 *
 * Flags for hat_memload/hat_devload/hat_*attr.
 *
 * HAT_LOAD	Default flags to load a translation to the page.
 *
 * HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
 *			and hat_devload().
 *
 * HAT_LOAD_NOCONSIST	Do not add mapping to page_t mapping list;
 *			sets PT_NOCONSIST.
 *
 * HAT_LOAD_SHARE	A flag to hat_memload() to indicate h/w page tables
 *			that map some user pages (not kas) are shared by more
 *			than one process (eg. ISM).
 *
 * HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
 *
 * HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
 *			point, it's setting up mapping to allocate internal
 *			hat layer data structures.  This flag forces hat layer
 *			to tap its reserves in order to prevent infinite
 *			recursion.
 *
 * The following is a protection attribute (like PROT_READ, etc.)
 *
 * HAT_NOSYNC	set PT_NOSYNC - this mapping's ref/mod bits
 *		are never cleared.
 *
 * Installing new valid PTE's and creation of the mapping list
 * entry are controlled under the same lock. It's derived from the
 * page_t being mapped.
 */
static uint_t supported_memload_flags =
	HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
	HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;

void
hat_memload(
	hat_t		*hat,
	caddr_t		addr,
	page_t		*pp,
	uint_t		attr,
	uint_t		flags)
{
	uintptr_t	va = (uintptr_t)addr;
	level_t		level = 0;
	pfn_t		pfn = page_pptonum(pp);

	XPV_DISALLOW_MIGRATE();
	ASSERT(IS_PAGEALIGNED(va));
	ASSERT(hat == kas.a_hat || va < _userlimit);
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	ASSERT((flags & supported_memload_flags) == flags);

	ASSERT(!IN_VA_HOLE(va));
	ASSERT(!PP_ISFREE(pp));

	/*
	 * kernel address special case for performance.
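	 *
	 * A hypothetical call that takes this fast path (a segmap style
	 * mapping of page "pp" at an address inside the kmap range):
	 *
	 *	hat_memload(kas.a_hat, addr, pp,
	 *	    PROT_READ | PROT_WRITE, HAT_LOAD_LOCK);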
	 */
	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
		ASSERT(hat == kas.a_hat);
		hat_kmap_load(addr, pp, attr, flags);
		XPV_ALLOW_MIGRATE();
		return;
	}

	/*
	 * This is used for memory with normal caching enabled, so
	 * always set HAT_STORECACHING_OK.
	 */
	attr |= HAT_STORECACHING_OK;
	if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
		panic("unexpected hati_load_common() failure");
	XPV_ALLOW_MIGRATE();
}

/* ARGSUSED */
void
hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
    uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
{
	hat_memload(hat, addr, pp, attr, flags);
}

/*
 * Load the given array of page structs using large pages when possible
 */
void
hat_memload_array(
	hat_t		*hat,
	caddr_t		addr,
	size_t		len,
	page_t		**pages,
	uint_t		attr,
	uint_t		flags)
{
	uintptr_t	va = (uintptr_t)addr;
	uintptr_t	eaddr = va + len;
	level_t		level;
	size_t		pgsize;
	pgcnt_t		pgindx = 0;
	pfn_t		pfn;
	pgcnt_t		i;

	XPV_DISALLOW_MIGRATE();
	ASSERT(IS_PAGEALIGNED(va));
	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	ASSERT((flags & supported_memload_flags) == flags);

	/*
	 * memload is used for memory with full caching enabled, so
	 * set HAT_STORECACHING_OK.
	 */
	attr |= HAT_STORECACHING_OK;

	/*
	 * handle all pages using largest possible pagesize
	 */
	while (va < eaddr) {
		/*
		 * decide what level mapping to use (ie. pagesize)
		 */
		pfn = page_pptonum(pages[pgindx]);
		for (level = mmu.max_page_level; ; --level) {
			pgsize = LEVEL_SIZE(level);
			if (level == 0)
				break;

			if (!IS_P2ALIGNED(va, pgsize) ||
			    (eaddr - va) < pgsize ||
			    !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
				continue;

			/*
			 * To use a large mapping of this size, all the
			 * pages we are passed must be sequential subpages
			 * of the large page.
			 * hat_page_demote() can't change p_szc because
			 * all pages are locked.
			 */
			if (pages[pgindx]->p_szc >= level) {
				for (i = 0; i < mmu_btop(pgsize); ++i) {
					if (pfn + i !=
					    page_pptonum(pages[pgindx + i]))
						break;
					ASSERT(pages[pgindx + i]->p_szc >=
					    level);
					ASSERT(pages[pgindx] + i ==
					    pages[pgindx + i]);
				}
				if (i == mmu_btop(pgsize)) {
#ifdef DEBUG
					if (level == 2)
						map1gcnt++;
#endif
					break;
				}
			}
		}

		/*
		 * Load this page mapping.  If the load fails, try a smaller
		 * pagesize.
		 */
		ASSERT(!IN_VA_HOLE(va));
		while (hati_load_common(hat, va, pages[pgindx], attr,
		    flags, level, pfn) != 0) {
			if (level == 0)
				panic("unexpected hati_load_common() failure");
			--level;
			pgsize = LEVEL_SIZE(level);
		}

		/*
		 * move to next page
		 */
		va += pgsize;
		pgindx += mmu_btop(pgsize);
	}
	XPV_ALLOW_MIGRATE();
}

/* ARGSUSED */
void
hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
    struct page **pps, uint_t attr, uint_t flags,
    hat_region_cookie_t rcookie)
{
	hat_memload_array(hat, addr, len, pps, attr, flags);
}

/*
 * void hat_devload(hat, addr, len, pf, attr, flags)
 *	load/lock the given page frame number
 *
 * Advisory ordering attributes. Apply only to device mappings.
 *
 * HAT_STRICTORDER: the CPU must issue the references in order, as the
 *	programmer specified.  This is the default.
 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
 *	of reordering; store or load with store or load).
 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
 *	to consecutive locations (for example, turn two consecutive byte
 *	stores into one halfword store), and it may batch individual loads
 *	(for example, turn two consecutive byte loads into one halfword load).
 *	This also implies re-ordering.
 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
 *	until another store occurs.  The default is to fetch new data
 *	on every load.  This also implies merging.
 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
 *	the device (perhaps with other data) at a later time.  The default is
 *	to push the data right away.  This also implies load caching.
 *
 * Equivalent of hat_memload(), but can be used for device memory where
 * there are no page_t's and we support additional flags (write merging, etc).
 * Note that we can have large page mappings with this interface.
 */
int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
	HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
	HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;

void
hat_devload(
	hat_t		*hat,
	caddr_t		addr,
	size_t		len,
	pfn_t		pfn,
	uint_t		attr,
	int		flags)
{
	uintptr_t	va = ALIGN2PAGE(addr);
	uintptr_t	eva = va + len;
	level_t		level;
	size_t		pgsize;
	page_t		*pp;
	int		f;	/* per PTE copy of flags - maybe modified */
	uint_t		a;	/* per PTE copy of attr */

	XPV_DISALLOW_MIGRATE();
	ASSERT(IS_PAGEALIGNED(va));
	ASSERT(hat == kas.a_hat || eva <= _userlimit);
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	ASSERT((flags & supported_devload_flags) == flags);

	/*
	 * handle all pages
	 */
	while (va < eva) {

		/*
		 * decide what level mapping to use (ie. pagesize)
		 */
		for (level = mmu.max_page_level; ; --level) {
			pgsize = LEVEL_SIZE(level);
			if (level == 0)
				break;
			if (IS_P2ALIGNED(va, pgsize) &&
			    (eva - va) >= pgsize &&
			    IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
#ifdef DEBUG
				if (level == 2)
					map1gcnt++;
#endif
				break;
			}
		}

		/*
		 * If this is just memory then allow caching (this happens
		 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
		 * to override that.  If we don't have a page_t then make sure
		 * NOCONSIST is set.
		 */
		a = attr;
		f = flags;
		if (!pf_is_memory(pfn))
			f |= HAT_LOAD_NOCONSIST;
		else if (!(a & HAT_PLAT_NOCACHE))
			a |= HAT_STORECACHING_OK;

		if (f & HAT_LOAD_NOCONSIST)
			pp = NULL;
		else
			pp = page_numtopp_nolock(pfn);

		/*
		 * load this page mapping
		 */
		ASSERT(!IN_VA_HOLE(va));
		while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
			if (level == 0)
				panic("unexpected hati_load_common() failure");
			--level;
			pgsize = LEVEL_SIZE(level);
		}

		/*
		 * move to next page
		 */
		va += pgsize;
		pfn += mmu_btop(pgsize);
	}
	XPV_ALLOW_MIGRATE();
}

/*
 * void hat_unlock(hat, addr, len)
 *	unlock the mappings to a given range of addresses
 *
 * Locks are tracked by ht_lock_cnt in the htable.
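 *
 * For illustration, the counterpart to a locked load (hypothetical
 * address and page):
 *
 *	hat_memload(as->a_hat, addr, pp, PROT_READ, HAT_LOAD_LOCK);
 *	...
 *	hat_unlock(as->a_hat, addr, MMU_PAGESIZE);
 *
 * Unlocking only drops ht_lock_cnt; the mapping itself remains in place
 * until it is unloaded.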
 */
void
hat_unlock(hat_t *hat, caddr_t addr, size_t len)
{
	uintptr_t	vaddr = (uintptr_t)addr;
	uintptr_t	eaddr = vaddr + len;
	htable_t	*ht = NULL;

	/*
	 * kernel entries are always locked; we don't track lock counts
	 */
	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	if (hat == kas.a_hat)
		return;
	if (eaddr > _userlimit)
		panic("hat_unlock() address out of range - above _userlimit");

	XPV_DISALLOW_MIGRATE();
	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	while (vaddr < eaddr) {
		(void) htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;

		ASSERT(!IN_VA_HOLE(vaddr));

		if (ht->ht_lock_cnt < 1)
			panic("hat_unlock(): lock_cnt < 1, "
			    "htable=%p, vaddr=%p\n", ht, (caddr_t)vaddr);
		HTABLE_LOCK_DEC(ht);

		vaddr += LEVEL_SIZE(ht->ht_level);
	}
	if (ht)
		htable_release(ht);
	XPV_ALLOW_MIGRATE();
}

/* ARGSUSED */
void
hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
    hat_region_cookie_t rcookie)
{
	panic("No shared region support on x86");
}

#if !defined(__xpv)
/*
 * Cross call service routine to demap a virtual page on
 * the current CPU or flush all mappings in TLB.
 */
/*ARGSUSED*/
static int
hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
{
	hat_t	*hat = (hat_t *)a1;
	caddr_t	addr = (caddr_t)a2;

	/*
	 * If the target hat isn't the kernel and this CPU isn't operating
	 * in the target hat, we can ignore the cross call.
	 */
	if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
		return (0);

	/*
	 * For a normal address, we just flush one page mapping
	 */
	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
		mmu_tlbflush_entry(addr);
		return (0);
	}

	/*
	 * Otherwise we reload cr3 to effect a complete TLB flush.
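	 * (A cr3 reload does not flush global kernel entries; see
	 * flush_all_tlb_entries() below for the CR4_PGE toggle that
	 * takes care of those.)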
	 *
	 * A reload of cr3 on a VLP process also means we must recopy the
	 * pte values from the struct hat.
	 */
	if (hat->hat_flags & HAT_VLP) {
#if defined(__amd64)
		x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;

		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
#elif defined(__i386)
		reload_pae32(hat, CPU);
#endif
	}
	reload_cr3();
	return (0);
}

/*
 * Flush all TLB entries, including global (ie. kernel) ones.
 */
static void
flush_all_tlb_entries(void)
{
	ulong_t cr4 = getcr4();

	if (cr4 & CR4_PGE) {
		setcr4(cr4 & ~(ulong_t)CR4_PGE);
		setcr4(cr4);

		/*
		 * 32 bit PAE also always needs a reload_cr3()
		 */
		if (mmu.max_level == 2)
			reload_cr3();
	} else {
		reload_cr3();
	}
}

#define	TLB_CPU_HALTED	(01ul)
#define	TLB_INVAL_ALL	(02ul)
#define	CAS_TLB_INFO(cpu, old, new)	\
	caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))

/*
 * Record that a CPU is going idle
 */
void
tlb_going_idle(void)
{
	atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
}

/*
 * Service a delayed TLB flush if coming out of being idle.
 */
void
tlb_service(void)
{
	ulong_t flags = getflags();
	ulong_t tlb_info;
	ulong_t found;

	/*
	 * Be sure interrupts are off while doing this so that
	 * higher level interrupts correctly wait for flushes to finish.
	 */
	if (flags & PS_IE)
		flags = intr_clear();

	/*
	 * We only have to do something if coming out of being idle.
	 */
	tlb_info = CPU->cpu_m.mcpu_tlb_info;
	if (tlb_info & TLB_CPU_HALTED) {
		ASSERT(CPU->cpu_current_hat == kas.a_hat);

		/*
		 * Atomic clear and fetch of old state.
		 */
		while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
			ASSERT(found & TLB_CPU_HALTED);
			tlb_info = found;
			SMT_PAUSE();
		}
		if (tlb_info & TLB_INVAL_ALL)
			flush_all_tlb_entries();
	}

	/*
	 * Restore interrupt enable control bit.
	 */
	if (flags & PS_IE)
		sti();
}
#endif /* !__xpv */

/*
 * Internal routine to do cross calls to invalidate a range of pages on
 * all CPUs using a given hat.
 */
void
hat_tlb_inval(hat_t *hat, uintptr_t va)
{
	extern int	flushes_require_xcalls;	/* from mp_startup.c */
	cpuset_t	justme;
	cpuset_t	cpus_to_shootdown;
#ifndef __xpv
	cpuset_t	check_cpus;
	cpu_t		*cpup;
	int		c;
#endif

	/*
	 * If the hat is being destroyed, there are no more users, so
	 * demap need not do anything.
	 */
	if (hat->hat_flags & HAT_FREEING)
		return;

	/*
	 * If demapping from a shared pagetable, we best demap the
	 * entire set of user TLBs, since we don't know what addresses
	 * these were shared at.
	 */
	if (hat->hat_flags & HAT_SHARED) {
		hat = kas.a_hat;
		va = DEMAP_ALL_ADDR;
	}

	/*
	 * if not running with multiple CPUs, don't use cross calls
	 */
	if (panicstr || !flushes_require_xcalls) {
#ifdef __xpv
		if (va == DEMAP_ALL_ADDR)
			xen_flush_tlb();
		else
			xen_flush_va((caddr_t)va);
#else
		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
#endif
		return;
	}

	/*
	 * Determine CPUs to shootdown.  Kernel changes always do all CPUs.
	 * Otherwise it's just CPUs currently executing in this hat.
	 */
	kpreempt_disable();
	CPUSET_ONLY(justme, CPU->cpu_id);
	if (hat == kas.a_hat)
		cpus_to_shootdown = khat_cpuset;
	else
		cpus_to_shootdown = hat->hat_cpus;

#ifndef __xpv
	/*
	 * If any CPUs in the set are idle, just request a delayed flush
	 * and avoid waking them up.
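	 * (tlb_going_idle() marks an idle CPU with TLB_CPU_HALTED; the
	 * CAS below also sets TLB_INVAL_ALL so that the CPU runs
	 * flush_all_tlb_entries() from tlb_service() when it wakes.)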
	 */
	check_cpus = cpus_to_shootdown;
	for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
		ulong_t tlb_info;

		if (!CPU_IN_SET(check_cpus, c))
			continue;
		CPUSET_DEL(check_cpus, c);
		cpup = cpu[c];
		if (cpup == NULL)
			continue;

		tlb_info = cpup->cpu_m.mcpu_tlb_info;
		while (tlb_info == TLB_CPU_HALTED) {
			(void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
			    TLB_CPU_HALTED | TLB_INVAL_ALL);
			SMT_PAUSE();
			tlb_info = cpup->cpu_m.mcpu_tlb_info;
		}
		if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
			HATSTAT_INC(hs_tlb_inval_delayed);
			CPUSET_DEL(cpus_to_shootdown, c);
		}
	}
#endif

	if (CPUSET_ISNULL(cpus_to_shootdown) ||
	    CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {

#ifdef __xpv
		if (va == DEMAP_ALL_ADDR)
			xen_flush_tlb();
		else
			xen_flush_va((caddr_t)va);
#else
		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
#endif

	} else {

		CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
#ifdef __xpv
		if (va == DEMAP_ALL_ADDR)
			xen_gflush_tlb(cpus_to_shootdown);
		else
			xen_gflush_va((caddr_t)va, cpus_to_shootdown);
#else
		xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL, X_CALL_HIPRI,
		    cpus_to_shootdown, hati_demap_func);
#endif

	}
	kpreempt_enable();
}

/*
 * Interior routine for HAT_UNLOADs from hat_unload_callback(),
 * hat_kmap_unload() OR from hat_steal() code.  This routine doesn't
 * handle releasing of the htables.
 */
void
hat_pte_unmap(
	htable_t	*ht,
	uint_t		entry,
	uint_t		flags,
	x86pte_t	old_pte,
	void		*pte_ptr)
{
	hat_t		*hat = ht->ht_hat;
	hment_t		*hm = NULL;
	page_t		*pp = NULL;
	level_t		l = ht->ht_level;
	pfn_t		pfn;

	/*
	 * We always track the locking counts, even if nothing is unmapped
	 */
	if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
		ASSERT(ht->ht_lock_cnt > 0);
		HTABLE_LOCK_DEC(ht);
	}

	/*
	 * Figure out which page's mapping list lock to acquire using the PFN
	 * passed in "old" PTE.  We then attempt to invalidate the PTE.
	 * If another thread, probably a hat_pageunload, has asynchronously
	 * unmapped/remapped this address we'll loop here.
	 */
	ASSERT(ht->ht_busy > 0);
	while (PTE_ISVALID(old_pte)) {
		pfn = PTE2PFN(old_pte, l);
		if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
			pp = NULL;
		} else {
#ifdef __xpv
			if (pfn == PFN_INVALID)
				panic("Invalid PFN, but not PT_NOCONSIST");
#endif
			pp = page_numtopp_nolock(pfn);
			if (pp == NULL) {
				panic("no page_t, not NOCONSIST: old_pte="
				    FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
				    old_pte, (uintptr_t)ht, entry,
				    (uintptr_t)pte_ptr);
			}
			x86_hm_enter(pp);
		}

		/*
		 * If freeing the address space, we only need to check that
		 * the PTE hasn't changed; since the mappings are no longer
		 * in use by any thread, invalidation is unnecessary.
		 * If not freeing, do a full invalidate.
		 *
		 * On the hypervisor we must always remove mappings, as a
		 * writable mapping left behind could cause a page table
		 * allocation to fail.
		 */
#if !defined(__xpv)
		if (hat->hat_flags & HAT_FREEING)
			old_pte = x86pte_get(ht, entry);
		else
#endif
			old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr);

		/*
		 * If the page hadn't changed we've unmapped it and can proceed
		 */
		if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
			break;

		/*
		 * Otherwise, we'll have to retry with the current old_pte.
		 * Drop the hment lock, since the pfn may have changed.
		 */
		if (pp != NULL) {
			x86_hm_exit(pp);
			pp = NULL;
		} else {
			ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
		}
	}

	/*
	 * If the old mapping wasn't valid, there's nothing more to do
	 */
	if (!PTE_ISVALID(old_pte)) {
		if (pp != NULL)
			x86_hm_exit(pp);
		return;
	}

	/*
	 * Take care of syncing any MOD/REF bits and removing the hment.
	 */
	if (pp != NULL) {
		if (!(flags & HAT_UNLOAD_NOSYNC))
			hati_sync_pte_to_page(pp, old_pte, l);
		hm = hment_remove(pp, ht, entry);
		x86_hm_exit(pp);
		if (hm != NULL)
			hment_free(hm);
	}

	/*
	 * Handle bookkeeping in the htable and hat
	 */
	ASSERT(ht->ht_valid_cnt > 0);
	HTABLE_DEC(ht->ht_valid_cnt);
	PGCNT_DEC(hat, l);
}

/*
 * very cheap unload implementation to special case some kernel addresses
 */
static void
hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
{
	uintptr_t	va = (uintptr_t)addr;
	uintptr_t	eva = va + len;
	pgcnt_t		pg_index;
	htable_t	*ht;
	uint_t		entry;
	x86pte_t	*pte_ptr;
	x86pte_t	old_pte;

	for (; va < eva; va += MMU_PAGESIZE) {
		/*
		 * Get the PTE
		 */
		pg_index = mmu_btop(va - mmu.kmap_addr);
		pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
		old_pte = GET_PTE(pte_ptr);

		/*
		 * get the htable / entry
		 */
		ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
		    >> LEVEL_SHIFT(1)];
		entry = htable_va2entry(va, ht);

		/*
		 * use mostly common code to unmap it.
		 */
		hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr);
	}
}

/*
 * unload a range of virtual address space (no callback)
 */
void
hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
{
	uintptr_t va = (uintptr_t)addr;

	XPV_DISALLOW_MIGRATE();
	ASSERT(hat == kas.a_hat || va + len <= _userlimit);

	/*
	 * special case for performance.
	 */
	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
		ASSERT(hat == kas.a_hat);
		hat_kmap_unload(addr, len, flags);
	} else {
		hat_unload_callback(hat, addr, len, flags, NULL);
	}
	XPV_ALLOW_MIGRATE();
}

/*
 * Do the callbacks for ranges being unloaded.
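 *
 * Contiguous ranges at the same pagesize are batched into a small
 * range_info_t array (flushed whenever MAX_UNLOAD_CNT entries
 * accumulate) and then handed to the caller's hcb_function.  A
 * hypothetical consumer ("my_unload_cb" is a made-up name):
 *
 *	static void
 *	my_unload_cb(hat_callback_t *cb)
 *	{
 *		... act on [cb->hcb_start_addr, cb->hcb_end_addr) ...
 *	}
 *
 *	hat_callback_t cb = { 0 };
 *	cb.hcb_function = my_unload_cb;
 *	hat_unload_callback(hat, addr, len, HAT_UNLOAD, &cb);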
 */
typedef struct range_info {
	uintptr_t	rng_va;
	ulong_t		rng_cnt;
	level_t		rng_level;
} range_info_t;

static void
handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
{
	/*
	 * do callbacks to upper level VM system
	 */
	while (cb != NULL && cnt > 0) {
		--cnt;
		cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
		cb->hcb_end_addr = cb->hcb_start_addr;
		cb->hcb_end_addr +=
		    range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
		cb->hcb_function(cb);
	}
}

/*
 * Unload a given range of addresses (has optional callback)
 *
 * Flags:
 * define	HAT_UNLOAD		0x00
 * define	HAT_UNLOAD_NOSYNC	0x02
 * define	HAT_UNLOAD_UNLOCK	0x04
 * define	HAT_UNLOAD_OTHER	0x08 - not used
 * define	HAT_UNLOAD_UNMAP	0x10 - same as HAT_UNLOAD
 */
#define	MAX_UNLOAD_CNT (8)
void
hat_unload_callback(
	hat_t		*hat,
	caddr_t		addr,
	size_t		len,
	uint_t		flags,
	hat_callback_t	*cb)
{
	uintptr_t	vaddr = (uintptr_t)addr;
	uintptr_t	eaddr = vaddr + len;
	htable_t	*ht = NULL;
	uint_t		entry;
	uintptr_t	contig_va = (uintptr_t)-1L;
	range_info_t	r[MAX_UNLOAD_CNT];
	uint_t		r_cnt = 0;
	x86pte_t	old_pte;

	XPV_DISALLOW_MIGRATE();
	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));

	/*
	 * Special case a single page being unloaded for speed. This happens
	 * quite frequently, COW faults after a fork() for example.
	 */
	if (cb == NULL && len == MMU_PAGESIZE) {
		ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
		if (ht != NULL) {
			if (PTE_ISVALID(old_pte))
				hat_pte_unmap(ht, entry, flags, old_pte, NULL);
			htable_release(ht);
		}
		XPV_ALLOW_MIGRATE();
		return;
	}

	while (vaddr < eaddr) {
		old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;

		ASSERT(!IN_VA_HOLE(vaddr));

		if (vaddr < (uintptr_t)addr)
			panic("hat_unload_callback(): unmap inside large page");

		/*
		 * We'll do the callbacks for contiguous ranges
		 */
		if (vaddr != contig_va ||
		    (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
			if (r_cnt == MAX_UNLOAD_CNT) {
				handle_ranges(cb, r_cnt, r);
				r_cnt = 0;
			}
			r[r_cnt].rng_va = vaddr;
			r[r_cnt].rng_cnt = 0;
			r[r_cnt].rng_level = ht->ht_level;
			++r_cnt;
		}

		/*
		 * Unload one mapping from the page tables.
		 */
		entry = htable_va2entry(vaddr, ht);
		hat_pte_unmap(ht, entry, flags, old_pte, NULL);
		ASSERT(ht->ht_level <= mmu.max_page_level);
		vaddr += LEVEL_SIZE(ht->ht_level);
		contig_va = vaddr;
		++r[r_cnt - 1].rng_cnt;
	}
	if (ht)
		htable_release(ht);

	/*
	 * handle last range for callbacks
	 */
	if (r_cnt > 0)
		handle_ranges(cb, r_cnt, r);
	XPV_ALLOW_MIGRATE();
}

/*
 * synchronize mapping with software data structures
 *
 * This interface is currently only used by the working set monitor
 * driver.
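 *
 * For illustration, a working-set style consumer would make a pass with
 *
 *	hat_sync(as->a_hat, addr, len, HAT_SYNC_ZERORM);
 *
 * to atomically clear the hardware ref/mod bits; passing
 * HAT_SYNC_DONTZERO (assumed from <vm/hat.h>) instead folds the current
 * bits into the page_t via hati_sync_pte_to_page() below.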
 */
/*ARGSUSED*/
void
hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
{
	uintptr_t	vaddr = (uintptr_t)addr;
	uintptr_t	eaddr = vaddr + len;
	htable_t	*ht = NULL;
	uint_t		entry;
	x86pte_t	pte;
	x86pte_t	save_pte;
	x86pte_t	new;
	page_t		*pp;

	ASSERT(!IN_VA_HOLE(vaddr));
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);

	XPV_DISALLOW_MIGRATE();
	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
try_again:
		pte = htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;
		entry = htable_va2entry(vaddr, ht);

		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
		    PTE_GET(pte, PT_REF | PT_MOD) == 0)
			continue;

		/*
		 * We need to acquire the mapping list lock to protect
		 * against hat_pageunload(), hat_unload(), etc.
		 */
		pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
		if (pp == NULL)
			break;
		x86_hm_enter(pp);
		save_pte = pte;
		pte = x86pte_get(ht, entry);
		if (pte != save_pte) {
			x86_hm_exit(pp);
			goto try_again;
		}
		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
		    PTE_GET(pte, PT_REF | PT_MOD) == 0) {
			x86_hm_exit(pp);
			continue;
		}

		/*
		 * Need to clear ref or mod bits. We may compete with
		 * hardware updating the R/M bits and have to try again.
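		 * hati_update_pte() returns 0 when its compare-and-swap
		 * of the PTE succeeds and a nonzero value when it loses
		 * the race, in which case we start over.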
		 */
		if (flags == HAT_SYNC_ZERORM) {
			new = pte;
			PTE_CLR(new, PT_REF | PT_MOD);
			pte = hati_update_pte(ht, entry, pte, new);
			if (pte != 0) {
				x86_hm_exit(pp);
				goto try_again;
			}
		} else {
			/*
			 * sync the PTE to the page_t
			 */
			hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
		}
		x86_hm_exit(pp);
	}
	if (ht)
		htable_release(ht);
	XPV_ALLOW_MIGRATE();
}

/*
 * void	hat_map(hat, addr, len, flags)
 */
/*ARGSUSED*/
void
hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
{
	/* does nothing */
}

/*
 * uint_t hat_getattr(hat, addr, *attr)
 *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
 *	mapping and *attr is valid, nonzero if there was no mapping and
 *	*attr is not valid.
 */
uint_t
hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
{
	uintptr_t	vaddr = ALIGN2PAGE(addr);
	htable_t	*ht = NULL;
	x86pte_t	pte;

	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);

	if (IN_VA_HOLE(vaddr))
		return ((uint_t)-1);

	ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
	if (ht == NULL)
		return ((uint_t)-1);

	if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
		htable_release(ht);
		return ((uint_t)-1);
	}

	*attr = PROT_READ;
	if (PTE_GET(pte, PT_WRITABLE))
		*attr |= PROT_WRITE;
	if (PTE_GET(pte, PT_USER))
		*attr |= PROT_USER;
	if (!PTE_GET(pte, mmu.pt_nx))
		*attr |= PROT_EXEC;
	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
		*attr |= HAT_NOSYNC;
	htable_release(ht);
	return (0);
}

/*
 * hat_updateattr() applies the given attribute change to an existing mapping
 */
#define	HAT_LOAD_ATTR		1
#define	HAT_SET_ATTR		2
#define	HAT_CLR_ATTR		3

static void
hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
{
	uintptr_t	vaddr = (uintptr_t)addr;
	uintptr_t	eaddr = (uintptr_t)addr + len;
	htable_t	*ht = NULL;
	uint_t		entry;
	x86pte_t	oldpte, newpte;
	page_t		*pp;

	XPV_DISALLOW_MIGRATE();
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
try_again:
		oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;
		if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
			continue;

		pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
		if (pp == NULL)
			continue;
		x86_hm_enter(pp);

		newpte = oldpte;
		/*
		 * We found a page table entry in the desired range,
		 * figure out the new attributes.
		 */
		if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
			if ((attr & PROT_WRITE) &&
			    !PTE_GET(oldpte, PT_WRITABLE))
				newpte |= PT_WRITABLE;

			if ((attr & HAT_NOSYNC) &&
			    PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
				newpte |= PT_NOSYNC;

			if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
				newpte &= ~mmu.pt_nx;
		}

		if (what == HAT_LOAD_ATTR) {
			if (!(attr & PROT_WRITE) &&
			    PTE_GET(oldpte, PT_WRITABLE))
				newpte &= ~PT_WRITABLE;

			if (!(attr & HAT_NOSYNC) &&
			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
				newpte &= ~PT_SOFTWARE;

			if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
				newpte |= mmu.pt_nx;
		}

		if (what == HAT_CLR_ATTR) {
			if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
				newpte &= ~PT_WRITABLE;

			if ((attr & HAT_NOSYNC) &&
			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
				newpte &= ~PT_SOFTWARE;

			if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
				newpte |= mmu.pt_nx;
		}

		/*
		 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
		 * x86pte_set() depends on this.
		 */
		if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
			newpte |= PT_REF | PT_MOD;

		/*
		 * what about PROT_READ or others?  this code only handles:
		 *	EXEC, WRITE, NOSYNC
		 */

		/*
		 * If new PTE really changed, update the table.
		 */
		if (newpte != oldpte) {
			entry = htable_va2entry(vaddr, ht);
			oldpte = hati_update_pte(ht, entry, oldpte, newpte);
			if (oldpte != 0) {
				x86_hm_exit(pp);
				goto try_again;
			}
		}
		x86_hm_exit(pp);
	}
	if (ht)
		htable_release(ht);
	XPV_ALLOW_MIGRATE();
}

/*
 * Various wrappers for hat_updateattr()
 */
void
hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
{
	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
	hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
}

void
hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
{
	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
	hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
}

void
hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
{
	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
	hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
}

void
hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
{
	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
	hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
}

/*
 * size_t hat_getpagesize(hat, addr)
 *	returns pagesize in bytes for <hat, addr>.  returns -1 if there is
 *	no mapping.  This is an advisory call.
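 *
 * For illustration, a caller checking whether an address is backed by a
 * level 1 (2M/4M) large page might do:
 *
 *	if (hat_getpagesize(hat, addr) == LEVEL_SIZE(1))
 *		use_large_page_path();
 *
 * where use_large_page_path() is a made-up name.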
27020Sstevel@tonic-gate */ 27030Sstevel@tonic-gate ssize_t 27040Sstevel@tonic-gate hat_getpagesize(hat_t *hat, caddr_t addr) 27050Sstevel@tonic-gate { 27060Sstevel@tonic-gate uintptr_t vaddr = ALIGN2PAGE(addr); 27070Sstevel@tonic-gate htable_t *ht; 27080Sstevel@tonic-gate size_t pagesize; 27090Sstevel@tonic-gate 27103446Smrj ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 27110Sstevel@tonic-gate if (IN_VA_HOLE(vaddr)) 27120Sstevel@tonic-gate return (-1); 27130Sstevel@tonic-gate ht = htable_getpage(hat, vaddr, NULL); 27140Sstevel@tonic-gate if (ht == NULL) 27150Sstevel@tonic-gate return (-1); 27160Sstevel@tonic-gate pagesize = LEVEL_SIZE(ht->ht_level); 27170Sstevel@tonic-gate htable_release(ht); 27180Sstevel@tonic-gate return (pagesize); 27190Sstevel@tonic-gate } 27200Sstevel@tonic-gate 27210Sstevel@tonic-gate 27220Sstevel@tonic-gate 27230Sstevel@tonic-gate /* 27240Sstevel@tonic-gate * pfn_t hat_getpfnum(hat, addr) 27250Sstevel@tonic-gate * returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid. 27260Sstevel@tonic-gate */ 27270Sstevel@tonic-gate pfn_t 27280Sstevel@tonic-gate hat_getpfnum(hat_t *hat, caddr_t addr) 27290Sstevel@tonic-gate { 27300Sstevel@tonic-gate uintptr_t vaddr = ALIGN2PAGE(addr); 27310Sstevel@tonic-gate htable_t *ht; 27320Sstevel@tonic-gate uint_t entry; 27330Sstevel@tonic-gate pfn_t pfn = PFN_INVALID; 27340Sstevel@tonic-gate 27353446Smrj ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 27360Sstevel@tonic-gate if (khat_running == 0) 27373446Smrj return (PFN_INVALID); 27380Sstevel@tonic-gate 27390Sstevel@tonic-gate if (IN_VA_HOLE(vaddr)) 27400Sstevel@tonic-gate return (PFN_INVALID); 27410Sstevel@tonic-gate 27425084Sjohnlev XPV_DISALLOW_MIGRATE(); 27430Sstevel@tonic-gate /* 27440Sstevel@tonic-gate * A very common use of hat_getpfnum() is from the DDI for kernel pages. 27450Sstevel@tonic-gate * Use the kmap_ptes (which also covers the 32 bit heap) to speed 27460Sstevel@tonic-gate * this up. 27470Sstevel@tonic-gate */ 27480Sstevel@tonic-gate if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) { 27490Sstevel@tonic-gate x86pte_t pte; 27503446Smrj pgcnt_t pg_index; 27513446Smrj 27523446Smrj pg_index = mmu_btop(vaddr - mmu.kmap_addr); 27533446Smrj pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index)); 27545084Sjohnlev if (PTE_ISVALID(pte)) 27555084Sjohnlev /*LINTED [use of constant 0 causes a lint warning] */ 27565084Sjohnlev pfn = PTE2PFN(pte, 0); 27575084Sjohnlev XPV_ALLOW_MIGRATE(); 27585084Sjohnlev return (pfn); 27590Sstevel@tonic-gate } 27600Sstevel@tonic-gate 27610Sstevel@tonic-gate ht = htable_getpage(hat, vaddr, &entry); 27625084Sjohnlev if (ht == NULL) { 27635084Sjohnlev XPV_ALLOW_MIGRATE(); 27640Sstevel@tonic-gate return (PFN_INVALID); 27655084Sjohnlev } 27660Sstevel@tonic-gate ASSERT(vaddr >= ht->ht_vaddr); 27670Sstevel@tonic-gate ASSERT(vaddr <= HTABLE_LAST_PAGE(ht)); 27680Sstevel@tonic-gate pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level); 27690Sstevel@tonic-gate if (ht->ht_level > 0) 27700Sstevel@tonic-gate pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level)); 27710Sstevel@tonic-gate htable_release(ht); 27725084Sjohnlev XPV_ALLOW_MIGRATE(); 27730Sstevel@tonic-gate return (pfn); 27740Sstevel@tonic-gate } 27750Sstevel@tonic-gate 27760Sstevel@tonic-gate /* 27770Sstevel@tonic-gate * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged. 27780Sstevel@tonic-gate * Use hat_getpfnum(kas.a_hat, ...) instead. 
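 *
 * A hedged sketch of the preferred idiom for turning a kernel virtual
 * address into a physical address (variable names are illustrative):
 *
 *	pfn_t pfn = hat_getpfnum(kas.a_hat, va);
 *	if (pfn != PFN_INVALID)
 *		pa = mmu_ptob((uint64_t)pfn) +
 *		    ((uintptr_t)va & MMU_PAGEOFFSET);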
27790Sstevel@tonic-gate * 27800Sstevel@tonic-gate * We'd like to return PFN_INVALID if the mappings have underlying page_t's 27810Sstevel@tonic-gate * but can't right now because some software has grown to use 27820Sstevel@tonic-gate * this interface incorrectly. So for now when the interface is misused, 27830Sstevel@tonic-gate * issue a warning to the user that in the future it won't work in the 27840Sstevel@tonic-gate * way they're abusing it, and carry on. 27850Sstevel@tonic-gate * 27860Sstevel@tonic-gate * Note that hat_getkpfnum() is never supported on amd64. 27870Sstevel@tonic-gate */ 27880Sstevel@tonic-gate #if !defined(__amd64) 27890Sstevel@tonic-gate pfn_t 27900Sstevel@tonic-gate hat_getkpfnum(caddr_t addr) 27910Sstevel@tonic-gate { 27920Sstevel@tonic-gate pfn_t pfn; 27930Sstevel@tonic-gate int badcaller = 0; 27940Sstevel@tonic-gate 27950Sstevel@tonic-gate if (khat_running == 0) 27960Sstevel@tonic-gate panic("hat_getkpfnum(): called too early\n"); 27970Sstevel@tonic-gate if ((uintptr_t)addr < kernelbase) 27980Sstevel@tonic-gate return (PFN_INVALID); 27990Sstevel@tonic-gate 28005084Sjohnlev XPV_DISALLOW_MIGRATE(); 28010Sstevel@tonic-gate if (segkpm && IS_KPM_ADDR(addr)) { 28020Sstevel@tonic-gate badcaller = 1; 28030Sstevel@tonic-gate pfn = hat_kpm_va2pfn(addr); 28040Sstevel@tonic-gate } else { 28050Sstevel@tonic-gate pfn = hat_getpfnum(kas.a_hat, addr); 28060Sstevel@tonic-gate badcaller = pf_is_memory(pfn); 28070Sstevel@tonic-gate } 28080Sstevel@tonic-gate 28090Sstevel@tonic-gate if (badcaller) 28100Sstevel@tonic-gate hat_getkpfnum_badcall(caller()); 28115084Sjohnlev XPV_ALLOW_MIGRATE(); 28120Sstevel@tonic-gate return (pfn); 28130Sstevel@tonic-gate } 28140Sstevel@tonic-gate #endif /* !__amd64 */ 28150Sstevel@tonic-gate 28160Sstevel@tonic-gate /* 28170Sstevel@tonic-gate * int hat_probe(hat, addr) 28180Sstevel@tonic-gate * return 0 if no valid mapping is present. Faster version 28190Sstevel@tonic-gate * of hat_getattr in certain architectures. 28200Sstevel@tonic-gate */ 28210Sstevel@tonic-gate int 28220Sstevel@tonic-gate hat_probe(hat_t *hat, caddr_t addr) 28230Sstevel@tonic-gate { 28240Sstevel@tonic-gate uintptr_t vaddr = ALIGN2PAGE(addr); 28250Sstevel@tonic-gate uint_t entry; 28260Sstevel@tonic-gate htable_t *ht; 28270Sstevel@tonic-gate pgcnt_t pg_off; 28280Sstevel@tonic-gate 28293446Smrj ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 28300Sstevel@tonic-gate ASSERT(hat == kas.a_hat || 28310Sstevel@tonic-gate AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 28320Sstevel@tonic-gate if (IN_VA_HOLE(vaddr)) 28330Sstevel@tonic-gate return (0); 28340Sstevel@tonic-gate 28350Sstevel@tonic-gate /* 28360Sstevel@tonic-gate * Most common use of hat_probe is from segmap. We special case it 28370Sstevel@tonic-gate * for performance.
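 *
 * A sketch of the sort of advisory check a caller might make
 * (hypothetical, since hat_probe() gives no stability guarantee):
 *
 *	if (hat_probe(as->a_hat, addr) == 0)
 *		... no translation yet; take the slow path ...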
28380Sstevel@tonic-gate */ 28390Sstevel@tonic-gate if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) { 28400Sstevel@tonic-gate pg_off = mmu_btop(vaddr - mmu.kmap_addr); 28410Sstevel@tonic-gate if (mmu.pae_hat) 28420Sstevel@tonic-gate return (PTE_ISVALID(mmu.kmap_ptes[pg_off])); 28430Sstevel@tonic-gate else 28440Sstevel@tonic-gate return (PTE_ISVALID( 28450Sstevel@tonic-gate ((x86pte32_t *)mmu.kmap_ptes)[pg_off])); 28460Sstevel@tonic-gate } 28470Sstevel@tonic-gate 28480Sstevel@tonic-gate ht = htable_getpage(hat, vaddr, &entry); 28490Sstevel@tonic-gate htable_release(ht); 28505084Sjohnlev return (ht != NULL); 28510Sstevel@tonic-gate } 28520Sstevel@tonic-gate 28530Sstevel@tonic-gate /* 28544381Sjosephb * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM. 28554381Sjosephb */ 28564381Sjosephb static int 28574381Sjosephb is_it_dism(hat_t *hat, caddr_t va) 28584381Sjosephb { 28594381Sjosephb struct seg *seg; 28604381Sjosephb struct shm_data *shmd; 28614381Sjosephb struct spt_data *sptd; 28624381Sjosephb 28634381Sjosephb seg = as_findseg(hat->hat_as, va, 0); 28644381Sjosephb ASSERT(seg != NULL); 28654381Sjosephb ASSERT(seg->s_base <= va); 28664381Sjosephb shmd = (struct shm_data *)seg->s_data; 28674381Sjosephb ASSERT(shmd != NULL); 28684381Sjosephb sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 28694381Sjosephb ASSERT(sptd != NULL); 28704381Sjosephb if (sptd->spt_flags & SHM_PAGEABLE) 28714381Sjosephb return (1); 28724381Sjosephb return (0); 28734381Sjosephb } 28744381Sjosephb 28754381Sjosephb /* 28764381Sjosephb * Simple implementation of ISM. hat_share() is similar to hat_memload_array(), 28770Sstevel@tonic-gate * except that we use the ism_hat's existing mappings to determine the pages 28784381Sjosephb * and protections to use for this hat. If we find a full properly aligned 28794381Sjosephb * and sized pagetable, we will attempt to share the pagetable itself. 28800Sstevel@tonic-gate */ 28810Sstevel@tonic-gate /*ARGSUSED*/ 28820Sstevel@tonic-gate int 28830Sstevel@tonic-gate hat_share( 28840Sstevel@tonic-gate hat_t *hat, 28850Sstevel@tonic-gate caddr_t addr, 28860Sstevel@tonic-gate hat_t *ism_hat, 28870Sstevel@tonic-gate caddr_t src_addr, 28880Sstevel@tonic-gate size_t len, /* almost useless value, see below.. 
*/ 28890Sstevel@tonic-gate uint_t ismszc) 28900Sstevel@tonic-gate { 28910Sstevel@tonic-gate uintptr_t vaddr_start = (uintptr_t)addr; 28920Sstevel@tonic-gate uintptr_t vaddr; 28930Sstevel@tonic-gate uintptr_t eaddr = vaddr_start + len; 28940Sstevel@tonic-gate uintptr_t ism_addr_start = (uintptr_t)src_addr; 28950Sstevel@tonic-gate uintptr_t ism_addr = ism_addr_start; 28960Sstevel@tonic-gate uintptr_t e_ism_addr = ism_addr + len; 28970Sstevel@tonic-gate htable_t *ism_ht = NULL; 28980Sstevel@tonic-gate htable_t *ht; 28990Sstevel@tonic-gate x86pte_t pte; 29000Sstevel@tonic-gate page_t *pp; 29010Sstevel@tonic-gate pfn_t pfn; 29020Sstevel@tonic-gate level_t l; 29030Sstevel@tonic-gate pgcnt_t pgcnt; 29040Sstevel@tonic-gate uint_t prot; 29054381Sjosephb int is_dism; 29064381Sjosephb int flags; 29070Sstevel@tonic-gate 29080Sstevel@tonic-gate /* 29090Sstevel@tonic-gate * We might be asked to share an empty DISM hat by as_dup() 29100Sstevel@tonic-gate */ 29110Sstevel@tonic-gate ASSERT(hat != kas.a_hat); 29123446Smrj ASSERT(eaddr <= _userlimit); 29130Sstevel@tonic-gate if (!(ism_hat->hat_flags & HAT_SHARED)) { 29140Sstevel@tonic-gate ASSERT(hat_get_mapped_size(ism_hat) == 0); 29150Sstevel@tonic-gate return (0); 29160Sstevel@tonic-gate } 29175084Sjohnlev XPV_DISALLOW_MIGRATE(); 29180Sstevel@tonic-gate 29190Sstevel@tonic-gate /* 29200Sstevel@tonic-gate * The SPT segment driver often passes us a size larger than there are 29210Sstevel@tonic-gate * valid mappings. That's because it rounds the segment size up to a 29220Sstevel@tonic-gate * large pagesize, even if the actual memory mapped by ism_hat is less. 29230Sstevel@tonic-gate */ 29240Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr_start)); 29250Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(ism_addr_start)); 29260Sstevel@tonic-gate ASSERT(ism_hat->hat_flags & HAT_SHARED); 29274381Sjosephb is_dism = is_it_dism(hat, addr); 29280Sstevel@tonic-gate while (ism_addr < e_ism_addr) { 29290Sstevel@tonic-gate /* 29300Sstevel@tonic-gate * use htable_walk to get the next valid ISM mapping 29310Sstevel@tonic-gate */ 29320Sstevel@tonic-gate pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr); 29330Sstevel@tonic-gate if (ism_ht == NULL) 29340Sstevel@tonic-gate break; 29350Sstevel@tonic-gate 29360Sstevel@tonic-gate /* 29374381Sjosephb * First check to see if we already share the page table. 29384381Sjosephb */ 29394381Sjosephb l = ism_ht->ht_level; 29404381Sjosephb vaddr = vaddr_start + (ism_addr - ism_addr_start); 29414381Sjosephb ht = htable_lookup(hat, vaddr, l); 29424381Sjosephb if (ht != NULL) { 29434381Sjosephb if (ht->ht_flags & HTABLE_SHARED_PFN) 29444381Sjosephb goto shared; 29454381Sjosephb htable_release(ht); 29464381Sjosephb goto not_shared; 29474381Sjosephb } 29484381Sjosephb 29494381Sjosephb /* 29504381Sjosephb * Can't ever share top table. 29514381Sjosephb */ 29524381Sjosephb if (l == mmu.max_level) 29534381Sjosephb goto not_shared; 29544381Sjosephb 29554381Sjosephb /* 29564381Sjosephb * Avoid level mismatches later due to DISM faults. 
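 * (DISM pages can be paged in and out individually, which produces
 * level 0 mappings; a shared higher level pagetable would later have
 * to be broken up to handle those faults, so we share only level 0
 * tables for DISM.)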
29574381Sjosephb */ 29584381Sjosephb if (is_dism && l > 0) 29594381Sjosephb goto not_shared; 29604381Sjosephb 29614381Sjosephb /* 29624381Sjosephb * addresses and lengths must align 29634381Sjosephb * table must be fully populated 29644381Sjosephb * no lower level page tables 29654381Sjosephb */ 29664381Sjosephb if (ism_addr != ism_ht->ht_vaddr || 29674381Sjosephb (vaddr & LEVEL_OFFSET(l + 1)) != 0) 29684381Sjosephb goto not_shared; 29694381Sjosephb 29704381Sjosephb /* 29714381Sjosephb * The range of address space must cover a full table. 29720Sstevel@tonic-gate */ 29735159Sjohnlev if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1)) 29744381Sjosephb goto not_shared; 29754381Sjosephb 29764381Sjosephb /* 29774381Sjosephb * All entries in the ISM page table must be leaf PTEs. 29784381Sjosephb */ 29794381Sjosephb if (l > 0) { 29804381Sjosephb int e; 29814381Sjosephb 29824381Sjosephb /* 29834381Sjosephb * We know the 0th is from htable_walk() above. 29844381Sjosephb */ 29854381Sjosephb for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) { 29864381Sjosephb x86pte_t pte; 29874381Sjosephb pte = x86pte_get(ism_ht, e); 29884381Sjosephb if (!PTE_ISPAGE(pte, l)) 29894381Sjosephb goto not_shared; 29904381Sjosephb } 29914381Sjosephb } 29924381Sjosephb 29934381Sjosephb /* 29944381Sjosephb * share the page table 29954381Sjosephb */ 29964381Sjosephb ht = htable_create(hat, vaddr, l, ism_ht); 29974381Sjosephb shared: 29984381Sjosephb ASSERT(ht->ht_flags & HTABLE_SHARED_PFN); 29994381Sjosephb ASSERT(ht->ht_shares == ism_ht); 30004381Sjosephb hat->hat_ism_pgcnt += 30014381Sjosephb (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) << 30024381Sjosephb (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT); 30034381Sjosephb ht->ht_valid_cnt = ism_ht->ht_valid_cnt; 30044381Sjosephb htable_release(ht); 30054381Sjosephb ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1); 30064381Sjosephb htable_release(ism_ht); 30074381Sjosephb ism_ht = NULL; 30084381Sjosephb continue; 30094381Sjosephb 30104381Sjosephb not_shared: 30114381Sjosephb /* 30124381Sjosephb * Unable to share the page table. Instead we will 30134381Sjosephb * create new mappings from the values in the ISM mappings. 30144381Sjosephb * Figure out what level size mappings to use; 30154381Sjosephb */ 30160Sstevel@tonic-gate for (l = ism_ht->ht_level; l > 0; --l) { 30170Sstevel@tonic-gate if (LEVEL_SIZE(l) <= eaddr - vaddr && 30180Sstevel@tonic-gate (vaddr & LEVEL_OFFSET(l)) == 0) 30190Sstevel@tonic-gate break; 30200Sstevel@tonic-gate } 30210Sstevel@tonic-gate 30220Sstevel@tonic-gate /* 30230Sstevel@tonic-gate * The ISM mapping might be larger than the share area, 30244381Sjosephb * be careful to truncate it if needed. 30250Sstevel@tonic-gate */ 30260Sstevel@tonic-gate if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) { 30270Sstevel@tonic-gate pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level)); 30280Sstevel@tonic-gate } else { 30290Sstevel@tonic-gate pgcnt = mmu_btop(eaddr - vaddr); 30300Sstevel@tonic-gate l = 0; 30310Sstevel@tonic-gate } 30320Sstevel@tonic-gate 30330Sstevel@tonic-gate pfn = PTE2PFN(pte, ism_ht->ht_level); 30340Sstevel@tonic-gate ASSERT(pfn != PFN_INVALID); 30350Sstevel@tonic-gate while (pgcnt > 0) { 30360Sstevel@tonic-gate /* 30370Sstevel@tonic-gate * Make a new pte for the PFN for this level. 30380Sstevel@tonic-gate * Copy protections for the pte from the ISM pte. 
30390Sstevel@tonic-gate */ 30400Sstevel@tonic-gate pp = page_numtopp_nolock(pfn); 30410Sstevel@tonic-gate ASSERT(pp != NULL); 30420Sstevel@tonic-gate 30430Sstevel@tonic-gate prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK; 30440Sstevel@tonic-gate if (PTE_GET(pte, PT_WRITABLE)) 30450Sstevel@tonic-gate prot |= PROT_WRITE; 30460Sstevel@tonic-gate if (!PTE_GET(pte, PT_NX)) 30470Sstevel@tonic-gate prot |= PROT_EXEC; 30480Sstevel@tonic-gate 30494381Sjosephb flags = HAT_LOAD; 30504381Sjosephb if (!is_dism) 30514381Sjosephb flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST; 30524381Sjosephb while (hati_load_common(hat, vaddr, pp, prot, flags, 30533446Smrj l, pfn) != 0) { 30543446Smrj if (l == 0) 30553446Smrj panic("hati_load_common() failure"); 30563446Smrj --l; 30573446Smrj } 30580Sstevel@tonic-gate 30590Sstevel@tonic-gate vaddr += LEVEL_SIZE(l); 30600Sstevel@tonic-gate ism_addr += LEVEL_SIZE(l); 30610Sstevel@tonic-gate pfn += mmu_btop(LEVEL_SIZE(l)); 30620Sstevel@tonic-gate pgcnt -= mmu_btop(LEVEL_SIZE(l)); 30630Sstevel@tonic-gate } 30640Sstevel@tonic-gate } 30650Sstevel@tonic-gate if (ism_ht != NULL) 30660Sstevel@tonic-gate htable_release(ism_ht); 30675084Sjohnlev XPV_ALLOW_MIGRATE(); 30680Sstevel@tonic-gate return (0); 30690Sstevel@tonic-gate } 30700Sstevel@tonic-gate 30710Sstevel@tonic-gate 30720Sstevel@tonic-gate /* 30730Sstevel@tonic-gate * hat_unshare() is similar to hat_unload_callback(), but 30740Sstevel@tonic-gate * we have to look for empty shared pagetables. Note that 30750Sstevel@tonic-gate * hat_unshare() is always invoked against an entire segment. 30760Sstevel@tonic-gate */ 30770Sstevel@tonic-gate /*ARGSUSED*/ 30780Sstevel@tonic-gate void 30790Sstevel@tonic-gate hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc) 30800Sstevel@tonic-gate { 30814654Sjosephb uint64_t vaddr = (uintptr_t)addr; 30820Sstevel@tonic-gate uintptr_t eaddr = vaddr + len; 30830Sstevel@tonic-gate htable_t *ht = NULL; 30840Sstevel@tonic-gate uint_t need_demaps = 0; 30854381Sjosephb int flags = HAT_UNLOAD_UNMAP; 30864381Sjosephb level_t l; 30870Sstevel@tonic-gate 30880Sstevel@tonic-gate ASSERT(hat != kas.a_hat); 30893446Smrj ASSERT(eaddr <= _userlimit); 30900Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr)); 30910Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(eaddr)); 30925084Sjohnlev XPV_DISALLOW_MIGRATE(); 30930Sstevel@tonic-gate 30940Sstevel@tonic-gate /* 30950Sstevel@tonic-gate * First go through and remove any shared pagetables. 30960Sstevel@tonic-gate * 30973446Smrj * Note that it's ok to delay the TLB shootdown till the entire range is 30980Sstevel@tonic-gate * finished, because if hat_pageunload() were to unload a shared 30993446Smrj * pagetable page, its hat_tlb_inval() will do a global TLB invalidate. 
31000Sstevel@tonic-gate */ 31014381Sjosephb l = mmu.max_page_level; 31024381Sjosephb if (l == mmu.max_level) 31034381Sjosephb --l; 31044381Sjosephb for (; l >= 0; --l) { 31054381Sjosephb for (vaddr = (uintptr_t)addr; vaddr < eaddr; 31064381Sjosephb vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) { 31074381Sjosephb ASSERT(!IN_VA_HOLE(vaddr)); 31084381Sjosephb /* 31094381Sjosephb * find a pagetable that maps the current address 31104381Sjosephb */ 31114381Sjosephb ht = htable_lookup(hat, vaddr, l); 31124381Sjosephb if (ht == NULL) 31134381Sjosephb continue; 31140Sstevel@tonic-gate if (ht->ht_flags & HTABLE_SHARED_PFN) { 31150Sstevel@tonic-gate /* 31164381Sjosephb * clear page count, set valid_cnt to 0, 31174381Sjosephb * let htable_release() finish the job 31180Sstevel@tonic-gate */ 31194381Sjosephb hat->hat_ism_pgcnt -= ht->ht_valid_cnt << 31204381Sjosephb (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT); 31210Sstevel@tonic-gate ht->ht_valid_cnt = 0; 31220Sstevel@tonic-gate need_demaps = 1; 31230Sstevel@tonic-gate } 31240Sstevel@tonic-gate htable_release(ht); 31250Sstevel@tonic-gate } 31260Sstevel@tonic-gate } 31270Sstevel@tonic-gate 31280Sstevel@tonic-gate /* 31290Sstevel@tonic-gate * flush the TLBs - since we're probably dealing with MANY mappings 31300Sstevel@tonic-gate * we do just one CR3 reload. 31310Sstevel@tonic-gate */ 31320Sstevel@tonic-gate if (!(hat->hat_flags & HAT_FREEING) && need_demaps) 31333446Smrj hat_tlb_inval(hat, DEMAP_ALL_ADDR); 31340Sstevel@tonic-gate 31350Sstevel@tonic-gate /* 31360Sstevel@tonic-gate * Now go back and clean up any unaligned mappings that 31370Sstevel@tonic-gate * couldn't share pagetables. 31380Sstevel@tonic-gate */ 31394381Sjosephb if (!is_it_dism(hat, addr)) 31404381Sjosephb flags |= HAT_UNLOAD_UNLOCK; 31414381Sjosephb hat_unload(hat, addr, len, flags); 31425084Sjohnlev XPV_ALLOW_MIGRATE(); 31430Sstevel@tonic-gate } 31440Sstevel@tonic-gate 31450Sstevel@tonic-gate 31460Sstevel@tonic-gate /* 31470Sstevel@tonic-gate * hat_reserve() does nothing 31480Sstevel@tonic-gate */ 31490Sstevel@tonic-gate /*ARGSUSED*/ 31500Sstevel@tonic-gate void 31510Sstevel@tonic-gate hat_reserve(struct as *as, caddr_t addr, size_t len) 31520Sstevel@tonic-gate { 31530Sstevel@tonic-gate } 31540Sstevel@tonic-gate 31550Sstevel@tonic-gate 31560Sstevel@tonic-gate /* 31570Sstevel@tonic-gate * Called when all mappings to a page should have write permission removed. 31580Sstevel@tonic-gate * Mostly stolen from hat_pagesync() 31590Sstevel@tonic-gate */ 31600Sstevel@tonic-gate static void 31610Sstevel@tonic-gate hati_page_clrwrt(struct page *pp) 31620Sstevel@tonic-gate { 31630Sstevel@tonic-gate hment_t *hm = NULL; 31640Sstevel@tonic-gate htable_t *ht; 31650Sstevel@tonic-gate uint_t entry; 31660Sstevel@tonic-gate x86pte_t old; 31670Sstevel@tonic-gate x86pte_t new; 31680Sstevel@tonic-gate uint_t pszc = 0; 31690Sstevel@tonic-gate 31705084Sjohnlev XPV_DISALLOW_MIGRATE(); 31710Sstevel@tonic-gate next_size: 31720Sstevel@tonic-gate /* 31730Sstevel@tonic-gate * walk thru the mapping list clearing write permission 31740Sstevel@tonic-gate */ 31750Sstevel@tonic-gate x86_hm_enter(pp); 31760Sstevel@tonic-gate while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) { 31770Sstevel@tonic-gate if (ht->ht_level < pszc) 31780Sstevel@tonic-gate continue; 31790Sstevel@tonic-gate old = x86pte_get(ht, entry); 31800Sstevel@tonic-gate 31810Sstevel@tonic-gate for (;;) { 31820Sstevel@tonic-gate /* 31830Sstevel@tonic-gate * Is this mapping of interest?
31840Sstevel@tonic-gate */ 31850Sstevel@tonic-gate if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum || 31860Sstevel@tonic-gate PTE_GET(old, PT_WRITABLE) == 0) 31870Sstevel@tonic-gate break; 31880Sstevel@tonic-gate 31890Sstevel@tonic-gate /* 31900Sstevel@tonic-gate * Clear ref/mod writable bits. This requires cross 31910Sstevel@tonic-gate * calls to ensure any executing TLBs see cleared bits. 31920Sstevel@tonic-gate */ 31930Sstevel@tonic-gate new = old; 31940Sstevel@tonic-gate PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE); 31950Sstevel@tonic-gate old = hati_update_pte(ht, entry, old, new); 31960Sstevel@tonic-gate if (old != 0) 31970Sstevel@tonic-gate continue; 31980Sstevel@tonic-gate 31990Sstevel@tonic-gate break; 32000Sstevel@tonic-gate } 32010Sstevel@tonic-gate } 32020Sstevel@tonic-gate x86_hm_exit(pp); 32030Sstevel@tonic-gate while (pszc < pp->p_szc) { 32040Sstevel@tonic-gate page_t *tpp; 32050Sstevel@tonic-gate pszc++; 32060Sstevel@tonic-gate tpp = PP_GROUPLEADER(pp, pszc); 32070Sstevel@tonic-gate if (pp != tpp) { 32080Sstevel@tonic-gate pp = tpp; 32090Sstevel@tonic-gate goto next_size; 32100Sstevel@tonic-gate } 32110Sstevel@tonic-gate } 32125084Sjohnlev XPV_ALLOW_MIGRATE(); 32130Sstevel@tonic-gate } 32140Sstevel@tonic-gate 32150Sstevel@tonic-gate /* 32160Sstevel@tonic-gate * void hat_page_setattr(pp, flag) 32170Sstevel@tonic-gate * void hat_page_clrattr(pp, flag) 32180Sstevel@tonic-gate * used to set/clr ref/mod bits. 32190Sstevel@tonic-gate */ 32200Sstevel@tonic-gate void 32210Sstevel@tonic-gate hat_page_setattr(struct page *pp, uint_t flag) 32220Sstevel@tonic-gate { 32230Sstevel@tonic-gate vnode_t *vp = pp->p_vnode; 32240Sstevel@tonic-gate kmutex_t *vphm = NULL; 32250Sstevel@tonic-gate page_t **listp; 32264324Sqiao int noshuffle; 32274324Sqiao 32284324Sqiao noshuffle = flag & P_NSH; 32294324Sqiao flag &= ~P_NSH; 32300Sstevel@tonic-gate 32310Sstevel@tonic-gate if (PP_GETRM(pp, flag) == flag) 32320Sstevel@tonic-gate return; 32330Sstevel@tonic-gate 32344324Sqiao if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) && 32354324Sqiao !noshuffle) { 32360Sstevel@tonic-gate vphm = page_vnode_mutex(vp); 32370Sstevel@tonic-gate mutex_enter(vphm); 32380Sstevel@tonic-gate } 32390Sstevel@tonic-gate 32400Sstevel@tonic-gate PP_SETRM(pp, flag); 32410Sstevel@tonic-gate 32420Sstevel@tonic-gate if (vphm != NULL) { 32430Sstevel@tonic-gate 32440Sstevel@tonic-gate /* 32450Sstevel@tonic-gate * Some File Systems examine v_pages for NULL w/o 32460Sstevel@tonic-gate * grabbing the vphm mutex. Must not let it become NULL when 32470Sstevel@tonic-gate * pp is the only page on the list. 
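 *
 * The shuffle below moves pp to the tail of v_pages; on
 * IS_VMODSORT() vnodes dirty pages congregate at the tail, keeping
 * clean pages toward the front, which is the ordering consumers
 * such as pvn_vplist_dirty() rely on.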
32480Sstevel@tonic-gate */ 32490Sstevel@tonic-gate if (pp->p_vpnext != pp) { 32500Sstevel@tonic-gate page_vpsub(&vp->v_pages, pp); 32510Sstevel@tonic-gate if (vp->v_pages != NULL) 32520Sstevel@tonic-gate listp = &vp->v_pages->p_vpprev->p_vpnext; 32530Sstevel@tonic-gate else 32540Sstevel@tonic-gate listp = &vp->v_pages; 32550Sstevel@tonic-gate page_vpadd(listp, pp); 32560Sstevel@tonic-gate } 32570Sstevel@tonic-gate mutex_exit(vphm); 32580Sstevel@tonic-gate } 32590Sstevel@tonic-gate } 32600Sstevel@tonic-gate 32610Sstevel@tonic-gate void 32620Sstevel@tonic-gate hat_page_clrattr(struct page *pp, uint_t flag) 32630Sstevel@tonic-gate { 32640Sstevel@tonic-gate vnode_t *vp = pp->p_vnode; 32650Sstevel@tonic-gate ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 32660Sstevel@tonic-gate 32670Sstevel@tonic-gate /* 32682999Sstans * Caller is expected to hold page's io lock for VMODSORT to work 32692999Sstans * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 32702999Sstans * bit is cleared. 32712999Sstans * We don't have an assert to avoid tripping some existing third party 32722999Sstans * code. The dirty page is moved back to top of the v_page list 32732999Sstans * after IO is done in pvn_write_done(). 32740Sstevel@tonic-gate */ 32750Sstevel@tonic-gate PP_CLRRM(pp, flag); 32760Sstevel@tonic-gate 32770Sstevel@tonic-gate if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 32780Sstevel@tonic-gate 32790Sstevel@tonic-gate /* 32800Sstevel@tonic-gate * VMODSORT works by removing write permissions and getting 32810Sstevel@tonic-gate * a fault when a page is made dirty. At this point 32820Sstevel@tonic-gate * we need to remove write permission from all mappings 32830Sstevel@tonic-gate * to this page. 32840Sstevel@tonic-gate */ 32850Sstevel@tonic-gate hati_page_clrwrt(pp); 32860Sstevel@tonic-gate } 32870Sstevel@tonic-gate } 32880Sstevel@tonic-gate 32890Sstevel@tonic-gate /* 32900Sstevel@tonic-gate * If flag is specified, returns 0 if attribute is disabled 32910Sstevel@tonic-gate * and nonzero if enabled. If flag specifies multiple attributes 32920Sstevel@tonic-gate * then returns 0 if ALL attributes are disabled. This is an advisory 32930Sstevel@tonic-gate * call. 32940Sstevel@tonic-gate */ 32950Sstevel@tonic-gate uint_t 32960Sstevel@tonic-gate hat_page_getattr(struct page *pp, uint_t flag) 32970Sstevel@tonic-gate { 32980Sstevel@tonic-gate return (PP_GETRM(pp, flag)); 32990Sstevel@tonic-gate } 33000Sstevel@tonic-gate 33010Sstevel@tonic-gate 33020Sstevel@tonic-gate /* 33030Sstevel@tonic-gate * common code used by hat_pageunload() and hment_steal() 33040Sstevel@tonic-gate */ 33050Sstevel@tonic-gate hment_t * 33060Sstevel@tonic-gate hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry) 33070Sstevel@tonic-gate { 33080Sstevel@tonic-gate x86pte_t old_pte; 33090Sstevel@tonic-gate pfn_t pfn = pp->p_pagenum; 33100Sstevel@tonic-gate hment_t *hm; 33110Sstevel@tonic-gate 33120Sstevel@tonic-gate /* 33130Sstevel@tonic-gate * We need to acquire a hold on the htable in order to 33140Sstevel@tonic-gate * do the invalidate. We know the htable must exist, since 33150Sstevel@tonic-gate * unmaps don't release the htable until after removing any 33160Sstevel@tonic-gate * hment. Having x86_hm_enter() keeps that from proceeding. 33170Sstevel@tonic-gate */ 33180Sstevel@tonic-gate htable_acquire(ht); 33190Sstevel@tonic-gate 33200Sstevel@tonic-gate /* 33210Sstevel@tonic-gate * Invalidate the PTE and remove the hment.
33220Sstevel@tonic-gate */ 33233446Smrj old_pte = x86pte_inval(ht, entry, 0, NULL); 332447Sjosephb if (PTE2PFN(old_pte, ht->ht_level) != pfn) { 33253446Smrj panic("x86pte_inval() failure found PTE = " FMT_PTE 332647Sjosephb " pfn being unmapped is %lx ht=0x%lx entry=0x%x", 332747Sjosephb old_pte, pfn, (uintptr_t)ht, entry); 332847Sjosephb } 33290Sstevel@tonic-gate 33300Sstevel@tonic-gate /* 33310Sstevel@tonic-gate * Clean up all the htable information for this mapping 33320Sstevel@tonic-gate */ 33330Sstevel@tonic-gate ASSERT(ht->ht_valid_cnt > 0); 33340Sstevel@tonic-gate HTABLE_DEC(ht->ht_valid_cnt); 33350Sstevel@tonic-gate PGCNT_DEC(ht->ht_hat, ht->ht_level); 33360Sstevel@tonic-gate 33370Sstevel@tonic-gate /* 33380Sstevel@tonic-gate * sync ref/mod bits to the page_t 33390Sstevel@tonic-gate */ 33403446Smrj if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC) 33410Sstevel@tonic-gate hati_sync_pte_to_page(pp, old_pte, ht->ht_level); 33420Sstevel@tonic-gate 33430Sstevel@tonic-gate /* 33440Sstevel@tonic-gate * Remove the mapping list entry for this page. 33450Sstevel@tonic-gate */ 33460Sstevel@tonic-gate hm = hment_remove(pp, ht, entry); 33470Sstevel@tonic-gate 33480Sstevel@tonic-gate /* 33490Sstevel@tonic-gate * drop the mapping list lock so that we might free the 33500Sstevel@tonic-gate * hment and htable. 33510Sstevel@tonic-gate */ 33520Sstevel@tonic-gate x86_hm_exit(pp); 33530Sstevel@tonic-gate htable_release(ht); 33540Sstevel@tonic-gate return (hm); 33550Sstevel@tonic-gate } 33560Sstevel@tonic-gate 33571841Spraks extern int vpm_enable; 33580Sstevel@tonic-gate /* 33590Sstevel@tonic-gate * Unload all translations to a page. If the page is a subpage of a large 33600Sstevel@tonic-gate * page, the large page mappings are also removed. 33610Sstevel@tonic-gate * 33620Sstevel@tonic-gate * The forceflags are unused. 33630Sstevel@tonic-gate */ 33640Sstevel@tonic-gate 33650Sstevel@tonic-gate /*ARGSUSED*/ 33660Sstevel@tonic-gate static int 33670Sstevel@tonic-gate hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag) 33680Sstevel@tonic-gate { 33690Sstevel@tonic-gate page_t *cur_pp = pp; 33700Sstevel@tonic-gate hment_t *hm; 33710Sstevel@tonic-gate hment_t *prev; 33720Sstevel@tonic-gate htable_t *ht; 33730Sstevel@tonic-gate uint_t entry; 33740Sstevel@tonic-gate level_t level; 33750Sstevel@tonic-gate 33765084Sjohnlev XPV_DISALLOW_MIGRATE(); 33771841Spraks #if defined(__amd64) 33781841Spraks /* 33791841Spraks * clear the vpm ref. 33801841Spraks */ 33811841Spraks if (vpm_enable) { 33821841Spraks pp->p_vpmref = 0; 33831841Spraks } 33841841Spraks #endif 33850Sstevel@tonic-gate /* 33860Sstevel@tonic-gate * The loop with next_size handles pages with multiple pagesize mappings 33870Sstevel@tonic-gate */ 33880Sstevel@tonic-gate next_size: 33890Sstevel@tonic-gate for (;;) { 33900Sstevel@tonic-gate 33910Sstevel@tonic-gate /* 33920Sstevel@tonic-gate * Get a mapping list entry 33930Sstevel@tonic-gate */ 33940Sstevel@tonic-gate x86_hm_enter(cur_pp); 33950Sstevel@tonic-gate for (prev = NULL; ; prev = hm) { 33960Sstevel@tonic-gate hm = hment_walk(cur_pp, &ht, &entry, prev); 33970Sstevel@tonic-gate if (hm == NULL) { 33980Sstevel@tonic-gate x86_hm_exit(cur_pp); 33990Sstevel@tonic-gate 34000Sstevel@tonic-gate /* 34010Sstevel@tonic-gate * If not part of a larger page, we're done. 
34020Sstevel@tonic-gate */ 34033446Smrj if (cur_pp->p_szc <= pg_szcd) { 34045084Sjohnlev XPV_ALLOW_MIGRATE(); 34050Sstevel@tonic-gate return (0); 34063446Smrj } 34070Sstevel@tonic-gate 34080Sstevel@tonic-gate /* 34090Sstevel@tonic-gate * Else check the next larger page size. 34100Sstevel@tonic-gate * hat_page_demote() may decrease p_szc 34110Sstevel@tonic-gate * but that's ok; we'll just take an extra 34120Sstevel@tonic-gate * trip to discover there are no larger mappings 34130Sstevel@tonic-gate * and return. 34140Sstevel@tonic-gate */ 34150Sstevel@tonic-gate ++pg_szcd; 34160Sstevel@tonic-gate cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd); 34170Sstevel@tonic-gate goto next_size; 34180Sstevel@tonic-gate } 34190Sstevel@tonic-gate 34200Sstevel@tonic-gate /* 34210Sstevel@tonic-gate * If this mapping size matches, remove it. 34220Sstevel@tonic-gate */ 34230Sstevel@tonic-gate level = ht->ht_level; 34240Sstevel@tonic-gate if (level == pg_szcd) 34250Sstevel@tonic-gate break; 34260Sstevel@tonic-gate } 34270Sstevel@tonic-gate 34280Sstevel@tonic-gate /* 34290Sstevel@tonic-gate * Remove the mapping list entry for this page. 34300Sstevel@tonic-gate * Note this does the x86_hm_exit() for us. 34310Sstevel@tonic-gate */ 34320Sstevel@tonic-gate hm = hati_page_unmap(cur_pp, ht, entry); 34330Sstevel@tonic-gate if (hm != NULL) 34340Sstevel@tonic-gate hment_free(hm); 34350Sstevel@tonic-gate } 34360Sstevel@tonic-gate } 34370Sstevel@tonic-gate 34380Sstevel@tonic-gate int 34390Sstevel@tonic-gate hat_pageunload(struct page *pp, uint_t forceflag) 34400Sstevel@tonic-gate { 34410Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp)); 34420Sstevel@tonic-gate return (hati_pageunload(pp, 0, forceflag)); 34430Sstevel@tonic-gate } 34440Sstevel@tonic-gate 34450Sstevel@tonic-gate /* 34460Sstevel@tonic-gate * Unload all large mappings to pp and reduce by 1 the p_szc field of every large 34470Sstevel@tonic-gate * page level that included pp. 34480Sstevel@tonic-gate * 34490Sstevel@tonic-gate * pp must be locked EXCL. Even though no other constituent pages are locked 34500Sstevel@tonic-gate * it's legal to unload large mappings to pp because all constituent pages of 34510Sstevel@tonic-gate * large locked mappings have to be locked SHARED. Therefore if we have an EXCL 34520Sstevel@tonic-gate * lock on one of the constituent pages, none of the large mappings to pp are 34530Sstevel@tonic-gate * locked. 34540Sstevel@tonic-gate * 34550Sstevel@tonic-gate * Change (always decrease) p_szc field starting from the last constituent 34560Sstevel@tonic-gate * page and ending with root constituent page so that root's pszc always shows 34570Sstevel@tonic-gate * the area where hat_page_demote() may be active. 34580Sstevel@tonic-gate * 34590Sstevel@tonic-gate * This mechanism is only used for file system pages where it's not always 34600Sstevel@tonic-gate * possible to get EXCL locks on all constituent pages to demote the size code 34610Sstevel@tonic-gate * (as is done for anonymous or kernel large pages).
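 *
 * As a concrete illustration (e.g., 512 constituent pages for a 2MB
 * page built from 4K pages): a locked large mapping would require all
 * 512 constituent pages to be locked SHARED, so holding any one of
 * them EXCL, as we hold pp, guarantees no locked large mapping covers
 * the region.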
34620Sstevel@tonic-gate */ 34630Sstevel@tonic-gate void 34640Sstevel@tonic-gate hat_page_demote(page_t *pp) 34650Sstevel@tonic-gate { 34660Sstevel@tonic-gate uint_t pszc; 34670Sstevel@tonic-gate uint_t rszc; 34680Sstevel@tonic-gate uint_t szc; 34690Sstevel@tonic-gate page_t *rootpp; 34700Sstevel@tonic-gate page_t *firstpp; 34710Sstevel@tonic-gate page_t *lastpp; 34720Sstevel@tonic-gate pgcnt_t pgcnt; 34730Sstevel@tonic-gate 34740Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp)); 34750Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp)); 34760Sstevel@tonic-gate ASSERT(page_szc_lock_assert(pp)); 34770Sstevel@tonic-gate 34780Sstevel@tonic-gate if (pp->p_szc == 0) 34790Sstevel@tonic-gate return; 34800Sstevel@tonic-gate 34810Sstevel@tonic-gate rootpp = PP_GROUPLEADER(pp, 1); 34820Sstevel@tonic-gate (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD); 34830Sstevel@tonic-gate 34840Sstevel@tonic-gate /* 34850Sstevel@tonic-gate * all large mappings to pp are gone 34860Sstevel@tonic-gate * and no new ones can be set up since pp is locked exclusively. 34870Sstevel@tonic-gate * 34880Sstevel@tonic-gate * Lock the root to make sure there's only one hat_page_demote() 34890Sstevel@tonic-gate * outstanding within the area of this root's pszc. 34900Sstevel@tonic-gate * 34910Sstevel@tonic-gate * A second potential hat_page_demote() is already eliminated by the upper 34920Sstevel@tonic-gate * VM layer via page_szc_lock() but we don't rely on it and use our 34930Sstevel@tonic-gate * own locking (so that upper layer locking can be changed without 34940Sstevel@tonic-gate * assumptions that hat depends on upper layer VM to prevent multiple 34950Sstevel@tonic-gate * hat_page_demote() calls from being issued simultaneously to the same large 34960Sstevel@tonic-gate * page). 34970Sstevel@tonic-gate */ 34980Sstevel@tonic-gate again: 34990Sstevel@tonic-gate pszc = pp->p_szc; 35000Sstevel@tonic-gate if (pszc == 0) 35010Sstevel@tonic-gate return; 35020Sstevel@tonic-gate rootpp = PP_GROUPLEADER(pp, pszc); 35030Sstevel@tonic-gate x86_hm_enter(rootpp); 35040Sstevel@tonic-gate /* 35050Sstevel@tonic-gate * If root's p_szc is different from pszc we raced with another 35060Sstevel@tonic-gate * hat_page_demote(). Drop the lock and try to find the root again. 35070Sstevel@tonic-gate * If root's p_szc is greater than pszc, a previous hat_page_demote() is 35080Sstevel@tonic-gate * not done yet. Take and release mlist lock of root's root to wait 35090Sstevel@tonic-gate * for previous hat_page_demote() to complete. 35100Sstevel@tonic-gate */ 35110Sstevel@tonic-gate if ((rszc = rootpp->p_szc) != pszc) { 35120Sstevel@tonic-gate x86_hm_exit(rootpp); 35130Sstevel@tonic-gate if (rszc > pszc) { 35140Sstevel@tonic-gate /* p_szc of a locked non-free page can't increase */ 35150Sstevel@tonic-gate ASSERT(pp != rootpp); 35160Sstevel@tonic-gate 35170Sstevel@tonic-gate rootpp = PP_GROUPLEADER(rootpp, rszc); 35180Sstevel@tonic-gate x86_hm_enter(rootpp); 35190Sstevel@tonic-gate x86_hm_exit(rootpp); 35200Sstevel@tonic-gate } 35210Sstevel@tonic-gate goto again; 35220Sstevel@tonic-gate } 35230Sstevel@tonic-gate ASSERT(pp->p_szc == pszc); 35240Sstevel@tonic-gate 35250Sstevel@tonic-gate /* 35260Sstevel@tonic-gate * Decrement by 1 the p_szc of every constituent page of a region that 35270Sstevel@tonic-gate * covered pp. For example, if original szc is 3 it gets changed to 2 35280Sstevel@tonic-gate * everywhere except in region 2 that covered pp. Region 2 that 35290Sstevel@tonic-gate * covered pp gets demoted to 1 everywhere except in region 1 that
covered pp. The region 1 that covered pp is demoted to region 35310Sstevel@tonic-gate * 0. It's done this way because from region 3 we removed level 3 35320Sstevel@tonic-gate * mappings, from region 2 that covered pp we removed level 2 mappings 35330Sstevel@tonic-gate * and from region 1 that covered pp we removed level 1 mappings. All 35340Sstevel@tonic-gate * changes are done from high pfn's to low pfn's so that roots 35350Sstevel@tonic-gate * are changed last, allowing one to know the largest region where 35360Sstevel@tonic-gate * hat_page_demote() is still active by only looking at the root page. 35370Sstevel@tonic-gate * 35380Sstevel@tonic-gate * This algorithm is implemented in 2 while loops. First loop changes 35390Sstevel@tonic-gate * p_szc of pages to the right of pp's level 1 region and second 35400Sstevel@tonic-gate * loop changes p_szc of pages of level 1 region that covers pp 35410Sstevel@tonic-gate * and all pages to the left of level 1 region that covers pp. 35420Sstevel@tonic-gate * In the first loop p_szc keeps dropping with every iteration 35430Sstevel@tonic-gate * and in the second loop it keeps increasing with every iteration. 35440Sstevel@tonic-gate * 35450Sstevel@tonic-gate * First loop description: Demote pages to the right of pp outside of 35460Sstevel@tonic-gate * level 1 region that covers pp. In every iteration of the while 35470Sstevel@tonic-gate * loop below find the last page of szc region and the first page of 35480Sstevel@tonic-gate * (szc - 1) region that is immediately to the right of (szc - 1) 35490Sstevel@tonic-gate * region that covers pp. From last such page to first such page 35500Sstevel@tonic-gate * change every page's szc to szc - 1. Decrement szc and continue 35510Sstevel@tonic-gate * looping until szc is 1. If pp belongs to the last (szc - 1) region 35520Sstevel@tonic-gate * of szc region, skip to the next iteration. 35530Sstevel@tonic-gate */ 35540Sstevel@tonic-gate szc = pszc; 35550Sstevel@tonic-gate while (szc > 1) { 35560Sstevel@tonic-gate lastpp = PP_GROUPLEADER(pp, szc); 35570Sstevel@tonic-gate pgcnt = page_get_pagecnt(szc); 35580Sstevel@tonic-gate lastpp += pgcnt - 1; 35590Sstevel@tonic-gate firstpp = PP_GROUPLEADER(pp, (szc - 1)); 35600Sstevel@tonic-gate pgcnt = page_get_pagecnt(szc - 1); 35610Sstevel@tonic-gate if (lastpp - firstpp < pgcnt) { 35620Sstevel@tonic-gate szc--; 35630Sstevel@tonic-gate continue; 35640Sstevel@tonic-gate } 35650Sstevel@tonic-gate firstpp += pgcnt; 35660Sstevel@tonic-gate while (lastpp != firstpp) { 35670Sstevel@tonic-gate ASSERT(lastpp->p_szc == pszc); 35680Sstevel@tonic-gate lastpp->p_szc = szc - 1; 35690Sstevel@tonic-gate lastpp--; 35700Sstevel@tonic-gate } 35710Sstevel@tonic-gate firstpp->p_szc = szc - 1; 35720Sstevel@tonic-gate szc--; 35730Sstevel@tonic-gate } 35740Sstevel@tonic-gate 35750Sstevel@tonic-gate /* 35760Sstevel@tonic-gate * Second loop description: 35770Sstevel@tonic-gate * First iteration changes p_szc to 0 of every 35780Sstevel@tonic-gate * page of level 1 region that covers pp. 35790Sstevel@tonic-gate * Subsequent iterations find last page of szc region 35800Sstevel@tonic-gate * immediately to the left of szc region that covered pp 35810Sstevel@tonic-gate * and first page of (szc + 1) region that covers pp. 35820Sstevel@tonic-gate * From last to first page change p_szc of every page to szc. 35830Sstevel@tonic-gate * Increment szc and continue looping until szc is pszc. 35840Sstevel@tonic-gate * If pp belongs to the first szc region of (szc + 1) region, 35850Sstevel@tonic-gate * skip to the next iteration.
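 *
 * A small worked illustration (counts are for the common 512
 * entries-per-table case): with pszc == 2, the first loop sets
 * p_szc to 1 on the constituent pages of the szc 2 region that lie
 * to the right of pp's szc 1 region; the second loop then sets
 * p_szc to 0 across pp's szc 1 region and to 1 across the pages to
 * its left, updating the root page last.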
35860Sstevel@tonic-gate * 35870Sstevel@tonic-gate */ 35880Sstevel@tonic-gate szc = 0; 35890Sstevel@tonic-gate while (szc < pszc) { 35900Sstevel@tonic-gate firstpp = PP_GROUPLEADER(pp, (szc + 1)); 35910Sstevel@tonic-gate if (szc == 0) { 35920Sstevel@tonic-gate pgcnt = page_get_pagecnt(1); 35930Sstevel@tonic-gate lastpp = firstpp + (pgcnt - 1); 35940Sstevel@tonic-gate } else { 35950Sstevel@tonic-gate lastpp = PP_GROUPLEADER(pp, szc); 35960Sstevel@tonic-gate if (firstpp == lastpp) { 35970Sstevel@tonic-gate szc++; 35980Sstevel@tonic-gate continue; 35990Sstevel@tonic-gate } 36000Sstevel@tonic-gate lastpp--; 36010Sstevel@tonic-gate pgcnt = page_get_pagecnt(szc); 36020Sstevel@tonic-gate } 36030Sstevel@tonic-gate while (lastpp != firstpp) { 36040Sstevel@tonic-gate ASSERT(lastpp->p_szc == pszc); 36050Sstevel@tonic-gate lastpp->p_szc = szc; 36060Sstevel@tonic-gate lastpp--; 36070Sstevel@tonic-gate } 36080Sstevel@tonic-gate firstpp->p_szc = szc; 36090Sstevel@tonic-gate if (firstpp == rootpp) 36100Sstevel@tonic-gate break; 36110Sstevel@tonic-gate szc++; 36120Sstevel@tonic-gate } 36130Sstevel@tonic-gate x86_hm_exit(rootpp); 36140Sstevel@tonic-gate } 36150Sstevel@tonic-gate 36160Sstevel@tonic-gate /* 36170Sstevel@tonic-gate * get hw stats from hardware into page struct and reset hw stats 36180Sstevel@tonic-gate * returns attributes of page 36190Sstevel@tonic-gate * Flags for hat_pagesync, hat_getstat, hat_sync 36200Sstevel@tonic-gate * 36210Sstevel@tonic-gate * define HAT_SYNC_ZERORM 0x01 36220Sstevel@tonic-gate * 36230Sstevel@tonic-gate * Additional flags for hat_pagesync 36240Sstevel@tonic-gate * 36250Sstevel@tonic-gate * define HAT_SYNC_STOPON_REF 0x02 36260Sstevel@tonic-gate * define HAT_SYNC_STOPON_MOD 0x04 36270Sstevel@tonic-gate * define HAT_SYNC_STOPON_RM 0x06 36280Sstevel@tonic-gate * define HAT_SYNC_STOPON_SHARED 0x08 36290Sstevel@tonic-gate */ 36300Sstevel@tonic-gate uint_t 36310Sstevel@tonic-gate hat_pagesync(struct page *pp, uint_t flags) 36320Sstevel@tonic-gate { 36330Sstevel@tonic-gate hment_t *hm = NULL; 36340Sstevel@tonic-gate htable_t *ht; 36350Sstevel@tonic-gate uint_t entry; 36360Sstevel@tonic-gate x86pte_t old, save_old; 36370Sstevel@tonic-gate x86pte_t new; 36380Sstevel@tonic-gate uchar_t nrmbits = P_REF|P_MOD|P_RO; 36390Sstevel@tonic-gate extern ulong_t po_share; 36400Sstevel@tonic-gate page_t *save_pp = pp; 36410Sstevel@tonic-gate uint_t pszc = 0; 36420Sstevel@tonic-gate 36430Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp) || panicstr); 36440Sstevel@tonic-gate 36450Sstevel@tonic-gate if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD)) 36460Sstevel@tonic-gate return (pp->p_nrm & nrmbits); 36470Sstevel@tonic-gate 36480Sstevel@tonic-gate if ((flags & HAT_SYNC_ZERORM) == 0) { 36490Sstevel@tonic-gate 36500Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp)) 36510Sstevel@tonic-gate return (pp->p_nrm & nrmbits); 36520Sstevel@tonic-gate 36530Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp)) 36540Sstevel@tonic-gate return (pp->p_nrm & nrmbits); 36550Sstevel@tonic-gate 36560Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_SHARED) != 0 && 36570Sstevel@tonic-gate hat_page_getshare(pp) > po_share) { 36580Sstevel@tonic-gate if (PP_ISRO(pp)) 36590Sstevel@tonic-gate PP_SETREF(pp); 36600Sstevel@tonic-gate return (pp->p_nrm & nrmbits); 36610Sstevel@tonic-gate } 36620Sstevel@tonic-gate } 36630Sstevel@tonic-gate 36645084Sjohnlev XPV_DISALLOW_MIGRATE(); 36650Sstevel@tonic-gate next_size: 36660Sstevel@tonic-gate /* 36670Sstevel@tonic-gate * walk thru the mapping 
list syncing (and clearing) ref/mod bits. 36680Sstevel@tonic-gate */ 36690Sstevel@tonic-gate x86_hm_enter(pp); 36700Sstevel@tonic-gate while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) { 36710Sstevel@tonic-gate if (ht->ht_level < pszc) 36720Sstevel@tonic-gate continue; 36730Sstevel@tonic-gate old = x86pte_get(ht, entry); 36740Sstevel@tonic-gate try_again: 36750Sstevel@tonic-gate 36760Sstevel@tonic-gate ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum); 36770Sstevel@tonic-gate 36780Sstevel@tonic-gate if (PTE_GET(old, PT_REF | PT_MOD) == 0) 36790Sstevel@tonic-gate continue; 36800Sstevel@tonic-gate 36810Sstevel@tonic-gate save_old = old; 36820Sstevel@tonic-gate if ((flags & HAT_SYNC_ZERORM) != 0) { 36830Sstevel@tonic-gate 36840Sstevel@tonic-gate /* 36850Sstevel@tonic-gate * Need to clear ref or mod bits. Need to demap 36860Sstevel@tonic-gate * to make sure any executing TLBs see cleared bits. 36870Sstevel@tonic-gate */ 36880Sstevel@tonic-gate new = old; 36890Sstevel@tonic-gate PTE_CLR(new, PT_REF | PT_MOD); 36900Sstevel@tonic-gate old = hati_update_pte(ht, entry, old, new); 36910Sstevel@tonic-gate if (old != 0) 36920Sstevel@tonic-gate goto try_again; 36930Sstevel@tonic-gate 36940Sstevel@tonic-gate old = save_old; 36950Sstevel@tonic-gate } 36960Sstevel@tonic-gate 36970Sstevel@tonic-gate /* 36980Sstevel@tonic-gate * Sync the PTE 36990Sstevel@tonic-gate */ 37003446Smrj if (!(flags & HAT_SYNC_ZERORM) && 37013446Smrj PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC) 37020Sstevel@tonic-gate hati_sync_pte_to_page(pp, old, ht->ht_level); 37030Sstevel@tonic-gate 37040Sstevel@tonic-gate /* 37050Sstevel@tonic-gate * can stop short if we found a ref'd or mod'd page 37060Sstevel@tonic-gate */ 37070Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) || 37080Sstevel@tonic-gate (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) { 37090Sstevel@tonic-gate x86_hm_exit(pp); 37103446Smrj goto done; 37110Sstevel@tonic-gate } 37120Sstevel@tonic-gate } 37130Sstevel@tonic-gate x86_hm_exit(pp); 37140Sstevel@tonic-gate while (pszc < pp->p_szc) { 37150Sstevel@tonic-gate page_t *tpp; 37160Sstevel@tonic-gate pszc++; 37170Sstevel@tonic-gate tpp = PP_GROUPLEADER(pp, pszc); 37180Sstevel@tonic-gate if (pp != tpp) { 37190Sstevel@tonic-gate pp = tpp; 37200Sstevel@tonic-gate goto next_size; 37210Sstevel@tonic-gate } 37220Sstevel@tonic-gate } 37233446Smrj done: 37245084Sjohnlev XPV_ALLOW_MIGRATE(); 37250Sstevel@tonic-gate return (save_pp->p_nrm & nrmbits); 37260Sstevel@tonic-gate } 37270Sstevel@tonic-gate 37280Sstevel@tonic-gate /* 37290Sstevel@tonic-gate * returns approx number of mappings to this pp. A return of 0 implies 37300Sstevel@tonic-gate * there are no mappings to the page. 37310Sstevel@tonic-gate */ 37320Sstevel@tonic-gate ulong_t 37330Sstevel@tonic-gate hat_page_getshare(page_t *pp) 37340Sstevel@tonic-gate { 37350Sstevel@tonic-gate uint_t cnt; 37360Sstevel@tonic-gate cnt = hment_mapcnt(pp); 37371841Spraks #if defined(__amd64) 37381841Spraks if (vpm_enable && pp->p_vpmref) { 37391841Spraks cnt += 1; 37401841Spraks } 37411841Spraks #endif 37420Sstevel@tonic-gate return (cnt); 37430Sstevel@tonic-gate } 37440Sstevel@tonic-gate 37450Sstevel@tonic-gate /* 37464528Spaulsan * Return 1 if the number of mappings exceeds sh_thresh. Return 0 37474528Spaulsan * otherwise.
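 *
 * A hedged sketch of a typical caller, skipping heavily shared pages
 * (po_share is the same threshold hat_pagesync() consults):
 *
 *	if (hat_page_checkshare(pp, po_share))
 *		... too many mappings; leave the page alone ...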
37484528Spaulsan */ 37494528Spaulsan int 37504528Spaulsan hat_page_checkshare(page_t *pp, ulong_t sh_thresh) 37514528Spaulsan { 37524528Spaulsan return (hat_page_getshare(pp) > sh_thresh); 37534528Spaulsan } 37544528Spaulsan 37554528Spaulsan /* 37560Sstevel@tonic-gate * hat_softlock isn't supported anymore 37570Sstevel@tonic-gate */ 37580Sstevel@tonic-gate /*ARGSUSED*/ 37590Sstevel@tonic-gate faultcode_t 37600Sstevel@tonic-gate hat_softlock( 37610Sstevel@tonic-gate hat_t *hat, 37620Sstevel@tonic-gate caddr_t addr, 37630Sstevel@tonic-gate size_t *len, 37640Sstevel@tonic-gate struct page **page_array, 37650Sstevel@tonic-gate uint_t flags) 37660Sstevel@tonic-gate { 37670Sstevel@tonic-gate return (FC_NOSUPPORT); 37680Sstevel@tonic-gate } 37690Sstevel@tonic-gate 37700Sstevel@tonic-gate 37710Sstevel@tonic-gate 37720Sstevel@tonic-gate /* 37730Sstevel@tonic-gate * Routine to expose supported HAT features to platform-independent code. 37740Sstevel@tonic-gate */ 37750Sstevel@tonic-gate /*ARGSUSED*/ 37760Sstevel@tonic-gate int 37770Sstevel@tonic-gate hat_supported(enum hat_features feature, void *arg) 37780Sstevel@tonic-gate { 37790Sstevel@tonic-gate switch (feature) { 37800Sstevel@tonic-gate 37810Sstevel@tonic-gate case HAT_SHARED_PT: /* this is really ISM */ 37820Sstevel@tonic-gate return (1); 37830Sstevel@tonic-gate 37840Sstevel@tonic-gate case HAT_DYNAMIC_ISM_UNMAP: 37850Sstevel@tonic-gate return (0); 37860Sstevel@tonic-gate 37870Sstevel@tonic-gate case HAT_VMODSORT: 37880Sstevel@tonic-gate return (1); 37890Sstevel@tonic-gate 37904528Spaulsan case HAT_SHARED_REGIONS: 37914528Spaulsan return (0); 37924528Spaulsan 37930Sstevel@tonic-gate default: 37940Sstevel@tonic-gate panic("hat_supported() - unknown feature"); 37950Sstevel@tonic-gate } 37960Sstevel@tonic-gate return (0); 37970Sstevel@tonic-gate } 37980Sstevel@tonic-gate 37990Sstevel@tonic-gate /* 38000Sstevel@tonic-gate * Called when a thread is exiting and has been switched to the kernel AS 38010Sstevel@tonic-gate */ 38020Sstevel@tonic-gate void 38030Sstevel@tonic-gate hat_thread_exit(kthread_t *thd) 38040Sstevel@tonic-gate { 38050Sstevel@tonic-gate ASSERT(thd->t_procp->p_as == &kas); 38065084Sjohnlev XPV_DISALLOW_MIGRATE(); 38070Sstevel@tonic-gate hat_switch(thd->t_procp->p_as->a_hat); 38085084Sjohnlev XPV_ALLOW_MIGRATE(); 38090Sstevel@tonic-gate } 38100Sstevel@tonic-gate 38110Sstevel@tonic-gate /* 38120Sstevel@tonic-gate * Set up the given brand-new hat structure as the new HAT on this cpu's mmu. 38130Sstevel@tonic-gate */ 38140Sstevel@tonic-gate /*ARGSUSED*/ 38150Sstevel@tonic-gate void 38160Sstevel@tonic-gate hat_setup(hat_t *hat, int flags) 38170Sstevel@tonic-gate { 38185084Sjohnlev XPV_DISALLOW_MIGRATE(); 38190Sstevel@tonic-gate kpreempt_disable(); 38200Sstevel@tonic-gate 38210Sstevel@tonic-gate hat_switch(hat); 38220Sstevel@tonic-gate 38230Sstevel@tonic-gate kpreempt_enable(); 38245084Sjohnlev XPV_ALLOW_MIGRATE(); 38250Sstevel@tonic-gate } 38260Sstevel@tonic-gate 38270Sstevel@tonic-gate /* 38280Sstevel@tonic-gate * Prepare for a CPU private mapping for the given address. 38290Sstevel@tonic-gate * 38300Sstevel@tonic-gate * The address can only be used from a single CPU and can be remapped 38310Sstevel@tonic-gate * using hat_mempte_remap(). Return the address of the PTE. 38320Sstevel@tonic-gate * 38330Sstevel@tonic-gate * We do the htable_create() if necessary and increment the valid count so 38340Sstevel@tonic-gate * the htable can't disappear.
We also hat_devload() the page table into 38350Sstevel@tonic-gate * the kernel so that the PTE is quickly accessed. 38360Sstevel@tonic-gate */ 38373446Smrj hat_mempte_t 38383446Smrj hat_mempte_setup(caddr_t addr) 38390Sstevel@tonic-gate { 38400Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr; 38410Sstevel@tonic-gate htable_t *ht; 38420Sstevel@tonic-gate uint_t entry; 38430Sstevel@tonic-gate x86pte_t oldpte; 38443446Smrj hat_mempte_t p; 38450Sstevel@tonic-gate 38460Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(va)); 38470Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va)); 38484004Sjosephb ++curthread->t_hatdepth; 38495741Smrj XPV_DISALLOW_MIGRATE(); 38500Sstevel@tonic-gate ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0); 38510Sstevel@tonic-gate if (ht == NULL) { 38520Sstevel@tonic-gate ht = htable_create(kas.a_hat, va, 0, NULL); 38530Sstevel@tonic-gate entry = htable_va2entry(va, ht); 38540Sstevel@tonic-gate ASSERT(ht->ht_level == 0); 38550Sstevel@tonic-gate oldpte = x86pte_get(ht, entry); 38560Sstevel@tonic-gate } 38570Sstevel@tonic-gate if (PTE_ISVALID(oldpte)) 38580Sstevel@tonic-gate panic("hat_mempte_setup(): address already mapped " 38590Sstevel@tonic-gate "ht=%p, entry=%d, pte=" FMT_PTE, ht, entry, oldpte); 38600Sstevel@tonic-gate 38610Sstevel@tonic-gate /* 38620Sstevel@tonic-gate * increment ht_valid_cnt so that the pagetable can't disappear 38630Sstevel@tonic-gate */ 38640Sstevel@tonic-gate HTABLE_INC(ht->ht_valid_cnt); 38650Sstevel@tonic-gate 38660Sstevel@tonic-gate /* 38673446Smrj * return the PTE physical address to the caller. 38680Sstevel@tonic-gate */ 38690Sstevel@tonic-gate htable_release(ht); 38705741Smrj XPV_ALLOW_MIGRATE(); 38713446Smrj p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry); 38724004Sjosephb --curthread->t_hatdepth; 38733446Smrj return (p); 38740Sstevel@tonic-gate } 38750Sstevel@tonic-gate 38760Sstevel@tonic-gate /* 38770Sstevel@tonic-gate * Release a CPU private mapping for the given address. 38780Sstevel@tonic-gate * We decrement the htable valid count so it might be destroyed.
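 *
 * A minimal sketch of the full CPU-private PTE lifecycle, using only
 * the interfaces defined in this file (preemption handling elided):
 *
 *	hat_mempte_t pte_pa = hat_mempte_setup(addr);
 *	...
 *	hat_mempte_remap(pfn, addr, pte_pa, attr, flags);
 *	... use the mapping at addr on this CPU only ...
 *	hat_mempte_release(addr, pte_pa);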
38790Sstevel@tonic-gate */ 38803446Smrj /*ARGSUSED1*/ 38810Sstevel@tonic-gate void 38823446Smrj hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa) 38830Sstevel@tonic-gate { 38840Sstevel@tonic-gate htable_t *ht; 38850Sstevel@tonic-gate 38865741Smrj XPV_DISALLOW_MIGRATE(); 38870Sstevel@tonic-gate /* 38883446Smrj * invalidate any left over mapping and decrement the htable valid count 38890Sstevel@tonic-gate */ 38905084Sjohnlev #ifdef __xpv 38915084Sjohnlev if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0, 38925084Sjohnlev UVMF_INVLPG | UVMF_LOCAL)) 38935084Sjohnlev panic("HYPERVISOR_update_va_mapping() failed"); 38945084Sjohnlev #else 38953446Smrj { 38963446Smrj x86pte_t *pteptr; 38973446Smrj 38983446Smrj pteptr = x86pte_mapin(mmu_btop(pte_pa), 38993446Smrj (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); 39003446Smrj if (mmu.pae_hat) 39013446Smrj *pteptr = 0; 39023446Smrj else 39033446Smrj *(x86pte32_t *)pteptr = 0; 39043446Smrj mmu_tlbflush_entry(addr); 39053446Smrj x86pte_mapout(); 39063446Smrj } 39075084Sjohnlev #endif 39083446Smrj 39090Sstevel@tonic-gate ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0); 39100Sstevel@tonic-gate if (ht == NULL) 39110Sstevel@tonic-gate panic("hat_mempte_release(): invalid address"); 39120Sstevel@tonic-gate ASSERT(ht->ht_level == 0); 39130Sstevel@tonic-gate HTABLE_DEC(ht->ht_valid_cnt); 39140Sstevel@tonic-gate htable_release(ht); 39155741Smrj XPV_ALLOW_MIGRATE(); 39160Sstevel@tonic-gate } 39170Sstevel@tonic-gate 39180Sstevel@tonic-gate /* 39190Sstevel@tonic-gate * Apply a temporary CPU private mapping to a page. We flush the TLB only 39200Sstevel@tonic-gate * on this CPU, so this ought to have been called with preemption disabled. 39210Sstevel@tonic-gate */ 39220Sstevel@tonic-gate void 39230Sstevel@tonic-gate hat_mempte_remap( 39243446Smrj pfn_t pfn, 39253446Smrj caddr_t addr, 39263446Smrj hat_mempte_t pte_pa, 39273446Smrj uint_t attr, 39283446Smrj uint_t flags) 39290Sstevel@tonic-gate { 39300Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr; 39310Sstevel@tonic-gate x86pte_t pte; 39320Sstevel@tonic-gate 39330Sstevel@tonic-gate /* 39340Sstevel@tonic-gate * Remap the given PTE to the new page's PFN. Invalidate only 39350Sstevel@tonic-gate * on this CPU. 
39360Sstevel@tonic-gate */ 39370Sstevel@tonic-gate #ifdef DEBUG 39380Sstevel@tonic-gate htable_t *ht; 39390Sstevel@tonic-gate uint_t entry; 39400Sstevel@tonic-gate 39410Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(va)); 39420Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va)); 39430Sstevel@tonic-gate ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0); 39440Sstevel@tonic-gate ASSERT(ht != NULL); 39450Sstevel@tonic-gate ASSERT(ht->ht_level == 0); 39460Sstevel@tonic-gate ASSERT(ht->ht_valid_cnt > 0); 39473446Smrj ASSERT(ht->ht_pfn == mmu_btop(pte_pa)); 39480Sstevel@tonic-gate htable_release(ht); 39490Sstevel@tonic-gate #endif 39505084Sjohnlev XPV_DISALLOW_MIGRATE(); 39510Sstevel@tonic-gate pte = hati_mkpte(pfn, attr, 0, flags); 39525084Sjohnlev #ifdef __xpv 39535084Sjohnlev if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL)) 39545084Sjohnlev panic("HYPERVISOR_update_va_mapping() failed"); 39555084Sjohnlev #else 39563446Smrj { 39573446Smrj x86pte_t *pteptr; 39583446Smrj 39593446Smrj pteptr = x86pte_mapin(mmu_btop(pte_pa), 39603446Smrj (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); 39613446Smrj if (mmu.pae_hat) 39623446Smrj *(x86pte_t *)pteptr = pte; 39633446Smrj else 39643446Smrj *(x86pte32_t *)pteptr = (x86pte32_t)pte; 39653446Smrj mmu_tlbflush_entry(addr); 39663446Smrj x86pte_mapout(); 39673446Smrj } 39685084Sjohnlev #endif 39695084Sjohnlev XPV_ALLOW_MIGRATE(); 39700Sstevel@tonic-gate } 39710Sstevel@tonic-gate 39720Sstevel@tonic-gate 39730Sstevel@tonic-gate 39740Sstevel@tonic-gate /* 39750Sstevel@tonic-gate * Hat locking functions 39760Sstevel@tonic-gate * XXX - these two functions are currently being used by hatstats; 39770Sstevel@tonic-gate * they can be removed by using a per-as mutex for hatstats. 39780Sstevel@tonic-gate */ 39790Sstevel@tonic-gate void 39800Sstevel@tonic-gate hat_enter(hat_t *hat) 39810Sstevel@tonic-gate { 39820Sstevel@tonic-gate mutex_enter(&hat->hat_mutex); 39830Sstevel@tonic-gate } 39840Sstevel@tonic-gate 39850Sstevel@tonic-gate void 39860Sstevel@tonic-gate hat_exit(hat_t *hat) 39870Sstevel@tonic-gate { 39880Sstevel@tonic-gate mutex_exit(&hat->hat_mutex); 39890Sstevel@tonic-gate } 39900Sstevel@tonic-gate 39910Sstevel@tonic-gate /* 39923446Smrj * HAT part of cpu initialization. 39930Sstevel@tonic-gate */ 39940Sstevel@tonic-gate void 39950Sstevel@tonic-gate hat_cpu_online(struct cpu *cpup) 39960Sstevel@tonic-gate { 39970Sstevel@tonic-gate if (cpup != CPU) { 39983446Smrj x86pte_cpu_init(cpup); 39990Sstevel@tonic-gate hat_vlp_setup(cpup); 40000Sstevel@tonic-gate } 40010Sstevel@tonic-gate CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id); 40020Sstevel@tonic-gate } 40030Sstevel@tonic-gate 40040Sstevel@tonic-gate /* 40053446Smrj * HAT part of cpu deletion. 40063446Smrj * (currently, we only call this after the cpu is safely passivated.) 40073446Smrj */ 40083446Smrj void 40093446Smrj hat_cpu_offline(struct cpu *cpup) 40103446Smrj { 40113446Smrj ASSERT(cpup != CPU); 40123446Smrj 40133446Smrj CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id); 40143446Smrj x86pte_cpu_fini(cpup); 40153446Smrj hat_vlp_teardown(cpup); 40163446Smrj } 40173446Smrj 40183446Smrj /* 40190Sstevel@tonic-gate * Function called after all CPUs are brought online. 40200Sstevel@tonic-gate * Used to remove low address boot mappings.
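 *
 * (The expected use is a single call from startup code, something
 * like clear_boot_mappings(0, _userlimit); the exact range is the
 * caller's choice.)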

/*
 * HAT part of CPU initialization.
 */
void
hat_cpu_online(struct cpu *cpup)
{
	if (cpup != CPU) {
		x86pte_cpu_init(cpup);
		hat_vlp_setup(cpup);
	}
	CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
}

/*
 * HAT part of CPU deletion.
 * (Currently we only call this after the CPU is safely passivated.)
 */
void
hat_cpu_offline(struct cpu *cpup)
{
	ASSERT(cpup != CPU);

	CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
	x86pte_cpu_fini(cpup);
	hat_vlp_teardown(cpup);
}

/*
 * Function called after all CPUs are brought online.
 * Used to remove low address boot mappings.
 */
void
clear_boot_mappings(uintptr_t low, uintptr_t high)
{
	uintptr_t vaddr = low;
	htable_t *ht = NULL;
	level_t level;
	uint_t entry;
	x86pte_t pte;

	/*
	 * On the first CPU, we can unload the prom mappings; basically we
	 * blow away all virtual mappings under _userlimit.
	 */
	while (vaddr < high) {
		pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
		if (ht == NULL)
			break;

		level = ht->ht_level;
		entry = htable_va2entry(vaddr, ht);
		ASSERT(level <= mmu.max_page_level);
		ASSERT(PTE_ISPAGE(pte, level));

		/*
		 * Unload the mapping from the page tables.
		 */
		(void) x86pte_inval(ht, entry, 0, NULL);
		ASSERT(ht->ht_valid_cnt > 0);
		HTABLE_DEC(ht->ht_valid_cnt);
		PGCNT_DEC(ht->ht_hat, ht->ht_level);

		vaddr += LEVEL_SIZE(ht->ht_level);
	}
	if (ht)
		htable_release(ht);
}
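
/*
 * Editorial sketch of the htable_walk() idiom used by clear_boot_mappings()
 * above: a hypothetical read-only walk that just counts the mappings in
 * [low, high).  The htable cursor is carried across iterations and must be
 * released if the walk is still holding one when it stops.
 */
static ulong_t
example_count_mappings(uintptr_t low, uintptr_t high)
{
	uintptr_t vaddr = low;
	htable_t *ht = NULL;
	ulong_t cnt = 0;

	while (vaddr < high) {
		(void) htable_walk(kas.a_hat, &ht, &vaddr, high);
		if (ht == NULL)
			break;			/* no more mappings */
		++cnt;
		vaddr += LEVEL_SIZE(ht->ht_level);
	}
	if (ht)
		htable_release(ht);
	return (cnt);
}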

/*
 * Atomically install a new translation for a single page.  If the
 * currently installed PTE doesn't match the value we expect to find,
 * the PTE is not updated and we return the value we found.
 *
 * If we are activating NOSYNC or removing write permission (NOWRITE) and
 * the page was modified, we need to sync the ref/mod state into the page_t.
 * We also sync with the page_t when clearing ref/mod bits.
 */
static x86pte_t
hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
{
	page_t *pp;
	uint_t rm = 0;
	x86pte_t replaced;

	if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
	    PTE_GET(expected, PT_MOD | PT_REF) &&
	    (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
	    !PTE_GET(new, PT_MOD | PT_REF))) {

		ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
		pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
		ASSERT(pp != NULL);
		if (PTE_GET(expected, PT_MOD))
			rm |= P_MOD;
		if (PTE_GET(expected, PT_REF))
			rm |= P_REF;
		PTE_CLR(new, PT_MOD | PT_REF);
	}

	replaced = x86pte_update(ht, entry, expected, new);
	if (replaced != expected)
		return (replaced);

	if (rm) {
		/*
		 * Sync to all constituent pages of a large page.
		 */
		pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
		ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
		while (pgcnt-- > 0) {
			/*
			 * hat_page_demote() can't decrease p_szc below this
			 * mapping's size, since the large mapping existed
			 * after we took the mlist lock.
			 */
			ASSERT(pp->p_szc >= ht->ht_level);
			hat_page_setattr(pp, rm);
			++pp;
		}
	}

	return (0);
}
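
/*
 * Editorial sketch of the compare-and-swap retry idiom hati_update_pte()'s
 * return value implies: a hypothetical caller re-reads the PTE whenever
 * the update loses a race, then tries again.  The PT_NOSYNC modification
 * is only an example.
 */
static void
example_pte_update_retry(htable_t *ht, uint_t entry)
{
	x86pte_t old = x86pte_get(ht, entry);
	x86pte_t new, found;

	for (;;) {
		new = old;
		PTE_SET(new, PT_NOSYNC);
		found = hati_update_pte(ht, entry, old, new);
		if (found == 0)
			break;		/* the update landed */
		old = found;		/* lost a race; retry with new value */
	}
}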

/* ARGSUSED */
void
hat_join_srd(struct hat *hat, vnode_t *evp)
{
}

/* ARGSUSED */
hat_region_cookie_t
hat_join_region(struct hat *hat,
    caddr_t r_saddr,
    size_t r_size,
    void *r_obj,
    u_offset_t r_objoff,
    uchar_t r_perm,
    uchar_t r_pgszc,
    hat_rgn_cb_func_t r_cb_function,
    uint_t flags)
{
	panic("No shared region support on x86");
	return (HAT_INVALID_REGION_COOKIE);
}

/* ARGSUSED */
void
hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
{
	panic("No shared region support on x86");
}

/* ARGSUSED */
void
hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
{
	panic("No shared region support on x86");
}

/*
 * Kernel Physical Mapping (kpm) facility
 *
 * Most of the routines needed to support segkpm are almost no-ops on the
 * x86 platform.  We map in the entire segment when it is created and leave
 * it mapped in, so there is no additional work required to set up and tear
 * down individual mappings.  All of these routines were created to support
 * SPARC platforms that have to avoid aliasing in their virtually indexed
 * caches.
 *
 * Most of the routines have sanity checks in them (e.g. verifying that the
 * passed-in page is locked).  We don't actually care about most of these
 * checks on x86, but we leave them in place to identify problems in the
 * upper levels.
 */
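
/*
 * Editorial sketch of the linear translation the kpm routines below
 * implement: va and pfn convert back and forth by simple arithmetic on
 * kpm_vbase, so the round trip is the identity.
 */
static void
example_kpm_round_trip(pfn_t pfn)
{
	caddr_t va = (caddr_t)((uintptr_t)kpm_vbase + mmu_ptob(pfn));

	ASSERT(hat_kpm_pfn2va(pfn) == va);
	ASSERT(hat_kpm_va2pfn(va) == pfn);
}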

/*
 * Map in a locked page and return the vaddr.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_mapin(struct page *pp, struct kpme *kpme)
{
	caddr_t		vaddr;

#ifdef DEBUG
	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
		return ((caddr_t)NULL);
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
		return ((caddr_t)NULL);
	}
#endif

	vaddr = hat_kpm_page2va(pp, 1);

	return (vaddr);
}

/*
 * Map out a locked page.
 */
/*ARGSUSED*/
void
hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
{
#ifdef DEBUG
	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
		return;
	}

	if (IS_KPM_ADDR(vaddr) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
		return;
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
		return;
	}
#endif
}

/*
 * Return the kpm virtual address for a specific pfn.
 */
caddr_t
hat_kpm_pfn2va(pfn_t pfn)
{
	uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);

	ASSERT(!pfn_is_foreign(pfn));
	return ((caddr_t)vaddr);
}

/*
 * Return the kpm virtual address for the page at pp.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_page2va(struct page *pp, int checkswap)
{
	return (hat_kpm_pfn2va(pp->p_pagenum));
}
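
/*
 * Editorial sketch (hypothetical helper): since kpm keeps every physical
 * page permanently mapped on x86, copying page contents is just
 * mapin/bcopy/mapout with no mapping setup cost.  Both pages must already
 * be locked by the caller.
 */
static void
example_kpm_copy_page(page_t *src, page_t *dst)
{
	caddr_t from = hat_kpm_mapin(src, NULL);
	caddr_t to = hat_kpm_mapin(dst, NULL);

	bcopy(from, to, MMU_PAGESIZE);
	hat_kpm_mapout(dst, NULL, to);
	hat_kpm_mapout(src, NULL, from);
}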

/*
 * Return the page frame number for the kpm virtual address vaddr.
 */
pfn_t
hat_kpm_va2pfn(caddr_t vaddr)
{
	pfn_t pfn;

	ASSERT(IS_KPM_ADDR(vaddr));

	pfn = (pfn_t)btop(vaddr - kpm_vbase);

	return (pfn);
}

/*
 * Return the page for the kpm virtual address vaddr.
 */
page_t *
hat_kpm_vaddr2page(caddr_t vaddr)
{
	pfn_t		pfn;

	ASSERT(IS_KPM_ADDR(vaddr));

	pfn = hat_kpm_va2pfn(vaddr);

	return (page_numtopp_nolock(pfn));
}

/*
 * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
 * KPM page.  This should never happen on x86.
 */
int
hat_kpm_fault(hat_t *hat, caddr_t vaddr)
{
	panic("pagefault in seg_kpm. hat: 0x%p  vaddr: 0x%p",
	    (void *)hat, (void *)vaddr);

	return (0);
}

/*ARGSUSED*/
void
hat_kpm_mseghash_clear(int nentries)
{}

/*ARGSUSED*/
void
hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
{}

#ifdef __xpv
/*
 * There are specific Hypervisor calls to establish and remove mappings
 * to grant table references and the privcmd driver.  We have to ensure
 * that a page table actually exists.
 */
void
hat_prepare_mapping(hat_t *hat, caddr_t addr)
{
	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
	XPV_DISALLOW_MIGRATE();
	(void) htable_create(hat, (uintptr_t)addr, 0, NULL);
	XPV_ALLOW_MIGRATE();
}

void
hat_release_mapping(hat_t *hat, caddr_t addr)
{
	htable_t *ht;

	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
	XPV_DISALLOW_MIGRATE();
	ht = htable_lookup(hat, (uintptr_t)addr, 0);
	ASSERT(ht != NULL);
	ASSERT(ht->ht_busy >= 2);
	htable_release(ht);
	htable_release(ht);
	XPV_ALLOW_MIGRATE();
}
#endif
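
#ifdef __xpv
/*
 * Editorial sketch of how hat_prepare_mapping()/hat_release_mapping() are
 * meant to bracket a hypervisor-installed mapping; the commented middle
 * steps stand in for whatever grant-table or privcmd operation the caller
 * actually performs.
 */
static void
example_xpv_mapping_bracket(hat_t *hat, caddr_t addr)
{
	hat_prepare_mapping(hat, addr);	/* ensure a level 0 pagetable exists */
	/* ... hypervisor call that installs the mapping at addr ... */
	/* ... matching hypervisor call that removes it again ... */
	hat_release_mapping(hat, addr);	/* drop the extra htable hold */
}
#endif	/* __xpv */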