/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - Hardware Address Translation management for i386 and amd64
 *
 * Implementation of the interfaces described in <common/vm/hat.h>
 *
 * Nearly all the details of how the hardware is managed should not be
 * visible outside this layer except for misc. machine specific functions
 * that work in conjunction with this code.
 *
 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
 */

#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/shm.h>
#include <sys/sysmacros.h>
#include <sys/machparam.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/controlregs.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/bootinfo.h>
#include <sys/archsystm.h>

#include <vm/seg_kmem.h>
#include <vm/hat_i86.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/page.h>
#include <vm/seg_kp.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>
#include <vm/kboot_mmu.h>
#include <vm/seg_spt.h>

#include <sys/cmn_err.h>

/*
 * Basic parameters for hat operation.
 */
struct hat_mmu_info mmu;

/*
 * The page that is the kernel's top level pagetable.
 *
 * For 32 bit VLP support, the kernel hat will use the 1st 4 entries
 * on this 4K page for its top level page table. The remaining groups of
 * 4 entries are used for per processor copies of user VLP pagetables for
 * running threads. See hat_switch() and reload_pae32() for details.
 *
 * vlp_page[0] - 0th level==2 PTE for kernel HAT (will be zero)
 * vlp_page[1] - 1st level==2 PTE for kernel HAT (will be zero)
 * vlp_page[2] - 2nd level==2 PTE for kernel HAT (zero for small memory)
 * vlp_page[3] - 3rd level==2 PTE for kernel
 *
 * vlp_page[4] - 0th level==2 PTE for user thread on cpu 0
 * vlp_page[5] - 1st level==2 PTE for user thread on cpu 0
 * vlp_page[6] - 2nd level==2 PTE for user thread on cpu 0
 * vlp_page[7] - probably copy of kernel PTE
 *
 * vlp_page[8]  - 0th level==2 PTE for user thread on cpu 1
 * vlp_page[9]  - 1st level==2 PTE for user thread on cpu 1
 * vlp_page[10] - 2nd level==2 PTE for user thread on cpu 1
 * vlp_page[11] - probably copy of kernel PTE
 * ...
 *
 * when / where the kernel PTE's are (entry 2 or 3 or none) depends
 * on kernelbase.
 */
static x86pte_t *vlp_page;
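
/*
 * Illustrative sketch (not part of the original source): given the layout
 * above, the 4-entry VLP slot belonging to a cpu is found by simple
 * pointer arithmetic, as reload_pae32() below does:
 *
 *	x86pte_t *slot = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
 *
 * Slot 0 (entries 0..3) is the kernel hat's own top level table.
 */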

/*
 * forward declaration of internal utility routines
 */
static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
    x86pte_t new);

/*
 * The kernel address space exists in all HATs. To implement this the
 * kernel reserves a fixed number of entries in every topmost level page
 * table. The values are set up in hat_init() and then copied to every hat
 * created by hat_alloc(). This means that kernelbase must be:
 *
 *        4Meg aligned for 32 bit kernels
 *      512Gig aligned for x86_64 64 bit kernel
 *
 * The PAE 32 bit hat is handled as a special case. Otherwise requiring 1Gig
 * alignment would use too much VA for the kernel.
 */
static uint_t   khat_start;     /* index of 1st entry in kernel's top ptable */
static uint_t   khat_entries;   /* number of entries in kernel's top ptable */

#if defined(__i386)

static htable_t *khat_pae32_htable = NULL;
static uint_t   khat_pae32_start;
static uint_t   khat_pae32_entries;

#endif

uint_t use_boot_reserve = 1;    /* cleared after early boot process */
uint_t can_steal_post_boot = 0; /* set late in boot to enable stealing */

/*
 * A cpuset for all cpus. This is used for kernel address cross calls, since
 * the kernel addresses apply to all cpus.
 */
cpuset_t khat_cpuset;

/*
 * management stuff for hat structures
 */
kmutex_t        hat_list_lock;
kcondvar_t      hat_list_cv;
kmem_cache_t    *hat_cache;
kmem_cache_t    *hat_hash_cache;
kmem_cache_t    *vlp_hash_cache;

/*
 * Simple statistics
 */
struct hatstats hatstat;

/*
 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
 */
extern void atomic_orb(uchar_t *addr, uchar_t val);
extern void atomic_andb(uchar_t *addr, uchar_t val);

#define PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
#define PP_ISMOD(pp)            PP_GETRM(pp, P_MOD)
#define PP_ISREF(pp)            PP_GETRM(pp, P_REF)
#define PP_ISRO(pp)             PP_GETRM(pp, P_RO)

#define PP_SETRM(pp, rm)        atomic_orb(&(pp->p_nrm), rm)
#define PP_SETMOD(pp)           PP_SETRM(pp, P_MOD)
#define PP_SETREF(pp)           PP_SETRM(pp, P_REF)
#define PP_SETRO(pp)            PP_SETRM(pp, P_RO)

#define PP_CLRRM(pp, rm)        atomic_andb(&(pp->p_nrm), ~(rm))
#define PP_CLRMOD(pp)           PP_CLRRM(pp, P_MOD)
#define PP_CLRREF(pp)           PP_CLRRM(pp, P_REF)
#define PP_CLRRO(pp)            PP_CLRRM(pp, P_RO)
#define PP_CLRALL(pp)           PP_CLRRM(pp, P_MOD | P_REF | P_RO)
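
/*
 * Example (illustrative, not in the original source): with the macros
 * above, marking a page both referenced and modified is a single atomic
 * byte OR on p_nrm:
 *
 *	PP_SETRM(pp, P_REF | P_MOD);
 *
 * and PP_CLRALL(pp) clears REF, MOD and RO together via atomic_andb().
 */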

/*
 * kmem cache constructor for struct hat
 */
/*ARGSUSED*/
static int
hati_constructor(void *buf, void *handle, int kmflags)
{
        hat_t   *hat = buf;

        mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
        bzero(hat->hat_pages_mapped,
            sizeof (pgcnt_t) * (mmu.max_page_level + 1));
        hat->hat_ism_pgcnt = 0;
        hat->hat_stats = 0;
        hat->hat_flags = 0;
        CPUSET_ZERO(hat->hat_cpus);
        hat->hat_htable = NULL;
        hat->hat_ht_hash = NULL;
        return (0);
}

/*
 * Allocate a hat structure for as. We also create the top level
 * htable and initialize it to contain the kernel hat entries.
 */
hat_t *
hat_alloc(struct as *as)
{
        hat_t           *hat;
        htable_t        *ht;    /* top level htable */
        uint_t          use_vlp;

        /*
         * Once we start creating user process HATs we can enable
         * the htable_steal() code.
         */
        if (can_steal_post_boot == 0)
                can_steal_post_boot = 1;

        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
        hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
        hat->hat_as = as;
        mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
        ASSERT(hat->hat_flags == 0);

        /*
         * a 32 bit process uses a VLP style hat when using PAE
         */
#if defined(__amd64)
        use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
#elif defined(__i386)
        use_vlp = mmu.pae_hat;
#endif
        if (use_vlp) {
                hat->hat_flags = HAT_VLP;
                bzero(hat->hat_vlp_ptes, VLP_SIZE);
        }

        /*
         * Allocate the htable hash
         */
        if ((hat->hat_flags & HAT_VLP)) {
                hat->hat_num_hash = mmu.vlp_hash_cnt;
                hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
        } else {
                hat->hat_num_hash = mmu.hash_cnt;
                hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
        }
        bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));

        /*
         * Initialize Kernel HAT entries at the top of the top level page
         * table for the new hat.
         *
         * Note that we don't call htable_release() for the top level, that
         * happens when the hat is destroyed in hat_free_end()
         */
        hat->hat_htable = NULL;
        hat->hat_ht_cached = NULL;
        ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);

        if (!(hat->hat_flags & HAT_VLP))
                x86pte_copy(kas.a_hat->hat_htable, ht, khat_start,
                    khat_entries);
#if defined(__i386)
        else if (khat_entries > 0)
                bcopy(vlp_page + khat_start, hat->hat_vlp_ptes + khat_start,
                    khat_entries * sizeof (x86pte_t));
#endif
        hat->hat_htable = ht;

#if defined(__i386)
        /*
         * PAE32 HAT alignment is less restrictive than the others to keep
         * the kernel from using too much VA. Because of this we may need
         * one layer further down when kernelbase isn't 1Gig aligned.
         * See hat_free_end() for the htable_release() that goes with this
         * htable_create()
         */
        if (khat_pae32_htable != NULL) {
                ht = htable_create(hat, kernelbase,
                    khat_pae32_htable->ht_level, NULL);
                x86pte_copy(khat_pae32_htable, ht, khat_pae32_start,
                    khat_pae32_entries);
                ht->ht_valid_cnt = khat_pae32_entries;
        }
#endif

        /*
         * Put it at the start of the global list of all hats (used by stealing)
         *
         * kas.a_hat is not in the list but is instead used to find the
         * first and last items in the list.
         *
         * - kas.a_hat->hat_next points to the start of the user hats.
         *   The list ends where hat->hat_next == NULL
         *
         * - kas.a_hat->hat_prev points to the last of the user hats.
         *   The list begins where hat->hat_prev == NULL
         */
        mutex_enter(&hat_list_lock);
        hat->hat_prev = NULL;
        hat->hat_next = kas.a_hat->hat_next;
        if (hat->hat_next)
                hat->hat_next->hat_prev = hat;
        else
                kas.a_hat->hat_prev = hat;
        kas.a_hat->hat_next = hat;
        mutex_exit(&hat_list_lock);

        return (hat);
}

/*
 * process has finished executing but as has not been cleaned up yet.
 */
/*ARGSUSED*/
void
hat_free_start(hat_t *hat)
{
        ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));

        /*
         * If the hat is currently a stealing victim, wait for the stealing
         * to finish.  Once we mark it as HAT_FREEING, htable_steal()
         * won't look at its pagetables anymore.
         */
        mutex_enter(&hat_list_lock);
        while (hat->hat_flags & HAT_VICTIM)
                cv_wait(&hat_list_cv, &hat_list_lock);
        hat->hat_flags |= HAT_FREEING;
        mutex_exit(&hat_list_lock);
}
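
/*
 * Illustrative sketch (not in the original source): walking every user
 * hat using kas.a_hat as the list sentinel described in hat_alloc():
 *
 *	hat_t *h;
 *
 *	mutex_enter(&hat_list_lock);
 *	for (h = kas.a_hat->hat_next; h != NULL; h = h->hat_next) {
 *		... e.g. skip hats marked HAT_FREEING or HAT_VICTIM ...
 *	}
 *	mutex_exit(&hat_list_lock);
 */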

/*
 * An address space is being destroyed, so we destroy the associated hat.
 */
void
hat_free_end(hat_t *hat)
{
        int i;
        kmem_cache_t *cache;

#ifdef DEBUG
        for (i = 0; i <= mmu.max_page_level; i++)
                ASSERT(hat->hat_pages_mapped[i] == 0);
#endif
        ASSERT(hat->hat_flags & HAT_FREEING);

        /*
         * must not be running on the given hat
         */
        ASSERT(CPU->cpu_current_hat != hat);

        /*
         * Remove it from the list of HATs
         */
        mutex_enter(&hat_list_lock);
        if (hat->hat_prev)
                hat->hat_prev->hat_next = hat->hat_next;
        else
                kas.a_hat->hat_next = hat->hat_next;
        if (hat->hat_next)
                hat->hat_next->hat_prev = hat->hat_prev;
        else
                kas.a_hat->hat_prev = hat->hat_prev;
        mutex_exit(&hat_list_lock);
        hat->hat_next = hat->hat_prev = NULL;

        /*
         * Make a pass through the htables freeing them all up.
         */
        htable_purge_hat(hat);

        /*
         * Decide which kmem cache the hash table came from, then free it.
         */
        if (hat->hat_flags & HAT_VLP)
                cache = vlp_hash_cache;
        else
                cache = hat_hash_cache;
        kmem_cache_free(cache, hat->hat_ht_hash);
        hat->hat_ht_hash = NULL;

        hat->hat_flags = 0;
        kmem_cache_free(hat_cache, hat);
}

/*
 * round kernelbase down to a supported value to use for _userlimit
 *
 * userlimit must be aligned down to an entry in the top level htable.
 * The one exception is for 32 bit HAT's running PAE.
 */
uintptr_t
hat_kernelbase(uintptr_t va)
{
#if defined(__i386)
        va &= LEVEL_MASK(1);
#endif
        if (IN_VA_HOLE(va))
                panic("_userlimit %p will fall in VA hole\n", (void *)va);
        return (va);
}

/*
 * Initialize hat data structures based on processor MMU information.
 */
void
mmu_init(void)
{
        uint_t max_htables;
        uint_t pa_bits;
        uint_t va_bits;
        int i;

        /*
         * If the CPU enabled the page table global bit, use it for the
         * kernel.  This is bit 7 in CR4 (PGE - Page Global Enable).
         */
        if ((x86_feature & X86_PGE) != 0 && (getcr4() & CR4_PGE) != 0)
                mmu.pt_global = PT_GLOBAL;

        /*
         * Detect NX and PAE usage.
         */
        mmu.pae_hat = kbm_pae_support;
        if (kbm_nx_support)
                mmu.pt_nx = PT_NX;
        else
                mmu.pt_nx = 0;

        /*
         * Intel CPUs allow speculative caching (in TLB-like h/w) of
         * entries in upper page tables even though there may not be
         * any valid entries in lower tables. This implies we have to
         * re-INVLPG at every upper page table entry invalidation.
         */
        if (cpuid_getvendor(CPU) == X86_VENDOR_Intel)
                mmu.inval_nonleaf = 1;
        else
                mmu.inval_nonleaf = 0;

        /*
         * Use CPU info to set various MMU parameters
         */
        cpuid_get_addrsize(CPU, &pa_bits, &va_bits);

        if (va_bits < sizeof (void *) * NBBY) {
                mmu.hole_start = (1ul << (va_bits - 1));
                mmu.hole_end = 0ul - mmu.hole_start - 1;
        } else {
                mmu.hole_end = 0;
                mmu.hole_start = mmu.hole_end - 1;
        }
#if defined(OPTERON_ERRATUM_121)
        /*
         * If erratum 121 has already been detected at this time, hole_start
         * contains the value to be subtracted from mmu.hole_start.
         */
        ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
        hole_start = mmu.hole_start - hole_start;
#else
        hole_start = mmu.hole_start;
#endif
        hole_end = mmu.hole_end;

        mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
        if (mmu.pae_hat == 0 && pa_bits > 32)
                mmu.highest_pfn = PFN_4G - 1;

        if (mmu.pae_hat) {
                mmu.pte_size = 8;       /* 8 byte PTEs */
                mmu.pte_size_shift = 3;
        } else {
                mmu.pte_size = 4;       /* 4 byte PTEs */
                mmu.pte_size_shift = 2;
        }

        if (mmu.pae_hat && (x86_feature & X86_PAE) == 0)
                panic("Processor does not support PAE");

        if ((x86_feature & X86_CX8) == 0)
                panic("Processor does not support cmpxchg8b instruction");
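
        /*
         * Worked example (illustrative, not in the original source): with
         * va_bits == 48, as on typical amd64 processors, the hole
         * computation above gives
         *
         *	mmu.hole_start = 1ul << 47       = 0x0000800000000000
         *	mmu.hole_end   = -hole_start - 1 = 0xffff7fffffffffff
         *
         * i.e. exactly the non-canonical address range the hardware rejects.
         */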

        /*
         * Initialize parameters based on the 64 or 32 bit kernels and
         * for the 32 bit kernel decide if we should use PAE.
         */
        if (kbm_largepage_support)
                mmu.max_page_level = 1;
        else
                mmu.max_page_level = 0;
        mmu_page_sizes = mmu.max_page_level + 1;
        mmu_exported_page_sizes = mmu_page_sizes;

#if defined(__amd64)

        mmu.num_level = 4;
        mmu.max_level = 3;
        mmu.ptes_per_table = 512;
        mmu.top_level_count = 512;

        mmu.level_shift[0] = 12;
        mmu.level_shift[1] = 21;
        mmu.level_shift[2] = 30;
        mmu.level_shift[3] = 39;

#elif defined(__i386)

        if (mmu.pae_hat) {
                mmu.num_level = 3;
                mmu.max_level = 2;
                mmu.ptes_per_table = 512;
                mmu.top_level_count = 4;

                mmu.level_shift[0] = 12;
                mmu.level_shift[1] = 21;
                mmu.level_shift[2] = 30;

        } else {
                mmu.num_level = 2;
                mmu.max_level = 1;
                mmu.ptes_per_table = 1024;
                mmu.top_level_count = 1024;

                mmu.level_shift[0] = 12;
                mmu.level_shift[1] = 22;
        }

#endif  /* __i386 */

        for (i = 0; i < mmu.num_level; ++i) {
                mmu.level_size[i] = 1UL << mmu.level_shift[i];
                mmu.level_offset[i] = mmu.level_size[i] - 1;
                mmu.level_mask[i] = ~mmu.level_offset[i];
        }

        for (i = 0; i <= mmu.max_page_level; ++i) {
                mmu.pte_bits[i] = PT_VALID;
                if (i > 0)
                        mmu.pte_bits[i] |= PT_PAGESIZE;
        }

        /*
         * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
         */
        for (i = 1; i < mmu.num_level; ++i)
                mmu.ptp_bits[i] = PT_PTPBITS;

#if defined(__i386)
        mmu.ptp_bits[2] = PT_VALID;
#endif
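
        /*
         * Worked example (illustrative, not in the original source): on
         * amd64 the shifts above give the familiar 4-level sizes, since
         * level_size[i] = 1UL << level_shift[i]:
         *
         *	level 0: 1UL << 12 = 4K pages
         *	level 1: 1UL << 21 = 2M pages
         *	level 2: 1UL << 30 = 1G per page table entry
         *	level 3: 1UL << 39 = 512G per top level entry
         */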

        /*
         * Compute how many hash table entries to have per process for htables.
         * We start with 1 page's worth of entries.
         *
         * If physical memory is small, reduce the amount needed to cover it.
         */
        max_htables = physmax / mmu.ptes_per_table;
        mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
        while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
                mmu.hash_cnt >>= 1;
        mmu.vlp_hash_cnt = mmu.hash_cnt;

#if defined(__amd64)
        /*
         * If running in 64 bits and physical memory is large,
         * increase the size of the cache to cover all of memory for
         * a 64 bit process.
         */
#define HASH_MAX_LENGTH 4
        while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
                mmu.hash_cnt <<= 1;
#endif
}
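
/*
 * Worked example (illustrative, not in the original source): with 4K
 * pages and 8 byte pointers, mmu.hash_cnt starts at
 * MMU_PAGESIZE / sizeof (htable_t *) = 512 buckets.  A small machine
 * halves that until it just covers max_htables (never below 16); a
 * large 64 bit machine doubles it until average hash chains stay
 * within HASH_MAX_LENGTH (4) htables.
 */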

/*
 * initialize hat data structures
 */
void
hat_init()
{
#if defined(__i386)
        /*
         * _userlimit must be aligned correctly
         */
        if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
                prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
                    (void *)_userlimit, (void *)LEVEL_SIZE(1));
                halt("hat_init(): Unable to continue");
        }
#endif

        cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);

        /*
         * initialize kmem caches
         */
        htable_init();
        hment_init();

        hat_cache = kmem_cache_create("hat_t",
            sizeof (hat_t), 0, hati_constructor, NULL, NULL,
            NULL, 0, 0);

        hat_hash_cache = kmem_cache_create("HatHash",
            mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
            NULL, 0, 0);

        /*
         * VLP hats can use a smaller hash table size on large memory machines
         */
        if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
                vlp_hash_cache = hat_hash_cache;
        } else {
                vlp_hash_cache = kmem_cache_create("HatVlpHash",
                    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
                    NULL, 0, 0);
        }

        /*
         * Set up the kernel's hat
         */
        AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
        kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
        mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
        kas.a_hat->hat_as = &kas;
        kas.a_hat->hat_flags = 0;
        AS_LOCK_EXIT(&kas, &kas.a_lock);

        CPUSET_ZERO(khat_cpuset);
        CPUSET_ADD(khat_cpuset, CPU->cpu_id);

        /*
         * The kernel hat's next pointer serves as the head of the hat list.
         * The kernel hat's prev pointer tracks the last hat on the list for
         * htable_steal() to use.
         */
        kas.a_hat->hat_next = NULL;
        kas.a_hat->hat_prev = NULL;

        /*
         * Allocate an htable hash bucket for the kernel
         * XX64 - tune for 64 bit procs
         */
        kas.a_hat->hat_num_hash = mmu.hash_cnt;
        kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
        bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));

        /*
         * zero out the top level and cached htable pointers
         */
        kas.a_hat->hat_ht_cached = NULL;
        kas.a_hat->hat_htable = NULL;

        /*
         * Pre-allocate hrm_hashtab before enabling the collection of
         * refmod statistics.  Allocating on the fly would mean us
         * running the risk of suffering recursive mutex enters or
         * deadlocks.
         */
        hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
            KM_SLEEP);
}

/*
 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 *
 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 * process it runs.  They are the top level pagetable, hci_vlp_l3ptes, and
 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 */
/*ARGSUSED*/
static void
hat_vlp_setup(struct cpu *cpu)
{
#if defined(__amd64)
        struct hat_cpu_info *hci = cpu->cpu_hat_info;
        pfn_t pfn;

        /*
         * allocate the level==2 page table for the bottom most
         * 512Gig of address space (this is where 32 bit apps live)
         */
        ASSERT(hci != NULL);
        hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);

        /*
         * Allocate a top level pagetable and copy the kernel's
         * entries into it.  Then link in hci_vlp_l2ptes in the 1st entry.
         */
        hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
        hci->hci_vlp_pfn =
            hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
        ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
        bcopy(vlp_page + khat_start, hci->hci_vlp_l3ptes + khat_start,
            khat_entries * sizeof (x86pte_t));

        pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
        ASSERT(pfn != PFN_INVALID);
        hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
#endif /* __amd64 */
}

/*ARGSUSED*/
static void
hat_vlp_teardown(cpu_t *cpu)
{
#if defined(__amd64)
        struct hat_cpu_info *hci;

        if ((hci = cpu->cpu_hat_info) == NULL)
                return;
        if (hci->hci_vlp_l2ptes)
                kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
        if (hci->hci_vlp_l3ptes)
                kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
#endif  /* __amd64 */
}

/*
 * Finish filling in the kernel hat.
 * Pre-fill all top level kernel page table entries for the kernel's
 * part of the address range.  From this point on we can't use any new
 * kernel large pages if they need PTE's at max_level
 *
 * create the kmap mappings.
 */
void
hat_init_finish(void)
{
        htable_t        *top = kas.a_hat->hat_htable;
        htable_t        *ht;
        uint_t          e;
        x86pte_t        pte;
        uintptr_t       va = kernelbase;
        size_t          size;


#if defined(__i386)
        ASSERT((va & LEVEL_MASK(1)) == va);

        /*
         * Deal with kernelbase not 1Gig aligned for 32 bit PAE hats.
         */
        if (!mmu.pae_hat || (va & LEVEL_OFFSET(mmu.max_level)) == 0) {
                khat_pae32_htable = NULL;
        } else {
                ASSERT(mmu.max_level == 2);
                ASSERT((va & LEVEL_OFFSET(mmu.max_level - 1)) == 0);
                khat_pae32_htable =
                    htable_create(kas.a_hat, va, mmu.max_level - 1, NULL);
                khat_pae32_start = htable_va2entry(va, khat_pae32_htable);
                khat_pae32_entries = mmu.ptes_per_table - khat_pae32_start;
                for (e = khat_pae32_start; e < mmu.ptes_per_table;
                    ++e, va += LEVEL_SIZE(mmu.max_level - 1)) {
                        pte = x86pte_get(khat_pae32_htable, e);
                        if (PTE_ISVALID(pte))
                                continue;
                        ht = htable_create(kas.a_hat, va, mmu.max_level - 2,
                            NULL);
                        ASSERT(ht != NULL);
                }
        }
#endif

        /*
         * The kernel hat will need fixed values in the highest level
         * ptable for copying to all other hats.  This implies
         * alignment restrictions on _userlimit.
         *
         * Note we don't htable_release() these htables.  This keeps them
         * from ever being stolen or free'd.
         *
         * top_level_count is used instead of ptes_per_table, since
         * on 32-bit PAE we only have 4 usable entries at the top level ptable.
         */
        if (va == 0)
                khat_start = mmu.top_level_count;
        else
                khat_start = htable_va2entry(va, kas.a_hat->hat_htable);
        khat_entries = mmu.top_level_count - khat_start;
        for (e = khat_start; e < mmu.top_level_count;
            ++e, va += LEVEL_SIZE(mmu.max_level)) {
                if (IN_HYPERVISOR_VA(va))
                        continue;
                pte = x86pte_get(top, e);
                if (PTE_ISVALID(pte))
                        continue;
                ht = htable_create(kas.a_hat, va, mmu.max_level - 1, NULL);
                ASSERT(ht != NULL);
        }

        /*
         * We are now effectively running on the kernel hat.
         * Clearing use_boot_reserve shuts off using the pre-allocated boot
         * reserve for all HAT allocations.  From here on, the reserves are
         * only used when mapping in memory for the hat's own allocations.
         */
        use_boot_reserve = 0;
        htable_adjust_reserve();

        /*
         * 32 bit kernels use only 4 of the 512 entries in their top level
         * pagetable. We'll use the remainder for the "per CPU" page tables
         * for VLP processes.
         *
         * We also map the top level kernel pagetable into the kernel to make
         * it easy to use bcopy to initialize new address spaces.
         */
        if (mmu.pae_hat) {
                vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
                hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
                    kas.a_hat->hat_htable->ht_pfn,
                    PROT_WRITE |
                    PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
                    HAT_LOAD | HAT_LOAD_NOCONSIST);
        }
        hat_vlp_setup(CPU);

        /*
         * Create kmap (cached mappings of kernel PTEs)
         * for 32 bit we map from segmap_start .. ekernelheap
         * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
         */
#if defined(__i386)
        size = (uintptr_t)ekernelheap - segmap_start;
#elif defined(__amd64)
        size = segmapsize;
#endif
        hat_kmap_init((uintptr_t)segmap_start, size);
}

/*
 * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
 * are 32 bit, so for safety we must use cas64() to install these.
 */
#ifdef __i386
static void
reload_pae32(hat_t *hat, cpu_t *cpu)
{
        x86pte_t *src;
        x86pte_t *dest;
        x86pte_t pte;
        int i;

        /*
         * Load the 4 entries of the level 2 page table into this
         * cpu's range of the vlp_page and point cr3 at them.
         */
        ASSERT(mmu.pae_hat);
        src = hat->hat_vlp_ptes;
        dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
        for (i = 0; i < VLP_NUM_PTES; ++i) {
                for (;;) {
                        pte = dest[i];
                        if (pte == src[i])
                                break;
                        if (cas64(dest + i, pte, src[i]) != src[i])
                                break;
                }
        }
}
#endif

/*
 * Switch to a new active hat, maintaining bit masks to track active CPUs.
 */
void
hat_switch(hat_t *hat)
{
        uintptr_t       newcr3;
        cpu_t           *cpu = CPU;
        hat_t           *old = cpu->cpu_current_hat;

        /*
         * set up this information first, so we don't miss any cross calls
         */
        if (old != NULL) {
                if (old == hat)
                        return;
                if (old != kas.a_hat)
                        CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
        }

        /*
         * Add this CPU to the active set for this HAT.
         */
        if (hat != kas.a_hat) {
                CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
        }
        cpu->cpu_current_hat = hat;

        /*
         * now go ahead and load cr3
         */
        if (hat->hat_flags & HAT_VLP) {
#if defined(__amd64)
                x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;

                VLP_COPY(hat->hat_vlp_ptes, vlpptep);
                newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
#elif defined(__i386)
                reload_pae32(hat, cpu);
                newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
                    (cpu->cpu_id + 1) * VLP_SIZE;
#endif
        } else {
                newcr3 = MAKECR3(hat->hat_htable->ht_pfn);
        }
        setcr3(newcr3);
        ASSERT(cpu == CPU);
}

/*
 * Utility to return a valid x86pte_t from protections, pfn, and level number
 */
static x86pte_t
hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
{
        x86pte_t        pte;
        uint_t          cache_attr = attr & HAT_ORDER_MASK;

        pte = MAKEPTE(pfn, level);

        if (attr & PROT_WRITE)
                PTE_SET(pte, PT_WRITABLE);

        if (attr & PROT_USER)
                PTE_SET(pte, PT_USER);

        if (!(attr & PROT_EXEC))
                PTE_SET(pte, mmu.pt_nx);

        /*
         * Set the software bits used to track ref/mod sync's and hments.
         * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
         */
        if (flags & HAT_LOAD_NOCONSIST)
                PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
        else if (attr & HAT_NOSYNC)
                PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);

        /*
         * Set the caching attributes in the PTE. The combination
         * of attributes is poorly defined, so we pay attention
         * to them in the given order.
         *
         * The test for HAT_STRICTORDER is different because it's defined
         * as "0" - which was a stupid thing to do, but is too late to change!
         */
        if (cache_attr == HAT_STRICTORDER) {
                PTE_SET(pte, PT_NOCACHE);
        /*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
        } else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
                /* nothing to set */;
        } else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
                PTE_SET(pte, PT_NOCACHE);
                if (x86_feature & X86_PAT)
                        PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
                else
                        PTE_SET(pte, PT_WRITETHRU);
        } else {
                panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
        }

        return (pte);
}
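
/*
 * Example (illustrative, not in the original source): a writable,
 * non-executable, fully cacheable user mapping of pfn at level 0 would
 * be built as
 *
 *	pte = hati_mkpte(pfn, PROT_READ | PROT_WRITE | PROT_USER |
 *	    HAT_STORECACHING_OK, 0, 0);
 *
 * which sets PT_WRITABLE and PT_USER, sets NX (PROT_EXEC is absent)
 * when the processor supports it, and leaves the caching bits clear.
 */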

/*
 * Duplicate address translations of the parent to the child.
 * This function really isn't used anymore.
 */
/*ARGSUSED*/
int
hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
{
        ASSERT((uintptr_t)addr < kernelbase);
        ASSERT(new != kas.a_hat);
        ASSERT(old != kas.a_hat);
        return (0);
}

/*
 * Allocate any hat resources required for a process being swapped in.
 */
/*ARGSUSED*/
void
hat_swapin(hat_t *hat)
{
        /* do nothing - we let everything fault back in */
}

/*
 * Unload all translations associated with an address space of a process
 * that is being swapped out.
 */
void
hat_swapout(hat_t *hat)
{
        uintptr_t       vaddr = (uintptr_t)0;
        uintptr_t       eaddr = _userlimit;
        htable_t        *ht = NULL;
        level_t         l;

        /*
         * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
         * seg_spt and shared pagetables can't be swapped out.
         * Take a look at segspt_shmswapout() - it's a big no-op.
         *
         * Instead we'll walk through all the address space and unload
         * any mappings which we are sure are not shared, not locked.
         */
        ASSERT(IS_PAGEALIGNED(vaddr));
        ASSERT(IS_PAGEALIGNED(eaddr));
        ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
        if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
                eaddr = (uintptr_t)hat->hat_as->a_userlimit;

        while (vaddr < eaddr) {
                (void) htable_walk(hat, &ht, &vaddr, eaddr);
                if (ht == NULL)
                        break;

                ASSERT(!IN_VA_HOLE(vaddr));

                /*
                 * If the page table is shared skip its entire range.
                 * This code knows that only level 0 page tables are shared
                 */
                l = ht->ht_level;
                if (ht->ht_flags & HTABLE_SHARED_PFN) {
                        ASSERT(l == 0);
                        vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
                        htable_release(ht);
                        ht = NULL;
                        continue;
                }

                /*
                 * If the page table has no locked entries, unload this one.
                 */
                if (ht->ht_lock_cnt == 0)
                        hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
                            HAT_UNLOAD_UNMAP);

                /*
                 * If we have a level 0 page table with locked entries,
                 * skip the entire page table, otherwise skip just one entry.
                 */
                if (ht->ht_lock_cnt > 0 && l == 0)
                        vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
                else
                        vaddr += LEVEL_SIZE(l);
        }
        if (ht)
                htable_release(ht);

        /*
         * We're in swapout because the system is low on memory, so
         * go back and flush all the htables off the cached list.
         */
        htable_purge_hat(hat);
}

/*
 * returns number of bytes that have valid mappings in hat.
 */
size_t
hat_get_mapped_size(hat_t *hat)
{
        size_t total = 0;
        int l;

        for (l = 0; l <= mmu.max_page_level; l++)
                total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
        total += hat->hat_ism_pgcnt;

        return (total);
}

/*
 * enable/disable collection of stats for hat.
 */
int
hat_stats_enable(hat_t *hat)
{
        atomic_add_32(&hat->hat_stats, 1);
        return (1);
}

void
hat_stats_disable(hat_t *hat)
{
        atomic_add_32(&hat->hat_stats, -1);
}

/*
 * Utility to sync the ref/mod bits from a page table entry to the page_t
 * We must be holding the mapping list lock when this is called.
 */
static void
hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
{
        uint_t  rm = 0;
        pgcnt_t pgcnt;

        if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
                return;

        if (PTE_GET(pte, PT_REF))
                rm |= P_REF;

        if (PTE_GET(pte, PT_MOD))
                rm |= P_MOD;

        if (rm == 0)
                return;

        /*
         * sync to all constituent pages of a large page
         */
        ASSERT(x86_hm_held(pp));
        pgcnt = page_get_pagecnt(level);
        ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
        for (; pgcnt > 0; --pgcnt) {
                /*
                 * hat_page_demote() can't decrease
                 * pszc below this mapping size
                 * since this large mapping existed after we
                 * took mlist lock.
                 */
                ASSERT(pp->p_szc >= level);
                hat_page_setattr(pp, rm);
                ++pp;
        }
}

/*
 * This is the set of PTE bits for PFN, permissions and caching
 * that require a TLB flush (hat_tlb_inval) if changed on a HAT_LOAD_REMAP
 */
#define PT_REMAP_BITS \
        (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU | \
        PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE)

#define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX)
/*
 * Do the low-level work to get a mapping entered into a HAT's pagetables
 * and in the mapping list of the associated page_t.
 */
static int
hati_pte_map(
        htable_t        *ht,
        uint_t          entry,
        page_t          *pp,
        x86pte_t        pte,
        int             flags,
        void            *pte_ptr)
{
        hat_t           *hat = ht->ht_hat;
        x86pte_t        old_pte;
        level_t         l = ht->ht_level;
        hment_t         *hm;
        uint_t          is_consist;
        int             rv = 0;

        /*
         * Is this a consistent (i.e. needs mapping list lock) mapping?
         */
        is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);

        /*
         * Track locked mapping count in the htable.  Do this first,
         * as we track locking even if there already is a mapping present.
         */
        if ((flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat)
                HTABLE_LOCK_INC(ht);

        /*
         * Acquire the page's mapping list lock and get an hment to use.
         * Note that hment_prepare() might return NULL.
         */
        if (is_consist) {
                x86_hm_enter(pp);
                hm = hment_prepare(ht, entry, pp);
        }

        /*
         * Set the new pte, retrieving the old one at the same time.
         */
        old_pte = x86pte_set(ht, entry, pte, pte_ptr);

        /*
         * did we get a large page / page table collision?
         */
        if (old_pte == LPAGE_ERROR) {
                rv = -1;
                goto done;
        }

        /*
         * If the mapping didn't change there is nothing more to do.
         */
        if (PTE_EQUIV(pte, old_pte))
                goto done;

        /*
         * Install a new mapping in the page's mapping list
         */
        if (!PTE_ISVALID(old_pte)) {
                if (is_consist) {
                        hment_assign(ht, entry, pp, hm);
                        x86_hm_exit(pp);
                } else {
                        ASSERT(flags & HAT_LOAD_NOCONSIST);
                }
                HTABLE_INC(ht->ht_valid_cnt);
                PGCNT_INC(hat, l);
                return (rv);
        }

        /*
         * Remaps are more complicated:
         * - HAT_LOAD_REMAP must be specified if changing the pfn.
         *   We also require that NOCONSIST be specified.
         * - Otherwise only permission or caching bits may change.
         */
        if (!PTE_ISPAGE(old_pte, l))
                panic("non-null/page mapping pte=" FMT_PTE, old_pte);

        if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
                REMAPASSERT(flags & HAT_LOAD_REMAP);
                REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
                REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
                REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
                    pf_is_memory(PTE2PFN(pte, l)));
                REMAPASSERT(!is_consist);
        }

        /*
         * We only let remaps change the bits for PFNs, permissions
         * or caching type.
         */
        ASSERT(PTE_GET(old_pte, ~(PT_REMAP_BITS | PT_REF | PT_MOD)) ==
            PTE_GET(pte, ~PT_REMAP_BITS));

        /*
         * We don't create any mapping list entries on a remap, so release
         * any allocated hment after we drop the mapping list lock.
	 */
done:
	if (is_consist) {
		x86_hm_exit(pp);
		if (hm != NULL)
			hment_free(hm);
	}
	return (rv);
}
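/*
 * Usage sketch (illustrative pseudo-code, not compiled): hati_pte_map()
 * returns 0 on success and -1 when x86pte_set() reports LPAGE_ERROR,
 * i.e. the requested large page would overwrite a lower level pagetable
 * link. Callers handle it roughly as:
 *
 *	while (load at the current level fails)
 *		drop to the next smaller level / pagesize and retry;
 *
 * The real retry loops are in hat_memload_array() and hat_devload().
 */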
/*
 * Internal routine to load a single page table entry. This only fails if
 * we attempt to overwrite a page table link with a large page.
 */
static int
hati_load_common(
	hat_t		*hat,
	uintptr_t	va,
	page_t		*pp,
	uint_t		attr,
	uint_t		flags,
	level_t		level,
	pfn_t		pfn)
{
	htable_t	*ht;
	uint_t		entry;
	x86pte_t	pte;
	int		rv = 0;

	/*
	 * The number 16 is arbitrary and here to catch a recursion problem
	 * early before we blow out the kernel stack.
	 */
	++curthread->t_hatdepth;
	ASSERT(curthread->t_hatdepth < 16);

	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));

	if (flags & HAT_LOAD_SHARE)
		hat->hat_flags |= HAT_SHARED;

	/*
	 * Find the page table that maps this page if it already exists.
	 */
	ht = htable_lookup(hat, va, level);

	/*
	 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
	 */
	if (pp == NULL)
		flags |= HAT_LOAD_NOCONSIST;

	if (ht == NULL) {
		ht = htable_create(hat, va, level, NULL);
		ASSERT(ht != NULL);
	}
	entry = htable_va2entry(va, ht);

	/*
	 * a bunch of paranoid error checking
	 */
	ASSERT(ht->ht_busy > 0);
	if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
		panic("hati_load_common: bad htable %p, va %p", ht, (void *)va);
	ASSERT(ht->ht_level == level);

	/*
	 * construct the new PTE
	 */
	if (hat == kas.a_hat)
		attr &= ~PROT_USER;
	pte = hati_mkpte(pfn, attr, level, flags);
	if (hat == kas.a_hat && va >= kernelbase)
		PTE_SET(pte, mmu.pt_global);

	/*
	 * establish the mapping
	 */
	rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);

	/*
	 * release the htable and any reserves
	 */
	htable_release(ht);
	--curthread->t_hatdepth;
	return (rv);
}

/*
 * special case of hat_memload to deal with some kernel addrs for performance
 */
static void
hat_kmap_load(
	caddr_t		addr,
	page_t		*pp,
	uint_t		attr,
	uint_t		flags)
{
	uintptr_t	va = (uintptr_t)addr;
	x86pte_t	pte;
	pfn_t		pfn = page_pptonum(pp);
	pgcnt_t		pg_off = mmu_btop(va - mmu.kmap_addr);
	htable_t	*ht;
	uint_t		entry;
	void		*pte_ptr;

	/*
	 * construct the requested PTE
	 */
	attr &= ~PROT_USER;
	attr |= HAT_STORECACHING_OK;
	pte = hati_mkpte(pfn, attr, 0, flags);
	PTE_SET(pte, mmu.pt_global);

	/*
	 * Figure out the pte_ptr and htable and use common code to finish up
	 */
	if (mmu.pae_hat)
		pte_ptr = mmu.kmap_ptes + pg_off;
	else
		pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
	ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
	    LEVEL_SHIFT(1)];
	entry = htable_va2entry(va, ht);
	++curthread->t_hatdepth;
	ASSERT(curthread->t_hatdepth < 16);
	(void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
	--curthread->t_hatdepth;
}
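/*
 * Worked example (illustrative numbers): with 4K pages, a kernel va
 * three pages above mmu.kmap_addr gives
 *
 *	pg_off = mmu_btop(va - mmu.kmap_addr) = 3
 *
 * so the PTE lives at mmu.kmap_ptes + 3 (or the x86pte32_t variant for
 * non-PAE), and the owning htable is picked from kmap_htables by the
 * level 1 chunk the va falls into.
 */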
/*
 * hat_memload() - load a translation to the given page struct
 *
 * Flags for hat_memload/hat_devload/hat_*attr.
 *
 * HAT_LOAD	Default flags to load a translation to the page.
 *
 * HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
 *			and hat_devload().
 *
 * HAT_LOAD_NOCONSIST	Do not add mapping to the page_t mapping list;
 *			sets PT_NOCONSIST.
 *
 * HAT_LOAD_SHARE	A flag to hat_memload() to indicate that h/w page
 *			tables that map some user pages (not kas) are shared
 *			by more than one process (eg. ISM).
 *
 * HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
 *
 * HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
 *			point, it's setting up mapping to allocate internal
 *			hat layer data structures.  This flag forces hat layer
 *			to tap its reserves in order to prevent infinite
 *			recursion.
 *
 * The following is a protection attribute (like PROT_READ, etc.)
 *
 * HAT_NOSYNC	set PT_NOSYNC - this mapping's ref/mod bits
 *		are never cleared.
 *
 * Installing new valid PTEs and creation of the mapping list
 * entry are controlled under the same lock. It's derived from the
 * page_t being mapped.
 */
static uint_t supported_memload_flags =
	HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
	HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;

void
hat_memload(
	hat_t		*hat,
	caddr_t		addr,
	page_t		*pp,
	uint_t		attr,
	uint_t		flags)
{
	uintptr_t	va = (uintptr_t)addr;
	level_t		level = 0;
	pfn_t		pfn = page_pptonum(pp);

	ASSERT(IS_PAGEALIGNED(va));
	ASSERT(hat == kas.a_hat || va < _userlimit);
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	ASSERT((flags & supported_memload_flags) == flags);

	ASSERT(!IN_VA_HOLE(va));
	ASSERT(!PP_ISFREE(pp));

	/*
	 * kernel address special case for performance.
	 */
	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
		ASSERT(hat == kas.a_hat);
		hat_kmap_load(addr, pp, attr, flags);
		return;
	}

	/*
	 * This is used for memory with normal caching enabled, so
	 * always set HAT_STORECACHING_OK.
	 */
	attr |= HAT_STORECACHING_OK;
	if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
		panic("unexpected hati_load_common() failure");
}
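/*
 * Call sketch (hypothetical caller, illustrative only): a segment driver
 * resolving a fault on a single page might do
 *
 *	hat_memload(as->a_hat, addr, pp, PROT_READ | PROT_WRITE, HAT_LOAD);
 *
 * with the address space locked and pp not free, which lands here and
 * takes either the hat_kmap_load() fast path or hati_load_common().
 */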
/*
 * Load the given array of page structs using large pages when possible
 */
void
hat_memload_array(
	hat_t		*hat,
	caddr_t		addr,
	size_t		len,
	page_t		**pages,
	uint_t		attr,
	uint_t		flags)
{
	uintptr_t	va = (uintptr_t)addr;
	uintptr_t	eaddr = va + len;
	level_t		level;
	size_t		pgsize;
	pgcnt_t		pgindx = 0;
	pfn_t		pfn;
	pgcnt_t		i;

	ASSERT(IS_PAGEALIGNED(va));
	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	ASSERT((flags & supported_memload_flags) == flags);

	/*
	 * memload is used for memory with full caching enabled, so
	 * set HAT_STORECACHING_OK.
	 */
	attr |= HAT_STORECACHING_OK;

	/*
	 * handle all pages using largest possible pagesize
	 */
	while (va < eaddr) {
		/*
		 * decide what level mapping to use (ie. pagesize)
		 */
		pfn = page_pptonum(pages[pgindx]);
		for (level = mmu.max_page_level; ; --level) {
			pgsize = LEVEL_SIZE(level);
			if (level == 0)
				break;

			if (!IS_P2ALIGNED(va, pgsize) ||
			    (eaddr - va) < pgsize ||
			    !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
				continue;

			/*
			 * To use a large mapping of this size, all the
			 * pages we are passed must be sequential subpages
			 * of the large page.
			 * hat_page_demote() can't change p_szc because
			 * all pages are locked.
			 */
			if (pages[pgindx]->p_szc >= level) {
				for (i = 0; i < mmu_btop(pgsize); ++i) {
					if (pfn + i !=
					    page_pptonum(pages[pgindx + i]))
						break;
					ASSERT(pages[pgindx + i]->p_szc >=
					    level);
					ASSERT(pages[pgindx] + i ==
					    pages[pgindx + i]);
				}
				if (i == mmu_btop(pgsize))
					break;
			}
		}

		/*
		 * Load this page mapping. If the load fails, try a smaller
		 * pagesize.
		 */
		ASSERT(!IN_VA_HOLE(va));
		while (hati_load_common(hat, va, pages[pgindx], attr,
		    flags, level, pfn) != 0) {
			if (level == 0)
				panic("unexpected hati_load_common() failure");
			--level;
			pgsize = LEVEL_SIZE(level);
		}

		/*
		 * move to next page
		 */
		va += pgsize;
		pgindx += mmu_btop(pgsize);
	}
}
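/*
 * Worked example (illustrative): with a 2M level 1 page, a large mapping
 * is chosen only when va is 2M aligned, at least 2M of the request
 * remains, pfn_to_pa(pfn) is 2M aligned, and all 512 constituent page_t's
 * are sequential subpages of one large page; otherwise the selection loop
 * falls through to level 0 (4K).
 */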
/*
 * void hat_devload(hat, addr, len, pf, attr, flags)
 *	load/lock the given page frame number
 *
 * Advisory ordering attributes. Apply only to device mappings.
 *
 * HAT_STRICTORDER: the CPU must issue the references in order, as the
 *	programmer specified.  This is the default.
 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
 *	of reordering; store or load with store or load).
 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
 *	to consecutive locations (for example, turn two consecutive byte
 *	stores into one halfword store), and it may batch individual loads
 *	(for example, turn two consecutive byte loads into one halfword load).
 *	This also implies re-ordering.
 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
 *	until another store occurs.  The default is to fetch new data
 *	on every load.  This also implies merging.
 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
 *	the device (perhaps with other data) at a later time.  The default is
 *	to push the data right away.  This also implies load caching.
 *
 * Equivalent of hat_memload(), but can be used for device memory where
 * there are no page_t's and we support additional flags (write merging, etc).
 * Note that we can have large page mappings with this interface.
 */
int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
	HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
	HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;

void
hat_devload(
	hat_t		*hat,
	caddr_t		addr,
	size_t		len,
	pfn_t		pfn,
	uint_t		attr,
	int		flags)
{
	uintptr_t	va = ALIGN2PAGE(addr);
	uintptr_t	eva = va + len;
	level_t		level;
	size_t		pgsize;
	page_t		*pp;
	int		f;	/* per PTE copy of flags - maybe modified */
	uint_t		a;	/* per PTE copy of attr */

	ASSERT(IS_PAGEALIGNED(va));
	ASSERT(hat == kas.a_hat || eva <= _userlimit);
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	ASSERT((flags & supported_devload_flags) == flags);

	/*
	 * handle all pages
	 */
	while (va < eva) {

		/*
		 * decide what level mapping to use (ie. pagesize)
		 */
		for (level = mmu.max_page_level; ; --level) {
			pgsize = LEVEL_SIZE(level);
			if (level == 0)
				break;
			if (IS_P2ALIGNED(va, pgsize) &&
			    (eva - va) >= pgsize &&
			    IS_P2ALIGNED(pfn, mmu_btop(pgsize)))
				break;
		}

		/*
		 * If this is just memory then allow caching (this happens
		 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
		 * to override that. If we don't have a page_t then make sure
		 * NOCONSIST is set.
		 */
		a = attr;
		f = flags;
		if (pf_is_memory(pfn)) {
			if (!(a & HAT_PLAT_NOCACHE))
				a |= HAT_STORECACHING_OK;

			if (f & HAT_LOAD_NOCONSIST)
				pp = NULL;
			else
				pp = page_numtopp_nolock(pfn);
		} else {
			pp = NULL;
			f |= HAT_LOAD_NOCONSIST;
		}

		/*
		 * load this page mapping
		 */
		ASSERT(!IN_VA_HOLE(va));
		while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
			if (level == 0)
				panic("unexpected hati_load_common() failure");
			--level;
			pgsize = LEVEL_SIZE(level);
		}

		/*
		 * move to next page
		 */
		va += pgsize;
		pfn += mmu_btop(pgsize);
	}
}
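/*
 * Usage sketch (hypothetical driver, illustrative only): mapping one
 * device register page uncached and locked might look like
 *
 *	hat_devload(kas.a_hat, va, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_STRICTORDER,
 *	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
 *
 * Since pf_is_memory(pfn) is false for device pfns, the loop above forces
 * HAT_LOAD_NOCONSIST and never touches a page_t mapping list.
 */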
/*
 * void hat_unlock(hat, addr, len)
 *	unlock the mappings to a given range of addresses
 *
 * Locks are tracked by ht_lock_cnt in the htable.
 */
void
hat_unlock(hat_t *hat, caddr_t addr, size_t len)
{
	uintptr_t	vaddr = (uintptr_t)addr;
	uintptr_t	eaddr = vaddr + len;
	htable_t	*ht = NULL;

	/*
	 * kernel entries are always locked, we don't track lock counts
	 */
	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	if (hat == kas.a_hat)
		return;
	if (eaddr > _userlimit)
		panic("hat_unlock() address out of range - above _userlimit");

	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	while (vaddr < eaddr) {
		(void) htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;

		ASSERT(!IN_VA_HOLE(vaddr));

		if (ht->ht_lock_cnt < 1)
			panic("hat_unlock(): lock_cnt < 1, "
			    "htable=%p, vaddr=%p\n", ht, (caddr_t)vaddr);
		HTABLE_LOCK_DEC(ht);

		vaddr += LEVEL_SIZE(ht->ht_level);
	}
	if (ht)
		htable_release(ht);
}

/*
 * Cross call service routine to demap a virtual page on
 * the current CPU or flush all mappings in TLB.
 */
/*ARGSUSED*/
static int
hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
{
	hat_t	*hat = (hat_t *)a1;
	caddr_t	addr = (caddr_t)a2;

	/*
	 * If the target hat isn't the kernel and this CPU isn't operating
	 * in the target hat, we can ignore the cross call.
	 */
	if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
		return (0);

	/*
	 * For a normal address, we just flush one page mapping
	 */
	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
		mmu_tlbflush_entry(addr);
		return (0);
	}

	/*
	 * Otherwise we reload cr3 to effect a complete TLB flush.
	 *
	 * A reload of cr3 on a VLP process also means we must recopy
	 * the pte values from the struct hat.
	 */
	if (hat->hat_flags & HAT_VLP) {
#if defined(__amd64)
		x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;

		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
#elif defined(__i386)
		reload_pae32(hat, CPU);
#endif
	}
	reload_cr3();
	return (0);
}

/*
 * Flush all TLB entries, including global (ie. kernel) ones.
 */
static void
flush_all_tlb_entries(void)
{
	ulong_t cr4 = getcr4();

	if (cr4 & CR4_PGE) {
		setcr4(cr4 & ~(ulong_t)CR4_PGE);
		setcr4(cr4);

		/*
		 * 32 bit PAE also needs to always reload_cr3()
		 */
		if (mmu.max_level == 2)
			reload_cr3();
	} else {
		reload_cr3();
	}
}

#define	TLB_CPU_HALTED	(01ul)
#define	TLB_INVAL_ALL	(02ul)
#define	CAS_TLB_INFO(cpu, old, new)	\
	caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))

/*
 * Record that a CPU is going idle
 */
void
tlb_going_idle(void)
{
	atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
}
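/*
 * State sketch for mcpu_tlb_info (values from the defines above; the
 * summary is illustrative):
 *
 *	0				running, no flush pending
 *	TLB_CPU_HALTED			idle, nothing owed
 *	TLB_CPU_HALTED | TLB_INVAL_ALL	idle, full flush owed on wakeup
 *
 * hat_tlb_inval() CASes an idle CPU to the third state instead of cross
 * calling it; tlb_service() below clears the word and performs the owed
 * flush when the CPU comes back out of idle.
 */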
/*
 * Service a delayed TLB flush if coming out of being idle.
 */
void
tlb_service(void)
{
	ulong_t flags = getflags();
	ulong_t tlb_info;
	ulong_t found;

	/*
	 * Be sure interrupts are off while doing this so that
	 * higher level interrupts correctly wait for flushes to finish.
	 */
	if (flags & PS_IE)
		flags = intr_clear();

	/*
	 * We only have to do something if coming out of being idle.
	 */
	tlb_info = CPU->cpu_m.mcpu_tlb_info;
	if (tlb_info & TLB_CPU_HALTED) {
		ASSERT(CPU->cpu_current_hat == kas.a_hat);

		/*
		 * Atomic clear and fetch of old state.
		 */
		while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
			ASSERT(found & TLB_CPU_HALTED);
			tlb_info = found;
			SMT_PAUSE();
		}
		if (tlb_info & TLB_INVAL_ALL)
			flush_all_tlb_entries();
	}

	/*
	 * Restore interrupt enable control bit.
	 */
	if (flags & PS_IE)
		sti();
}

/*
 * Internal routine to do cross calls to invalidate a range of pages on
 * all CPUs using a given hat.
 */
void
hat_tlb_inval(hat_t *hat, uintptr_t va)
{
	extern int	flushes_require_xcalls;	/* from mp_startup.c */
	cpuset_t	justme;
	cpuset_t	check_cpus;
	cpuset_t	cpus_to_shootdown;
	cpu_t		*cpup;
	int		c;

	/*
	 * If the hat is being destroyed, there are no more users, so
	 * demap need not do anything.
	 */
	if (hat->hat_flags & HAT_FREEING)
		return;

	/*
	 * If demapping from a shared pagetable, we best demap the
	 * entire set of user TLBs, since we don't know what addresses
	 * these were shared at.
	 */
	if (hat->hat_flags & HAT_SHARED) {
		hat = kas.a_hat;
		va = DEMAP_ALL_ADDR;
	}

	/*
	 * if not running with multiple CPUs, don't use cross calls
	 */
	if (panicstr || !flushes_require_xcalls) {
		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
		return;
	}

	/*
	 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
	 * Otherwise it's just CPUs currently executing in this hat.
	 */
	kpreempt_disable();
	CPUSET_ONLY(justme, CPU->cpu_id);
	if (hat == kas.a_hat)
		cpus_to_shootdown = khat_cpuset;
	else
		cpus_to_shootdown = hat->hat_cpus;

	/*
	 * If any CPUs in the set are idle, just request a delayed flush
	 * and avoid waking them up.
	 */
	check_cpus = cpus_to_shootdown;
	for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
		ulong_t tlb_info;

		if (!CPU_IN_SET(check_cpus, c))
			continue;
		CPUSET_DEL(check_cpus, c);
		cpup = cpu[c];
		if (cpup == NULL)
			continue;

		tlb_info = cpup->cpu_m.mcpu_tlb_info;
		while (tlb_info == TLB_CPU_HALTED) {
			(void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
			    TLB_CPU_HALTED | TLB_INVAL_ALL);
			SMT_PAUSE();
			tlb_info = cpup->cpu_m.mcpu_tlb_info;
		}
		if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
			HATSTAT_INC(hs_tlb_inval_delayed);
			CPUSET_DEL(cpus_to_shootdown, c);
		}
	}

	if (CPUSET_ISNULL(cpus_to_shootdown) ||
	    CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
	} else {
		CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
		xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL, X_CALL_HIPRI,
		    cpus_to_shootdown, hati_demap_func);
	}
	kpreempt_enable();
}
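/*
 * Recap (illustrative): kernel changes shoot down khat_cpuset (all CPUs),
 * user changes only hat->hat_cpus. CPUs parked in TLB_CPU_HALTED are
 * converted to a delayed TLB_INVAL_ALL and dropped from the set, so the
 * xc_call() only interrupts CPUs actually running with possibly stale
 * translations.
 */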
/*
 * Interior routine for HAT_UNLOADs from hat_unload_callback(),
 * hat_kmap_unload() OR from hat_steal() code.  This routine doesn't
 * handle releasing of the htables.
 */
void
hat_pte_unmap(
	htable_t	*ht,
	uint_t		entry,
	uint_t		flags,
	x86pte_t	old_pte,
	void		*pte_ptr)
{
	hat_t		*hat = ht->ht_hat;
	hment_t		*hm = NULL;
	page_t		*pp = NULL;
	level_t		l = ht->ht_level;
	pfn_t		pfn;

	/*
	 * We always track the locking counts, even if nothing is unmapped
	 */
	if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
		ASSERT(ht->ht_lock_cnt > 0);
		HTABLE_LOCK_DEC(ht);
	}

	/*
	 * Figure out which page's mapping list lock to acquire using the PFN
	 * passed in "old" PTE. We then attempt to invalidate the PTE.
	 * If another thread, probably a hat_pageunload, has asynchronously
	 * unmapped/remapped this address we'll loop here.
	 */
	ASSERT(ht->ht_busy > 0);
	while (PTE_ISVALID(old_pte)) {
		pfn = PTE2PFN(old_pte, l);
		if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
			pp = NULL;
		} else {
			pp = page_numtopp_nolock(pfn);
			if (pp == NULL) {
				panic("no page_t, not NOCONSIST: old_pte="
				    FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
				    old_pte, (uintptr_t)ht, entry,
				    (uintptr_t)pte_ptr);
			}
			x86_hm_enter(pp);
		}

		/*
		 * If freeing the address space, the mappings are no longer
		 * in use by any thread, so invalidation is unnecessary;
		 * just check that the PTE hasn't changed.
		 * If not freeing, do a full invalidate.
		 */
		if (hat->hat_flags & HAT_FREEING)
			old_pte = x86pte_get(ht, entry);
		else
			old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr);

		/*
		 * If the page hadn't changed we've unmapped it and can proceed
		 */
		if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
			break;

		/*
		 * Otherwise, we'll have to retry with the current old_pte.
		 * Drop the hment lock, since the pfn may have changed.
		 */
		if (pp != NULL) {
			x86_hm_exit(pp);
			pp = NULL;
		} else {
			ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
		}
	}

	/*
	 * If the old mapping wasn't valid, there's nothing more to do
	 */
	if (!PTE_ISVALID(old_pte)) {
		if (pp != NULL)
			x86_hm_exit(pp);
		return;
	}

	/*
	 * Take care of syncing any MOD/REF bits and removing the hment.
	 */
	if (pp != NULL) {
		if (!(flags & HAT_UNLOAD_NOSYNC))
			hati_sync_pte_to_page(pp, old_pte, l);
		hm = hment_remove(pp, ht, entry);
		x86_hm_exit(pp);
		if (hm != NULL)
			hment_free(hm);
	}

	/*
	 * Handle bookkeeping in the htable and hat
	 */
	ASSERT(ht->ht_valid_cnt > 0);
	HTABLE_DEC(ht->ht_valid_cnt);
	PGCNT_DEC(hat, l);
}

/*
 * very cheap unload implementation to special case some kernel addresses
 */
static void
hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
{
	uintptr_t	va = (uintptr_t)addr;
	uintptr_t	eva = va + len;
	pgcnt_t		pg_index;
	htable_t	*ht;
	uint_t		entry;
	x86pte_t	*pte_ptr;
	x86pte_t	old_pte;

	for (; va < eva; va += MMU_PAGESIZE) {
		/*
		 * Get the PTE
		 */
		pg_index = mmu_btop(va - mmu.kmap_addr);
		pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
		old_pte = GET_PTE(pte_ptr);

		/*
		 * get the htable / entry
		 */
		ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
		    >> LEVEL_SHIFT(1)];
		entry = htable_va2entry(va, ht);

		/*
		 * use mostly common code to unmap it.
		 */
		hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr);
	}
}

/*
 * unload a range of virtual address space (no callback)
 */
void
hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
{
	uintptr_t va = (uintptr_t)addr;

	ASSERT(hat == kas.a_hat || va + len <= _userlimit);

	/*
	 * special case for performance.
	 */
	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
		ASSERT(hat == kas.a_hat);
		hat_kmap_unload(addr, len, flags);
	} else {
		hat_unload_callback(hat, addr, len, flags, NULL);
	}
}
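/*
 * Usage sketch (illustrative): unloading a single kernel page that lives
 * in the kmap window,
 *
 *	hat_unload(kas.a_hat, addr, MMU_PAGESIZE, HAT_UNLOAD);
 *
 * takes the cheap hat_kmap_unload() path; everything else funnels into
 * hat_unload_callback() with a NULL callback.
 */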
/*
 * Do the callbacks for ranges being unloaded.
 */
typedef struct range_info {
	uintptr_t	rng_va;
	ulong_t		rng_cnt;
	level_t		rng_level;
} range_info_t;

static void
handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
{
	/*
	 * do callbacks to upper level VM system
	 */
	while (cb != NULL && cnt > 0) {
		--cnt;
		cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
		cb->hcb_end_addr = cb->hcb_start_addr;
		cb->hcb_end_addr +=
		    range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
		cb->hcb_function(cb);
	}
}

/*
 * Unload a given range of addresses (has optional callback)
 *
 * Flags:
 * define	HAT_UNLOAD		0x00
 * define	HAT_UNLOAD_NOSYNC	0x02
 * define	HAT_UNLOAD_UNLOCK	0x04
 * define	HAT_UNLOAD_OTHER	0x08 - not used
 * define	HAT_UNLOAD_UNMAP	0x10 - same as HAT_UNLOAD
 */
#define	MAX_UNLOAD_CNT (8)
void
hat_unload_callback(
	hat_t		*hat,
	caddr_t		addr,
	size_t		len,
	uint_t		flags,
	hat_callback_t	*cb)
{
	uintptr_t	vaddr = (uintptr_t)addr;
	uintptr_t	eaddr = vaddr + len;
	htable_t	*ht = NULL;
	uint_t		entry;
	uintptr_t	contig_va = (uintptr_t)-1L;
	range_info_t	r[MAX_UNLOAD_CNT];
	uint_t		r_cnt = 0;
	x86pte_t	old_pte;

	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));

	/*
	 * Special case a single page being unloaded for speed. This happens
	 * quite frequently, COW faults after a fork() for example.
	 */
	if (cb == NULL && len == MMU_PAGESIZE) {
		ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
		if (ht != NULL) {
			if (PTE_ISVALID(old_pte))
				hat_pte_unmap(ht, entry, flags, old_pte, NULL);
			htable_release(ht);
		}
		return;
	}

	while (vaddr < eaddr) {
		old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;

		ASSERT(!IN_VA_HOLE(vaddr));

		if (vaddr < (uintptr_t)addr)
			panic("hat_unload_callback(): unmap inside large page");

		/*
		 * We'll do the call backs for contiguous ranges
		 */
		if (vaddr != contig_va ||
		    (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
			if (r_cnt == MAX_UNLOAD_CNT) {
				handle_ranges(cb, r_cnt, r);
				r_cnt = 0;
			}
			r[r_cnt].rng_va = vaddr;
			r[r_cnt].rng_cnt = 0;
			r[r_cnt].rng_level = ht->ht_level;
			++r_cnt;
		}

		/*
		 * Unload one mapping from the page tables.
		 */
		entry = htable_va2entry(vaddr, ht);
		hat_pte_unmap(ht, entry, flags, old_pte, NULL);
		ASSERT(ht->ht_level <= mmu.max_page_level);
		vaddr += LEVEL_SIZE(ht->ht_level);
		contig_va = vaddr;
		++r[r_cnt - 1].rng_cnt;
	}
	if (ht)
		htable_release(ht);

	/*
	 * handle last range for callbacks
	 */
	if (r_cnt > 0)
		handle_ranges(cb, r_cnt, r);
}
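/*
 * Illustration (hypothetical numbers): unloading many discontiguous 4K
 * mappings with a callback fills r[0..7], flushes those eight ranges via
 * handle_ranges(), resets r_cnt and continues; the final partial batch is
 * handed to handle_ranges() after the walk. MAX_UNLOAD_CNT just bounds
 * the on-stack array.
 */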
/*
 * synchronize mapping with software data structures
 *
 * This interface is currently only used by the working set monitor
 * driver.
 */
/*ARGSUSED*/
void
hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
{
	uintptr_t	vaddr = (uintptr_t)addr;
	uintptr_t	eaddr = vaddr + len;
	htable_t	*ht = NULL;
	uint_t		entry;
	x86pte_t	pte;
	x86pte_t	save_pte;
	x86pte_t	new;
	page_t		*pp;

	ASSERT(!IN_VA_HOLE(vaddr));
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);

	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
try_again:
		pte = htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;
		entry = htable_va2entry(vaddr, ht);

		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
		    PTE_GET(pte, PT_REF | PT_MOD) == 0)
			continue;

		/*
		 * We need to acquire the mapping list lock to protect
		 * against hat_pageunload(), hat_unload(), etc.
		 */
		pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
		if (pp == NULL)
			break;
		x86_hm_enter(pp);
		save_pte = pte;
		pte = x86pte_get(ht, entry);
		if (pte != save_pte) {
			x86_hm_exit(pp);
			goto try_again;
		}
		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
		    PTE_GET(pte, PT_REF | PT_MOD) == 0) {
			x86_hm_exit(pp);
			continue;
		}

		/*
		 * Need to clear ref or mod bits. We may compete with
		 * hardware updating the R/M bits and have to try again.
		 */
		if (flags == HAT_SYNC_ZERORM) {
			new = pte;
			PTE_CLR(new, PT_REF | PT_MOD);
			pte = hati_update_pte(ht, entry, pte, new);
			if (pte != 0) {
				x86_hm_exit(pp);
				goto try_again;
			}
		} else {
			/*
			 * sync the PTE to the page_t
			 */
			hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
		}
		x86_hm_exit(pp);
	}
	if (ht)
		htable_release(ht);
}

/*
 * void hat_map(hat, addr, len, flags)
 */
/*ARGSUSED*/
void
hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
{
	/* does nothing */
}

/*
 * uint_t hat_getattr(hat, addr, *attr)
 *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
 *	mapping and *attr is valid, nonzero if there was no mapping and
 *	*attr is not valid.
 */
uint_t
hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
{
	uintptr_t	vaddr = ALIGN2PAGE(addr);
	htable_t	*ht = NULL;
	x86pte_t	pte;

	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);

	if (IN_VA_HOLE(vaddr))
		return ((uint_t)-1);

	ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
	if (ht == NULL)
		return ((uint_t)-1);

	if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
		htable_release(ht);
		return ((uint_t)-1);
	}

	*attr = PROT_READ;
	if (PTE_GET(pte, PT_WRITABLE))
		*attr |= PROT_WRITE;
	if (PTE_GET(pte, PT_USER))
		*attr |= PROT_USER;
	if (!PTE_GET(pte, mmu.pt_nx))
		*attr |= PROT_EXEC;
	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
		*attr |= HAT_NOSYNC;
	htable_release(ht);
	return (0);
}
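/*
 * Usage sketch (hypothetical caller): testing whether a mapping is
 * currently writable,
 *
 *	uint_t attr;
 *	if (hat_getattr(hat, addr, &attr) == 0 && (attr & PROT_WRITE))
 *		... the mapping exists and permits writes ...
 *
 * A nonzero return means there is no valid page mapping and *attr is
 * undefined.
 */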
/*
 * hat_updateattr() applies the given attribute change to an existing mapping
 */
#define	HAT_LOAD_ATTR		1
#define	HAT_SET_ATTR		2
#define	HAT_CLR_ATTR		3

static void
hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
{
	uintptr_t	vaddr = (uintptr_t)addr;
	uintptr_t	eaddr = (uintptr_t)addr + len;
	htable_t	*ht = NULL;
	uint_t		entry;
	x86pte_t	oldpte, newpte;
	page_t		*pp;

	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
try_again:
		oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;
		if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
			continue;

		pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
		if (pp == NULL)
			continue;
		x86_hm_enter(pp);

		newpte = oldpte;
		/*
		 * We found a page table entry in the desired range,
		 * figure out the new attributes.
		 */
		if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
			if ((attr & PROT_WRITE) &&
			    !PTE_GET(oldpte, PT_WRITABLE))
				newpte |= PT_WRITABLE;

			if ((attr & HAT_NOSYNC) &&
			    PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
				newpte |= PT_NOSYNC;

			if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
				newpte &= ~mmu.pt_nx;
		}

		if (what == HAT_LOAD_ATTR) {
			if (!(attr & PROT_WRITE) &&
			    PTE_GET(oldpte, PT_WRITABLE))
				newpte &= ~PT_WRITABLE;

			if (!(attr & HAT_NOSYNC) &&
			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
				newpte &= ~PT_SOFTWARE;

			if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
				newpte |= mmu.pt_nx;
		}

		if (what == HAT_CLR_ATTR) {
			if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
				newpte &= ~PT_WRITABLE;

			if ((attr & HAT_NOSYNC) &&
			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
				newpte &= ~PT_SOFTWARE;

			if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
				newpte |= mmu.pt_nx;
		}

		/*
		 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
		 * x86pte_set() depends on this.
		 */
		if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
			newpte |= PT_REF | PT_MOD;

		/*
		 * what about PROT_READ or others?
		 * this code only handles: EXEC, WRITE, NOSYNC
		 */

		/*
		 * If new PTE really changed, update the table.
		 */
		if (newpte != oldpte) {
			entry = htable_va2entry(vaddr, ht);
			oldpte = hati_update_pte(ht, entry, oldpte, newpte);
			if (oldpte != 0) {
				x86_hm_exit(pp);
				goto try_again;
			}
		}
		x86_hm_exit(pp);
	}
	if (ht)
		htable_release(ht);
}

/*
 * Various wrappers for hat_updateattr()
 */
void
hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
{
	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
	hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
}

void
hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
{
	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
	hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
}

void
hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
{
	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
	hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
}

void
hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
{
	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
	hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
}
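/*
 * Summary (from the calls above): hat_setattr() ORs attributes in
 * (HAT_SET_ATTR), hat_clrattr() removes them (HAT_CLR_ATTR), and
 * hat_chgattr()/hat_chgprot() replace the whole EXEC/WRITE/NOSYNC set
 * (HAT_LOAD_ATTR). A hypothetical
 *
 *	hat_chgprot(hat, addr, len, PROT_READ);
 *
 * thus clears PT_WRITABLE and sets the NX bit where the hardware
 * supports it.
 */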
/*
 * size_t hat_getpagesize(hat, addr)
 *	returns pagesize in bytes for <hat, addr>. returns -1 if there is
 *	no mapping. This is an advisory call.
 */
ssize_t
hat_getpagesize(hat_t *hat, caddr_t addr)
{
	uintptr_t	vaddr = ALIGN2PAGE(addr);
	htable_t	*ht;
	size_t		pagesize;

	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
	if (IN_VA_HOLE(vaddr))
		return (-1);
	ht = htable_getpage(hat, vaddr, NULL);
	if (ht == NULL)
		return (-1);
	pagesize = LEVEL_SIZE(ht->ht_level);
	htable_release(ht);
	return (pagesize);
}

/*
 * pfn_t hat_getpfnum(hat, addr)
 *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
 */
pfn_t
hat_getpfnum(hat_t *hat, caddr_t addr)
{
	uintptr_t	vaddr = ALIGN2PAGE(addr);
	htable_t	*ht;
	uint_t		entry;
	pfn_t		pfn = PFN_INVALID;

	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
	if (khat_running == 0)
		return (PFN_INVALID);

	if (IN_VA_HOLE(vaddr))
		return (PFN_INVALID);

	/*
	 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
	 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
	 * this up.
	 */
	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
		x86pte_t pte;
		pgcnt_t pg_index;

		pg_index = mmu_btop(vaddr - mmu.kmap_addr);
		pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
		if (!PTE_ISVALID(pte))
			return (PFN_INVALID);
		/*LINTED [use of constant 0 causes a silly lint warning] */
		return (PTE2PFN(pte, 0));
	}

	ht = htable_getpage(hat, vaddr, &entry);
	if (ht == NULL)
		return (PFN_INVALID);
	ASSERT(vaddr >= ht->ht_vaddr);
	ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
	pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
	if (ht->ht_level > 0)
		pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
	htable_release(ht);
	return (pfn);
}
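/*
 * Worked example (illustrative): for a va mapped by a 2M (level 1) page,
 * PTE2PFN() yields the pfn of the large page's base, so the exact 4K
 * page's pfn is recovered by adding the page offset within the level:
 *
 *	pfn += mmu_btop(vaddr & LEVEL_OFFSET(1));
 *
 * which is what the code above does whenever ht_level > 0.
 */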
/*
 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
 * Use hat_getpfnum(kas.a_hat, ...) instead.
 *
 * We'd like to return PFN_INVALID if the mappings have underlying page_t's
 * but can't right now due to the fact that some software has grown to use
 * this interface incorrectly. So for now when the interface is misused,
 * return a warning to the user that in the future it won't work in the
 * way they're abusing it, and carry on.
 *
 * Note that hat_getkpfnum() is never supported on amd64.
 */
#if !defined(__amd64)
pfn_t
hat_getkpfnum(caddr_t addr)
{
	pfn_t	pfn;
	int	badcaller = 0;

	if (khat_running == 0)
		panic("hat_getkpfnum(): called too early\n");
	if ((uintptr_t)addr < kernelbase)
		return (PFN_INVALID);

	if (segkpm && IS_KPM_ADDR(addr)) {
		badcaller = 1;
		pfn = hat_kpm_va2pfn(addr);
	} else {
		pfn = hat_getpfnum(kas.a_hat, addr);
		badcaller = pf_is_memory(pfn);
	}

	if (badcaller)
		hat_getkpfnum_badcall(caller());
	return (pfn);
}
#endif /* __amd64 */
/*
 * int hat_probe(hat, addr)
 *	return 0 if no valid mapping is present. Faster version
 *	of hat_getattr in certain architectures.
 */
int
hat_probe(hat_t *hat, caddr_t addr)
{
	uintptr_t	vaddr = ALIGN2PAGE(addr);
	uint_t		entry;
	htable_t	*ht;
	pgcnt_t		pg_off;

	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	if (IN_VA_HOLE(vaddr))
		return (0);

	/*
	 * Most common use of hat_probe is from segmap. We special case it
	 * for performance.
	 */
	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
		pg_off = mmu_btop(vaddr - mmu.kmap_addr);
		if (mmu.pae_hat)
			return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
		else
			return (PTE_ISVALID(
			    ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
	}

	ht = htable_getpage(hat, vaddr, &entry);
	if (ht == NULL)
		return (0);
	htable_release(ht);
	return (1);
}

/*
 * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
 */
static int
is_it_dism(hat_t *hat, caddr_t va)
{
	struct seg *seg;
	struct shm_data *shmd;
	struct spt_data *sptd;

	seg = as_findseg(hat->hat_as, va, 0);
	ASSERT(seg != NULL);
	ASSERT(seg->s_base <= va);
	shmd = (struct shm_data *)seg->s_data;
	ASSERT(shmd != NULL);
	sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	ASSERT(sptd != NULL);
	if (sptd->spt_flags & SHM_PAGEABLE)
		return (1);
	return (0);
}
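/*
 * Illustrative sketch (not part of the original source): hat_probe() is an
 * advisory check, and the ASSERT above shows a user hat caller is expected
 * to hold the address space lock while probing. All names here are
 * hypothetical.
 */
#ifdef EXAMPLE_USAGE
static int
example_is_mapped(struct as *as, caddr_t addr)
{
	int mapped;

	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	mapped = hat_probe(as->a_hat, addr);
	AS_LOCK_EXIT(as, &as->a_lock);
	return (mapped);
}
#endif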
/*
 * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
 * except that we use the ism_hat's existing mappings to determine the pages
 * and protections to use for this hat. If we find a full properly aligned
 * and sized pagetable, we will attempt to share the pagetable itself.
 */
/*ARGSUSED*/
int
hat_share(
	hat_t		*hat,
	caddr_t		addr,
	hat_t		*ism_hat,
	caddr_t		src_addr,
	size_t		len,	/* almost useless value, see below.. */
	uint_t		ismszc)
{
	uintptr_t	vaddr_start = (uintptr_t)addr;
	uintptr_t	vaddr;
	uintptr_t	eaddr = vaddr_start + len;
	uintptr_t	ism_addr_start = (uintptr_t)src_addr;
	uintptr_t	ism_addr = ism_addr_start;
	uintptr_t	e_ism_addr = ism_addr + len;
	htable_t	*ism_ht = NULL;
	htable_t	*ht;
	x86pte_t	pte;
	page_t		*pp;
	pfn_t		pfn;
	level_t		l;
	pgcnt_t		pgcnt;
	uint_t		prot;
	uint_t		valid_cnt;
	int		is_dism;
	int		flags;

	/*
	 * We might be asked to share an empty DISM hat by as_dup()
	 */
	ASSERT(hat != kas.a_hat);
	ASSERT(eaddr <= _userlimit);
	if (!(ism_hat->hat_flags & HAT_SHARED)) {
		ASSERT(hat_get_mapped_size(ism_hat) == 0);
		return (0);
	}

	/*
	 * The SPT segment driver often passes us a size larger than there are
	 * valid mappings. That's because it rounds the segment size up to a
	 * large pagesize, even if the actual memory mapped by ism_hat is less.
	 */
	ASSERT(IS_PAGEALIGNED(vaddr_start));
	ASSERT(IS_PAGEALIGNED(ism_addr_start));
	ASSERT(ism_hat->hat_flags & HAT_SHARED);
	is_dism = is_it_dism(hat, addr);
	while (ism_addr < e_ism_addr) {
		/*
		 * use htable_walk to get the next valid ISM mapping
		 */
		pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
		if (ism_ht == NULL)
			break;

		/*
		 * First check to see if we already share the page table.
		 */
		l = ism_ht->ht_level;
		vaddr = vaddr_start + (ism_addr - ism_addr_start);
		ht = htable_lookup(hat, vaddr, l);
		if (ht != NULL) {
			if (ht->ht_flags & HTABLE_SHARED_PFN)
				goto shared;
			htable_release(ht);
			goto not_shared;
		}

		/*
		 * Can't ever share top table.
		 */
		if (l == mmu.max_level)
			goto not_shared;
		/*
		 * Avoid level mismatches later due to DISM faults.
		 */
		if (is_dism && l > 0)
			goto not_shared;

		/*
		 * addresses and lengths must align
		 * table must be fully populated
		 * no lower level page tables
		 */
		if (ism_addr != ism_ht->ht_vaddr ||
		    (vaddr & LEVEL_OFFSET(l + 1)) != 0)
			goto not_shared;

		/*
		 * The range of address space must cover a full table.
		 */
		if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
			goto not_shared;

		/*
		 * All entries in the ISM page table must be leaf PTEs.
		 */
		if (l > 0) {
			int e;

			/*
			 * We know the 0th is from htable_walk() above.
			 */
			for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
				x86pte_t pte;
				pte = x86pte_get(ism_ht, e);
				if (!PTE_ISPAGE(pte, l))
					goto not_shared;
			}
		}

		/*
		 * share the page table
		 */
		ht = htable_create(hat, vaddr, l, ism_ht);
shared:
		ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
		ASSERT(ht->ht_shares == ism_ht);
		hat->hat_ism_pgcnt +=
		    (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
		    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
		ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
		htable_release(ht);
		ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
		htable_release(ism_ht);
		ism_ht = NULL;
		continue;

not_shared:
		/*
		 * Unable to share the page table. Instead we will
		 * create new mappings from the values in the ISM mappings.
		 * Figure out what level size mappings to use.
		 */
		for (l = ism_ht->ht_level; l > 0; --l) {
			if (LEVEL_SIZE(l) <= eaddr - vaddr &&
			    (vaddr & LEVEL_OFFSET(l)) == 0)
				break;
		}

		/*
		 * The ISM mapping might be larger than the share area,
		 * be careful to truncate it if needed.
		 */
		if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
			pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
		} else {
			pgcnt = mmu_btop(eaddr - vaddr);
			l = 0;
		}

		pfn = PTE2PFN(pte, ism_ht->ht_level);
		ASSERT(pfn != PFN_INVALID);
		while (pgcnt > 0) {
			/*
			 * Make a new pte for the PFN for this level.
			 * Copy protections for the pte from the ISM pte.
			 */
			pp = page_numtopp_nolock(pfn);
			ASSERT(pp != NULL);

			prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
			if (PTE_GET(pte, PT_WRITABLE))
				prot |= PROT_WRITE;
			if (!PTE_GET(pte, PT_NX))
				prot |= PROT_EXEC;

			flags = HAT_LOAD;
			if (!is_dism)
				flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
			while (hati_load_common(hat, vaddr, pp, prot, flags,
			    l, pfn) != 0) {
				if (l == 0)
					panic("hati_load_common() failure");
				--l;
			}

			vaddr += LEVEL_SIZE(l);
			ism_addr += LEVEL_SIZE(l);
			pfn += mmu_btop(LEVEL_SIZE(l));
			pgcnt -= mmu_btop(LEVEL_SIZE(l));
		}
	}
	if (ism_ht != NULL)
		htable_release(ism_ht);
	return (0);
}
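/*
 * Illustrative sketch (not part of the original source): the pagetable
 * sharing test in hat_share() boils down to an alignment/size predicate.
 * This hypothetical helper restates those checks for a candidate level l.
 */
#ifdef EXAMPLE_USAGE
static int
example_can_share_pagetable(uintptr_t vaddr, uintptr_t ism_addr,
    uintptr_t e_ism_addr, htable_t *ism_ht, level_t l)
{
	/* the ISM mapping must start exactly at its pagetable's base */
	if (ism_addr != ism_ht->ht_vaddr)
		return (0);
	/* the target VA must be aligned to a whole level l+1 region */
	if ((vaddr & LEVEL_OFFSET(l + 1)) != 0)
		return (0);
	/* the remaining range must cover the entire pagetable */
	if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
		return (0);
	return (1);
}
#endif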
/*
 * hat_unshare() is similar to hat_unload_callback(), but
 * we have to look for empty shared pagetables. Note that
 * hat_unshare() is always invoked against an entire segment.
 */
/*ARGSUSED*/
void
hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
{
	uintptr_t	vaddr = (uintptr_t)addr;
	uintptr_t	eaddr = vaddr + len;
	htable_t	*ht = NULL;
	uint_t		need_demaps = 0;
	int		flags = HAT_UNLOAD_UNMAP;
	level_t		l;

	ASSERT(hat != kas.a_hat);
	ASSERT(eaddr <= _userlimit);
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));

	/*
	 * First go through and remove any shared pagetables.
	 *
	 * Note that it's ok to delay the TLB shootdown till the entire range is
	 * finished, because if hat_pageunload() were to unload a shared
	 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate.
	 */
	l = mmu.max_page_level;
	if (l == mmu.max_level)
		--l;
	for (; l >= 0; --l) {
		for (vaddr = (uintptr_t)addr; vaddr < eaddr;
		    vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
			ASSERT(!IN_VA_HOLE(vaddr));
			/*
			 * find a pagetable that maps the current address
			 */
			ht = htable_lookup(hat, vaddr, l);
			if (ht == NULL)
				continue;
			if (ht->ht_flags & HTABLE_SHARED_PFN) {
				/*
				 * clear page count, set valid_cnt to 0,
				 * let htable_release() finish the job
				 */
				hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
				    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
				ht->ht_valid_cnt = 0;
				need_demaps = 1;
			}
			htable_release(ht);
		}
	}

	/*
	 * flush the TLBs - since we're probably dealing with MANY mappings
	 * we do just one CR3 reload.
	 */
	if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
		hat_tlb_inval(hat, DEMAP_ALL_ADDR);

	/*
	 * Now go back and clean up any unaligned mappings that
	 * couldn't share pagetables.
	 */
	if (!is_it_dism(hat, addr))
		flags |= HAT_UNLOAD_UNLOCK;
	hat_unload(hat, addr, len, flags);
}


/*
 * hat_reserve() does nothing
 */
/*ARGSUSED*/
void
hat_reserve(struct as *as, caddr_t addr, size_t len)
{
}
/*
 * Called when all mappings to a page should have write permission removed.
 * Mostly stolen from hat_pagesync()
 */
static void
hati_page_clrwrt(struct page *pp)
{
	hment_t		*hm = NULL;
	htable_t	*ht;
	uint_t		entry;
	x86pte_t	old;
	x86pte_t	new;
	uint_t		pszc = 0;

next_size:
	/*
	 * walk thru the mapping list clearing write permission
	 */
	x86_hm_enter(pp);
	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
		if (ht->ht_level < pszc)
			continue;
		old = x86pte_get(ht, entry);

		for (;;) {
			/*
			 * Is this mapping of interest?
			 */
			if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
			    PTE_GET(old, PT_WRITABLE) == 0)
				break;

			/*
			 * Clear ref/mod writable bits. This requires cross
			 * calls to ensure any executing TLBs see cleared bits.
			 */
			new = old;
			PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
			old = hati_update_pte(ht, entry, old, new);
			if (old != 0)
				continue;

			break;
		}
	}
	x86_hm_exit(pp);
	while (pszc < pp->p_szc) {
		page_t *tpp;
		pszc++;
		tpp = PP_GROUPLEADER(pp, pszc);
		if (pp != tpp) {
			pp = tpp;
			goto next_size;
		}
	}
}

/*
 * void hat_page_setattr(pp, flag)
 * void hat_page_clrattr(pp, flag)
 *	used to set/clr ref/mod bits.
 */
void
hat_page_setattr(struct page *pp, uint_t flag)
{
	vnode_t		*vp = pp->p_vnode;
	kmutex_t	*vphm = NULL;
	page_t		**listp;
	int		noshuffle;

	noshuffle = flag & P_NSH;
	flag &= ~P_NSH;

	if (PP_GETRM(pp, flag) == flag)
		return;

	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
	    !noshuffle) {
		vphm = page_vnode_mutex(vp);
		mutex_enter(vphm);
	}

	PP_SETRM(pp, flag);

	if (vphm != NULL) {

		/*
		 * Some File Systems examine v_pages for NULL w/o
		 * grabbing the vphm mutex. Must not let it become NULL when
		 * pp is the only page on the list.
		 */
		if (pp->p_vpnext != pp) {
			page_vpsub(&vp->v_pages, pp);
			if (vp->v_pages != NULL)
				listp = &vp->v_pages->p_vpprev->p_vpnext;
			else
				listp = &vp->v_pages;
			page_vpadd(listp, pp);
		}
		mutex_exit(vphm);
	}
}

void
hat_page_clrattr(struct page *pp, uint_t flag)
{
	vnode_t		*vp = pp->p_vnode;

	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));

	/*
	 * Caller is expected to hold page's io lock for VMODSORT to work
	 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
	 * bit is cleared.
	 * We don't have assert to avoid tripping some existing third party
	 * code. The dirty page is moved back to top of the v_page list
	 * after IO is done in pvn_write_done().
	 */
	PP_CLRRM(pp, flag);

	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {

		/*
		 * VMODSORT works by removing write permissions and getting
		 * a fault when a page is made dirty. At this point
		 * we need to remove write permission from all mappings
		 * to this page.
		 */
		hati_page_clrwrt(pp);
	}
}

/*
 * If flag is specified, returns 0 if attribute is disabled
 * and non zero if enabled. If flag specifies multiple attributes
 * then returns 0 if ALL attributes are disabled. This is an advisory
 * call.
 */
uint_t
hat_page_getattr(struct page *pp, uint_t flag)
{
	return (PP_GETRM(pp, flag));
}


/*
 * common code used by hat_pageunload() and hment_steal()
 */
hment_t *
hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
{
	x86pte_t	old_pte;
	pfn_t		pfn = pp->p_pagenum;
	hment_t		*hm;

	/*
	 * We need to acquire a hold on the htable in order to
	 * do the invalidate. We know the htable must exist, since
	 * unmap's don't release the htable until after removing any
	 * hment. Having x86_hm_enter() keeps that from proceeding.
	 */
	htable_acquire(ht);

	/*
	 * Invalidate the PTE and remove the hment.
	 */
	old_pte = x86pte_inval(ht, entry, 0, NULL);
	if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
		panic("x86pte_inval() failure found PTE = " FMT_PTE
		    " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
		    old_pte, pfn, (uintptr_t)ht, entry);
	}

	/*
	 * Clean up all the htable information for this mapping
	 */
	ASSERT(ht->ht_valid_cnt > 0);
	HTABLE_DEC(ht->ht_valid_cnt);
	PGCNT_DEC(ht->ht_hat, ht->ht_level);

	/*
	 * sync ref/mod bits to the page_t
	 */
	if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
		hati_sync_pte_to_page(pp, old_pte, ht->ht_level);

	/*
	 * Remove the mapping list entry for this page.
	 */
	hm = hment_remove(pp, ht, entry);

	/*
	 * drop the mapping list lock so that we might free the
	 * hment and htable.
	 */
	x86_hm_exit(pp);
	htable_release(ht);
	return (hm);
}

extern int	vpm_enable;
/*
 * Unload all translations to a page. If the page is a subpage of a large
 * page, the large page mappings are also removed.
 *
 * The forceflags are unused.
 */

/*ARGSUSED*/
static int
hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
{
	page_t		*cur_pp = pp;
	hment_t		*hm;
	hment_t		*prev;
	htable_t	*ht;
	uint_t		entry;
	level_t		level;

#if defined(__amd64)
	/*
	 * clear the vpm ref.
	 */
	if (vpm_enable) {
		pp->p_vpmref = 0;
	}
#endif
	/*
	 * The loop with next_size handles pages with multiple pagesize mappings
	 */
next_size:
	for (;;) {

		/*
		 * Get a mapping list entry
		 */
		x86_hm_enter(cur_pp);
		for (prev = NULL; ; prev = hm) {
			hm = hment_walk(cur_pp, &ht, &entry, prev);
			if (hm == NULL) {
				x86_hm_exit(cur_pp);

				/*
				 * If not part of a larger page, we're done.
				 */
				if (cur_pp->p_szc <= pg_szcd) {
					return (0);
				}

				/*
				 * Else check the next larger page size.
				 * hat_page_demote() may decrease p_szc
				 * but that's ok, we'll just take an extra
				 * trip, discover there are no larger
				 * mappings, and return.
				 */
				++pg_szcd;
				cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
				goto next_size;
			}

			/*
			 * If this mapping size matches, remove it.
			 */
			level = ht->ht_level;
			if (level == pg_szcd)
				break;
		}

		/*
		 * Remove the mapping list entry for this page.
		 * Note this does the x86_hm_exit() for us.
		 */
		hm = hati_page_unmap(cur_pp, ht, entry);
		if (hm != NULL)
			hment_free(hm);
	}
}

int
hat_pageunload(struct page *pp, uint_t forceflag)
{
	ASSERT(PAGE_EXCL(pp));
	return (hati_pageunload(pp, 0, forceflag));
}
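/*
 * Illustrative sketch (not part of the original source): a typical caller
 * holds the page exclusively locked and then strips every translation
 * before repurposing the page. Names other than hat_pageunload() and
 * HAT_FORCE_PGUNLOAD are hypothetical.
 */
#ifdef EXAMPLE_USAGE
static void
example_strip_mappings(page_t *pp)
{
	ASSERT(PAGE_EXCL(pp));
	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
	/* pp now has no hments; hat_page_getshare(pp) would report 0 */
}
#endif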
/*
 * Unload all large mappings to pp and reduce by 1 p_szc field of every large
 * page level that included pp.
 *
 * pp must be locked EXCL. Even though no other constituent pages are locked
 * it's legal to unload large mappings to pp because all constituent pages of
 * large locked mappings have to be locked SHARED. Therefore if we have EXCL
 * lock on one of constituent pages none of the large mappings to pp are
 * locked.
 *
 * Change (always decrease) p_szc field starting from the last constituent
 * page and ending with root constituent page so that root's pszc always shows
 * the area where hat_page_demote() may be active.
 *
 * This mechanism is only used for file system pages where it's not always
 * possible to get EXCL locks on all constituent pages to demote the size code
 * (as is done for anonymous or kernel large pages).
 */
void
hat_page_demote(page_t *pp)
{
	uint_t		pszc;
	uint_t		rszc;
	uint_t		szc;
	page_t		*rootpp;
	page_t		*firstpp;
	page_t		*lastpp;
	pgcnt_t		pgcnt;

	ASSERT(PAGE_EXCL(pp));
	ASSERT(!PP_ISFREE(pp));
	ASSERT(page_szc_lock_assert(pp));

	if (pp->p_szc == 0)
		return;

	rootpp = PP_GROUPLEADER(pp, 1);
	(void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);

	/*
	 * all large mappings to pp are gone
	 * and no new ones can be set up since pp is locked exclusively.
	 *
	 * Lock the root to make sure there's only one hat_page_demote()
	 * outstanding within the area of this root's pszc.
	 *
	 * Second potential hat_page_demote() is already eliminated by upper
	 * VM layer via page_szc_lock() but we don't rely on it and use our
	 * own locking (so that upper layer locking can be changed without
	 * assumptions that hat depends on upper layer VM to prevent multiple
	 * hat_page_demote() to be issued simultaneously to the same large
	 * page).
	 */
again:
	pszc = pp->p_szc;
	if (pszc == 0)
		return;
	rootpp = PP_GROUPLEADER(pp, pszc);
	x86_hm_enter(rootpp);
	/*
	 * If root's p_szc is different from pszc we raced with another
	 * hat_page_demote(). Drop the lock and try to find the root again.
	 * If root's p_szc is greater than pszc, the previous hat_page_demote()
	 * is not done yet. Take and release the mlist lock of root's root to
	 * wait for the previous hat_page_demote() to complete.
	 */
	if ((rszc = rootpp->p_szc) != pszc) {
		x86_hm_exit(rootpp);
		if (rszc > pszc) {
			/* p_szc of a locked non free page can't increase */
			ASSERT(pp != rootpp);

			rootpp = PP_GROUPLEADER(rootpp, rszc);
			x86_hm_enter(rootpp);
			x86_hm_exit(rootpp);
		}
		goto again;
	}
	ASSERT(pp->p_szc == pszc);

	/*
	 * Decrement by 1 p_szc of every constituent page of a region that
	 * covered pp. For example if original szc is 3 it gets changed to 2
	 * everywhere except in region 2 that covered pp. Region 2 that
	 * covered pp gets demoted to 1 everywhere except in region 1 that
	 * covered pp.
	 * The region 1 that covered pp is demoted to region
	 * 0. It's done this way because from region 3 we removed level 3
	 * mappings, from region 2 that covered pp we removed level 2 mappings
	 * and from region 1 that covered pp we removed level 1 mappings. All
	 * changes are done from high pfn's to low pfn's so that roots
	 * are changed last, allowing one to know the largest region where
	 * hat_page_demote() is still active by only looking at the root page.
	 *
	 * This algorithm is implemented in 2 while loops. First loop changes
	 * p_szc of pages to the right of pp's level 1 region and second
	 * loop changes p_szc of pages of level 1 region that covers pp
	 * and all pages to the left of level 1 region that covers pp.
	 * In the first loop p_szc keeps dropping with every iteration
	 * and in the second loop it keeps increasing with every iteration.
	 *
	 * First loop description: Demote pages to the right of pp outside of
	 * level 1 region that covers pp. In every iteration of the while
	 * loop below find the last page of szc region and the first page of
	 * (szc - 1) region that is immediately to the right of (szc - 1)
	 * region that covers pp. From last such page to first such page
	 * change every page's szc to szc - 1. Decrement szc and continue
	 * looping until szc is 1. If pp belongs to the last (szc - 1) region
	 * of szc region skip to the next iteration.
	 */
	szc = pszc;
	while (szc > 1) {
		lastpp = PP_GROUPLEADER(pp, szc);
		pgcnt = page_get_pagecnt(szc);
		lastpp += pgcnt - 1;
		firstpp = PP_GROUPLEADER(pp, (szc - 1));
		pgcnt = page_get_pagecnt(szc - 1);
		if (lastpp - firstpp < pgcnt) {
			szc--;
			continue;
		}
		firstpp += pgcnt;
		while (lastpp != firstpp) {
			ASSERT(lastpp->p_szc == pszc);
			lastpp->p_szc = szc - 1;
			lastpp--;
		}
		firstpp->p_szc = szc - 1;
		szc--;
	}

	/*
	 * Second loop description:
	 * First iteration changes p_szc to 0 of every
	 * page of level 1 region that covers pp.
	 * Subsequent iterations find last page of szc region
	 * immediately to the left of szc region that covered pp
	 * and first page of (szc + 1) region that covers pp.
	 * From last to first page change p_szc of every page to szc.
	 * Increment szc and continue looping until szc is pszc.
	 * If pp belongs to the first szc region of (szc + 1) region
	 * skip to the next iteration.
	 */
	szc = 0;
	while (szc < pszc) {
		firstpp = PP_GROUPLEADER(pp, (szc + 1));
		if (szc == 0) {
			pgcnt = page_get_pagecnt(1);
			lastpp = firstpp + (pgcnt - 1);
		} else {
			lastpp = PP_GROUPLEADER(pp, szc);
			if (firstpp == lastpp) {
				szc++;
				continue;
			}
			lastpp--;
			pgcnt = page_get_pagecnt(szc);
		}
		while (lastpp != firstpp) {
			ASSERT(lastpp->p_szc == pszc);
			lastpp->p_szc = szc;
			lastpp--;
		}
		firstpp->p_szc = szc;
		if (firstpp == rootpp)
			break;
		szc++;
	}
	x86_hm_exit(rootpp);
}
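/*
 * Illustrative sketch (not part of the original source): the demotion loops
 * above locate constituent pages via PP_GROUPLEADER() and page_get_pagecnt().
 * As a hypothetical worked example, this helper returns the number of
 * constituent pages in the level 1 region covering pp - 512 on a 4K/2M
 * x86 configuration, assuming those page sizes.
 */
#ifdef EXAMPLE_USAGE
static pgcnt_t
example_level1_span(page_t *pp)
{
	page_t *first = PP_GROUPLEADER(pp, 1);	/* root of level 1 region */
	pgcnt_t pgcnt = page_get_pagecnt(1);	/* constituent base pages */

	/* pp must lie within the region rooted at first */
	ASSERT(pp - first < (spgcnt_t)pgcnt);
	return (pgcnt);
}
#endif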
/*
 * get hw stats from hardware into page struct and reset hw stats
 * returns attributes of page
 * Flags for hat_pagesync, hat_getstat, hat_sync
 *
 * define	HAT_SYNC_ZERORM		0x01
 *
 * Additional flags for hat_pagesync
 *
 * define	HAT_SYNC_STOPON_REF	0x02
 * define	HAT_SYNC_STOPON_MOD	0x04
 * define	HAT_SYNC_STOPON_RM	0x06
 * define	HAT_SYNC_STOPON_SHARED	0x08
 */
uint_t
hat_pagesync(struct page *pp, uint_t flags)
{
	hment_t		*hm = NULL;
	htable_t	*ht;
	uint_t		entry;
	x86pte_t	old, save_old;
	x86pte_t	new;
	uchar_t		nrmbits = P_REF|P_MOD|P_RO;
	extern ulong_t	po_share;
	page_t		*save_pp = pp;
	uint_t		pszc = 0;

	ASSERT(PAGE_LOCKED(pp) || panicstr);

	if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
		return (pp->p_nrm & nrmbits);

	if ((flags & HAT_SYNC_ZERORM) == 0) {

		if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
			return (pp->p_nrm & nrmbits);

		if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
			return (pp->p_nrm & nrmbits);

		if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
		    hat_page_getshare(pp) > po_share) {
			if (PP_ISRO(pp))
				PP_SETREF(pp);
			return (pp->p_nrm & nrmbits);
		}
	}

next_size:
	/*
	 * walk thru the mapping list syncing (and clearing) ref/mod bits.
	 */
	x86_hm_enter(pp);
	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
		if (ht->ht_level < pszc)
			continue;
		old = x86pte_get(ht, entry);
try_again:

		ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);

		if (PTE_GET(old, PT_REF | PT_MOD) == 0)
			continue;

		save_old = old;
		if ((flags & HAT_SYNC_ZERORM) != 0) {

			/*
			 * Need to clear ref or mod bits. Need to demap
			 * to make sure any executing TLBs see cleared bits.
			 */
			new = old;
			PTE_CLR(new, PT_REF | PT_MOD);
			old = hati_update_pte(ht, entry, old, new);
			if (old != 0)
				goto try_again;

			old = save_old;
		}

		/*
		 * Sync the PTE
		 */
		if (!(flags & HAT_SYNC_ZERORM) &&
		    PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
			hati_sync_pte_to_page(pp, old, ht->ht_level);

		/*
		 * can stop short if we found a ref'd or mod'd page
		 */
		if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
		    (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
			x86_hm_exit(pp);
			goto done;
		}
	}
	x86_hm_exit(pp);
	while (pszc < pp->p_szc) {
		page_t *tpp;
		pszc++;
		tpp = PP_GROUPLEADER(pp, pszc);
		if (pp != tpp) {
			pp = tpp;
			goto next_size;
		}
	}
done:
	return (save_pp->p_nrm & nrmbits);
}
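/*
 * Illustrative sketch (not part of the original source): a pageout-style
 * caller can harvest and clear the ref/mod bits in one hat_pagesync()
 * call. The helper name is hypothetical; the flag values are the ones
 * documented above.
 */
#ifdef EXAMPLE_USAGE
static int
example_was_page_dirty(page_t *pp)
{
	/* sync and zero ref/mod, stopping early once a mod bit is seen */
	uint_t nrm = hat_pagesync(pp, HAT_SYNC_ZERORM | HAT_SYNC_STOPON_MOD);

	return ((nrm & P_MOD) != 0);
}
#endif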
/*
 * returns approx number of mappings to this pp. A return of 0 implies
 * there are no mappings to the page.
 */
ulong_t
hat_page_getshare(page_t *pp)
{
	uint_t cnt;
	cnt = hment_mapcnt(pp);
#if defined(__amd64)
	if (vpm_enable && pp->p_vpmref) {
		cnt += 1;
	}
#endif
	return (cnt);
}

/*
 * hat_softlock isn't supported anymore
 */
/*ARGSUSED*/
faultcode_t
hat_softlock(
	hat_t		*hat,
	caddr_t		addr,
	size_t		*len,
	struct page	**page_array,
	uint_t		flags)
{
	return (FC_NOSUPPORT);
}



/*
 * Routine to expose supported HAT features to platform independent code.
 */
/*ARGSUSED*/
int
hat_supported(enum hat_features feature, void *arg)
{
	switch (feature) {

	case HAT_SHARED_PT:	/* this is really ISM */
		return (1);

	case HAT_DYNAMIC_ISM_UNMAP:
		return (0);

	case HAT_VMODSORT:
		return (1);

	default:
		panic("hat_supported() - unknown feature");
	}
	return (0);
}

/*
 * Called when a thread is exiting and has been switched to the kernel AS
 */
void
hat_thread_exit(kthread_t *thd)
{
	ASSERT(thd->t_procp->p_as == &kas);
	hat_switch(thd->t_procp->p_as->a_hat);
}

/*
 * Setup the given brand new hat structure as the new HAT on this cpu's mmu.
 */
/*ARGSUSED*/
void
hat_setup(hat_t *hat, int flags)
{
	kpreempt_disable();

	hat_switch(hat);

	kpreempt_enable();
}

/*
 * Prepare for a CPU private mapping for the given address.
 *
 * The address can only be used from a single CPU and can be remapped
 * using hat_mempte_remap(). Return the physical address of the PTE.
 *
 * We do the htable_create() if necessary and increment the valid count so
 * the htable can't disappear.
 * We also hat_devload() the page table into
 * kernel so that the PTE is quickly accessed.
 */
hat_mempte_t
hat_mempte_setup(caddr_t addr)
{
	uintptr_t	va = (uintptr_t)addr;
	htable_t	*ht;
	uint_t		entry;
	x86pte_t	oldpte;
	hat_mempte_t	p;

	ASSERT(IS_PAGEALIGNED(va));
	ASSERT(!IN_VA_HOLE(va));
	++curthread->t_hatdepth;
	ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
	if (ht == NULL) {
		ht = htable_create(kas.a_hat, va, 0, NULL);
		entry = htable_va2entry(va, ht);
		ASSERT(ht->ht_level == 0);
		oldpte = x86pte_get(ht, entry);
	}
	if (PTE_ISVALID(oldpte))
		panic("hat_mempte_setup(): address already mapped "
		    "ht=%p, entry=%d, pte=" FMT_PTE, ht, entry, oldpte);

	/*
	 * increment ht_valid_cnt so that the pagetable can't disappear
	 */
	HTABLE_INC(ht->ht_valid_cnt);

	/*
	 * return the PTE physical address to the caller.
	 */
	htable_release(ht);
	p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
	--curthread->t_hatdepth;
	return (p);
}

/*
 * Release a CPU private mapping for the given address.
 * We decrement the htable valid count so it might be destroyed.
 */
/*ARGSUSED1*/
void
hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
{
	htable_t	*ht;

	/*
	 * invalidate any left over mapping and decrement the htable valid count
	 */
	{
		x86pte_t *pteptr;

		pteptr = x86pte_mapin(mmu_btop(pte_pa),
		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
		if (mmu.pae_hat)
			*pteptr = 0;
		else
			*(x86pte32_t *)pteptr = 0;
		mmu_tlbflush_entry(addr);
		x86pte_mapout();
	}

	ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
	if (ht == NULL)
		panic("hat_mempte_release(): invalid address");
	ASSERT(ht->ht_level == 0);
	HTABLE_DEC(ht->ht_valid_cnt);
	htable_release(ht);
}
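/*
 * Illustrative sketch (not part of the original source): the intended life
 * cycle of a CPU private mapping. The helper name, the reserved VA, and
 * the attr/flag combination are hypothetical; the remap step must run with
 * preemption disabled since only the local TLB entry is flushed.
 */
#ifdef EXAMPLE_USAGE
static void
example_peek_page(caddr_t reserved_va, pfn_t pfn)
{
	hat_mempte_t pte_pa = hat_mempte_setup(reserved_va);

	kpreempt_disable();
	hat_mempte_remap(pfn, reserved_va, pte_pa,
	    PROT_READ | HAT_STORECACHING_OK, HAT_LOAD_NOCONSIST);
	/* ... access the page through reserved_va here ... */
	kpreempt_enable();

	hat_mempte_release(reserved_va, pte_pa);
}
#endif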
/*
 * Apply a temporary CPU private mapping to a page. We flush the TLB only
 * on this CPU, so this ought to have been called with preemption disabled.
 */
void
hat_mempte_remap(
	pfn_t		pfn,
	caddr_t		addr,
	hat_mempte_t	pte_pa,
	uint_t		attr,
	uint_t		flags)
{
	uintptr_t	va = (uintptr_t)addr;
	x86pte_t	pte;

	/*
	 * Remap the given PTE to the new page's PFN. Invalidate only
	 * on this CPU.
	 */
#ifdef DEBUG
	htable_t	*ht;
	uint_t		entry;

	ASSERT(IS_PAGEALIGNED(va));
	ASSERT(!IN_VA_HOLE(va));
	ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
	ASSERT(ht != NULL);
	ASSERT(ht->ht_level == 0);
	ASSERT(ht->ht_valid_cnt > 0);
	ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
	htable_release(ht);
#endif
	pte = hati_mkpte(pfn, attr, 0, flags);
	{
		x86pte_t *pteptr;

		pteptr = x86pte_mapin(mmu_btop(pte_pa),
		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
		if (mmu.pae_hat)
			*(x86pte_t *)pteptr = pte;
		else
			*(x86pte32_t *)pteptr = (x86pte32_t)pte;
		mmu_tlbflush_entry(addr);
		x86pte_mapout();
	}
}



/*
 * Hat locking functions
 * XXX - these two functions are currently being used by hatstats
 * they can be removed by using a per-as mutex for hatstats.
 */
void
hat_enter(hat_t *hat)
{
	mutex_enter(&hat->hat_mutex);
}

void
hat_exit(hat_t *hat)
{
	mutex_exit(&hat->hat_mutex);
}

/*
 * HAT part of cpu initialization.
 */
void
hat_cpu_online(struct cpu *cpup)
{
	if (cpup != CPU) {
		x86pte_cpu_init(cpup);
		hat_vlp_setup(cpup);
	}
	CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
}

/*
 * HAT part of cpu deletion.
 * (currently, we only call this after the cpu is safely passivated.)
 */
void
hat_cpu_offline(struct cpu *cpup)
{
	ASSERT(cpup != CPU);

	CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
	x86pte_cpu_fini(cpup);
	hat_vlp_teardown(cpup);
}
/*
 * Function called after all CPUs are brought online.
 * Used to remove low address boot mappings.
 */
void
clear_boot_mappings(uintptr_t low, uintptr_t high)
{
	uintptr_t	vaddr = low;
	htable_t	*ht = NULL;
	level_t		level;
	uint_t		entry;
	x86pte_t	pte;

	/*
	 * On 1st CPU we can unload the prom mappings, basically we blow away
	 * all virtual mappings under _userlimit.
	 */
	while (vaddr < high) {
		pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
		if (ht == NULL)
			break;

		level = ht->ht_level;
		entry = htable_va2entry(vaddr, ht);
		ASSERT(level <= mmu.max_page_level);
		ASSERT(PTE_ISPAGE(pte, level));

		/*
		 * Unload the mapping from the page tables.
		 */
		(void) x86pte_inval(ht, entry, 0, NULL);
		ASSERT(ht->ht_valid_cnt > 0);
		HTABLE_DEC(ht->ht_valid_cnt);
		PGCNT_DEC(ht->ht_hat, ht->ht_level);

		vaddr += LEVEL_SIZE(ht->ht_level);
	}
	if (ht)
		htable_release(ht);
}
/*
 * Atomically update a new translation for a single page. If the
 * currently installed PTE doesn't match the value we expect to find,
 * it's not updated and we return the PTE we found.
 *
 * If activating nosync or NOWRITE and the page was modified we need to sync
 * with the page_t. Also sync with page_t if clearing ref/mod bits.
 */
static x86pte_t
hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
{
	page_t		*pp;
	uint_t		rm = 0;
	x86pte_t	replaced;

	if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
	    PTE_GET(expected, PT_MOD | PT_REF) &&
	    (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
	    !PTE_GET(new, PT_MOD | PT_REF))) {

		ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
		pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
		ASSERT(pp != NULL);
		if (PTE_GET(expected, PT_MOD))
			rm |= P_MOD;
		if (PTE_GET(expected, PT_REF))
			rm |= P_REF;
		PTE_CLR(new, PT_MOD | PT_REF);
	}

	replaced = x86pte_update(ht, entry, expected, new);
	if (replaced != expected)
		return (replaced);

	if (rm) {
		/*
		 * sync to all constituent pages of a large page
		 */
		pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
		ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
		while (pgcnt-- > 0) {
			/*
			 * hat_page_demote() can't decrease
			 * pszc below this mapping size
			 * since large mapping existed after we
			 * took mlist lock.
			 */
			ASSERT(pp->p_szc >= ht->ht_level);
			hat_page_setattr(pp, rm);
			++pp;
		}
	}

	return (0);
}
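/*
 * Illustrative sketch (not part of the original source): callers such as
 * hati_page_clrwrt() above treat hati_update_pte() as a compare-and-swap
 * and retry from the freshly observed PTE on failure. This hypothetical
 * helper clears the ref bit of one entry using that pattern.
 */
#ifdef EXAMPLE_USAGE
static void
example_clear_ref(htable_t *ht, uint_t entry)
{
	x86pte_t old = x86pte_get(ht, entry);
	x86pte_t new;

	do {
		new = old;
		PTE_CLR(new, PT_REF);
		/* returns 0 on success, else the PTE now in the table */
		old = hati_update_pte(ht, entry, old, new);
	} while (old != 0);
}
#endif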
/*
 * Kernel Physical Mapping (kpm) facility
 *
 * Most of the routines needed to support segkpm are almost no-ops on the
 * x86 platform.  We map in the entire segment when it is created and leave
 * it mapped in, so there is no additional work required to set up and tear
 * down individual mappings.  All of these routines were created to support
 * SPARC platforms that have to avoid aliasing in their virtually indexed
 * caches.
 *
 * Most of the routines have sanity checks in them (e.g. verifying that the
 * passed-in page is locked).  We don't actually care about most of these
 * checks on x86, but we leave them in place to identify problems in the
 * upper levels.
 */

/*
 * Map in a locked page and return the vaddr.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_mapin(struct page *pp, struct kpme *kpme)
{
	caddr_t vaddr;

#ifdef DEBUG
	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
		return ((caddr_t)NULL);
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
		return ((caddr_t)NULL);
	}
#endif

	vaddr = hat_kpm_page2va(pp, 1);

	return (vaddr);
}

/*
 * Mapout a locked page.
 */
/*ARGSUSED*/
void
hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
{
#ifdef DEBUG
	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
		return;
	}

	if (IS_KPM_ADDR(vaddr) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
		return;
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
		return;
	}
#endif
}

/*
 * Return the kpm virtual address for a specific pfn.
 */
caddr_t
hat_kpm_pfn2va(pfn_t pfn)
{
	uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);

	return ((caddr_t)vaddr);
}

/*
 * Return the kpm virtual address for the page at pp.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_page2va(struct page *pp, int checkswap)
{
	return (hat_kpm_pfn2va(pp->p_pagenum));
}

/*
 * Return the page frame number for the kpm virtual address vaddr.
 */
pfn_t
hat_kpm_va2pfn(caddr_t vaddr)
{
	pfn_t pfn;

	ASSERT(IS_KPM_ADDR(vaddr));

	pfn = (pfn_t)btop(vaddr - kpm_vbase);

	return (pfn);
}
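/*
 * Illustrative sketch (not part of the original source, compiled out):
 * because segkpm on x86 is one linear window based at kpm_vbase, using
 * it is pure address arithmetic; map in a locked page, touch it, and
 * map it back out with no page table work.  The HAT_EXAMPLES guard and
 * the helper name are assumptions for the example only.
 */
#ifdef HAT_EXAMPLES
static uchar_t
hati_example_peek_page(page_t *pp)
{
	caddr_t vaddr;
	uchar_t first;

	vaddr = hat_kpm_mapin(pp, NULL);	/* pp must be locked */
	first = *(uchar_t *)vaddr;		/* directly addressable */
	ASSERT(hat_kpm_va2pfn(vaddr) == pp->p_pagenum);	/* round trip */
	hat_kpm_mapout(pp, NULL, vaddr);
	return (first);
}
#endif	/* HAT_EXAMPLES */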
/*
 * Return the page for the kpm virtual address vaddr.
 */
page_t *
hat_kpm_vaddr2page(caddr_t vaddr)
{
	pfn_t pfn;

	ASSERT(IS_KPM_ADDR(vaddr));

	pfn = hat_kpm_va2pfn(vaddr);

	return (page_numtopp_nolock(pfn));
}

/*
 * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
 * KPM page.  This should never happen on x86.
 */
int
hat_kpm_fault(hat_t *hat, caddr_t vaddr)
{
	panic("pagefault in seg_kpm. hat: 0x%p vaddr: 0x%p",
	    (void *)hat, (void *)vaddr);

	return (0);
}

/*ARGSUSED*/
void
hat_kpm_mseghash_clear(int nentries)
{}

/*ARGSUSED*/
void
hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
{}