/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - Hardware Address Translation management for i386 and amd64
 *
 * Implementation of the interfaces described in <common/vm/hat.h>
 *
 * Nearly all the details of how the hardware is managed should not be
 * visible outside this layer except for misc. machine specific functions
 * that work in conjunction with this code.
 *
 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
 */

#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/shm.h>
#include <sys/sysmacros.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/controlregs.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/bootinfo.h>

#include <vm/seg_kmem.h>
#include <vm/hat_i86.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/page.h>
#include <vm/seg_kp.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>
#include <vm/kboot_mmu.h>

#include <sys/cmn_err.h>

/*
 * Basic parameters for hat operation.
 */
struct hat_mmu_info mmu;

/*
 * The page that is the kernel's top level pagetable.
 *
 * For 32 bit VLP support, the kernel hat will use the 1st 4 entries
 * on this 4K page for its top level page table. The remaining groups of
 * 4 entries are used for per processor copies of user VLP pagetables for
 * running threads. See hat_switch() and reload_pae32() for details.
 *
 * vlp_page[0] - 0th level==2 PTE for kernel HAT (will be zero)
 * vlp_page[1] - 1st level==2 PTE for kernel HAT (will be zero)
 * vlp_page[2] - 2nd level==2 PTE for kernel HAT (zero for small memory)
 * vlp_page[3] - 3rd level==2 PTE for kernel
 *
 * vlp_page[4] - 0th level==2 PTE for user thread on cpu 0
 * vlp_page[5] - 1st level==2 PTE for user thread on cpu 0
 * vlp_page[6] - 2nd level==2 PTE for user thread on cpu 0
 * vlp_page[7] - probably copy of kernel PTE
 *
 * vlp_page[8]  - 0th level==2 PTE for user thread on cpu 1
 * vlp_page[9]  - 1st level==2 PTE for user thread on cpu 1
 * vlp_page[10] - 2nd level==2 PTE for user thread on cpu 1
 * vlp_page[11] - probably copy of kernel PTE
 * ...
 *
 * when / where the kernel PTE's are (entry 2 or 3 or none) depends
 * on kernelbase.
 */
static x86pte_t *vlp_page;
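
/*
 * A minimal sketch (not part of the original source) of the per-cpu slot
 * arithmetic implied by the vlp_page layout above.  The helper name and
 * the HAT_SKETCH guard are hypothetical and never defined;
 * reload_pae32() below performs the same computation inline.
 */
#ifdef HAT_SKETCH
static x86pte_t *
vlp_cpu_slot(cpu_t *cp)
{
	/* entries 0..3 belong to the kernel hat; cpu N uses slot N + 1 */
	return (vlp_page + (cp->cpu_id + 1) * VLP_NUM_PTES);
}
#endif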

/*
 * forward declaration of internal utility routines
 */
static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
	x86pte_t new);

/*
 * The kernel address space exists in all HATs. To implement this the
 * kernel reserves a fixed number of entries in every topmost level page
 * table. The values are set up in hat_init() and then copied to every hat
 * created by hat_alloc(). This means that kernelbase must be:
 *
 *	  4Meg aligned for 32 bit kernels
 *	512Gig aligned for x86_64 64 bit kernel
 *
 * The PAE 32 bit hat is handled as a special case. Otherwise requiring 1Gig
 * alignment would use too much VA for the kernel.
 */
static uint_t	khat_start;	/* index of 1st entry in kernel's top ptable */
static uint_t	khat_entries;	/* number of entries in kernel's top ptable */

#if defined(__i386)

static htable_t	*khat_pae32_htable = NULL;
static uint_t	khat_pae32_start;
static uint_t	khat_pae32_entries;

#endif

uint_t use_boot_reserve = 1;	/* cleared after early boot process */
uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */

/*
 * A cpuset for all cpus. This is used for kernel address cross calls, since
 * the kernel addresses apply to all cpus.
 */
cpuset_t khat_cpuset;

/*
 * management stuff for hat structures
 */
kmutex_t	hat_list_lock;
kcondvar_t	hat_list_cv;
kmem_cache_t	*hat_cache;
kmem_cache_t	*hat_hash_cache;
kmem_cache_t	*vlp_hash_cache;

/*
 * Simple statistics
 */
struct hatstats hatstat;

/*
 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
 */
extern void atomic_orb(uchar_t *addr, uchar_t val);
extern void atomic_andb(uchar_t *addr, uchar_t val);

#define	PP_GETRM(pp, rmmask)	(pp->p_nrm & rmmask)
#define	PP_ISMOD(pp)		PP_GETRM(pp, P_MOD)
#define	PP_ISREF(pp)		PP_GETRM(pp, P_REF)
#define	PP_ISRO(pp)		PP_GETRM(pp, P_RO)

#define	PP_SETRM(pp, rm)	atomic_orb(&(pp->p_nrm), rm)
#define	PP_SETMOD(pp)		PP_SETRM(pp, P_MOD)
#define	PP_SETREF(pp)		PP_SETRM(pp, P_REF)
#define	PP_SETRO(pp)		PP_SETRM(pp, P_RO)

#define	PP_CLRRM(pp, rm)	atomic_andb(&(pp->p_nrm), ~(rm))
#define	PP_CLRMOD(pp)		PP_CLRRM(pp, P_MOD)
#define	PP_CLRREF(pp)		PP_CLRRM(pp, P_REF)
#define	PP_CLRRO(pp)		PP_CLRRM(pp, P_RO)
#define	PP_CLRALL(pp)		PP_CLRRM(pp, P_MOD | P_REF | P_RO)
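
/*
 * A minimal usage sketch (hypothetical, not in the original source): the
 * macros above wrap byte-wide atomics so the REF/MOD/RO software bits in
 * a page_t can be updated without taking a lock, e.g.:
 */
#ifdef HAT_SKETCH
static void
pp_mark_dirty(page_t *pp)
{
	if (!PP_ISMOD(pp))
		PP_SETMOD(pp);	/* atomically ORs P_MOD into pp->p_nrm */
}
#endif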

/*
 * some useful tracing macros
 */

int hattrace = 0;
#ifdef DEBUG

#define	HATIN(r, h, a, l)	\
	if (hattrace) prom_printf("->%s hat=%p, adr=%p, len=%lx\n", #r, h, a, l)

#define	HATOUT(r, h, a)		\
	if (hattrace) prom_printf("<-%s hat=%p, adr=%p\n", #r, h, a)
#else

#define	HATIN(r, h, a, l)
#define	HATOUT(r, h, a)

#endif

/*
 * kmem cache constructor for struct hat
 */
/*ARGSUSED*/
static int
hati_constructor(void *buf, void *handle, int kmflags)
{
	hat_t	*hat = buf;

	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	bzero(hat->hat_pages_mapped,
	    sizeof (pgcnt_t) * (mmu.max_page_level + 1));
	hat->hat_stats = 0;
	hat->hat_flags = 0;
	mutex_init(&hat->hat_switch_mutex, NULL, MUTEX_DRIVER,
	    (void *)ipltospl(DISP_LEVEL));
	CPUSET_ZERO(hat->hat_cpus);
	hat->hat_htable = NULL;
	hat->hat_ht_hash = NULL;
	return (0);
}

/*
 * Allocate a hat structure for as. We also create the top level
 * htable and initialize it to contain the kernel hat entries.
 */
hat_t *
hat_alloc(struct as *as)
{
	hat_t		*hat;
	htable_t	*ht;	/* top level htable */
	uint_t		use_vlp;

	/*
	 * Once we start creating user process HATs we can enable
	 * the htable_steal() code.
	 */
	if (can_steal_post_boot == 0)
		can_steal_post_boot = 1;

	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
	hat->hat_as = as;
	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	ASSERT(hat->hat_flags == 0);

	/*
	 * a 32 bit process uses a VLP style hat when using PAE
	 */
#if defined(__amd64)
	use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
#elif defined(__i386)
	use_vlp = mmu.pae_hat;
#endif
	if (use_vlp) {
		hat->hat_flags = HAT_VLP;
		bzero(hat->hat_vlp_ptes, VLP_SIZE);
	}

	/*
	 * Allocate the htable hash
	 */
	if ((hat->hat_flags & HAT_VLP)) {
		hat->hat_num_hash = mmu.vlp_hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
	} else {
		hat->hat_num_hash = mmu.hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
	}
	bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));

	/*
	 * Initialize Kernel HAT entries at the top of the top level page
	 * table for the new hat.
	 *
	 * Note that we don't call htable_release() for the top level, that
	 * happens when the hat is destroyed in hat_free_end()
	 */
	hat->hat_htable = NULL;
	hat->hat_ht_cached = NULL;
	ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);

	if (!(hat->hat_flags & HAT_VLP))
		x86pte_copy(kas.a_hat->hat_htable, ht, khat_start,
		    khat_entries);
#if defined(__i386)
	else if (khat_entries > 0)
		bcopy(vlp_page + khat_start, hat->hat_vlp_ptes + khat_start,
		    khat_entries * sizeof (x86pte_t));
#endif
	hat->hat_htable = ht;

#if defined(__i386)
	/*
	 * PAE32 HAT alignment is less restrictive than the others to keep
	 * the kernel from using too much VA. Because of this we may need
	 * one layer further down when kernelbase isn't 1Gig aligned.
	 * See hat_free_end() for the htable_release() that goes with this
	 * htable_create()
	 */
	if (khat_pae32_htable != NULL) {
		ht = htable_create(hat, kernelbase,
		    khat_pae32_htable->ht_level, NULL);
		x86pte_copy(khat_pae32_htable, ht, khat_pae32_start,
		    khat_pae32_entries);
		ht->ht_valid_cnt = khat_pae32_entries;
	}
#endif

	/*
	 * Put it at the start of the global list of all hats (used by stealing)
	 *
	 * kas.a_hat is not in the list but is instead used to find the
	 * first and last items in the list.
	 *
	 * - kas.a_hat->hat_next points to the start of the user hats.
	 *   The list ends where hat->hat_next == NULL
	 *
	 * - kas.a_hat->hat_prev points to the last of the user hats.
	 *   The list begins where hat->hat_prev == NULL
	 */
	mutex_enter(&hat_list_lock);
	hat->hat_prev = NULL;
	hat->hat_next = kas.a_hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat;
	else
		kas.a_hat->hat_prev = hat;
	kas.a_hat->hat_next = hat;
	mutex_exit(&hat_list_lock);

	return (hat);
}
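
/*
 * A minimal sketch (hypothetical helper, not in the original source) of
 * walking the global hat list maintained above: start at
 * kas.a_hat->hat_next, follow hat_next until NULL, and hold
 * hat_list_lock for the duration.
 */
#ifdef HAT_SKETCH
static uint_t
hat_count_user_hats(void)
{
	hat_t	*hat;
	uint_t	cnt = 0;

	mutex_enter(&hat_list_lock);
	for (hat = kas.a_hat->hat_next; hat != NULL; hat = hat->hat_next)
		++cnt;
	mutex_exit(&hat_list_lock);
	return (cnt);
}
#endif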

/*
 * process has finished executing but as has not been cleaned up yet.
 */
/*ARGSUSED*/
void
hat_free_start(hat_t *hat)
{
	ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));

	/*
	 * If the hat is currently a stealing victim, wait for the stealing
	 * to finish.  Once we mark it as HAT_FREEING, htable_steal()
	 * won't look at its pagetables anymore.
	 */
	mutex_enter(&hat_list_lock);
	while (hat->hat_flags & HAT_VICTIM)
		cv_wait(&hat_list_cv, &hat_list_lock);
	hat->hat_flags |= HAT_FREEING;
	mutex_exit(&hat_list_lock);
}

/*
 * An address space is being destroyed, so we destroy the associated hat.
 */
void
hat_free_end(hat_t *hat)
{
	int i;
	kmem_cache_t *cache;

#ifdef DEBUG
	for (i = 0; i <= mmu.max_page_level; i++)
		ASSERT(hat->hat_pages_mapped[i] == 0);
#endif
	ASSERT(hat->hat_flags & HAT_FREEING);

	/*
	 * must not be running on the given hat
	 */
	ASSERT(CPU->cpu_current_hat != hat);

	/*
	 * Remove it from the list of HATs
	 */
	mutex_enter(&hat_list_lock);
	if (hat->hat_prev)
		hat->hat_prev->hat_next = hat->hat_next;
	else
		kas.a_hat->hat_next = hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat->hat_prev;
	else
		kas.a_hat->hat_prev = hat->hat_prev;
	mutex_exit(&hat_list_lock);
	hat->hat_next = hat->hat_prev = NULL;

	/*
	 * Make a pass through the htables freeing them all up.
	 */
	htable_purge_hat(hat);

	/*
	 * Decide which kmem cache the hash table came from, then free it.
	 */
	if (hat->hat_flags & HAT_VLP)
		cache = vlp_hash_cache;
	else
		cache = hat_hash_cache;
	kmem_cache_free(cache, hat->hat_ht_hash);
	hat->hat_ht_hash = NULL;

	hat->hat_flags = 0;
	kmem_cache_free(hat_cache, hat);
}

/*
 * round kernelbase down to a supported value to use for _userlimit
 *
 * userlimit must be aligned down to an entry in the top level htable.
 * The one exception is for 32 bit HAT's running PAE.
 */
uintptr_t
hat_kernelbase(uintptr_t va)
{
#if defined(__i386)
	va &= LEVEL_MASK(1);
#endif
	if (IN_VA_HOLE(va))
		panic("_userlimit %p will fall in VA hole\n", (void *)va);
	return (va);
}

/*
 * Initialize hat data structures based on processor MMU information.
 */
void
mmu_init(void)
{
	uint_t max_htables;
	uint_t pa_bits;
	uint_t va_bits;
	int i;

	/*
	 * If CPU enabled the page table global bit, use it for the kernel
	 * This is bit 7 in CR4 (PGE - Page Global Enable).
	 */
	if ((x86_feature & X86_PGE) != 0 && (getcr4() & CR4_PGE) != 0)
		mmu.pt_global = PT_GLOBAL;

	/*
	 * Detect NX and PAE usage.
	 */
	mmu.pae_hat = kbm_pae_support;
	if (kbm_nx_support)
		mmu.pt_nx = PT_NX;
	else
		mmu.pt_nx = 0;

	/*
	 * Intel CPUs allow speculative caching (in TLB-like h/w) of
	 * entries in upper page tables even though there may not be
	 * any valid entries in lower tables. This implies we have to
	 * re-INVLPG at every upper page table entry invalidation.
	 */
	if (cpuid_getvendor(CPU) == X86_VENDOR_Intel)
		mmu.inval_nonleaf = 1;
	else
		mmu.inval_nonleaf = 0;

	/*
	 * Use CPU info to set various MMU parameters
	 */
	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);

	if (va_bits < sizeof (void *) * NBBY) {
		mmu.hole_start = (1ul << (va_bits - 1));
		mmu.hole_end = 0ul - mmu.hole_start - 1;
	} else {
		mmu.hole_end = 0;
		mmu.hole_start = mmu.hole_end - 1;
	}
#if defined(OPTERON_ERRATUM_121)
	/*
	 * If erratum 121 has already been detected at this time, hole_start
	 * contains the value to be subtracted from mmu.hole_start.
	 */
	ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
	hole_start = mmu.hole_start - hole_start;
#else
	hole_start = mmu.hole_start;
#endif
	hole_end = mmu.hole_end;
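
	/*
	 * Worked example (not in the original source): with va_bits == 48,
	 * as on current amd64 hardware, the computation above yields
	 *
	 *	mmu.hole_start = 1ul << 47		= 0x0000800000000000
	 *	mmu.hole_end   = 0ul - hole_start - 1	= 0xffff7fffffffffff
	 *
	 * i.e. the non-canonical region between the bottom and top halves
	 * of the 64 bit address space.
	 */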

	mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
	if (mmu.pae_hat == 0 && pa_bits > 32)
		mmu.highest_pfn = PFN_4G - 1;

	if (mmu.pae_hat) {
		mmu.pte_size = 8;	/* 8 byte PTEs */
		mmu.pte_size_shift = 3;
	} else {
		mmu.pte_size = 4;	/* 4 byte PTEs */
		mmu.pte_size_shift = 2;
	}

	if (mmu.pae_hat && (x86_feature & X86_PAE) == 0)
		panic("Processor does not support PAE");

	if ((x86_feature & X86_CX8) == 0)
		panic("Processor does not support cmpxchg8b instruction");

	/*
	 * Initialize parameters based on the 64 or 32 bit kernels and
	 * for the 32 bit kernel decide if we should use PAE.
	 */
	if (kbm_largepage_support)
		mmu.max_page_level = 1;
	else
		mmu.max_page_level = 0;
	mmu_page_sizes = mmu.max_page_level + 1;
	mmu_exported_page_sizes = mmu_page_sizes;

#if defined(__amd64)

	mmu.num_level = 4;
	mmu.max_level = 3;
	mmu.ptes_per_table = 512;
	mmu.top_level_count = 512;

	mmu.level_shift[0] = 12;
	mmu.level_shift[1] = 21;
	mmu.level_shift[2] = 30;
	mmu.level_shift[3] = 39;

#elif defined(__i386)

	if (mmu.pae_hat) {
		mmu.num_level = 3;
		mmu.max_level = 2;
		mmu.ptes_per_table = 512;
		mmu.top_level_count = 4;

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 21;
		mmu.level_shift[2] = 30;

	} else {
		mmu.num_level = 2;
		mmu.max_level = 1;
		mmu.ptes_per_table = 1024;
		mmu.top_level_count = 1024;

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 22;
	}

#endif	/* __i386 */

	for (i = 0; i < mmu.num_level; ++i) {
		mmu.level_size[i] = 1UL << mmu.level_shift[i];
		mmu.level_offset[i] = mmu.level_size[i] - 1;
		mmu.level_mask[i] = ~mmu.level_offset[i];
	}
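
	/*
	 * Worked example (not in the original source): on amd64 the shifts
	 * above give the familiar pagetable reach at each level
	 *
	 *	level 0: 1UL << 12 = 4K		(PTE pages)
	 *	level 1: 1UL << 21 = 2M		(large pages)
	 *	level 2: 1UL << 30 = 1G
	 *	level 3: 1UL << 39 = 512G	(reach of one top level entry)
	 */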

	for (i = 0; i <= mmu.max_page_level; ++i) {
		mmu.pte_bits[i] = PT_VALID;
		if (i > 0)
			mmu.pte_bits[i] |= PT_PAGESIZE;
	}

	/*
	 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
	 */
	for (i = 1; i < mmu.num_level; ++i)
		mmu.ptp_bits[i] = PT_PTPBITS;

#if defined(__i386)
	mmu.ptp_bits[2] = PT_VALID;
#endif

	/*
	 * Compute how many hash table entries to have per process for htables.
	 * We start with 1 page's worth of entries.
	 *
	 * If physical memory is small, reduce the amount needed to cover it.
	 */
	max_htables = physmax / mmu.ptes_per_table;
	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
	while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
		mmu.hash_cnt >>= 1;
	mmu.vlp_hash_cnt = mmu.hash_cnt;
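
	/*
	 * Worked example (not in the original source): with
	 * MMU_PAGESIZE == 4096 and 8 byte pointers, the starting value is
	 * 4096 / 8 = 512 buckets; the loop above then halves that (never
	 * below 16) on machines whose physical memory needs fewer than 512
	 * htables to map.
	 */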

#if defined(__amd64)
	/*
	 * If running in 64 bits and physical memory is large,
	 * increase the size of the cache to cover all of memory for
	 * a 64 bit process.
	 */
#define	HASH_MAX_LENGTH 4
	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
		mmu.hash_cnt <<= 1;
#endif
}


/*
 * initialize hat data structures
 */
void
hat_init()
{
#if defined(__i386)
	/*
	 * _userlimit must be aligned correctly
	 */
	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
		halt("hat_init(): Unable to continue");
	}
#endif

	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * initialize kmem caches
	 */
	htable_init();
	hment_init();

	hat_cache = kmem_cache_create("hat_t",
	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
	    NULL, 0, 0);

	hat_hash_cache = kmem_cache_create("HatHash",
	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
	    NULL, 0, 0);

	/*
	 * VLP hats can use a smaller hash table size on large memory machines
	 */
	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
		vlp_hash_cache = hat_hash_cache;
	} else {
		vlp_hash_cache = kmem_cache_create("HatVlpHash",
		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL,
		    NULL, NULL, 0, 0);
	}

	/*
	 * Set up the kernel's hat
	 */
	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	kas.a_hat->hat_as = &kas;
	kas.a_hat->hat_flags = 0;
	AS_LOCK_EXIT(&kas, &kas.a_lock);

	CPUSET_ZERO(khat_cpuset);
	CPUSET_ADD(khat_cpuset, CPU->cpu_id);

	/*
	 * The kernel hat's next pointer serves as the head of the hat list.
	 * The kernel hat's prev pointer tracks the last hat on the list for
	 * htable_steal() to use.
	 */
	kas.a_hat->hat_next = NULL;
	kas.a_hat->hat_prev = NULL;

	/*
	 * Allocate an htable hash bucket for the kernel
	 * XX64 - tune for 64 bit procs
	 */
	kas.a_hat->hat_num_hash = mmu.hash_cnt;
	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));

	/*
	 * zero out the top level and cached htable pointers
	 */
	kas.a_hat->hat_ht_cached = NULL;
	kas.a_hat->hat_htable = NULL;

	/*
	 * Pre-allocate hrm_hashtab before enabling the collection of
	 * refmod statistics.  Allocating on the fly would mean us
	 * running the risk of suffering recursive mutex enters or
	 * deadlocks.
	 */
	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
	    KM_SLEEP);
}

/*
 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 *
 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 */
/*ARGSUSED*/
static void
hat_vlp_setup(struct cpu *cpu)
{
#if defined(__amd64)
	struct hat_cpu_info *hci = cpu->cpu_hat_info;
	pfn_t pfn;

	/*
	 * allocate the level==2 page table for the bottom most
	 * 512Gig of address space (this is where 32 bit apps live)
	 */
	ASSERT(hci != NULL);
	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);

	/*
	 * Allocate a top level pagetable and copy the kernel's
	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
	 */
	hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
	hci->hci_vlp_pfn =
	    hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
	ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
	bcopy(vlp_page + khat_start, hci->hci_vlp_l3ptes + khat_start,
	    khat_entries * sizeof (x86pte_t));

	pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
	ASSERT(pfn != PFN_INVALID);
	hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
#endif /* __amd64 */
}

/*ARGSUSED*/
static void
hat_vlp_teardown(cpu_t *cpu)
{
#if defined(__amd64)
	struct hat_cpu_info *hci;

	if ((hci = cpu->cpu_hat_info) == NULL)
		return;
	if (hci->hci_vlp_l2ptes)
		kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
	if (hci->hci_vlp_l3ptes)
		kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
#endif	/* __amd64 */
}

/*
 * Finish filling in the kernel hat.
 * Pre fill in all top level kernel page table entries for the kernel's
 * part of the address range. From this point on we can't use any new
 * kernel large pages if they need PTE's at max_level
 *
 * create the kmap mappings.
 */
void
hat_init_finish(void)
{
	htable_t	*top = kas.a_hat->hat_htable;
	htable_t	*ht;
	uint_t		e;
	x86pte_t	pte;
	uintptr_t	va = kernelbase;
	size_t		size;

#if defined(__i386)
	ASSERT((va & LEVEL_MASK(1)) == va);

	/*
	 * Deal with kernelbase not 1Gig aligned for 32 bit PAE hats.
	 */
	if (!mmu.pae_hat || (va & LEVEL_OFFSET(mmu.max_level)) == 0) {
		khat_pae32_htable = NULL;
	} else {
		ASSERT(mmu.max_level == 2);
		ASSERT((va & LEVEL_OFFSET(mmu.max_level - 1)) == 0);
		khat_pae32_htable =
		    htable_create(kas.a_hat, va, mmu.max_level - 1, NULL);
		khat_pae32_start = htable_va2entry(va, khat_pae32_htable);
		khat_pae32_entries = mmu.ptes_per_table - khat_pae32_start;
		for (e = khat_pae32_start; e < mmu.ptes_per_table;
		    ++e, va += LEVEL_SIZE(mmu.max_level - 1)) {
			pte = x86pte_get(khat_pae32_htable, e);
			if (PTE_ISVALID(pte))
				continue;
			ht = htable_create(kas.a_hat, va, mmu.max_level - 2,
			    NULL);
			ASSERT(ht != NULL);
		}
	}
#endif

	/*
	 * The kernel hat will need fixed values in the highest level
	 * ptable for copying to all other hat's. This implies
	 * alignment restrictions on _userlimit.
	 *
	 * Note we don't htable_release() these htables. This keeps them
	 * from ever being stolen or free'd.
	 *
	 * top_level_count is used instead of ptes_per_table, since
	 * on 32-bit PAE we only have 4 usable entries at the top level ptable.
	 */
	if (va == 0)
		khat_start = mmu.top_level_count;
	else
		khat_start = htable_va2entry(va, kas.a_hat->hat_htable);
	khat_entries = mmu.top_level_count - khat_start;
	for (e = khat_start; e < mmu.top_level_count;
	    ++e, va += LEVEL_SIZE(mmu.max_level)) {
		if (IN_HYPERVISOR_VA(va))
			continue;
		pte = x86pte_get(top, e);
		if (PTE_ISVALID(pte))
			continue;
		ht = htable_create(kas.a_hat, va, mmu.max_level - 1, NULL);
		ASSERT(ht != NULL);
	}

	/*
	 * We are now effectively running on the kernel hat.
	 * Clearing use_boot_reserve shuts off using the pre-allocated boot
	 * reserve for all HAT allocations.  From here on, the reserves are
	 * only used when mapping in memory for the hat's own allocations.
	 */
	use_boot_reserve = 0;
	htable_adjust_reserve();

	/*
	 * 32 bit kernels use only 4 of the 512 entries in their top level
	 * pagetable. We'll use the remainder for the "per CPU" page tables
	 * for VLP processes.
	 *
	 * We also map the top level kernel pagetable into the kernel to make
	 * it easy to use bcopy to initialize new address spaces.
	 */
	if (mmu.pae_hat) {
		vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
		hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
		    kas.a_hat->hat_htable->ht_pfn,
		    PROT_WRITE | PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
		    HAT_LOAD | HAT_LOAD_NOCONSIST);
	}
	hat_vlp_setup(CPU);

	/*
	 * Create kmap (cached mappings of kernel PTEs)
	 * for 32 bit we map from segmap_start .. ekernelheap
	 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
	 */
#if defined(__i386)
	size = (uintptr_t)ekernelheap - segmap_start;
#elif defined(__amd64)
	size = segmapsize;
#endif
	hat_kmap_init((uintptr_t)segmap_start, size);
}

/*
 * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
 * are 32 bit, so for safety we must use cas64() to install these.
 */
#ifdef __i386
static void
reload_pae32(hat_t *hat, cpu_t *cpu)
{
	x86pte_t *src;
	x86pte_t *dest;
	x86pte_t pte;
	int i;

	/*
	 * Load the 4 entries of the level 2 page table into this
	 * cpu's range of the vlp_page and point cr3 at them.
	 */
	ASSERT(mmu.pae_hat);
	src = hat->hat_vlp_ptes;
	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
	for (i = 0; i < VLP_NUM_PTES; ++i) {
		for (;;) {
			pte = dest[i];
			if (pte == src[i])
				break;
			if (cas64(dest + i, pte, src[i]) != src[i])
				break;
		}
	}
}
#endif

/*
 * Switch to a new active hat, maintaining bit masks to track active CPUs.
 */
void
hat_switch(hat_t *hat)
{
	uintptr_t	newcr3;
	cpu_t		*cpu = CPU;
	hat_t		*old = cpu->cpu_current_hat;

	/*
	 * set up this information first, so we don't miss any cross calls
	 */
	if (old != NULL) {
		if (old == hat)
			return;
		if (old != kas.a_hat)
			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
	}

	/*
	 * Wait for any in flight pagetable invalidates on this hat to finish.
	 * This is a spin lock at DISP_LEVEL
	 */
	if (hat != kas.a_hat) {
		mutex_enter(&hat->hat_switch_mutex);
		CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
		mutex_exit(&hat->hat_switch_mutex);
	}
	cpu->cpu_current_hat = hat;

	/*
	 * now go ahead and load cr3
	 */
	if (hat->hat_flags & HAT_VLP) {
#if defined(__amd64)
		x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;

		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
		newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
#elif defined(__i386)
		reload_pae32(hat, cpu);
		newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
		    (cpu->cpu_id + 1) * VLP_SIZE;
#endif
	} else {
		newcr3 = MAKECR3(hat->hat_htable->ht_pfn);
	}
	setcr3(newcr3);
	ASSERT(cpu == CPU);
}

/*
 * Utility to return a valid x86pte_t from protections, pfn, and level number
 */
static x86pte_t
hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
{
	x86pte_t	pte;
	uint_t		cache_attr = attr & HAT_ORDER_MASK;

	pte = MAKEPTE(pfn, level);

	if (attr & PROT_WRITE)
		PTE_SET(pte, PT_WRITABLE);

	if (attr & PROT_USER)
		PTE_SET(pte, PT_USER);

	if (!(attr & PROT_EXEC))
		PTE_SET(pte, mmu.pt_nx);

	/*
	 * Set the software bits used to track ref/mod sync's and hments.
	 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
	 */
	if (flags & HAT_LOAD_NOCONSIST)
		PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
	else if (attr & HAT_NOSYNC)
		PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);

	/*
	 * Set the caching attributes in the PTE. The combination
	 * of attributes is poorly defined, so we pay attention
	 * to them in the given order.
	 *
	 * The test for HAT_STRICTORDER is different because it's defined
	 * as "0" - which was a stupid thing to do, but is too late to change!
	 */
	if (cache_attr == HAT_STRICTORDER) {
		PTE_SET(pte, PT_NOCACHE);
	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
		/* nothing to set */;
	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
		PTE_SET(pte, PT_NOCACHE);
		if (x86_feature & X86_PAT)
			PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
		else
			PTE_SET(pte, PT_WRITETHRU);
	} else {
		panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
	}

	return (pte);
}
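
/*
 * A minimal sketch (not in the original source) of what hati_mkpte()
 * composes for a writable, non-executable, fully cacheable 4K user
 * mapping (attr = PROT_READ | PROT_WRITE | PROT_USER |
 * HAT_STORECACHING_OK, level 0):
 *
 *	pte = MAKEPTE(pfn, 0);		PT_VALID plus the pfn bits
 *	PTE_SET(pte, PT_WRITABLE);	from PROT_WRITE
 *	PTE_SET(pte, PT_USER);		from PROT_USER
 *	PTE_SET(pte, mmu.pt_nx);	no PROT_EXEC
 *
 * with no caching bits set, since HAT_STORECACHING_OK falls into the
 * empty else-if branch above.
 */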

/*
 * Duplicate address translations of the parent to the child.
 * This function really isn't used anymore.
 */
/*ARGSUSED*/
int
hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
{
	ASSERT((uintptr_t)addr < kernelbase);
	ASSERT(new != kas.a_hat);
	ASSERT(old != kas.a_hat);
	return (0);
}

/*
 * Allocate any hat resources required for a process being swapped in.
 */
/*ARGSUSED*/
void
hat_swapin(hat_t *hat)
{
	/* do nothing - we let everything fault back in */
}

/*
 * Unload all translations associated with an address space of a process
 * that is being swapped out.
 */
void
hat_swapout(hat_t *hat)
{
	uintptr_t	vaddr = (uintptr_t)0;
	uintptr_t	eaddr = _userlimit;
	htable_t	*ht = NULL;
	level_t		l;

	/*
	 * We can't just call hat_unload(hat, 0, _userlimit...) here, because
	 * seg_spt and shared pagetables can't be swapped out.
	 * Take a look at segspt_shmswapout() - it's a big no-op.
	 *
	 * Instead we'll walk through all the address space and unload
	 * any mappings which we are sure are not shared, not locked.
	 */
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
		eaddr = (uintptr_t)hat->hat_as->a_userlimit;

	while (vaddr < eaddr) {
		(void) htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;

		ASSERT(!IN_VA_HOLE(vaddr));

		/*
		 * If the page table is shared skip its entire range.
		 * This code knows that only level 0 page tables are shared
		 */
		l = ht->ht_level;
		if (ht->ht_flags & HTABLE_SHARED_PFN) {
			ASSERT(l == 0);
			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
			htable_release(ht);
			ht = NULL;
			continue;
		}

		/*
		 * If the page table has no locked entries, unload this one.
		 */
		if (ht->ht_lock_cnt == 0)
			hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
			    HAT_UNLOAD_UNMAP);

		/*
		 * If we have a level 0 page table with locked entries,
		 * skip the entire page table, otherwise skip just one entry.
		 */
		if (ht->ht_lock_cnt > 0 && l == 0)
			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
		else
			vaddr += LEVEL_SIZE(l);
	}
	if (ht)
		htable_release(ht);

	/*
	 * We're in swapout because the system is low on memory, so
	 * go back and flush all the htables off the cached list.
	 */
	htable_purge_hat(hat);
}

/*
 * returns number of bytes that have valid mappings in hat.
 */
size_t
hat_get_mapped_size(hat_t *hat)
{
	size_t total = 0;
	int l;

	for (l = 0; l <= mmu.max_page_level; l++)
		total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));

	return (total);
}

/*
 * enable/disable collection of stats for hat.
 */
int
hat_stats_enable(hat_t *hat)
{
	atomic_add_32(&hat->hat_stats, 1);
	return (1);
}

void
hat_stats_disable(hat_t *hat)
{
	atomic_add_32(&hat->hat_stats, -1);
}

/*
 * Utility to sync the ref/mod bits from a page table entry to the page_t
 * We must be holding the mapping list lock when this is called.
 */
static void
hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
{
	uint_t	rm = 0;
	pgcnt_t	pgcnt;

	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
		return;

	if (PTE_GET(pte, PT_REF))
		rm |= P_REF;

	if (PTE_GET(pte, PT_MOD))
		rm |= P_MOD;

	if (rm == 0)
		return;

	/*
	 * sync to all constituent pages of a large page
	 */
	ASSERT(x86_hm_held(pp));
	pgcnt = page_get_pagecnt(level);
	ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
	for (; pgcnt > 0; --pgcnt) {
		/*
		 * hat_page_demote() can't decrease
		 * pszc below this mapping size
		 * since this large mapping existed after we
		 * took mlist lock.
		 */
		ASSERT(pp->p_szc >= level);
		hat_page_setattr(pp, rm);
		++pp;
	}
}
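
/*
 * A minimal caller sketch (hypothetical, not in the original source):
 * per the comment above, the mapping list lock must be held, so a caller
 * tearing down a PTE would do something like
 *
 *	x86_hm_enter(pp);
 *	hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
 *	x86_hm_exit(pp);
 */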

/*
 * This is the set of PTE bits for PFN, permissions and caching
 * that require a TLB flush (hat_tlb_inval) if changed on a HAT_LOAD_REMAP
 */
#define	PT_REMAP_BITS							\
	(PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |		\
	PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE)

#define	REMAPASSERT(EX)	if (!(EX)) panic("hati_pte_map: " #EX)
/*
 * Do the low-level work to get a mapping entered into a HAT's pagetables
 * and in the mapping list of the associated page_t.
 */
static int
hati_pte_map(
	htable_t	*ht,
	uint_t		entry,
	page_t		*pp,
	x86pte_t	pte,
	int		flags,
	void		*pte_ptr)
{
	hat_t		*hat = ht->ht_hat;
	x86pte_t	old_pte;
	level_t		l = ht->ht_level;
	hment_t		*hm;
	uint_t		is_consist;
	int		rv = 0;

	/*
	 * Is this a consistent (ie. need mapping list lock) mapping?
	 */
	is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);

	/*
	 * Track locked mapping count in the htable.  Do this first,
	 * as we track locking even if there already is a mapping present.
	 */
	if ((flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat)
		HTABLE_LOCK_INC(ht);

	/*
	 * Acquire the page's mapping list lock and get an hment to use.
	 * Note that hment_prepare() might return NULL.
	 */
	if (is_consist) {
		x86_hm_enter(pp);
		hm = hment_prepare(ht, entry, pp);
	}

	/*
	 * Set the new pte, retrieving the old one at the same time.
	 */
	old_pte = x86pte_set(ht, entry, pte, pte_ptr);

	/*
	 * did we get a large page / page table collision?
	 */
	if (old_pte == LPAGE_ERROR) {
		rv = -1;
		goto done;
	}

	/*
	 * If the mapping didn't change there is nothing more to do.
	 */
	if (PTE_EQUIV(pte, old_pte))
		goto done;

	/*
	 * Install a new mapping in the page's mapping list
	 */
	if (!PTE_ISVALID(old_pte)) {
		if (is_consist) {
			hment_assign(ht, entry, pp, hm);
			x86_hm_exit(pp);
		} else {
			ASSERT(flags & HAT_LOAD_NOCONSIST);
		}
		HTABLE_INC(ht->ht_valid_cnt);
		PGCNT_INC(hat, l);
		return (rv);
	}

	/*
	 * Remap's are more complicated:
	 *	- HAT_LOAD_REMAP must be specified if changing the pfn.
	 *	  We also require that NOCONSIST be specified.
	 *	- Otherwise only permission or caching bits may change.
	 */
	if (!PTE_ISPAGE(old_pte, l))
		panic("non-null/page mapping pte=" FMT_PTE, old_pte);

	if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
		REMAPASSERT(flags & HAT_LOAD_REMAP);
		REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
		REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
		REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
		    pf_is_memory(PTE2PFN(pte, l)));
		REMAPASSERT(!is_consist);
	}

	/*
	 * We only let remaps change the bits for PFNs, permissions
	 * or caching type.
	 */
	ASSERT(PTE_GET(old_pte, ~(PT_REMAP_BITS | PT_REF | PT_MOD)) ==
	    PTE_GET(pte, ~PT_REMAP_BITS));

	/*
	 * We don't create any mapping list entries on a remap, so release
	 * any allocated hment after we drop the mapping list lock.
	 */
done:
	if (is_consist) {
		x86_hm_exit(pp);
		if (hm != NULL)
			hment_free(hm);
	}
	return (rv);
}
	 */
	if (!PTE_ISPAGE(old_pte, l))
		panic("non-null/page mapping pte=" FMT_PTE, old_pte);

	if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
		REMAPASSERT(flags & HAT_LOAD_REMAP);
		REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
		REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
		REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
		    pf_is_memory(PTE2PFN(pte, l)));
		REMAPASSERT(!is_consist);
	}

	/*
	 * We only let remaps change the bits for PFNs, permissions
	 * or caching type.
	 */
	ASSERT(PTE_GET(old_pte, ~(PT_REMAP_BITS | PT_REF | PT_MOD)) ==
	    PTE_GET(pte, ~PT_REMAP_BITS));

	/*
	 * We don't create any mapping list entries on a remap, so release
	 * any allocated hment after we drop the mapping list lock.
	 */
done:
	if (is_consist) {
		x86_hm_exit(pp);
		if (hm != NULL)
			hment_free(hm);
	}
	return (rv);
}

/*
 * Internal routine to load a single page table entry. This only fails if
 * we attempt to overwrite a page table link with a large page.
 */
static int
hati_load_common(
	hat_t		*hat,
	uintptr_t	va,
	page_t		*pp,
	uint_t		attr,
	uint_t		flags,
	level_t		level,
	pfn_t		pfn)
{
	htable_t	*ht;
	uint_t		entry;
	x86pte_t	pte;
	int		rv = 0;

	/*
	 * The number 16 is arbitrary and here to catch a recursion problem
	 * early before we blow out the kernel stack.
	 */
	++curthread->t_hatdepth;
	ASSERT(curthread->t_hatdepth < 16);

	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));

	if (flags & HAT_LOAD_SHARE)
		hat->hat_flags |= HAT_SHARED;

	/*
	 * Find the page table that maps this page if it already exists.
	 */
	ht = htable_lookup(hat, va, level);

	/*
	 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
	 */
	if (pp == NULL)
		flags |= HAT_LOAD_NOCONSIST;

	if (ht == NULL) {
		ht = htable_create(hat, va, level, NULL);
		ASSERT(ht != NULL);
	}
	entry = htable_va2entry(va, ht);

	/*
	 * a bunch of paranoid error checking
	 */
	ASSERT(ht->ht_busy > 0);
	if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
		panic("hati_load_common: bad htable %p, va %p", ht, (void *)va);
	ASSERT(ht->ht_level == level);

	/*
	 * construct the new PTE
	 */
	if (hat == kas.a_hat)
		attr &= ~PROT_USER;
	pte = hati_mkpte(pfn, attr, level, flags);
	if (hat == kas.a_hat && va >= kernelbase)
		PTE_SET(pte, mmu.pt_global);

	/*
	 * establish the mapping
	 */
	rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);

	/*
	 * release the htable and any reserves
	 */
	htable_release(ht);
	--curthread->t_hatdepth;
	return (rv);
}

/*
 * special case of hat_memload to deal with some kernel addrs for performance
 */
static void
hat_kmap_load(
	caddr_t		addr,
	page_t		*pp,
	uint_t		attr,
	uint_t		flags)
{
	uintptr_t	va = (uintptr_t)addr;
	x86pte_t	pte;
	pfn_t		pfn = page_pptonum(pp);
	pgcnt_t		pg_off = mmu_btop(va - mmu.kmap_addr);
	htable_t	*ht;
	uint_t		entry;
	void		*pte_ptr;

	/*
	 * construct the requested PTE
	 */
	attr &= ~PROT_USER;
	attr |= HAT_STORECACHING_OK;
	pte = hati_mkpte(pfn, attr, 0, flags);
	PTE_SET(pte, mmu.pt_global);

	/*
	 * Figure out the pte_ptr and htable and use common code to finish up
	 */
	if (mmu.pae_hat)
		pte_ptr = mmu.kmap_ptes + pg_off;
	else
		pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
	ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
	    LEVEL_SHIFT(1)];
	entry = htable_va2entry(va, ht);
	++curthread->t_hatdepth;
	ASSERT(curthread->t_hatdepth < 16);
	(void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
	--curthread->t_hatdepth;
}
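
/*
 * Illustrative sketch (not part of the original source): the arithmetic
 * above indexes a flat PTE array by 4K-page offset from mmu.kmap_addr, and
 * a flat htable array by level-1 (one-pagetable-sized) offset. This
 * hypothetical helper just restates that arithmetic for a given kernel va.
 */
#if 0
static void
example_kmap_indexes(uintptr_t va, pgcnt_t *pte_index, uintptr_t *ht_index)
{
	ASSERT(mmu.kmap_addr <= va && va < mmu.kmap_eaddr);

	/* one PTE slot per 4K page in the kmap range */
	*pte_index = mmu_btop(va - mmu.kmap_addr);

	/* one htable per level-1 region (eg. 2M or 4M of va) */
	*ht_index = (va - mmu.kmap_htables[0]->ht_vaddr) >> LEVEL_SHIFT(1);
}
#endif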

/*
 * hat_memload() - load a translation to the given page struct
 *
 * Flags for hat_memload/hat_devload/hat_*attr.
 *
 * HAT_LOAD	Default flags to load a translation to the page.
 *
 * HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
 *			and hat_devload().
 *
 * HAT_LOAD_NOCONSIST	Do not add mapping to page_t mapping list.
 *			Sets PT_NOCONSIST.
 *
 * HAT_LOAD_SHARE	A flag to hat_memload() to indicate h/w page tables
 *			that map some user pages (not kas) are shared by more
 *			than one process (eg. ISM).
 *
 * HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
 *
 * HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
 *			point, it's setting up mapping to allocate internal
 *			hat layer data structures.  This flag forces hat layer
 *			to tap its reserves in order to prevent infinite
 *			recursion.
 *
 * The following is a protection attribute (like PROT_READ, etc.)
 *
 * HAT_NOSYNC	set PT_NOSYNC - this mapping's ref/mod bits
 *		are never cleared.
 *
 * Installing new valid PTE's and creation of the mapping list
 * entry are controlled under the same lock. It's derived from the
 * page_t being mapped.
 */
static uint_t supported_memload_flags =
	HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
	HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
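
/*
 * Illustrative sketch (not part of the original source): a typical caller
 * holds the address space lock and passes one of the flag combinations
 * documented above. The function name and its va/pp arguments are made up
 * for the example.
 */
#if 0
static void
example_memload_locked(struct as *as, caddr_t va, page_t *pp)
{
	ASSERT(AS_LOCK_HELD(as, &as->a_lock));

	/*
	 * Load a read/write translation and lock it down so it can't be
	 * stolen; pair with a later hat_unlock() on the same range.
	 */
	hat_memload(as->a_hat, va, pp, PROT_READ | PROT_WRITE,
	    HAT_LOAD | HAT_LOAD_LOCK);
}
#endif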

void
hat_memload(
	hat_t		*hat,
	caddr_t		addr,
	page_t		*pp,
	uint_t		attr,
	uint_t		flags)
{
	uintptr_t	va = (uintptr_t)addr;
	level_t		level = 0;
	pfn_t		pfn = page_pptonum(pp);

	HATIN(hat_memload, hat, addr, (size_t)MMU_PAGESIZE);
	ASSERT(IS_PAGEALIGNED(va));
	ASSERT(hat == kas.a_hat || va < _userlimit);
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	ASSERT((flags & supported_memload_flags) == flags);

	ASSERT(!IN_VA_HOLE(va));
	ASSERT(!PP_ISFREE(pp));

	/*
	 * kernel address special case for performance.
	 */
	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
		ASSERT(hat == kas.a_hat);
		hat_kmap_load(addr, pp, attr, flags);
		return;
	}

	/*
	 * This is used for memory with normal caching enabled, so
	 * always set HAT_STORECACHING_OK.
	 */
	attr |= HAT_STORECACHING_OK;
	if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
		panic("unexpected hati_load_common() failure");
	HATOUT(hat_memload, hat, addr);
}

/*
 * Load the given array of page structs using large pages when possible
 */
void
hat_memload_array(
	hat_t		*hat,
	caddr_t		addr,
	size_t		len,
	page_t		**pages,
	uint_t		attr,
	uint_t		flags)
{
	uintptr_t	va = (uintptr_t)addr;
	uintptr_t	eaddr = va + len;
	level_t		level;
	size_t		pgsize;
	pgcnt_t		pgindx = 0;
	pfn_t		pfn;
	pgcnt_t		i;

	HATIN(hat_memload_array, hat, addr, len);
	ASSERT(IS_PAGEALIGNED(va));
	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	ASSERT((flags & supported_memload_flags) == flags);

	/*
	 * memload is used for memory with full caching enabled, so
	 * set HAT_STORECACHING_OK.
	 */
	attr |= HAT_STORECACHING_OK;

	/*
	 * handle all pages using largest possible pagesize
	 */
	while (va < eaddr) {
		/*
		 * decide what level mapping to use (ie. pagesize)
		 */
		pfn = page_pptonum(pages[pgindx]);
		for (level = mmu.max_page_level; ; --level) {
			pgsize = LEVEL_SIZE(level);
			if (level == 0)
				break;

			if (!IS_P2ALIGNED(va, pgsize) ||
			    (eaddr - va) < pgsize ||
			    !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
				continue;

			/*
			 * To use a large mapping of this size, all the
			 * pages we are passed must be sequential subpages
			 * of the large page.
			 * hat_page_demote() can't change p_szc because
			 * all pages are locked.
			 */
			if (pages[pgindx]->p_szc >= level) {
				for (i = 0; i < mmu_btop(pgsize); ++i) {
					if (pfn + i !=
					    page_pptonum(pages[pgindx + i]))
						break;
					ASSERT(pages[pgindx + i]->p_szc >=
					    level);
					ASSERT(pages[pgindx] + i ==
					    pages[pgindx + i]);
				}
				if (i == mmu_btop(pgsize))
					break;
			}
		}

		/*
		 * Load this page mapping. If the load fails, try a smaller
		 * pagesize.
		 */
		ASSERT(!IN_VA_HOLE(va));
		while (hati_load_common(hat, va, pages[pgindx], attr,
		    flags, level, pfn) != 0) {
			if (level == 0)
				panic("unexpected hati_load_common() failure");
			--level;
			pgsize = LEVEL_SIZE(level);
		}

		/*
		 * move to next page
		 */
		va += pgsize;
		pgindx += mmu_btop(pgsize);
	}
	HATOUT(hat_memload_array, hat, addr);
}
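
/*
 * Illustrative sketch (not part of the original source): the level
 * selection loop above reduces to "pick the largest level whose pagesize
 * divides the va and the pa and fits in the remaining length". A
 * hypothetical standalone version, ignoring the p_szc/sequential-subpage
 * checks:
 */
#if 0
static level_t
example_pick_level(uintptr_t va, pfn_t pfn, size_t remaining)
{
	level_t	l;

	for (l = mmu.max_page_level; l > 0; --l) {
		size_t pgsize = LEVEL_SIZE(l);

		if (IS_P2ALIGNED(va, pgsize) &&
		    IS_P2ALIGNED(pfn_to_pa(pfn), pgsize) &&
		    remaining >= pgsize)
			return (l);
	}
	return (0);
}
#endif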

/*
 * void hat_devload(hat, addr, len, pf, attr, flags)
 *	load/lock the given page frame number
 *
 * Advisory ordering attributes. Apply only to device mappings.
 *
 * HAT_STRICTORDER: the CPU must issue the references in order, as the
 *	programmer specified.  This is the default.
 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
 *	of reordering; store or load with store or load).
 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
 *	to consecutive locations (for example, turn two consecutive byte
 *	stores into one halfword store), and it may batch individual loads
 *	(for example, turn two consecutive byte loads into one halfword load).
 *	This also implies re-ordering.
 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
 *	until another store occurs.  The default is to fetch new data
 *	on every load.  This also implies merging.
 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
 *	the device (perhaps with other data) at a later time.  The default is
 *	to push the data right away.  This also implies load caching.
 *
 * Equivalent of hat_memload(), but can be used for device memory where
 * there are no page_t's and we support additional flags (write merging, etc).
 * Note that we can have large page mappings with this interface.
 */
int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
	HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
	HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;

void
hat_devload(
	hat_t		*hat,
	caddr_t		addr,
	size_t		len,
	pfn_t		pfn,
	uint_t		attr,
	int		flags)
{
	uintptr_t	va = ALIGN2PAGE(addr);
	uintptr_t	eva = va + len;
	level_t		level;
	size_t		pgsize;
	page_t		*pp;
	int		f;	/* per PTE copy of flags - maybe modified */
	uint_t		a;	/* per PTE copy of attr */

	HATIN(hat_devload, hat, addr, len);
	ASSERT(IS_PAGEALIGNED(va));
	ASSERT(hat == kas.a_hat || eva <= _userlimit);
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	ASSERT((flags & supported_devload_flags) == flags);

	/*
	 * handle all pages
	 */
	while (va < eva) {

		/*
		 * decide what level mapping to use (ie. pagesize)
		 */
		for (level = mmu.max_page_level; ; --level) {
			pgsize = LEVEL_SIZE(level);
			if (level == 0)
				break;
			if (IS_P2ALIGNED(va, pgsize) &&
			    (eva - va) >= pgsize &&
			    IS_P2ALIGNED(pfn, mmu_btop(pgsize)))
				break;
		}

		/*
		 * If this is just memory then allow caching (this happens
		 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
		 * to override that. If we don't have a page_t then make sure
		 * NOCONSIST is set.
		 */
		a = attr;
		f = flags;
		if (pf_is_memory(pfn)) {
			if (!(a & HAT_PLAT_NOCACHE))
				a |= HAT_STORECACHING_OK;

			if (f & HAT_LOAD_NOCONSIST)
				pp = NULL;
			else
				pp = page_numtopp_nolock(pfn);
		} else {
			pp = NULL;
			f |= HAT_LOAD_NOCONSIST;
		}

		/*
		 * load this page mapping
		 */
		ASSERT(!IN_VA_HOLE(va));
		while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
			if (level == 0)
				panic("unexpected hati_load_common() failure");
			--level;
			pgsize = LEVEL_SIZE(level);
		}

		/*
		 * move to next page
		 */
		va += pgsize;
		pfn += mmu_btop(pgsize);
	}
	HATOUT(hat_devload, hat, addr);
}
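
/*
 * Illustrative sketch (not part of the original source): a driver-style
 * caller mapping device registers would typically ask for strict ordering
 * and no mapping-list entry, since there is no page_t behind MMIO space.
 * The function name and its va/pfn arguments are hypothetical.
 */
#if 0
static void
example_map_device_regs(caddr_t va, pfn_t mmio_pfn, size_t len)
{
	hat_devload(kas.a_hat, va, len, mmio_pfn,
	    PROT_READ | PROT_WRITE,
	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST | HAT_STRICTORDER);
}
#endif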

/*
 * void hat_unlock(hat, addr, len)
 *	unlock the mappings to a given range of addresses
 *
 * Locks are tracked by ht_lock_cnt in the htable.
 */
void
hat_unlock(hat_t *hat, caddr_t addr, size_t len)
{
	uintptr_t	vaddr = (uintptr_t)addr;
	uintptr_t	eaddr = vaddr + len;
	htable_t	*ht = NULL;

	/*
	 * kernel entries are always locked, we don't track lock counts
	 */
	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	if (hat == kas.a_hat)
		return;
	if (eaddr > _userlimit)
		panic("hat_unlock() address out of range - above _userlimit");

	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	while (vaddr < eaddr) {
		(void) htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;

		ASSERT(!IN_VA_HOLE(vaddr));

		if (ht->ht_lock_cnt < 1)
			panic("hat_unlock(): lock_cnt < 1, "
			    "htable=%p, vaddr=%p\n", ht, (caddr_t)vaddr);
		HTABLE_LOCK_DEC(ht);

		vaddr += LEVEL_SIZE(ht->ht_level);
	}
	if (ht)
		htable_release(ht);
}

/*
 * Cross call service routine to demap a virtual page on
 * the current CPU or flush all mappings in TLB.
 */
/*ARGSUSED*/
static int
hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
{
	hat_t	*hat = (hat_t *)a1;
	caddr_t	addr = (caddr_t)a2;

	/*
	 * If the target hat isn't the kernel and this CPU isn't operating
	 * in the target hat, we can ignore the cross call.
	 */
	if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
		return (0);

	/*
	 * For a normal address, we just flush one page mapping
	 */
	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
		mmu_tlbflush_entry(addr);
		return (0);
	}

	/*
	 * Otherwise we reload cr3 to effect a complete TLB flush.
	 *
	 * A reload of cr3 on a VLP process also means we must recopy in
	 * the pte values from the struct hat
	 */
	if (hat->hat_flags & HAT_VLP) {
#if defined(__amd64)
		x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;

		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
#elif defined(__i386)
		reload_pae32(hat, CPU);
#endif
	}
	reload_cr3();
	return (0);
}
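
/*
 * Illustrative sketch (not part of the original source): the a2 argument
 * doubles as either a page-aligned va or the DEMAP_ALL_ADDR sentinel, so a
 * caller chooses between a single-entry flush and a full cr3 reload like
 * this. hat_tlb_inval() below wraps exactly this choice; the helper here
 * is hypothetical.
 */
#if 0
static void
example_flush_one_or_all(hat_t *hat, uintptr_t va, int flush_all)
{
	(void) hati_demap_func((xc_arg_t)hat,
	    (xc_arg_t)(flush_all ? DEMAP_ALL_ADDR : va), NULL);
}
#endif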

/*
 * Internal routine to do cross calls to invalidate a range of pages on
 * all CPUs using a given hat.
 */
void
hat_tlb_inval(hat_t *hat, uintptr_t va)
{
	extern int	flushes_require_xcalls;	/* from mp_startup.c */
	cpuset_t	justme;
	cpuset_t	cpus_to_shootdown;

	/*
	 * If the hat is being destroyed, there are no more users, so
	 * demap need not do anything.
	 */
	if (hat->hat_flags & HAT_FREEING)
		return;

	/*
	 * If demapping from a shared pagetable, we best demap the
	 * entire set of user TLBs, since we don't know what addresses
	 * these were shared at.
	 */
	if (hat->hat_flags & HAT_SHARED) {
		hat = kas.a_hat;
		va = DEMAP_ALL_ADDR;
	}

	/*
	 * if not running with multiple CPUs, don't use cross calls
	 */
	if (panicstr || !flushes_require_xcalls) {
		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
		return;
	}

	/*
	 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
	 * Otherwise it's just CPUs currently executing in this hat.
	 */
	kpreempt_disable();
	CPUSET_ONLY(justme, CPU->cpu_id);
	if (hat == kas.a_hat)
		cpus_to_shootdown = khat_cpuset;
	else
		cpus_to_shootdown = hat->hat_cpus;

	if (CPUSET_ISNULL(cpus_to_shootdown) ||
	    CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {

		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);

	} else {

		CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
		xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL, X_CALL_HIPRI,
		    cpus_to_shootdown, hati_demap_func);

	}
	kpreempt_enable();
}

/*
 * Interior routine for HAT_UNLOADs from hat_unload_callback(),
 * hat_kmap_unload() OR from hat_steal() code. This routine doesn't
 * handle releasing of the htables.
 */
void
hat_pte_unmap(
	htable_t	*ht,
	uint_t		entry,
	uint_t		flags,
	x86pte_t	old_pte,
	void		*pte_ptr)
{
	hat_t		*hat = ht->ht_hat;
	hment_t		*hm = NULL;
	page_t		*pp = NULL;
	level_t		l = ht->ht_level;
	pfn_t		pfn;

	/*
	 * We always track the locking counts, even if nothing is unmapped
	 */
	if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
		ASSERT(ht->ht_lock_cnt > 0);
		HTABLE_LOCK_DEC(ht);
	}

	/*
	 * Figure out which page's mapping list lock to acquire using the pfn
	 * passed in the "old" PTE. We then attempt to invalidate the PTE.
	 * If another thread, probably a hat_pageunload, has asynchronously
	 * unmapped/remapped this address we'll loop here.
	 */
	ASSERT(ht->ht_busy > 0);
	while (PTE_ISVALID(old_pte)) {
		pfn = PTE2PFN(old_pte, l);
		if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
			pp = NULL;
		} else {
			pp = page_numtopp_nolock(pfn);
			if (pp == NULL) {
				panic("no page_t, not NOCONSIST: old_pte="
				    FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
				    old_pte, (uintptr_t)ht, entry,
				    (uintptr_t)pte_ptr);
			}
			x86_hm_enter(pp);
		}

		/*
		 * If freeing the address space, check that the PTE hasn't
		 * changed; since the mappings are no longer in use by any
		 * thread, invalidation is unnecessary.
		 * If not freeing, do a full invalidate.
		 */
		if (hat->hat_flags & HAT_FREEING)
			old_pte = x86pte_get(ht, entry);
		else
			old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr);

		/*
		 * If the page hadn't changed we've unmapped it and can proceed
		 */
		if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
			break;

		/*
		 * Otherwise, we'll have to retry with the current old_pte.
		 * Drop the hment lock, since the pfn may have changed.
		 */
		if (pp != NULL) {
			x86_hm_exit(pp);
			pp = NULL;
		} else {
			ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
		}
	}

	/*
	 * If the old mapping wasn't valid, there's nothing more to do
	 */
	if (!PTE_ISVALID(old_pte)) {
		if (pp != NULL)
			x86_hm_exit(pp);
		return;
	}

	/*
	 * Take care of syncing any MOD/REF bits and removing the hment.
	 */
	if (pp != NULL) {
		if (!(flags & HAT_UNLOAD_NOSYNC))
			hati_sync_pte_to_page(pp, old_pte, l);
		hm = hment_remove(pp, ht, entry);
		x86_hm_exit(pp);
		if (hm != NULL)
			hment_free(hm);
	}

	/*
	 * Handle bookkeeping in the htable and hat
	 */
	ASSERT(ht->ht_valid_cnt > 0);
	HTABLE_DEC(ht->ht_valid_cnt);
	PGCNT_DEC(hat, l);
}

/*
 * very cheap unload implementation to special case some kernel addresses
 */
static void
hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
{
	uintptr_t	va = (uintptr_t)addr;
	uintptr_t	eva = va + len;
	pgcnt_t		pg_index;
	htable_t	*ht;
	uint_t		entry;
	x86pte_t	*pte_ptr;
	x86pte_t	old_pte;

	for (; va < eva; va += MMU_PAGESIZE) {
		/*
		 * Get the PTE
		 */
		pg_index = mmu_btop(va - mmu.kmap_addr);
		pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
		old_pte = GET_PTE(pte_ptr);

		/*
		 * get the htable / entry
		 */
		ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
		    >> LEVEL_SHIFT(1)];
		entry = htable_va2entry(va, ht);

		/*
		 * use mostly common code to unmap it.
		 */
		hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr);
	}
}


/*
 * unload a range of virtual address space (no callback)
 */
void
hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
{
	uintptr_t va = (uintptr_t)addr;

	ASSERT(hat == kas.a_hat || va + len <= _userlimit);

	/*
	 * special case for performance.
	 */
	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
		ASSERT(hat == kas.a_hat);
		hat_kmap_unload(addr, len, flags);
	} else {
		hat_unload_callback(hat, addr, len, flags, NULL);
	}
}

/*
 * Do the callbacks for ranges being unloaded.
 */
typedef struct range_info {
	uintptr_t	rng_va;
	ulong_t		rng_cnt;
	level_t		rng_level;
} range_info_t;

static void
handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
{
	/*
	 * do callbacks to upper level VM system
	 */
	while (cb != NULL && cnt > 0) {
		--cnt;
		cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
		cb->hcb_end_addr = cb->hcb_start_addr;
		cb->hcb_end_addr +=
		    range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
		cb->hcb_function(cb);
	}
}
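
/*
 * Illustrative sketch (not part of the original source): the unload loop
 * in hat_unload_callback() below coalesces contiguous same-level unmaps
 * into range_info_t entries and only issues callbacks per range. A
 * hypothetical restatement of that accumulation step (MAX_UNLOAD_CNT is
 * defined just below):
 */
#if 0
static void
example_accumulate(range_info_t *r, uint_t *r_cnt, hat_callback_t *cb,
    uintptr_t vaddr, level_t level)
{
	/* start a new range when va isn't contiguous or the level changes */
	if (*r_cnt == 0 || r[*r_cnt - 1].rng_level != level ||
	    r[*r_cnt - 1].rng_va + (r[*r_cnt - 1].rng_cnt <<
	    LEVEL_SHIFT(level)) != vaddr) {
		if (*r_cnt == MAX_UNLOAD_CNT) {
			handle_ranges(cb, *r_cnt, r);
			*r_cnt = 0;
		}
		r[*r_cnt].rng_va = vaddr;
		r[*r_cnt].rng_cnt = 0;
		r[*r_cnt].rng_level = level;
		++*r_cnt;
	}
	++r[*r_cnt - 1].rng_cnt;
}
#endif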

/*
 * Unload a given range of addresses (has optional callback)
 *
 * Flags:
 * define	HAT_UNLOAD		0x00
 * define	HAT_UNLOAD_NOSYNC	0x02
 * define	HAT_UNLOAD_UNLOCK	0x04
 * define	HAT_UNLOAD_OTHER	0x08 - not used
 * define	HAT_UNLOAD_UNMAP	0x10 - same as HAT_UNLOAD
 */
#define	MAX_UNLOAD_CNT (8)
void
hat_unload_callback(
	hat_t		*hat,
	caddr_t		addr,
	size_t		len,
	uint_t		flags,
	hat_callback_t	*cb)
{
	uintptr_t	vaddr = (uintptr_t)addr;
	uintptr_t	eaddr = vaddr + len;
	htable_t	*ht = NULL;
	uint_t		entry;
	uintptr_t	contig_va = (uintptr_t)-1L;
	range_info_t	r[MAX_UNLOAD_CNT];
	uint_t		r_cnt = 0;
	x86pte_t	old_pte;

	HATIN(hat_unload_callback, hat, addr, len);
	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));

	/*
	 * Special case a single page being unloaded for speed. This happens
	 * quite frequently, COW faults after a fork() for example.
	 */
	if (cb == NULL && len == MMU_PAGESIZE) {
		ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
		if (ht != NULL) {
			if (PTE_ISVALID(old_pte))
				hat_pte_unmap(ht, entry, flags, old_pte, NULL);
			htable_release(ht);
		}
		return;
	}

	while (vaddr < eaddr) {
		old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;

		ASSERT(!IN_VA_HOLE(vaddr));

		if (vaddr < (uintptr_t)addr)
			panic("hat_unload_callback(): unmap inside large page");

		/*
		 * We'll do the callbacks for contiguous ranges
		 */
		if (vaddr != contig_va ||
		    (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
			if (r_cnt == MAX_UNLOAD_CNT) {
				handle_ranges(cb, r_cnt, r);
				r_cnt = 0;
			}
			r[r_cnt].rng_va = vaddr;
			r[r_cnt].rng_cnt = 0;
			r[r_cnt].rng_level = ht->ht_level;
			++r_cnt;
		}

		/*
		 * Unload one mapping from the page tables.
		 */
		entry = htable_va2entry(vaddr, ht);
		hat_pte_unmap(ht, entry, flags, old_pte, NULL);
		ASSERT(ht->ht_level <= mmu.max_page_level);
		vaddr += LEVEL_SIZE(ht->ht_level);
		contig_va = vaddr;
		++r[r_cnt - 1].rng_cnt;
	}
	if (ht)
		htable_release(ht);

	/*
	 * handle last range for callbacks
	 */
	if (r_cnt > 0)
		handle_ranges(cb, r_cnt, r);

	HATOUT(hat_unload_callback, hat, addr);
}
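
/*
 * Illustrative sketch (not part of the original source): a caller that
 * wants per-range notification fills in a hat_callback_t. The function
 * names and the callback body here are hypothetical.
 */
#if 0
static void
example_range_cb(hat_callback_t *cb)
{
	/* invoked once per coalesced [hcb_start_addr, hcb_end_addr) range */
}

static void
example_unload_with_cb(hat_t *hat, caddr_t addr, size_t len)
{
	hat_callback_t	cb;

	cb.hcb_function = example_range_cb;
	hat_unload_callback(hat, addr, len, HAT_UNLOAD_UNMAP, &cb);
}
#endif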

/*
 * synchronize mapping with software data structures
 *
 * This interface is currently only used by the working set monitor
 * driver.
 */
/*ARGSUSED*/
void
hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
{
	uintptr_t	vaddr = (uintptr_t)addr;
	uintptr_t	eaddr = vaddr + len;
	htable_t	*ht = NULL;
	uint_t		entry;
	x86pte_t	pte;
	x86pte_t	save_pte;
	x86pte_t	new;
	page_t		*pp;

	ASSERT(!IN_VA_HOLE(vaddr));
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);

	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
try_again:
		pte = htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;
		entry = htable_va2entry(vaddr, ht);

		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
		    PTE_GET(pte, PT_REF | PT_MOD) == 0)
			continue;

		/*
		 * We need to acquire the mapping list lock to protect
		 * against hat_pageunload(), hat_unload(), etc.
		 */
		pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
		if (pp == NULL)
			break;
		x86_hm_enter(pp);
		save_pte = pte;
		pte = x86pte_get(ht, entry);
		if (pte != save_pte) {
			x86_hm_exit(pp);
			goto try_again;
		}
		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
		    PTE_GET(pte, PT_REF | PT_MOD) == 0) {
			x86_hm_exit(pp);
			continue;
		}

		/*
		 * Need to clear ref or mod bits. We may compete with
		 * hardware updating the R/M bits and have to try again.
		 */
		if (flags == HAT_SYNC_ZERORM) {
			new = pte;
			PTE_CLR(new, PT_REF | PT_MOD);
			pte = hati_update_pte(ht, entry, pte, new);
			if (pte != 0) {
				x86_hm_exit(pp);
				goto try_again;
			}
		} else {
			/*
			 * sync the PTE to the page_t
			 */
			hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
		}
		x86_hm_exit(pp);
	}
	if (ht)
		htable_release(ht);
}

/*
 * void	hat_map(hat, addr, len, flags)
 */
/*ARGSUSED*/
void
hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
{
	/* does nothing */
}

/*
 * uint_t hat_getattr(hat, addr, *attr)
 *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
 *	mapping and *attr is valid, nonzero if there was no mapping and
 *	*attr is not valid.
 */
uint_t
hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
{
	uintptr_t	vaddr = ALIGN2PAGE(addr);
	htable_t	*ht = NULL;
	x86pte_t	pte;

	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);

	if (IN_VA_HOLE(vaddr))
		return ((uint_t)-1);

	ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
	if (ht == NULL)
		return ((uint_t)-1);

	if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
		htable_release(ht);
		return ((uint_t)-1);
	}

	*attr = PROT_READ;
	if (PTE_GET(pte, PT_WRITABLE))
		*attr |= PROT_WRITE;
	if (PTE_GET(pte, PT_USER))
		*attr |= PROT_USER;
	if (!PTE_GET(pte, mmu.pt_nx))
		*attr |= PROT_EXEC;
	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
		*attr |= HAT_NOSYNC;
	htable_release(ht);
	return (0);
}
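
/*
 * Illustrative sketch (not part of the original source): decoding the
 * attributes returned above. Note the 0 / nonzero return convention is
 * the reverse of what a boolean reading might suggest. The helper name
 * is hypothetical.
 */
#if 0
static int
example_is_writable(hat_t *hat, caddr_t addr)
{
	uint_t	attr;

	if (hat_getattr(hat, addr, &attr) != 0)
		return (0);		/* no mapping at all */
	return ((attr & PROT_WRITE) != 0);
}
#endif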

/*
 * hat_updateattr() applies the given attribute change to an existing mapping
 */
#define	HAT_LOAD_ATTR		1
#define	HAT_SET_ATTR		2
#define	HAT_CLR_ATTR		3

static void
hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
{
	uintptr_t	vaddr = (uintptr_t)addr;
	uintptr_t	eaddr = (uintptr_t)addr + len;
	htable_t	*ht = NULL;
	uint_t		entry;
	x86pte_t	oldpte, newpte;
	page_t		*pp;

	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
try_again:
		oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;
		if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
			continue;

		pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
		if (pp == NULL)
			continue;
		x86_hm_enter(pp);

		newpte = oldpte;
		/*
		 * We found a page table entry in the desired range,
		 * figure out the new attributes.
		 */
		if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
			if ((attr & PROT_WRITE) &&
			    !PTE_GET(oldpte, PT_WRITABLE))
				newpte |= PT_WRITABLE;

			if ((attr & HAT_NOSYNC) &&
			    PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
				newpte |= PT_NOSYNC;

			if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
				newpte &= ~mmu.pt_nx;
		}

		if (what == HAT_LOAD_ATTR) {
			if (!(attr & PROT_WRITE) &&
			    PTE_GET(oldpte, PT_WRITABLE))
				newpte &= ~PT_WRITABLE;

			if (!(attr & HAT_NOSYNC) &&
			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
				newpte &= ~PT_SOFTWARE;

			if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
				newpte |= mmu.pt_nx;
		}

		if (what == HAT_CLR_ATTR) {
			if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
				newpte &= ~PT_WRITABLE;

			if ((attr & HAT_NOSYNC) &&
			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
				newpte &= ~PT_SOFTWARE;

			if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
				newpte |= mmu.pt_nx;
		}

		/*
		 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
		 * x86pte_set() depends on this.
		 */
		if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
			newpte |= PT_REF | PT_MOD;

		/*
		 * what about PROT_READ or others? This code only handles:
		 * EXEC, WRITE, NOSYNC
		 */

		/*
		 * If new PTE really changed, update the table.
		 */
		if (newpte != oldpte) {
			entry = htable_va2entry(vaddr, ht);
			oldpte = hati_update_pte(ht, entry, oldpte, newpte);
			if (oldpte != 0) {
				x86_hm_exit(pp);
				goto try_again;
			}
		}
		x86_hm_exit(pp);
	}
	if (ht)
		htable_release(ht);
}

/*
 * Various wrappers for hat_updateattr()
 */
void
hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
{
	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
	hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
}

void
hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
{
	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
	hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
}

void
hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
{
	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
	hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
}

void
hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
{
	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
	hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
}
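
/*
 * Illustrative sketch (not part of the original source): the semantic
 * difference between the wrappers, shown on a hypothetical range.
 * HAT_SET_ATTR ors attributes in, HAT_CLR_ATTR removes them, and
 * HAT_LOAD_ATTR replaces the handled set (EXEC, WRITE, NOSYNC) outright.
 */
#if 0
static void
example_make_readonly(hat_t *hat, caddr_t addr, size_t len)
{
	/* drop write permission but leave everything else alone */
	hat_clrattr(hat, addr, len, PROT_WRITE);

	/* alternatively, state the desired protections explicitly */
	hat_chgprot(hat, addr, len, PROT_READ | PROT_EXEC);
}
#endif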

/*
 * size_t hat_getpagesize(hat, addr)
 *	returns pagesize in bytes for <hat, addr>. returns -1 if there is
 *	no mapping. This is an advisory call.
 */
ssize_t
hat_getpagesize(hat_t *hat, caddr_t addr)
{
	uintptr_t	vaddr = ALIGN2PAGE(addr);
	htable_t	*ht;
	size_t		pagesize;

	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
	if (IN_VA_HOLE(vaddr))
		return (-1);
	ht = htable_getpage(hat, vaddr, NULL);
	if (ht == NULL)
		return (-1);
	pagesize = LEVEL_SIZE(ht->ht_level);
	htable_release(ht);
	return (pagesize);
}


/*
 * pfn_t hat_getpfnum(hat, addr)
 *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
 */
pfn_t
hat_getpfnum(hat_t *hat, caddr_t addr)
{
	uintptr_t	vaddr = ALIGN2PAGE(addr);
	htable_t	*ht;
	uint_t		entry;
	pfn_t		pfn = PFN_INVALID;

	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
	if (khat_running == 0)
		return (PFN_INVALID);

	if (IN_VA_HOLE(vaddr))
		return (PFN_INVALID);

	/*
	 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
	 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
	 * this up.
	 */
	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
		x86pte_t	pte;
		pgcnt_t		pg_index;

		pg_index = mmu_btop(vaddr - mmu.kmap_addr);
		pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
		if (!PTE_ISVALID(pte))
			return (PFN_INVALID);
		/*LINTED [use of constant 0 causes a silly lint warning] */
		return (PTE2PFN(pte, 0));
	}

	ht = htable_getpage(hat, vaddr, &entry);
	if (ht == NULL)
		return (PFN_INVALID);
	ASSERT(vaddr >= ht->ht_vaddr);
	ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
	pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
	if (ht->ht_level > 0)
		pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
	htable_release(ht);
	return (pfn);
}
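
/*
 * Illustrative sketch (not part of the original source): why the large
 * page case above adds an offset. The PTE's pfn names the first frame of
 * the large page; the frame backing vaddr sits mmu_btop(page offset)
 * frames past it. The helper name is hypothetical.
 */
#if 0
static pfn_t
example_large_page_pfn(x86pte_t pte, uintptr_t vaddr, level_t l)
{
	pfn_t	base = PTE2PFN(pte, l);

	return (base + mmu_btop(vaddr & LEVEL_OFFSET(l)));
}
#endif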

/*
 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
 * Use hat_getpfnum(kas.a_hat, ...) instead.
 *
 * We'd like to return PFN_INVALID if the mappings have underlying page_t's
 * but can't right now because some software has grown to use this interface
 * incorrectly. So for now when the interface is misused, return a warning
 * to the user that in the future it won't work in the way they're abusing
 * it, and carry on.
 *
 * Note that hat_getkpfnum() is never supported on amd64.
 */
#if !defined(__amd64)
pfn_t
hat_getkpfnum(caddr_t addr)
{
	pfn_t	pfn;
	int	badcaller = 0;

	if (khat_running == 0)
		panic("hat_getkpfnum(): called too early\n");
	if ((uintptr_t)addr < kernelbase)
		return (PFN_INVALID);

	if (segkpm && IS_KPM_ADDR(addr)) {
		badcaller = 1;
		pfn = hat_kpm_va2pfn(addr);
	} else {
		pfn = hat_getpfnum(kas.a_hat, addr);
		badcaller = pf_is_memory(pfn);
	}

	if (badcaller)
		hat_getkpfnum_badcall(caller());
	return (pfn);
}
#endif /* __amd64 */

/*
 * int hat_probe(hat, addr)
 *	return 0 if no valid mapping is present.  Faster version
 *	of hat_getattr() in certain architectures.
 */
int
hat_probe(hat_t *hat, caddr_t addr)
{
	uintptr_t	vaddr = ALIGN2PAGE(addr);
	uint_t		entry;
	htable_t	*ht;
	pgcnt_t		pg_off;

	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	if (IN_VA_HOLE(vaddr))
		return (0);

	/*
	 * Most common use of hat_probe is from segmap. We special case it
	 * for performance.
	 */
25350Sstevel@tonic-gate */ 25360Sstevel@tonic-gate if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) { 25370Sstevel@tonic-gate pg_off = mmu_btop(vaddr - mmu.kmap_addr); 25380Sstevel@tonic-gate if (mmu.pae_hat) 25390Sstevel@tonic-gate return (PTE_ISVALID(mmu.kmap_ptes[pg_off])); 25400Sstevel@tonic-gate else 25410Sstevel@tonic-gate return (PTE_ISVALID( 25420Sstevel@tonic-gate ((x86pte32_t *)mmu.kmap_ptes)[pg_off])); 25430Sstevel@tonic-gate } 25440Sstevel@tonic-gate 25450Sstevel@tonic-gate ht = htable_getpage(hat, vaddr, &entry); 25460Sstevel@tonic-gate if (ht == NULL) 25470Sstevel@tonic-gate return (0); 25480Sstevel@tonic-gate htable_release(ht); 25490Sstevel@tonic-gate return (1); 25500Sstevel@tonic-gate } 25510Sstevel@tonic-gate 25520Sstevel@tonic-gate /* 25530Sstevel@tonic-gate * Simple implementation of ISM. hat_share() is just like hat_memload_array(), 25540Sstevel@tonic-gate * except that we use the ism_hat's existing mappings to determine the pages 25550Sstevel@tonic-gate * and protections to use for this hat. In case we find a properly aligned 25560Sstevel@tonic-gate * and sized pagetable of 4K mappings, we will attempt to share the pagetable 25570Sstevel@tonic-gate * itself. 25580Sstevel@tonic-gate */ 25590Sstevel@tonic-gate /*ARGSUSED*/ 25600Sstevel@tonic-gate int 25610Sstevel@tonic-gate hat_share( 25620Sstevel@tonic-gate hat_t *hat, 25630Sstevel@tonic-gate caddr_t addr, 25640Sstevel@tonic-gate hat_t *ism_hat, 25650Sstevel@tonic-gate caddr_t src_addr, 25660Sstevel@tonic-gate size_t len, /* almost useless value, see below.. */ 25670Sstevel@tonic-gate uint_t ismszc) 25680Sstevel@tonic-gate { 25690Sstevel@tonic-gate uintptr_t vaddr_start = (uintptr_t)addr; 25700Sstevel@tonic-gate uintptr_t vaddr; 25710Sstevel@tonic-gate uintptr_t pt_vaddr; 25720Sstevel@tonic-gate uintptr_t eaddr = vaddr_start + len; 25730Sstevel@tonic-gate uintptr_t ism_addr_start = (uintptr_t)src_addr; 25740Sstevel@tonic-gate uintptr_t ism_addr = ism_addr_start; 25750Sstevel@tonic-gate uintptr_t e_ism_addr = ism_addr + len; 25760Sstevel@tonic-gate htable_t *ism_ht = NULL; 25770Sstevel@tonic-gate htable_t *ht; 25780Sstevel@tonic-gate x86pte_t pte; 25790Sstevel@tonic-gate page_t *pp; 25800Sstevel@tonic-gate pfn_t pfn; 25810Sstevel@tonic-gate level_t l; 25820Sstevel@tonic-gate pgcnt_t pgcnt; 25830Sstevel@tonic-gate uint_t prot; 25840Sstevel@tonic-gate uint_t valid_cnt; 25850Sstevel@tonic-gate 25860Sstevel@tonic-gate /* 25870Sstevel@tonic-gate * We might be asked to share an empty DISM hat by as_dup() 25880Sstevel@tonic-gate */ 25890Sstevel@tonic-gate ASSERT(hat != kas.a_hat); 25903446Smrj ASSERT(eaddr <= _userlimit); 25910Sstevel@tonic-gate if (!(ism_hat->hat_flags & HAT_SHARED)) { 25920Sstevel@tonic-gate ASSERT(hat_get_mapped_size(ism_hat) == 0); 25930Sstevel@tonic-gate return (0); 25940Sstevel@tonic-gate } 25950Sstevel@tonic-gate 25960Sstevel@tonic-gate /* 25970Sstevel@tonic-gate * The SPT segment driver often passes us a size larger than there are 25980Sstevel@tonic-gate * valid mappings. That's because it rounds the segment size up to a 25990Sstevel@tonic-gate * large pagesize, even if the actual memory mapped by ism_hat is less. 
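 *
 * For example (hypothetical numbers): if ism_hat maps only 1 MB but the
 * segment size was rounded up to a 4 MB large page, len arrives as 4 MB;
 * htable_walk() below simply finds no valid mappings past the 1 MB point
 * and the loop ends early.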
26000Sstevel@tonic-gate */ 26010Sstevel@tonic-gate HATIN(hat_share, hat, addr, len); 26020Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr_start)); 26030Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(ism_addr_start)); 26040Sstevel@tonic-gate ASSERT(ism_hat->hat_flags & HAT_SHARED); 26050Sstevel@tonic-gate while (ism_addr < e_ism_addr) { 26060Sstevel@tonic-gate /* 26070Sstevel@tonic-gate * use htable_walk to get the next valid ISM mapping 26080Sstevel@tonic-gate */ 26090Sstevel@tonic-gate pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr); 26100Sstevel@tonic-gate if (ism_ht == NULL) 26110Sstevel@tonic-gate break; 26120Sstevel@tonic-gate 26130Sstevel@tonic-gate /* 26140Sstevel@tonic-gate * Find the largest page size we can use, based on the 26150Sstevel@tonic-gate * ISM mapping size, our address alignment and the remaining 26160Sstevel@tonic-gate * map length. 26170Sstevel@tonic-gate */ 26180Sstevel@tonic-gate vaddr = vaddr_start + (ism_addr - ism_addr_start); 26190Sstevel@tonic-gate for (l = ism_ht->ht_level; l > 0; --l) { 26200Sstevel@tonic-gate if (LEVEL_SIZE(l) <= eaddr - vaddr && 26210Sstevel@tonic-gate (vaddr & LEVEL_OFFSET(l)) == 0) 26220Sstevel@tonic-gate break; 26230Sstevel@tonic-gate } 26240Sstevel@tonic-gate 26250Sstevel@tonic-gate /* 26260Sstevel@tonic-gate * attempt to share the pagetable 26270Sstevel@tonic-gate * 26280Sstevel@tonic-gate * - only 4K pagetables are shared (ie. level == 0) 26290Sstevel@tonic-gate * - the hat_share() length must cover the whole pagetable 26300Sstevel@tonic-gate * - the shared address must align at level 1 26310Sstevel@tonic-gate * - a shared PTE for this address already exists OR 26320Sstevel@tonic-gate * - no page table for this address exists yet 26330Sstevel@tonic-gate */ 26340Sstevel@tonic-gate pt_vaddr = 26350Sstevel@tonic-gate vaddr_start + (ism_ht->ht_vaddr - ism_addr_start); 26360Sstevel@tonic-gate if (ism_ht->ht_level == 0 && 26370Sstevel@tonic-gate ism_ht->ht_vaddr + LEVEL_SIZE(1) <= e_ism_addr && 26380Sstevel@tonic-gate (pt_vaddr & LEVEL_OFFSET(1)) == 0) { 26390Sstevel@tonic-gate 26400Sstevel@tonic-gate ht = htable_lookup(hat, pt_vaddr, 0); 26410Sstevel@tonic-gate if (ht == NULL) 26420Sstevel@tonic-gate ht = htable_create(hat, pt_vaddr, 0, ism_ht); 26430Sstevel@tonic-gate 26440Sstevel@tonic-gate if (ht->ht_level > 0 || 26450Sstevel@tonic-gate !(ht->ht_flags & HTABLE_SHARED_PFN)) { 26460Sstevel@tonic-gate 26470Sstevel@tonic-gate htable_release(ht); 26480Sstevel@tonic-gate 26490Sstevel@tonic-gate } else { 26500Sstevel@tonic-gate 26510Sstevel@tonic-gate /* 26520Sstevel@tonic-gate * share the page table 26530Sstevel@tonic-gate */ 26540Sstevel@tonic-gate ASSERT(ht->ht_level == 0); 26550Sstevel@tonic-gate ASSERT(ht->ht_shares == ism_ht); 26560Sstevel@tonic-gate valid_cnt = ism_ht->ht_valid_cnt; 26570Sstevel@tonic-gate atomic_add_long(&hat->hat_pages_mapped[0], 26580Sstevel@tonic-gate valid_cnt - ht->ht_valid_cnt); 26590Sstevel@tonic-gate ht->ht_valid_cnt = valid_cnt; 26600Sstevel@tonic-gate htable_release(ht); 26610Sstevel@tonic-gate ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(1); 26620Sstevel@tonic-gate htable_release(ism_ht); 26630Sstevel@tonic-gate ism_ht = NULL; 26640Sstevel@tonic-gate continue; 26650Sstevel@tonic-gate } 26660Sstevel@tonic-gate } 26670Sstevel@tonic-gate 26680Sstevel@tonic-gate /* 26690Sstevel@tonic-gate * Unable to share the page table. Instead we will 26700Sstevel@tonic-gate * create new mappings from the values in the ISM mappings. 
26710Sstevel@tonic-gate *
26720Sstevel@tonic-gate * The ISM mapping might be larger than the share area,
26730Sstevel@tonic-gate * be careful to truncate it if needed.
26740Sstevel@tonic-gate */
26750Sstevel@tonic-gate if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
26760Sstevel@tonic-gate pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
26770Sstevel@tonic-gate } else {
26780Sstevel@tonic-gate pgcnt = mmu_btop(eaddr - vaddr);
26790Sstevel@tonic-gate l = 0;
26800Sstevel@tonic-gate }
26810Sstevel@tonic-gate 
26820Sstevel@tonic-gate pfn = PTE2PFN(pte, ism_ht->ht_level);
26830Sstevel@tonic-gate ASSERT(pfn != PFN_INVALID);
26840Sstevel@tonic-gate while (pgcnt > 0) {
26850Sstevel@tonic-gate /*
26860Sstevel@tonic-gate * Make a new pte for the PFN for this level.
26870Sstevel@tonic-gate * Copy protections for the pte from the ISM pte.
26880Sstevel@tonic-gate */
26890Sstevel@tonic-gate pp = page_numtopp_nolock(pfn);
26900Sstevel@tonic-gate ASSERT(pp != NULL);
26910Sstevel@tonic-gate 
26920Sstevel@tonic-gate prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
26930Sstevel@tonic-gate if (PTE_GET(pte, PT_WRITABLE))
26940Sstevel@tonic-gate prot |= PROT_WRITE;
26950Sstevel@tonic-gate if (!PTE_GET(pte, PT_NX))
26960Sstevel@tonic-gate prot |= PROT_EXEC;
26970Sstevel@tonic-gate 
26980Sstevel@tonic-gate /*
26990Sstevel@tonic-gate * XX64 -- can shm ever be written to swap?
27000Sstevel@tonic-gate * if not we could use HAT_NOSYNC here.
27010Sstevel@tonic-gate */
27023446Smrj while (hati_load_common(hat, vaddr, pp, prot, HAT_LOAD,
27033446Smrj l, pfn) != 0) {
27043446Smrj if (l == 0)
27053446Smrj panic("hati_load_common() failure");
27063446Smrj --l;
27073446Smrj }
27080Sstevel@tonic-gate 
27090Sstevel@tonic-gate vaddr += LEVEL_SIZE(l);
27100Sstevel@tonic-gate ism_addr += LEVEL_SIZE(l);
27110Sstevel@tonic-gate pfn += mmu_btop(LEVEL_SIZE(l));
27120Sstevel@tonic-gate pgcnt -= mmu_btop(LEVEL_SIZE(l));
27130Sstevel@tonic-gate }
27140Sstevel@tonic-gate }
27150Sstevel@tonic-gate if (ism_ht != NULL)
27160Sstevel@tonic-gate htable_release(ism_ht);
27170Sstevel@tonic-gate 
27180Sstevel@tonic-gate HATOUT(hat_share, hat, addr);
27190Sstevel@tonic-gate return (0);
27200Sstevel@tonic-gate }
27210Sstevel@tonic-gate 
27220Sstevel@tonic-gate 
27230Sstevel@tonic-gate /*
27240Sstevel@tonic-gate * hat_unshare() is similar to hat_unload_callback(), but
27250Sstevel@tonic-gate * we have to look for empty shared pagetables. Note that
27260Sstevel@tonic-gate * hat_unshare() is always invoked against an entire segment.
27270Sstevel@tonic-gate */
27280Sstevel@tonic-gate /*ARGSUSED*/
27290Sstevel@tonic-gate void
27300Sstevel@tonic-gate hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
27310Sstevel@tonic-gate {
27320Sstevel@tonic-gate uintptr_t vaddr = (uintptr_t)addr;
27330Sstevel@tonic-gate uintptr_t eaddr = vaddr + len;
27340Sstevel@tonic-gate htable_t *ht = NULL;
27350Sstevel@tonic-gate uint_t need_demaps = 0;
27360Sstevel@tonic-gate 
27370Sstevel@tonic-gate ASSERT(hat != kas.a_hat);
27383446Smrj ASSERT(eaddr <= _userlimit);
27390Sstevel@tonic-gate HATIN(hat_unshare, hat, addr, len);
27400Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr));
27410Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(eaddr));
27420Sstevel@tonic-gate 
27430Sstevel@tonic-gate /*
27440Sstevel@tonic-gate * First go through and remove any shared pagetables.
27450Sstevel@tonic-gate * 27463446Smrj * Note that it's ok to delay the TLB shootdown till the entire range is 27470Sstevel@tonic-gate * finished, because if hat_pageunload() were to unload a shared 27483446Smrj * pagetable page, its hat_tlb_inval() will do a global TLB invalidate. 27490Sstevel@tonic-gate */ 27500Sstevel@tonic-gate while (vaddr < eaddr) { 27510Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(vaddr)); 27520Sstevel@tonic-gate /* 27530Sstevel@tonic-gate * find the pagetable that would map the current address 27540Sstevel@tonic-gate */ 27550Sstevel@tonic-gate ht = htable_lookup(hat, vaddr, 0); 27560Sstevel@tonic-gate if (ht != NULL) { 27570Sstevel@tonic-gate if (ht->ht_flags & HTABLE_SHARED_PFN) { 27580Sstevel@tonic-gate /* 27590Sstevel@tonic-gate * clear mapped pages count, set valid_cnt to 0 27600Sstevel@tonic-gate * and let htable_release() finish the job 27610Sstevel@tonic-gate */ 27620Sstevel@tonic-gate atomic_add_long(&hat->hat_pages_mapped[0], 27630Sstevel@tonic-gate -ht->ht_valid_cnt); 27640Sstevel@tonic-gate ht->ht_valid_cnt = 0; 27650Sstevel@tonic-gate need_demaps = 1; 27660Sstevel@tonic-gate } 27670Sstevel@tonic-gate htable_release(ht); 27680Sstevel@tonic-gate } 27690Sstevel@tonic-gate vaddr = (vaddr & LEVEL_MASK(1)) + LEVEL_SIZE(1); 27700Sstevel@tonic-gate } 27710Sstevel@tonic-gate 27720Sstevel@tonic-gate /* 27730Sstevel@tonic-gate * flush the TLBs - since we're probably dealing with MANY mappings 27740Sstevel@tonic-gate * we do just one CR3 reload. 27750Sstevel@tonic-gate */ 27760Sstevel@tonic-gate if (!(hat->hat_flags & HAT_FREEING) && need_demaps) 27773446Smrj hat_tlb_inval(hat, DEMAP_ALL_ADDR); 27780Sstevel@tonic-gate 27790Sstevel@tonic-gate /* 27800Sstevel@tonic-gate * Now go back and clean up any unaligned mappings that 27810Sstevel@tonic-gate * couldn't share pagetables. 27820Sstevel@tonic-gate */ 27830Sstevel@tonic-gate hat_unload(hat, addr, len, HAT_UNLOAD_UNMAP); 27840Sstevel@tonic-gate 27850Sstevel@tonic-gate HATOUT(hat_unshare, hat, addr); 27860Sstevel@tonic-gate } 27870Sstevel@tonic-gate 27880Sstevel@tonic-gate 27890Sstevel@tonic-gate /* 27900Sstevel@tonic-gate * hat_reserve() does nothing 27910Sstevel@tonic-gate */ 27920Sstevel@tonic-gate /*ARGSUSED*/ 27930Sstevel@tonic-gate void 27940Sstevel@tonic-gate hat_reserve(struct as *as, caddr_t addr, size_t len) 27950Sstevel@tonic-gate { 27960Sstevel@tonic-gate } 27970Sstevel@tonic-gate 27980Sstevel@tonic-gate 27990Sstevel@tonic-gate /* 28000Sstevel@tonic-gate * Called when all mappings to a page should have write permission removed. 
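 * (For instance, hat_page_clrattr(pp, P_MOD) on a page of a VMODSORT
 * vnode ends up here; see below.)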
28010Sstevel@tonic-gate * Mostly stolen from hat_pagesync()
28020Sstevel@tonic-gate */
28030Sstevel@tonic-gate static void
28040Sstevel@tonic-gate hati_page_clrwrt(struct page *pp)
28050Sstevel@tonic-gate {
28060Sstevel@tonic-gate hment_t *hm = NULL;
28070Sstevel@tonic-gate htable_t *ht;
28080Sstevel@tonic-gate uint_t entry;
28090Sstevel@tonic-gate x86pte_t old;
28100Sstevel@tonic-gate x86pte_t new;
28110Sstevel@tonic-gate uint_t pszc = 0;
28120Sstevel@tonic-gate 
28130Sstevel@tonic-gate next_size:
28140Sstevel@tonic-gate /*
28150Sstevel@tonic-gate * walk thru the mapping list clearing write permission
28160Sstevel@tonic-gate */
28170Sstevel@tonic-gate x86_hm_enter(pp);
28180Sstevel@tonic-gate while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
28190Sstevel@tonic-gate if (ht->ht_level < pszc)
28200Sstevel@tonic-gate continue;
28210Sstevel@tonic-gate old = x86pte_get(ht, entry);
28220Sstevel@tonic-gate 
28230Sstevel@tonic-gate for (;;) {
28240Sstevel@tonic-gate /*
28250Sstevel@tonic-gate * Is this mapping of interest?
28260Sstevel@tonic-gate */
28270Sstevel@tonic-gate if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
28280Sstevel@tonic-gate PTE_GET(old, PT_WRITABLE) == 0)
28290Sstevel@tonic-gate break;
28300Sstevel@tonic-gate 
28310Sstevel@tonic-gate /*
28320Sstevel@tonic-gate * Clear ref/mod writable bits. This requires cross
28330Sstevel@tonic-gate * calls to ensure any executing TLBs see cleared bits.
28340Sstevel@tonic-gate */
28350Sstevel@tonic-gate new = old;
28360Sstevel@tonic-gate PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
28370Sstevel@tonic-gate old = hati_update_pte(ht, entry, old, new);
28380Sstevel@tonic-gate if (old != 0)
28390Sstevel@tonic-gate continue;
28400Sstevel@tonic-gate 
28410Sstevel@tonic-gate break;
28420Sstevel@tonic-gate }
28430Sstevel@tonic-gate }
28440Sstevel@tonic-gate x86_hm_exit(pp);
28450Sstevel@tonic-gate while (pszc < pp->p_szc) {
28460Sstevel@tonic-gate page_t *tpp;
28470Sstevel@tonic-gate pszc++;
28480Sstevel@tonic-gate tpp = PP_GROUPLEADER(pp, pszc);
28490Sstevel@tonic-gate if (pp != tpp) {
28500Sstevel@tonic-gate pp = tpp;
28510Sstevel@tonic-gate goto next_size;
28520Sstevel@tonic-gate }
28530Sstevel@tonic-gate }
28540Sstevel@tonic-gate }
28550Sstevel@tonic-gate 
28560Sstevel@tonic-gate /*
28570Sstevel@tonic-gate * void hat_page_setattr(pp, flag)
28580Sstevel@tonic-gate * void hat_page_clrattr(pp, flag)
28590Sstevel@tonic-gate * used to set/clr ref/mod bits.
28600Sstevel@tonic-gate */
28610Sstevel@tonic-gate void
28620Sstevel@tonic-gate hat_page_setattr(struct page *pp, uint_t flag)
28630Sstevel@tonic-gate {
28640Sstevel@tonic-gate vnode_t *vp = pp->p_vnode;
28650Sstevel@tonic-gate kmutex_t *vphm = NULL;
28660Sstevel@tonic-gate page_t **listp;
28670Sstevel@tonic-gate 
28680Sstevel@tonic-gate if (PP_GETRM(pp, flag) == flag)
28690Sstevel@tonic-gate return;
28700Sstevel@tonic-gate 
28710Sstevel@tonic-gate if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
28720Sstevel@tonic-gate vphm = page_vnode_mutex(vp);
28730Sstevel@tonic-gate mutex_enter(vphm);
28740Sstevel@tonic-gate }
28750Sstevel@tonic-gate 
28760Sstevel@tonic-gate PP_SETRM(pp, flag);
28770Sstevel@tonic-gate 
28780Sstevel@tonic-gate if (vphm != NULL) {
28790Sstevel@tonic-gate 
28800Sstevel@tonic-gate /*
28810Sstevel@tonic-gate * Some File Systems examine v_pages for NULL w/o
28820Sstevel@tonic-gate * grabbing the vphm mutex. Must not let it become NULL when
28830Sstevel@tonic-gate * pp is the only page on the list.
28840Sstevel@tonic-gate */
28850Sstevel@tonic-gate if (pp->p_vpnext != pp) {
28860Sstevel@tonic-gate page_vpsub(&vp->v_pages, pp);
28870Sstevel@tonic-gate if (vp->v_pages != NULL)
28880Sstevel@tonic-gate listp = &vp->v_pages->p_vpprev->p_vpnext;
28890Sstevel@tonic-gate else
28900Sstevel@tonic-gate listp = &vp->v_pages;
28910Sstevel@tonic-gate page_vpadd(listp, pp);
28920Sstevel@tonic-gate }
28930Sstevel@tonic-gate mutex_exit(vphm);
28940Sstevel@tonic-gate }
28950Sstevel@tonic-gate }
28960Sstevel@tonic-gate 
28970Sstevel@tonic-gate void
28980Sstevel@tonic-gate hat_page_clrattr(struct page *pp, uint_t flag)
28990Sstevel@tonic-gate {
29000Sstevel@tonic-gate vnode_t *vp = pp->p_vnode;
29010Sstevel@tonic-gate ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
29020Sstevel@tonic-gate 
29030Sstevel@tonic-gate /*
29042999Sstans * Caller is expected to hold the page's io lock for VMODSORT to work
29052999Sstans * correctly with pvn_vplist_dirty() and pvn_getdirty() when the mod
29062999Sstans * bit is cleared.
29072999Sstans * We don't assert this, to avoid tripping some existing third-party
29082999Sstans * code. The dirty page is moved back to the top of the v_pages list
29092999Sstans * after IO is done in pvn_write_done().
29100Sstevel@tonic-gate */
29110Sstevel@tonic-gate PP_CLRRM(pp, flag);
29120Sstevel@tonic-gate 
29132999Sstans if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
29140Sstevel@tonic-gate 
29150Sstevel@tonic-gate /*
29160Sstevel@tonic-gate * VMODSORT works by removing write permissions and getting
29170Sstevel@tonic-gate * a fault when a page is made dirty. At this point
29180Sstevel@tonic-gate * we need to remove write permission from all mappings
29190Sstevel@tonic-gate * to this page.
29200Sstevel@tonic-gate */
29210Sstevel@tonic-gate hati_page_clrwrt(pp);
29220Sstevel@tonic-gate }
29230Sstevel@tonic-gate }
29240Sstevel@tonic-gate 
29250Sstevel@tonic-gate /*
29260Sstevel@tonic-gate * If flag is specified, returns 0 if the attribute is disabled
29270Sstevel@tonic-gate * and nonzero if enabled. If flag specifies multiple attributes,
29280Sstevel@tonic-gate * returns 0 if ALL attributes are disabled. This is an advisory
29290Sstevel@tonic-gate * call.
29300Sstevel@tonic-gate */
29310Sstevel@tonic-gate uint_t
29320Sstevel@tonic-gate hat_page_getattr(struct page *pp, uint_t flag)
29330Sstevel@tonic-gate {
29340Sstevel@tonic-gate return (PP_GETRM(pp, flag));
29350Sstevel@tonic-gate }
29360Sstevel@tonic-gate 
29370Sstevel@tonic-gate 
29380Sstevel@tonic-gate /*
29390Sstevel@tonic-gate * common code used by hat_pageunload() and hment_steal()
29400Sstevel@tonic-gate */
29410Sstevel@tonic-gate hment_t *
29420Sstevel@tonic-gate hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
29430Sstevel@tonic-gate {
29440Sstevel@tonic-gate x86pte_t old_pte;
29450Sstevel@tonic-gate pfn_t pfn = pp->p_pagenum;
29460Sstevel@tonic-gate hment_t *hm;
29470Sstevel@tonic-gate 
29480Sstevel@tonic-gate /*
29490Sstevel@tonic-gate * We need to acquire a hold on the htable in order to
29500Sstevel@tonic-gate * do the invalidate. We know the htable must exist, since
29510Sstevel@tonic-gate * unmaps don't release the htable until after removing any
29520Sstevel@tonic-gate * hment. Having x86_hm_enter() keeps that from proceeding.
29530Sstevel@tonic-gate */
29540Sstevel@tonic-gate htable_acquire(ht);
29550Sstevel@tonic-gate 
29560Sstevel@tonic-gate /*
29570Sstevel@tonic-gate * Invalidate the PTE and remove the hment.
29580Sstevel@tonic-gate */ 29593446Smrj old_pte = x86pte_inval(ht, entry, 0, NULL); 296047Sjosephb if (PTE2PFN(old_pte, ht->ht_level) != pfn) { 29613446Smrj panic("x86pte_inval() failure found PTE = " FMT_PTE 296247Sjosephb " pfn being unmapped is %lx ht=0x%lx entry=0x%x", 296347Sjosephb old_pte, pfn, (uintptr_t)ht, entry); 296447Sjosephb } 29650Sstevel@tonic-gate 29660Sstevel@tonic-gate /* 29670Sstevel@tonic-gate * Clean up all the htable information for this mapping 29680Sstevel@tonic-gate */ 29690Sstevel@tonic-gate ASSERT(ht->ht_valid_cnt > 0); 29700Sstevel@tonic-gate HTABLE_DEC(ht->ht_valid_cnt); 29710Sstevel@tonic-gate PGCNT_DEC(ht->ht_hat, ht->ht_level); 29720Sstevel@tonic-gate 29730Sstevel@tonic-gate /* 29740Sstevel@tonic-gate * sync ref/mod bits to the page_t 29750Sstevel@tonic-gate */ 29763446Smrj if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC) 29770Sstevel@tonic-gate hati_sync_pte_to_page(pp, old_pte, ht->ht_level); 29780Sstevel@tonic-gate 29790Sstevel@tonic-gate /* 29800Sstevel@tonic-gate * Remove the mapping list entry for this page. 29810Sstevel@tonic-gate */ 29820Sstevel@tonic-gate hm = hment_remove(pp, ht, entry); 29830Sstevel@tonic-gate 29840Sstevel@tonic-gate /* 29850Sstevel@tonic-gate * drop the mapping list lock so that we might free the 29860Sstevel@tonic-gate * hment and htable. 29870Sstevel@tonic-gate */ 29880Sstevel@tonic-gate x86_hm_exit(pp); 29890Sstevel@tonic-gate htable_release(ht); 29900Sstevel@tonic-gate return (hm); 29910Sstevel@tonic-gate } 29920Sstevel@tonic-gate 29931841Spraks extern int vpm_enable; 29940Sstevel@tonic-gate /* 29950Sstevel@tonic-gate * Unload all translations to a page. If the page is a subpage of a large 29960Sstevel@tonic-gate * page, the large page mappings are also removed. 29970Sstevel@tonic-gate * 29980Sstevel@tonic-gate * The forceflags are unused. 29990Sstevel@tonic-gate */ 30000Sstevel@tonic-gate 30010Sstevel@tonic-gate /*ARGSUSED*/ 30020Sstevel@tonic-gate static int 30030Sstevel@tonic-gate hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag) 30040Sstevel@tonic-gate { 30050Sstevel@tonic-gate page_t *cur_pp = pp; 30060Sstevel@tonic-gate hment_t *hm; 30070Sstevel@tonic-gate hment_t *prev; 30080Sstevel@tonic-gate htable_t *ht; 30090Sstevel@tonic-gate uint_t entry; 30100Sstevel@tonic-gate level_t level; 30110Sstevel@tonic-gate 30121841Spraks #if defined(__amd64) 30131841Spraks /* 30141841Spraks * clear the vpm ref. 30151841Spraks */ 30161841Spraks if (vpm_enable) { 30171841Spraks pp->p_vpmref = 0; 30181841Spraks } 30191841Spraks #endif 30200Sstevel@tonic-gate /* 30210Sstevel@tonic-gate * The loop with next_size handles pages with multiple pagesize mappings 30220Sstevel@tonic-gate */ 30230Sstevel@tonic-gate next_size: 30240Sstevel@tonic-gate for (;;) { 30250Sstevel@tonic-gate 30260Sstevel@tonic-gate /* 30270Sstevel@tonic-gate * Get a mapping list entry 30280Sstevel@tonic-gate */ 30290Sstevel@tonic-gate x86_hm_enter(cur_pp); 30300Sstevel@tonic-gate for (prev = NULL; ; prev = hm) { 30310Sstevel@tonic-gate hm = hment_walk(cur_pp, &ht, &entry, prev); 30320Sstevel@tonic-gate if (hm == NULL) { 30330Sstevel@tonic-gate x86_hm_exit(cur_pp); 30340Sstevel@tonic-gate 30350Sstevel@tonic-gate /* 30360Sstevel@tonic-gate * If not part of a larger page, we're done. 30370Sstevel@tonic-gate */ 30383446Smrj if (cur_pp->p_szc <= pg_szcd) { 30390Sstevel@tonic-gate return (0); 30403446Smrj } 30410Sstevel@tonic-gate 30420Sstevel@tonic-gate /* 30430Sstevel@tonic-gate * Else check the next larger page size. 
30440Sstevel@tonic-gate * hat_page_demote() may decrease p_szc
30450Sstevel@tonic-gate * but that's OK; we'll just take an extra
30460Sstevel@tonic-gate * trip, discover there are no larger mappings,
30470Sstevel@tonic-gate * and return.
30480Sstevel@tonic-gate */
30490Sstevel@tonic-gate ++pg_szcd;
30500Sstevel@tonic-gate cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
30510Sstevel@tonic-gate goto next_size;
30520Sstevel@tonic-gate }
30530Sstevel@tonic-gate 
30540Sstevel@tonic-gate /*
30550Sstevel@tonic-gate * If this mapping size matches, remove it.
30560Sstevel@tonic-gate */
30570Sstevel@tonic-gate level = ht->ht_level;
30580Sstevel@tonic-gate if (level == pg_szcd)
30590Sstevel@tonic-gate break;
30600Sstevel@tonic-gate }
30610Sstevel@tonic-gate 
30620Sstevel@tonic-gate /*
30630Sstevel@tonic-gate * Remove the mapping list entry for this page.
30640Sstevel@tonic-gate * Note this does the x86_hm_exit() for us.
30650Sstevel@tonic-gate */
30660Sstevel@tonic-gate hm = hati_page_unmap(cur_pp, ht, entry);
30670Sstevel@tonic-gate if (hm != NULL)
30680Sstevel@tonic-gate hment_free(hm);
30690Sstevel@tonic-gate }
30700Sstevel@tonic-gate }
30710Sstevel@tonic-gate 
30720Sstevel@tonic-gate int
30730Sstevel@tonic-gate hat_pageunload(struct page *pp, uint_t forceflag)
30740Sstevel@tonic-gate {
30750Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp));
30760Sstevel@tonic-gate return (hati_pageunload(pp, 0, forceflag));
30770Sstevel@tonic-gate }
30780Sstevel@tonic-gate 
30790Sstevel@tonic-gate /*
30800Sstevel@tonic-gate * Unload all large mappings to pp and reduce by 1 the p_szc field of every
30810Sstevel@tonic-gate * large page level that included pp.
30820Sstevel@tonic-gate *
30830Sstevel@tonic-gate * pp must be locked EXCL. Even though no other constituent pages are locked
30840Sstevel@tonic-gate * it's legal to unload large mappings to pp because all constituent pages of
30850Sstevel@tonic-gate * large locked mappings have to be locked SHARED; therefore if we have an EXCL
30860Sstevel@tonic-gate * lock on one of the constituent pages, none of the large mappings to pp are
30870Sstevel@tonic-gate * locked.
30880Sstevel@tonic-gate *
30890Sstevel@tonic-gate * Change (always decrease) the p_szc field starting from the last constituent
30900Sstevel@tonic-gate * page and ending with the root constituent page so that root's pszc always
30910Sstevel@tonic-gate * shows the area where hat_page_demote() may be active.
30920Sstevel@tonic-gate *
30930Sstevel@tonic-gate * This mechanism is only used for file system pages where it's not always
30940Sstevel@tonic-gate * possible to get EXCL locks on all constituent pages to demote the size code
30950Sstevel@tonic-gate * (as is done for anonymous or kernel large pages).
30960Sstevel@tonic-gate */
30970Sstevel@tonic-gate void
30980Sstevel@tonic-gate hat_page_demote(page_t *pp)
30990Sstevel@tonic-gate {
31000Sstevel@tonic-gate uint_t pszc;
31010Sstevel@tonic-gate uint_t rszc;
31020Sstevel@tonic-gate uint_t szc;
31030Sstevel@tonic-gate page_t *rootpp;
31040Sstevel@tonic-gate page_t *firstpp;
31050Sstevel@tonic-gate page_t *lastpp;
31060Sstevel@tonic-gate pgcnt_t pgcnt;
31070Sstevel@tonic-gate 
31080Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp));
31090Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp));
31100Sstevel@tonic-gate ASSERT(page_szc_lock_assert(pp));
31110Sstevel@tonic-gate 
31120Sstevel@tonic-gate if (pp->p_szc == 0)
31130Sstevel@tonic-gate return;
31140Sstevel@tonic-gate 
31150Sstevel@tonic-gate rootpp = PP_GROUPLEADER(pp, 1);
31160Sstevel@tonic-gate (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
31170Sstevel@tonic-gate 
31180Sstevel@tonic-gate /*
31190Sstevel@tonic-gate * all large mappings to pp are gone
31200Sstevel@tonic-gate * and no new ones can be set up since pp is locked exclusively.
31210Sstevel@tonic-gate *
31220Sstevel@tonic-gate * Lock the root to make sure there's only one hat_page_demote()
31230Sstevel@tonic-gate * outstanding within the area of this root's pszc.
31240Sstevel@tonic-gate *
31250Sstevel@tonic-gate * A second potential hat_page_demote() is already eliminated by the
31260Sstevel@tonic-gate * upper VM layer via page_szc_lock(), but we don't rely on it and use
31270Sstevel@tonic-gate * our own locking (so that upper layer locking can be changed without
31280Sstevel@tonic-gate * the hat depending on the upper layer VM to prevent multiple
31290Sstevel@tonic-gate * hat_page_demote() calls from being issued simultaneously to the same
31300Sstevel@tonic-gate * large page).
31310Sstevel@tonic-gate */
31320Sstevel@tonic-gate again:
31330Sstevel@tonic-gate pszc = pp->p_szc;
31340Sstevel@tonic-gate if (pszc == 0)
31350Sstevel@tonic-gate return;
31360Sstevel@tonic-gate rootpp = PP_GROUPLEADER(pp, pszc);
31370Sstevel@tonic-gate x86_hm_enter(rootpp);
31380Sstevel@tonic-gate /*
31390Sstevel@tonic-gate * If root's p_szc is different from pszc we raced with another
31400Sstevel@tonic-gate * hat_page_demote(). Drop the lock and try to find the root again.
31410Sstevel@tonic-gate * If root's p_szc is greater than pszc, a previous hat_page_demote()
31420Sstevel@tonic-gate * is not done yet. Take and release the mlist lock of root's root to
31430Sstevel@tonic-gate * wait for the previous hat_page_demote() to complete.
31440Sstevel@tonic-gate */
31450Sstevel@tonic-gate if ((rszc = rootpp->p_szc) != pszc) {
31460Sstevel@tonic-gate x86_hm_exit(rootpp);
31470Sstevel@tonic-gate if (rszc > pszc) {
31480Sstevel@tonic-gate /* p_szc of a locked non free page can't increase */
31490Sstevel@tonic-gate ASSERT(pp != rootpp);
31500Sstevel@tonic-gate 
31510Sstevel@tonic-gate rootpp = PP_GROUPLEADER(rootpp, rszc);
31520Sstevel@tonic-gate x86_hm_enter(rootpp);
31530Sstevel@tonic-gate x86_hm_exit(rootpp);
31540Sstevel@tonic-gate }
31550Sstevel@tonic-gate goto again;
31560Sstevel@tonic-gate }
31570Sstevel@tonic-gate ASSERT(pp->p_szc == pszc);
31580Sstevel@tonic-gate 
31590Sstevel@tonic-gate /*
31600Sstevel@tonic-gate * Decrement by 1 the p_szc of every constituent page of a region that
31610Sstevel@tonic-gate * covered pp. For example if the original szc is 3 it gets changed to 2
31620Sstevel@tonic-gate * everywhere except in region 2 that covered pp. The region 2 that
31630Sstevel@tonic-gate * covered pp gets demoted to 1 everywhere except in region 1 that
31640Sstevel@tonic-gate * covered pp. The region 1 that covered pp is demoted to region
31650Sstevel@tonic-gate * 0. It's done this way because from region 3 we removed level 3
31660Sstevel@tonic-gate * mappings, from region 2 that covered pp we removed level 2 mappings
31670Sstevel@tonic-gate * and from region 1 that covered pp we removed level 1 mappings. All
31680Sstevel@tonic-gate * changes are done from high pfns to low pfns so that roots
31690Sstevel@tonic-gate * are changed last, allowing one to know the largest region where
31700Sstevel@tonic-gate * hat_page_demote() is still active by only looking at the root page.
31710Sstevel@tonic-gate *
31720Sstevel@tonic-gate * This algorithm is implemented in 2 while loops. The first loop changes
31730Sstevel@tonic-gate * p_szc of pages to the right of pp's level 1 region and the second
31740Sstevel@tonic-gate * loop changes p_szc of pages of the level 1 region that covers pp
31750Sstevel@tonic-gate * and all pages to the left of the level 1 region that covers pp.
31760Sstevel@tonic-gate * In the first loop p_szc keeps dropping with every iteration
31770Sstevel@tonic-gate * and in the second loop it keeps increasing with every iteration.
31780Sstevel@tonic-gate *
31790Sstevel@tonic-gate * First loop description: Demote pages to the right of pp outside of
31800Sstevel@tonic-gate * the level 1 region that covers pp. In every iteration of the while
31810Sstevel@tonic-gate * loop below find the last page of the szc region and the first page of
31820Sstevel@tonic-gate * the (szc - 1) region that is immediately to the right of the (szc - 1)
31830Sstevel@tonic-gate * region that covers pp. From the last such page to the first such page
31840Sstevel@tonic-gate * change every page's szc to szc - 1. Decrement szc and continue
31850Sstevel@tonic-gate * looping until szc is 1. If pp belongs to the last (szc - 1) region
31860Sstevel@tonic-gate * of the szc region, skip to the next iteration.
31870Sstevel@tonic-gate */
31880Sstevel@tonic-gate szc = pszc;
31890Sstevel@tonic-gate while (szc > 1) {
31900Sstevel@tonic-gate lastpp = PP_GROUPLEADER(pp, szc);
31910Sstevel@tonic-gate pgcnt = page_get_pagecnt(szc);
31920Sstevel@tonic-gate lastpp += pgcnt - 1;
31930Sstevel@tonic-gate firstpp = PP_GROUPLEADER(pp, (szc - 1));
31940Sstevel@tonic-gate pgcnt = page_get_pagecnt(szc - 1);
31950Sstevel@tonic-gate if (lastpp - firstpp < pgcnt) {
31960Sstevel@tonic-gate szc--;
31970Sstevel@tonic-gate continue;
31980Sstevel@tonic-gate }
31990Sstevel@tonic-gate firstpp += pgcnt;
32000Sstevel@tonic-gate while (lastpp != firstpp) {
32010Sstevel@tonic-gate ASSERT(lastpp->p_szc == pszc);
32020Sstevel@tonic-gate lastpp->p_szc = szc - 1;
32030Sstevel@tonic-gate lastpp--;
32040Sstevel@tonic-gate }
32050Sstevel@tonic-gate firstpp->p_szc = szc - 1;
32060Sstevel@tonic-gate szc--;
32070Sstevel@tonic-gate }
32080Sstevel@tonic-gate 
32090Sstevel@tonic-gate /*
32100Sstevel@tonic-gate * Second loop description:
32110Sstevel@tonic-gate * The first iteration changes p_szc to 0 of every
32120Sstevel@tonic-gate * page of the level 1 region that covers pp.
32130Sstevel@tonic-gate * Subsequent iterations find the last page of the szc region
32140Sstevel@tonic-gate * immediately to the left of the szc region that covered pp
32150Sstevel@tonic-gate * and the first page of the (szc + 1) region that covers pp.
32160Sstevel@tonic-gate * From the last to the first page change p_szc of every page to szc.
32170Sstevel@tonic-gate * Increment szc and continue looping until szc is pszc.
32180Sstevel@tonic-gate * If pp belongs to the first szc region of the (szc + 1) region,
32190Sstevel@tonic-gate * skip to the next iteration.
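 *
 * A worked example (hypothetical, pszc == 2): the first loop runs once
 * with szc == 2 and sets p_szc to 1 on every page of the level 2 region
 * that lies to the right of pp's level 1 region. The second loop first
 * sets p_szc to 0 across pp's own level 1 region, then sets p_szc to 1
 * on the pages to its left, ending with the root page so that the
 * root's p_szc is the last to drop.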
32200Sstevel@tonic-gate * 32210Sstevel@tonic-gate */ 32220Sstevel@tonic-gate szc = 0; 32230Sstevel@tonic-gate while (szc < pszc) { 32240Sstevel@tonic-gate firstpp = PP_GROUPLEADER(pp, (szc + 1)); 32250Sstevel@tonic-gate if (szc == 0) { 32260Sstevel@tonic-gate pgcnt = page_get_pagecnt(1); 32270Sstevel@tonic-gate lastpp = firstpp + (pgcnt - 1); 32280Sstevel@tonic-gate } else { 32290Sstevel@tonic-gate lastpp = PP_GROUPLEADER(pp, szc); 32300Sstevel@tonic-gate if (firstpp == lastpp) { 32310Sstevel@tonic-gate szc++; 32320Sstevel@tonic-gate continue; 32330Sstevel@tonic-gate } 32340Sstevel@tonic-gate lastpp--; 32350Sstevel@tonic-gate pgcnt = page_get_pagecnt(szc); 32360Sstevel@tonic-gate } 32370Sstevel@tonic-gate while (lastpp != firstpp) { 32380Sstevel@tonic-gate ASSERT(lastpp->p_szc == pszc); 32390Sstevel@tonic-gate lastpp->p_szc = szc; 32400Sstevel@tonic-gate lastpp--; 32410Sstevel@tonic-gate } 32420Sstevel@tonic-gate firstpp->p_szc = szc; 32430Sstevel@tonic-gate if (firstpp == rootpp) 32440Sstevel@tonic-gate break; 32450Sstevel@tonic-gate szc++; 32460Sstevel@tonic-gate } 32470Sstevel@tonic-gate x86_hm_exit(rootpp); 32480Sstevel@tonic-gate } 32490Sstevel@tonic-gate 32500Sstevel@tonic-gate /* 32510Sstevel@tonic-gate * get hw stats from hardware into page struct and reset hw stats 32520Sstevel@tonic-gate * returns attributes of page 32530Sstevel@tonic-gate * Flags for hat_pagesync, hat_getstat, hat_sync 32540Sstevel@tonic-gate * 32550Sstevel@tonic-gate * define HAT_SYNC_ZERORM 0x01 32560Sstevel@tonic-gate * 32570Sstevel@tonic-gate * Additional flags for hat_pagesync 32580Sstevel@tonic-gate * 32590Sstevel@tonic-gate * define HAT_SYNC_STOPON_REF 0x02 32600Sstevel@tonic-gate * define HAT_SYNC_STOPON_MOD 0x04 32610Sstevel@tonic-gate * define HAT_SYNC_STOPON_RM 0x06 32620Sstevel@tonic-gate * define HAT_SYNC_STOPON_SHARED 0x08 32630Sstevel@tonic-gate */ 32640Sstevel@tonic-gate uint_t 32650Sstevel@tonic-gate hat_pagesync(struct page *pp, uint_t flags) 32660Sstevel@tonic-gate { 32670Sstevel@tonic-gate hment_t *hm = NULL; 32680Sstevel@tonic-gate htable_t *ht; 32690Sstevel@tonic-gate uint_t entry; 32700Sstevel@tonic-gate x86pte_t old, save_old; 32710Sstevel@tonic-gate x86pte_t new; 32720Sstevel@tonic-gate uchar_t nrmbits = P_REF|P_MOD|P_RO; 32730Sstevel@tonic-gate extern ulong_t po_share; 32740Sstevel@tonic-gate page_t *save_pp = pp; 32750Sstevel@tonic-gate uint_t pszc = 0; 32760Sstevel@tonic-gate 32770Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp) || panicstr); 32780Sstevel@tonic-gate 32790Sstevel@tonic-gate if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD)) 32800Sstevel@tonic-gate return (pp->p_nrm & nrmbits); 32810Sstevel@tonic-gate 32820Sstevel@tonic-gate if ((flags & HAT_SYNC_ZERORM) == 0) { 32830Sstevel@tonic-gate 32840Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp)) 32850Sstevel@tonic-gate return (pp->p_nrm & nrmbits); 32860Sstevel@tonic-gate 32870Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp)) 32880Sstevel@tonic-gate return (pp->p_nrm & nrmbits); 32890Sstevel@tonic-gate 32900Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_SHARED) != 0 && 32910Sstevel@tonic-gate hat_page_getshare(pp) > po_share) { 32920Sstevel@tonic-gate if (PP_ISRO(pp)) 32930Sstevel@tonic-gate PP_SETREF(pp); 32940Sstevel@tonic-gate return (pp->p_nrm & nrmbits); 32950Sstevel@tonic-gate } 32960Sstevel@tonic-gate } 32970Sstevel@tonic-gate 32980Sstevel@tonic-gate next_size: 32990Sstevel@tonic-gate /* 33000Sstevel@tonic-gate * walk thru the mapping list syncing (and clearing) ref/mod bits. 
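	 *
	 * For example, a (hypothetical) caller that wants the bits cleared
	 * but can stop early once a modified page is seen might use:
	 *
	 *	rm = hat_pagesync(pp, HAT_SYNC_ZERORM | HAT_SYNC_STOPON_MOD);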
33010Sstevel@tonic-gate */ 33020Sstevel@tonic-gate x86_hm_enter(pp); 33030Sstevel@tonic-gate while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) { 33040Sstevel@tonic-gate if (ht->ht_level < pszc) 33050Sstevel@tonic-gate continue; 33060Sstevel@tonic-gate old = x86pte_get(ht, entry); 33070Sstevel@tonic-gate try_again: 33080Sstevel@tonic-gate 33090Sstevel@tonic-gate ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum); 33100Sstevel@tonic-gate 33110Sstevel@tonic-gate if (PTE_GET(old, PT_REF | PT_MOD) == 0) 33120Sstevel@tonic-gate continue; 33130Sstevel@tonic-gate 33140Sstevel@tonic-gate save_old = old; 33150Sstevel@tonic-gate if ((flags & HAT_SYNC_ZERORM) != 0) { 33160Sstevel@tonic-gate 33170Sstevel@tonic-gate /* 33180Sstevel@tonic-gate * Need to clear ref or mod bits. Need to demap 33190Sstevel@tonic-gate * to make sure any executing TLBs see cleared bits. 33200Sstevel@tonic-gate */ 33210Sstevel@tonic-gate new = old; 33220Sstevel@tonic-gate PTE_CLR(new, PT_REF | PT_MOD); 33230Sstevel@tonic-gate old = hati_update_pte(ht, entry, old, new); 33240Sstevel@tonic-gate if (old != 0) 33250Sstevel@tonic-gate goto try_again; 33260Sstevel@tonic-gate 33270Sstevel@tonic-gate old = save_old; 33280Sstevel@tonic-gate } 33290Sstevel@tonic-gate 33300Sstevel@tonic-gate /* 33310Sstevel@tonic-gate * Sync the PTE 33320Sstevel@tonic-gate */ 33333446Smrj if (!(flags & HAT_SYNC_ZERORM) && 33343446Smrj PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC) 33350Sstevel@tonic-gate hati_sync_pte_to_page(pp, old, ht->ht_level); 33360Sstevel@tonic-gate 33370Sstevel@tonic-gate /* 33380Sstevel@tonic-gate * can stop short if we found a ref'd or mod'd page 33390Sstevel@tonic-gate */ 33400Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) || 33410Sstevel@tonic-gate (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) { 33420Sstevel@tonic-gate x86_hm_exit(pp); 33433446Smrj goto done; 33440Sstevel@tonic-gate } 33450Sstevel@tonic-gate } 33460Sstevel@tonic-gate x86_hm_exit(pp); 33470Sstevel@tonic-gate while (pszc < pp->p_szc) { 33480Sstevel@tonic-gate page_t *tpp; 33490Sstevel@tonic-gate pszc++; 33500Sstevel@tonic-gate tpp = PP_GROUPLEADER(pp, pszc); 33510Sstevel@tonic-gate if (pp != tpp) { 33520Sstevel@tonic-gate pp = tpp; 33530Sstevel@tonic-gate goto next_size; 33540Sstevel@tonic-gate } 33550Sstevel@tonic-gate } 33563446Smrj done: 33570Sstevel@tonic-gate return (save_pp->p_nrm & nrmbits); 33580Sstevel@tonic-gate } 33590Sstevel@tonic-gate 33600Sstevel@tonic-gate /* 33610Sstevel@tonic-gate * returns approx number of mappings to this pp. A return of 0 implies 33620Sstevel@tonic-gate * there are no mappings to the page. 
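 *
 * For example, hat_pagesync() above compares this count against po_share
 * when HAT_SYNC_STOPON_SHARED is set:
 *
 *	if (hat_page_getshare(pp) > po_share)
 *		... treat the page as highly shared ...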
33630Sstevel@tonic-gate */ 33640Sstevel@tonic-gate ulong_t 33650Sstevel@tonic-gate hat_page_getshare(page_t *pp) 33660Sstevel@tonic-gate { 33670Sstevel@tonic-gate uint_t cnt; 33680Sstevel@tonic-gate cnt = hment_mapcnt(pp); 33691841Spraks #if defined(__amd64) 33701841Spraks if (vpm_enable && pp->p_vpmref) { 33711841Spraks cnt += 1; 33721841Spraks } 33731841Spraks #endif 33740Sstevel@tonic-gate return (cnt); 33750Sstevel@tonic-gate } 33760Sstevel@tonic-gate 33770Sstevel@tonic-gate /* 33780Sstevel@tonic-gate * hat_softlock isn't supported anymore 33790Sstevel@tonic-gate */ 33800Sstevel@tonic-gate /*ARGSUSED*/ 33810Sstevel@tonic-gate faultcode_t 33820Sstevel@tonic-gate hat_softlock( 33830Sstevel@tonic-gate hat_t *hat, 33840Sstevel@tonic-gate caddr_t addr, 33850Sstevel@tonic-gate size_t *len, 33860Sstevel@tonic-gate struct page **page_array, 33870Sstevel@tonic-gate uint_t flags) 33880Sstevel@tonic-gate { 33890Sstevel@tonic-gate return (FC_NOSUPPORT); 33900Sstevel@tonic-gate } 33910Sstevel@tonic-gate 33920Sstevel@tonic-gate 33930Sstevel@tonic-gate 33940Sstevel@tonic-gate /* 33950Sstevel@tonic-gate * Routine to expose supported HAT features to platform independent code. 33960Sstevel@tonic-gate */ 33970Sstevel@tonic-gate /*ARGSUSED*/ 33980Sstevel@tonic-gate int 33990Sstevel@tonic-gate hat_supported(enum hat_features feature, void *arg) 34000Sstevel@tonic-gate { 34010Sstevel@tonic-gate switch (feature) { 34020Sstevel@tonic-gate 34030Sstevel@tonic-gate case HAT_SHARED_PT: /* this is really ISM */ 34040Sstevel@tonic-gate return (1); 34050Sstevel@tonic-gate 34060Sstevel@tonic-gate case HAT_DYNAMIC_ISM_UNMAP: 34070Sstevel@tonic-gate return (0); 34080Sstevel@tonic-gate 34090Sstevel@tonic-gate case HAT_VMODSORT: 34100Sstevel@tonic-gate return (1); 34110Sstevel@tonic-gate 34120Sstevel@tonic-gate default: 34130Sstevel@tonic-gate panic("hat_supported() - unknown feature"); 34140Sstevel@tonic-gate } 34150Sstevel@tonic-gate return (0); 34160Sstevel@tonic-gate } 34170Sstevel@tonic-gate 34180Sstevel@tonic-gate /* 34190Sstevel@tonic-gate * Called when a thread is exiting and has been switched to the kernel AS 34200Sstevel@tonic-gate */ 34210Sstevel@tonic-gate void 34220Sstevel@tonic-gate hat_thread_exit(kthread_t *thd) 34230Sstevel@tonic-gate { 34240Sstevel@tonic-gate ASSERT(thd->t_procp->p_as == &kas); 34250Sstevel@tonic-gate hat_switch(thd->t_procp->p_as->a_hat); 34260Sstevel@tonic-gate } 34270Sstevel@tonic-gate 34280Sstevel@tonic-gate /* 34290Sstevel@tonic-gate * Setup the given brand new hat structure as the new HAT on this cpu's mmu. 34300Sstevel@tonic-gate */ 34310Sstevel@tonic-gate /*ARGSUSED*/ 34320Sstevel@tonic-gate void 34330Sstevel@tonic-gate hat_setup(hat_t *hat, int flags) 34340Sstevel@tonic-gate { 34350Sstevel@tonic-gate kpreempt_disable(); 34360Sstevel@tonic-gate 34370Sstevel@tonic-gate hat_switch(hat); 34380Sstevel@tonic-gate 34390Sstevel@tonic-gate kpreempt_enable(); 34400Sstevel@tonic-gate } 34410Sstevel@tonic-gate 34420Sstevel@tonic-gate /* 34430Sstevel@tonic-gate * Prepare for a CPU private mapping for the given address. 34440Sstevel@tonic-gate * 34450Sstevel@tonic-gate * The address can only be used from a single CPU and can be remapped 34460Sstevel@tonic-gate * using hat_mempte_remap(). Return the address of the PTE. 34470Sstevel@tonic-gate * 34480Sstevel@tonic-gate * We do the htable_create() if necessary and increment the valid count so 34490Sstevel@tonic-gate * the htable can't disappear. 
The returned hat_mempte_t is the PTE's physical
34500Sstevel@tonic-gate * address; hat_mempte_remap() maps it in briefly whenever it must be updated.
34510Sstevel@tonic-gate */
34523446Smrj hat_mempte_t
34533446Smrj hat_mempte_setup(caddr_t addr)
34540Sstevel@tonic-gate {
34550Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr;
34560Sstevel@tonic-gate htable_t *ht;
34570Sstevel@tonic-gate uint_t entry;
34580Sstevel@tonic-gate x86pte_t oldpte;
34593446Smrj hat_mempte_t p;
34600Sstevel@tonic-gate 
34610Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(va));
34620Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va));
34634004Sjosephb ++curthread->t_hatdepth;
34640Sstevel@tonic-gate ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
34650Sstevel@tonic-gate if (ht == NULL) {
34660Sstevel@tonic-gate ht = htable_create(kas.a_hat, va, 0, NULL);
34670Sstevel@tonic-gate entry = htable_va2entry(va, ht);
34680Sstevel@tonic-gate ASSERT(ht->ht_level == 0);
34690Sstevel@tonic-gate oldpte = x86pte_get(ht, entry);
34700Sstevel@tonic-gate }
34710Sstevel@tonic-gate if (PTE_ISVALID(oldpte))
34720Sstevel@tonic-gate panic("hat_mempte_setup(): address already mapped"
34730Sstevel@tonic-gate " ht=%p, entry=%d, pte=" FMT_PTE, ht, entry, oldpte);
34740Sstevel@tonic-gate 
34750Sstevel@tonic-gate /*
34760Sstevel@tonic-gate * increment ht_valid_cnt so that the pagetable can't disappear
34770Sstevel@tonic-gate */
34780Sstevel@tonic-gate HTABLE_INC(ht->ht_valid_cnt);
34790Sstevel@tonic-gate 
34800Sstevel@tonic-gate /*
34813446Smrj * return the PTE physical address to the caller.
34820Sstevel@tonic-gate */
34830Sstevel@tonic-gate htable_release(ht);
34843446Smrj p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
34854004Sjosephb --curthread->t_hatdepth;
34863446Smrj return (p);
34870Sstevel@tonic-gate }
34880Sstevel@tonic-gate 
34890Sstevel@tonic-gate /*
34900Sstevel@tonic-gate * Release a CPU private mapping for the given address.
34910Sstevel@tonic-gate * We decrement the htable valid count so it might be destroyed.
34920Sstevel@tonic-gate */
34933446Smrj /*ARGSUSED1*/
34940Sstevel@tonic-gate void
34953446Smrj hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
34960Sstevel@tonic-gate {
34970Sstevel@tonic-gate htable_t *ht;
34980Sstevel@tonic-gate 
34990Sstevel@tonic-gate /*
35003446Smrj * invalidate any left over mapping and decrement the htable valid count
35010Sstevel@tonic-gate */
35023446Smrj {
35033446Smrj x86pte_t *pteptr;
35043446Smrj 
35053446Smrj pteptr = x86pte_mapin(mmu_btop(pte_pa),
35063446Smrj (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
35073446Smrj if (mmu.pae_hat)
35083446Smrj *pteptr = 0;
35093446Smrj else
35103446Smrj *(x86pte32_t *)pteptr = 0;
35113446Smrj mmu_tlbflush_entry(addr);
35123446Smrj x86pte_mapout();
35133446Smrj }
35143446Smrj 
35150Sstevel@tonic-gate ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
35160Sstevel@tonic-gate if (ht == NULL)
35170Sstevel@tonic-gate panic("hat_mempte_release(): invalid address");
35180Sstevel@tonic-gate ASSERT(ht->ht_level == 0);
35190Sstevel@tonic-gate HTABLE_DEC(ht->ht_valid_cnt);
35200Sstevel@tonic-gate htable_release(ht);
35210Sstevel@tonic-gate }
35220Sstevel@tonic-gate 
35230Sstevel@tonic-gate /*
35240Sstevel@tonic-gate * Apply a temporary CPU private mapping to a page. We flush the TLB only
35250Sstevel@tonic-gate * on this CPU, so this ought to have been called with preemption disabled.
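 *
 * A minimal life cycle sketch (hypothetical caller and variable names):
 *
 *	hat_mempte_t pte_pa = hat_mempte_setup(cpu_private_va);
 *	...
 *	kpreempt_disable();
 *	hat_mempte_remap(pfn, cpu_private_va, pte_pa, attr, flags);
 *	... access the page through cpu_private_va ...
 *	kpreempt_enable();
 *	...
 *	hat_mempte_release(cpu_private_va, pte_pa);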
35260Sstevel@tonic-gate */
35270Sstevel@tonic-gate void
35280Sstevel@tonic-gate hat_mempte_remap(
35293446Smrj pfn_t pfn,
35303446Smrj caddr_t addr,
35313446Smrj hat_mempte_t pte_pa,
35323446Smrj uint_t attr,
35333446Smrj uint_t flags)
35340Sstevel@tonic-gate {
35350Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr;
35360Sstevel@tonic-gate x86pte_t pte;
35370Sstevel@tonic-gate 
35380Sstevel@tonic-gate /*
35390Sstevel@tonic-gate * Remap the given PTE to the new page's PFN. Invalidate only
35400Sstevel@tonic-gate * on this CPU.
35410Sstevel@tonic-gate */
35420Sstevel@tonic-gate #ifdef DEBUG
35430Sstevel@tonic-gate htable_t *ht;
35440Sstevel@tonic-gate uint_t entry;
35450Sstevel@tonic-gate 
35460Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(va));
35470Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va));
35480Sstevel@tonic-gate ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
35490Sstevel@tonic-gate ASSERT(ht != NULL);
35500Sstevel@tonic-gate ASSERT(ht->ht_level == 0);
35510Sstevel@tonic-gate ASSERT(ht->ht_valid_cnt > 0);
35523446Smrj ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
35530Sstevel@tonic-gate htable_release(ht);
35540Sstevel@tonic-gate #endif
35550Sstevel@tonic-gate pte = hati_mkpte(pfn, attr, 0, flags);
35563446Smrj {
35573446Smrj x86pte_t *pteptr;
35583446Smrj 
35593446Smrj pteptr = x86pte_mapin(mmu_btop(pte_pa),
35603446Smrj (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
35613446Smrj if (mmu.pae_hat)
35623446Smrj *(x86pte_t *)pteptr = pte;
35633446Smrj else
35643446Smrj *(x86pte32_t *)pteptr = (x86pte32_t)pte;
35653446Smrj mmu_tlbflush_entry(addr);
35663446Smrj x86pte_mapout();
35673446Smrj }
35680Sstevel@tonic-gate }
35690Sstevel@tonic-gate 
35700Sstevel@tonic-gate 
35710Sstevel@tonic-gate 
35720Sstevel@tonic-gate /*
35730Sstevel@tonic-gate * Hat locking functions
35740Sstevel@tonic-gate * XXX - these two functions are currently being used by hatstats;
35750Sstevel@tonic-gate * they can be removed by using a per-as mutex for hatstats.
35760Sstevel@tonic-gate */
35770Sstevel@tonic-gate void
35780Sstevel@tonic-gate hat_enter(hat_t *hat)
35790Sstevel@tonic-gate {
35800Sstevel@tonic-gate mutex_enter(&hat->hat_mutex);
35810Sstevel@tonic-gate }
35820Sstevel@tonic-gate 
35830Sstevel@tonic-gate void
35840Sstevel@tonic-gate hat_exit(hat_t *hat)
35850Sstevel@tonic-gate {
35860Sstevel@tonic-gate mutex_exit(&hat->hat_mutex);
35870Sstevel@tonic-gate }
35880Sstevel@tonic-gate 
35890Sstevel@tonic-gate /*
35903446Smrj * HAT part of cpu initialization.
35910Sstevel@tonic-gate */
35920Sstevel@tonic-gate void
35930Sstevel@tonic-gate hat_cpu_online(struct cpu *cpup)
35940Sstevel@tonic-gate {
35950Sstevel@tonic-gate if (cpup != CPU) {
35963446Smrj x86pte_cpu_init(cpup);
35970Sstevel@tonic-gate hat_vlp_setup(cpup);
35980Sstevel@tonic-gate }
35990Sstevel@tonic-gate CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
36000Sstevel@tonic-gate }
36010Sstevel@tonic-gate 
36020Sstevel@tonic-gate /*
36033446Smrj * HAT part of cpu deletion.
36043446Smrj * (currently, we only call this after the cpu is safely passivated.)
36053446Smrj */
36063446Smrj void
36073446Smrj hat_cpu_offline(struct cpu *cpup)
36083446Smrj {
36093446Smrj ASSERT(cpup != CPU);
36103446Smrj 
36113446Smrj CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
36123446Smrj x86pte_cpu_fini(cpup);
36133446Smrj hat_vlp_teardown(cpup);
36143446Smrj }
36153446Smrj 
36163446Smrj /*
36170Sstevel@tonic-gate * Function called after all CPUs are brought online.
36180Sstevel@tonic-gate * Used to remove low address boot mappings.
36190Sstevel@tonic-gate */
36200Sstevel@tonic-gate void
36210Sstevel@tonic-gate clear_boot_mappings(uintptr_t low, uintptr_t high)
36220Sstevel@tonic-gate {
36230Sstevel@tonic-gate uintptr_t vaddr = low;
36240Sstevel@tonic-gate htable_t *ht = NULL;
36250Sstevel@tonic-gate level_t level;
36260Sstevel@tonic-gate uint_t entry;
36270Sstevel@tonic-gate x86pte_t pte;
36280Sstevel@tonic-gate 
36290Sstevel@tonic-gate /*
36300Sstevel@tonic-gate * On the 1st CPU we can unload the prom mappings; basically we blow
36313446Smrj * away all virtual mappings under _userlimit.
36320Sstevel@tonic-gate */
36330Sstevel@tonic-gate while (vaddr < high) {
36340Sstevel@tonic-gate pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
36350Sstevel@tonic-gate if (ht == NULL)
36360Sstevel@tonic-gate break;
36370Sstevel@tonic-gate 
36380Sstevel@tonic-gate level = ht->ht_level;
36390Sstevel@tonic-gate entry = htable_va2entry(vaddr, ht);
36400Sstevel@tonic-gate ASSERT(level <= mmu.max_page_level);
36410Sstevel@tonic-gate ASSERT(PTE_ISPAGE(pte, level));
36420Sstevel@tonic-gate 
36430Sstevel@tonic-gate /*
36440Sstevel@tonic-gate * Unload the mapping from the page tables.
36450Sstevel@tonic-gate */
36463446Smrj (void) x86pte_inval(ht, entry, 0, NULL);
36470Sstevel@tonic-gate ASSERT(ht->ht_valid_cnt > 0);
36480Sstevel@tonic-gate HTABLE_DEC(ht->ht_valid_cnt);
36490Sstevel@tonic-gate PGCNT_DEC(ht->ht_hat, ht->ht_level);
36500Sstevel@tonic-gate 
36510Sstevel@tonic-gate vaddr += LEVEL_SIZE(ht->ht_level);
36520Sstevel@tonic-gate }
36530Sstevel@tonic-gate if (ht)
36540Sstevel@tonic-gate htable_release(ht);
36550Sstevel@tonic-gate }
36560Sstevel@tonic-gate 
36570Sstevel@tonic-gate /*
36580Sstevel@tonic-gate * Atomically update a new translation for a single page. If the
36590Sstevel@tonic-gate * currently installed PTE doesn't match the value we expect to find,
36600Sstevel@tonic-gate * it's not updated and we return the PTE we found.
36610Sstevel@tonic-gate *
36620Sstevel@tonic-gate * If activating nosync or NOWRITE and the page was modified we need to sync
36630Sstevel@tonic-gate * with the page_t. Also sync with page_t if clearing ref/mod bits.
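 *
 * Callers follow a compare-and-swap retry pattern; a sketch based on
 * hati_page_clrwrt() above:
 *
 *	old = x86pte_get(ht, entry);
 *	new = old;
 *	PTE_CLR(new, PT_REF | PT_MOD);
 *	old = hati_update_pte(ht, entry, old, new);
 *	if (old != 0)
 *		... retry with the PTE value just returned ...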
36640Sstevel@tonic-gate */ 36650Sstevel@tonic-gate static x86pte_t 36660Sstevel@tonic-gate hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new) 36670Sstevel@tonic-gate { 36680Sstevel@tonic-gate page_t *pp; 36690Sstevel@tonic-gate uint_t rm = 0; 36700Sstevel@tonic-gate x86pte_t replaced; 36710Sstevel@tonic-gate 36723446Smrj if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC && 36730Sstevel@tonic-gate PTE_GET(expected, PT_MOD | PT_REF) && 36740Sstevel@tonic-gate (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) || 36750Sstevel@tonic-gate !PTE_GET(new, PT_MOD | PT_REF))) { 36760Sstevel@tonic-gate 36773446Smrj ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level))); 36780Sstevel@tonic-gate pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level)); 36790Sstevel@tonic-gate ASSERT(pp != NULL); 36800Sstevel@tonic-gate if (PTE_GET(expected, PT_MOD)) 36810Sstevel@tonic-gate rm |= P_MOD; 36820Sstevel@tonic-gate if (PTE_GET(expected, PT_REF)) 36830Sstevel@tonic-gate rm |= P_REF; 36840Sstevel@tonic-gate PTE_CLR(new, PT_MOD | PT_REF); 36850Sstevel@tonic-gate } 36860Sstevel@tonic-gate 36870Sstevel@tonic-gate replaced = x86pte_update(ht, entry, expected, new); 36880Sstevel@tonic-gate if (replaced != expected) 36890Sstevel@tonic-gate return (replaced); 36900Sstevel@tonic-gate 36910Sstevel@tonic-gate if (rm) { 36920Sstevel@tonic-gate /* 36930Sstevel@tonic-gate * sync to all constituent pages of a large page 36940Sstevel@tonic-gate */ 36950Sstevel@tonic-gate pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level); 36960Sstevel@tonic-gate ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt)); 36970Sstevel@tonic-gate while (pgcnt-- > 0) { 36980Sstevel@tonic-gate /* 36990Sstevel@tonic-gate * hat_page_demote() can't decrease 37000Sstevel@tonic-gate * pszc below this mapping size 37010Sstevel@tonic-gate * since large mapping existed after we 37020Sstevel@tonic-gate * took mlist lock. 37030Sstevel@tonic-gate */ 37040Sstevel@tonic-gate ASSERT(pp->p_szc >= ht->ht_level); 37050Sstevel@tonic-gate hat_page_setattr(pp, rm); 37060Sstevel@tonic-gate ++pp; 37070Sstevel@tonic-gate } 37080Sstevel@tonic-gate } 37090Sstevel@tonic-gate 37100Sstevel@tonic-gate return (0); 37110Sstevel@tonic-gate } 37120Sstevel@tonic-gate 37130Sstevel@tonic-gate /* 37140Sstevel@tonic-gate * Kernel Physical Mapping (kpm) facility 37150Sstevel@tonic-gate * 37160Sstevel@tonic-gate * Most of the routines needed to support segkpm are almost no-ops on the 37170Sstevel@tonic-gate * x86 platform. We map in the entire segment when it is created and leave 37180Sstevel@tonic-gate * it mapped in, so there is no additional work required to set up and tear 37190Sstevel@tonic-gate * down individual mappings. All of these routines were created to support 37200Sstevel@tonic-gate * SPARC platforms that have to avoid aliasing in their virtually indexed 37210Sstevel@tonic-gate * caches. 37220Sstevel@tonic-gate * 37230Sstevel@tonic-gate * Most of the routines have sanity checks in them (e.g. verifying that the 37240Sstevel@tonic-gate * passed-in page is locked). We don't actually care about most of these 37250Sstevel@tonic-gate * checks on x86, but we leave them in place to identify problems in the 37260Sstevel@tonic-gate * upper levels. 37270Sstevel@tonic-gate */ 37280Sstevel@tonic-gate 37290Sstevel@tonic-gate /* 37300Sstevel@tonic-gate * Map in a locked page and return the vaddr. 
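 *
 * A typical (hypothetical) caller pairs this with hat_kpm_mapout():
 *
 *	vaddr = hat_kpm_mapin(pp, NULL);
 *	... access the locked page through vaddr ...
 *	hat_kpm_mapout(pp, NULL, vaddr);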
37310Sstevel@tonic-gate */ 37320Sstevel@tonic-gate /*ARGSUSED*/ 37330Sstevel@tonic-gate caddr_t 37340Sstevel@tonic-gate hat_kpm_mapin(struct page *pp, struct kpme *kpme) 37350Sstevel@tonic-gate { 37360Sstevel@tonic-gate caddr_t vaddr; 37370Sstevel@tonic-gate 37380Sstevel@tonic-gate #ifdef DEBUG 37390Sstevel@tonic-gate if (kpm_enable == 0) { 37400Sstevel@tonic-gate cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n"); 37410Sstevel@tonic-gate return ((caddr_t)NULL); 37420Sstevel@tonic-gate } 37430Sstevel@tonic-gate 37440Sstevel@tonic-gate if (pp == NULL || PAGE_LOCKED(pp) == 0) { 37450Sstevel@tonic-gate cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n"); 37460Sstevel@tonic-gate return ((caddr_t)NULL); 37470Sstevel@tonic-gate } 37480Sstevel@tonic-gate #endif 37490Sstevel@tonic-gate 37500Sstevel@tonic-gate vaddr = hat_kpm_page2va(pp, 1); 37510Sstevel@tonic-gate 37520Sstevel@tonic-gate return (vaddr); 37530Sstevel@tonic-gate } 37540Sstevel@tonic-gate 37550Sstevel@tonic-gate /* 37560Sstevel@tonic-gate * Mapout a locked page. 37570Sstevel@tonic-gate */ 37580Sstevel@tonic-gate /*ARGSUSED*/ 37590Sstevel@tonic-gate void 37600Sstevel@tonic-gate hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr) 37610Sstevel@tonic-gate { 37620Sstevel@tonic-gate #ifdef DEBUG 37630Sstevel@tonic-gate if (kpm_enable == 0) { 37640Sstevel@tonic-gate cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n"); 37650Sstevel@tonic-gate return; 37660Sstevel@tonic-gate } 37670Sstevel@tonic-gate 37680Sstevel@tonic-gate if (IS_KPM_ADDR(vaddr) == 0) { 37690Sstevel@tonic-gate cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n"); 37700Sstevel@tonic-gate return; 37710Sstevel@tonic-gate } 37720Sstevel@tonic-gate 37730Sstevel@tonic-gate if (pp == NULL || PAGE_LOCKED(pp) == 0) { 37740Sstevel@tonic-gate cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n"); 37750Sstevel@tonic-gate return; 37760Sstevel@tonic-gate } 37770Sstevel@tonic-gate #endif 37780Sstevel@tonic-gate } 37790Sstevel@tonic-gate 37800Sstevel@tonic-gate /* 37810Sstevel@tonic-gate * Return the kpm virtual address for a specific pfn 37820Sstevel@tonic-gate */ 37830Sstevel@tonic-gate caddr_t 37840Sstevel@tonic-gate hat_kpm_pfn2va(pfn_t pfn) 37850Sstevel@tonic-gate { 37863446Smrj uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn); 37870Sstevel@tonic-gate 37880Sstevel@tonic-gate return ((caddr_t)vaddr); 37890Sstevel@tonic-gate } 37900Sstevel@tonic-gate 37910Sstevel@tonic-gate /* 37920Sstevel@tonic-gate * Return the kpm virtual address for the page at pp. 37930Sstevel@tonic-gate */ 37940Sstevel@tonic-gate /*ARGSUSED*/ 37950Sstevel@tonic-gate caddr_t 37960Sstevel@tonic-gate hat_kpm_page2va(struct page *pp, int checkswap) 37970Sstevel@tonic-gate { 37980Sstevel@tonic-gate return (hat_kpm_pfn2va(pp->p_pagenum)); 37990Sstevel@tonic-gate } 38000Sstevel@tonic-gate 38010Sstevel@tonic-gate /* 38020Sstevel@tonic-gate * Return the page frame number for the kpm virtual address vaddr. 38030Sstevel@tonic-gate */ 38040Sstevel@tonic-gate pfn_t 38050Sstevel@tonic-gate hat_kpm_va2pfn(caddr_t vaddr) 38060Sstevel@tonic-gate { 38070Sstevel@tonic-gate pfn_t pfn; 38080Sstevel@tonic-gate 38090Sstevel@tonic-gate ASSERT(IS_KPM_ADDR(vaddr)); 38100Sstevel@tonic-gate 38110Sstevel@tonic-gate pfn = (pfn_t)btop(vaddr - kpm_vbase); 38120Sstevel@tonic-gate 38130Sstevel@tonic-gate return (pfn); 38140Sstevel@tonic-gate } 38150Sstevel@tonic-gate 38160Sstevel@tonic-gate 38170Sstevel@tonic-gate /* 38180Sstevel@tonic-gate * Return the page for the kpm virtual address vaddr. 
38190Sstevel@tonic-gate */
38200Sstevel@tonic-gate page_t *
38210Sstevel@tonic-gate hat_kpm_vaddr2page(caddr_t vaddr)
38220Sstevel@tonic-gate {
38230Sstevel@tonic-gate pfn_t pfn;
38240Sstevel@tonic-gate 
38250Sstevel@tonic-gate ASSERT(IS_KPM_ADDR(vaddr));
38260Sstevel@tonic-gate 
38270Sstevel@tonic-gate pfn = hat_kpm_va2pfn(vaddr);
38280Sstevel@tonic-gate 
38290Sstevel@tonic-gate return (page_numtopp_nolock(pfn));
38300Sstevel@tonic-gate }
38310Sstevel@tonic-gate 
38320Sstevel@tonic-gate /*
38330Sstevel@tonic-gate * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
38340Sstevel@tonic-gate * KPM page. This should never happen on x86.
38350Sstevel@tonic-gate */
38360Sstevel@tonic-gate int
38370Sstevel@tonic-gate hat_kpm_fault(hat_t *hat, caddr_t vaddr)
38380Sstevel@tonic-gate {
38390Sstevel@tonic-gate panic("pagefault in seg_kpm. hat: 0x%p vaddr: 0x%p", hat, vaddr);
38400Sstevel@tonic-gate 
38410Sstevel@tonic-gate return (0);
38420Sstevel@tonic-gate }
38430Sstevel@tonic-gate 
38440Sstevel@tonic-gate /*ARGSUSED*/
38450Sstevel@tonic-gate void
38460Sstevel@tonic-gate hat_kpm_mseghash_clear(int nentries)
38470Sstevel@tonic-gate {}
38480Sstevel@tonic-gate 
38490Sstevel@tonic-gate /*ARGSUSED*/
38500Sstevel@tonic-gate void
38510Sstevel@tonic-gate hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
38520Sstevel@tonic-gate {}