/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */


/*
 * VM - Hardware Address Translation management for i386 and amd64
 *
 * Implementation of the interfaces described in <common/vm/hat.h>
 *
 * Nearly all the details of how the hardware is managed should not be
 * visible outside this layer except for misc. machine specific functions
 * that work in conjunction with this code.
 *
 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
 */

#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/shm.h>
#include <sys/sysmacros.h>
#include <sys/machparam.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/controlregs.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/bootinfo.h>
#include <sys/archsystm.h>

#include <vm/seg_kmem.h>
#include <vm/hat_i86.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/page.h>
#include <vm/seg_kp.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <vm/kboot_mmu.h>
#include <vm/seg_spt.h>

#include <sys/cmn_err.h>

/*
 * Basic parameters for hat operation.
 */
struct hat_mmu_info mmu;

/*
 * The page that is the kernel's top level pagetable.
 *
 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
 * on this 4K page for its top level page table. The remaining groups of
 * 4 entries are used for per processor copies of user VLP pagetables for
 * running threads. See hat_switch() and reload_pae32() for details.
 *
 *	vlp_page[0..3]  - level==2 PTEs for kernel HAT
 *	vlp_page[4..7]  - level==2 PTEs for user thread on cpu 0
 *	vlp_page[8..11] - level==2 PTEs for user thread on cpu 1
 *	etc...
 */
static x86pte_t *vlp_page;

/*
 * forward declaration of internal utility routines
 */
static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
	x86pte_t new);

/*
 * The kernel address space exists in all HATs. To implement this the
 * kernel reserves a fixed number of entries in the topmost level(s) of page
 * tables. The values are set up during startup and then copied to every user
 * hat created by hat_alloc(). This means that kernelbase must be:
 *
 *	  4Meg aligned for 32 bit kernels
 *	512Gig aligned for x86_64 64 bit kernels
 *
 * The hat_kernel_range_ts describe what needs to be copied from kernel hat
 * to each user hat.
 */
typedef struct hat_kernel_range {
	level_t		hkr_level;
	uintptr_t	hkr_start_va;
	uintptr_t	hkr_end_va;	/* zero means to end of memory */
} hat_kernel_range_t;
#define	NUM_KERNEL_RANGE 2
static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
static int num_kernel_ranges;
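
/*
 * num_kernel_ranges and kernel_ranges[] are populated during startup by
 * hat_init_finish(), via the NEXT_HKR() helper below, before any user
 * hats are created.
 */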

uint_t use_boot_reserve = 1;	/* cleared after early boot process */
uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */

/*
 * enable_1gpg: controls 1g page support for user applications.
 * By default, 1g pages are exported to user applications. enable_1gpg can
 * be set to 0 to not export.
 */
int	enable_1gpg = 1;

/*
 * AMD Shanghai processors provide better management of 1gb ptes in their
 * TLBs. By default, 1g page support will be disabled for pre-Shanghai AMD
 * processors that don't have optimal tlb support for the 1g page size.
 * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
 * processors.
 */
int	chk_optimal_1gtlb = 1;


#ifdef DEBUG
uint_t	map1gcnt;
#endif


/*
 * A cpuset for all cpus. This is used for kernel address cross calls, since
 * the kernel addresses apply to all cpus.
 */
cpuset_t khat_cpuset;

/*
 * management stuff for hat structures
 */
kmutex_t	hat_list_lock;
kcondvar_t	hat_list_cv;
kmem_cache_t	*hat_cache;
kmem_cache_t	*hat_hash_cache;
kmem_cache_t	*vlp_hash_cache;

/*
 * Simple statistics
 */
struct hatstats hatstat;

/*
 * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
 * correctly. For such hypervisors we must set PT_USER for kernel
 * entries ourselves (normally the emulation would set PT_USER for
 * kernel entries and PT_USER|PT_GLOBAL for user entries). pt_kern is
 * thus set appropriately. Note that dboot/kbm is OK, as only the full
 * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
 * incorrect.
 */
int pt_kern;

/*
 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
 */
extern void atomic_orb(uchar_t *addr, uchar_t val);
extern void atomic_andb(uchar_t *addr, uchar_t val);

#ifndef __xpv
extern pfn_t memseg_get_start(struct memseg *);
#endif

#define	PP_GETRM(pp, rmmask)	(pp->p_nrm & rmmask)
#define	PP_ISMOD(pp)		PP_GETRM(pp, P_MOD)
#define	PP_ISREF(pp)		PP_GETRM(pp, P_REF)
#define	PP_ISRO(pp)		PP_GETRM(pp, P_RO)

#define	PP_SETRM(pp, rm)	atomic_orb(&(pp->p_nrm), rm)
#define	PP_SETMOD(pp)		PP_SETRM(pp, P_MOD)
#define	PP_SETREF(pp)		PP_SETRM(pp, P_REF)
#define	PP_SETRO(pp)		PP_SETRM(pp, P_RO)

#define	PP_CLRRM(pp, rm)	atomic_andb(&(pp->p_nrm), ~(rm))
#define	PP_CLRMOD(pp)		PP_CLRRM(pp, P_MOD)
#define	PP_CLRREF(pp)		PP_CLRRM(pp, P_REF)
#define	PP_CLRRO(pp)		PP_CLRRM(pp, P_RO)
#define	PP_CLRALL(pp)		PP_CLRRM(pp, P_MOD | P_REF | P_RO)
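
/*
 * For example, PP_SETMOD(pp) atomically ORs P_MOD into pp->p_nrm, and
 * PP_CLRALL(pp) atomically clears all three bits in that byte.
 */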

/*
 * kmem cache constructor for struct hat
 */
/*ARGSUSED*/
static int
hati_constructor(void *buf, void *handle, int kmflags)
{
	hat_t	*hat = buf;

	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	bzero(hat->hat_pages_mapped,
	    sizeof (pgcnt_t) * (mmu.max_page_level + 1));
	hat->hat_ism_pgcnt = 0;
	hat->hat_stats = 0;
	hat->hat_flags = 0;
	CPUSET_ZERO(hat->hat_cpus);
	hat->hat_htable = NULL;
	hat->hat_ht_hash = NULL;
	return (0);
}

/*
 * Allocate a hat structure for as. We also create the top level
 * htable and initialize it to contain the kernel hat entries.
 */
hat_t *
hat_alloc(struct as *as)
{
	hat_t			*hat;
	htable_t		*ht;	/* top level htable */
	uint_t			use_vlp;
	uint_t			r;
	hat_kernel_range_t	*rp;
	uintptr_t		va;
	uintptr_t		eva;
	uint_t			start;
	uint_t			cnt;
	htable_t		*src;

	/*
	 * Once we start creating user process HATs we can enable
	 * the htable_steal() code.
	 */
	if (can_steal_post_boot == 0)
		can_steal_post_boot = 1;

	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
	hat->hat_as = as;
	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	ASSERT(hat->hat_flags == 0);

#if defined(__xpv)
	/*
	 * No VLP stuff on the hypervisor due to the 64-bit split top level
	 * page tables. On 32-bit it's not needed as the hypervisor takes
	 * care of copying the top level PTEs to a below 4Gig page.
	 */
	use_vlp = 0;
#else	/* __xpv */
	/* 32 bit processes use a VLP style hat when running with PAE */
#if defined(__amd64)
	use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
#elif defined(__i386)
	use_vlp = mmu.pae_hat;
#endif
#endif	/* __xpv */
	if (use_vlp) {
		hat->hat_flags = HAT_VLP;
		bzero(hat->hat_vlp_ptes, VLP_SIZE);
	}

	/*
	 * Allocate the htable hash
	 */
	if ((hat->hat_flags & HAT_VLP)) {
		hat->hat_num_hash = mmu.vlp_hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
	} else {
		hat->hat_num_hash = mmu.hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
	}
	bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));

	/*
	 * Initialize Kernel HAT entries at the top of the top level page
	 * tables for the new hat.
	 */
	hat->hat_htable = NULL;
	hat->hat_ht_cached = NULL;
	XPV_DISALLOW_MIGRATE();
	ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
	hat->hat_htable = ht;

#if defined(__amd64)
	if (hat->hat_flags & HAT_VLP)
		goto init_done;
#endif

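	/*
	 * Copy the kernel's shared pagetable entries (see kernel_ranges[])
	 * into this new hat's top level tables.
	 */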
	for (r = 0; r < num_kernel_ranges; ++r) {
		rp = &kernel_ranges[r];
		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
		    va += cnt * LEVEL_SIZE(rp->hkr_level)) {

			if (rp->hkr_level == TOP_LEVEL(hat))
				ht = hat->hat_htable;
			else
				ht = htable_create(hat, va, rp->hkr_level,
				    NULL);

			start = htable_va2entry(va, ht);
			cnt = HTABLE_NUM_PTES(ht) - start;
			eva = va +
			    ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
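
			/*
			 * Clip cnt when the range ends inside this htable;
			 * eva == 0 means eva wrapped past the top of the
			 * address space.
			 */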
			if (rp->hkr_end_va != 0 &&
			    (eva > rp->hkr_end_va || eva == 0))
				cnt = htable_va2entry(rp->hkr_end_va, ht) -
				    start;

#if defined(__i386) && !defined(__xpv)
			if (ht->ht_flags & HTABLE_VLP) {
				bcopy(&vlp_page[start],
				    &hat->hat_vlp_ptes[start],
				    cnt * sizeof (x86pte_t));
				continue;
			}
#endif
			src = htable_lookup(kas.a_hat, va, rp->hkr_level);
			ASSERT(src != NULL);
			x86pte_copy(src, ht, start, cnt);
			htable_release(src);
		}
	}

init_done:

#if defined(__xpv)
	/*
	 * Pin top level page tables after initializing them
	 */
	xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
#if defined(__amd64)
	xen_pin(hat->hat_user_ptable, mmu.max_level);
#endif
#endif
	XPV_ALLOW_MIGRATE();

	/*
	 * Put it at the start of the global list of all hats (used by stealing)
	 *
	 * kas.a_hat is not in the list but is instead used to find the
	 * first and last items in the list.
	 *
	 * - kas.a_hat->hat_next points to the start of the user hats.
	 *   The list ends where hat->hat_next == NULL
	 *
	 * - kas.a_hat->hat_prev points to the last of the user hats.
	 *   The list begins where hat->hat_prev == NULL
	 */
	mutex_enter(&hat_list_lock);
	hat->hat_prev = NULL;
	hat->hat_next = kas.a_hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat;
	else
		kas.a_hat->hat_prev = hat;
	kas.a_hat->hat_next = hat;
	mutex_exit(&hat_list_lock);

	return (hat);
}

/*
 * process has finished executing but as has not been cleaned up yet.
 */
/*ARGSUSED*/
void
hat_free_start(hat_t *hat)
{
	ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));

	/*
	 * If the hat is currently a stealing victim, wait for the stealing
	 * to finish.  Once we mark it as HAT_FREEING, htable_steal()
	 * won't look at its pagetables anymore.
	 */
	mutex_enter(&hat_list_lock);
	while (hat->hat_flags & HAT_VICTIM)
		cv_wait(&hat_list_cv, &hat_list_lock);
	hat->hat_flags |= HAT_FREEING;
	mutex_exit(&hat_list_lock);
}

/*
 * An address space is being destroyed, so we destroy the associated hat.
 */
void
hat_free_end(hat_t *hat)
{
	kmem_cache_t *cache;

	ASSERT(hat->hat_flags & HAT_FREEING);

	/*
	 * must not be running on the given hat
	 */
	ASSERT(CPU->cpu_current_hat != hat);

	/*
	 * Remove it from the list of HATs
	 */
	mutex_enter(&hat_list_lock);
	if (hat->hat_prev)
		hat->hat_prev->hat_next = hat->hat_next;
	else
		kas.a_hat->hat_next = hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat->hat_prev;
	else
		kas.a_hat->hat_prev = hat->hat_prev;
	mutex_exit(&hat_list_lock);
	hat->hat_next = hat->hat_prev = NULL;

#if defined(__xpv)
	/*
	 * On the hypervisor, unpin top level page table(s)
	 */
	xen_unpin(hat->hat_htable->ht_pfn);
#if defined(__amd64)
	xen_unpin(hat->hat_user_ptable);
#endif
#endif

	/*
	 * Make a pass through the htables freeing them all up.
	 */
	htable_purge_hat(hat);

	/*
	 * Decide which kmem cache the hash table came from, then free it.
	 */
	if (hat->hat_flags & HAT_VLP)
		cache = vlp_hash_cache;
	else
		cache = hat_hash_cache;
	kmem_cache_free(cache, hat->hat_ht_hash);
	hat->hat_ht_hash = NULL;

	hat->hat_flags = 0;
	kmem_cache_free(hat_cache, hat);
}

/*
 * round kernelbase down to a supported value to use for _userlimit
 *
 * userlimit must be aligned down to an entry in the top level htable.
 * The one exception is for 32 bit HAT's running PAE.
 */
uintptr_t
hat_kernelbase(uintptr_t va)
{
#if defined(__i386)
	va &= LEVEL_MASK(1);
#endif
	if (IN_VA_HOLE(va))
		panic("_userlimit %p will fall in VA hole\n", (void *)va);
	return (va);
}

/*
 * Determine the maximum hardware page size level this platform supports,
 * and the maximum page size level exported to user mappings.
 */
static void
set_max_page_level()
{
	level_t lvl;

	if (!kbm_largepage_support) {
		lvl = 0;
	} else {
		if (is_x86_feature(x86_featureset, X86FSET_1GPG)) {
			lvl = 2;
			if (chk_optimal_1gtlb &&
			    cpuid_opteron_erratum(CPU, 6671130)) {
				lvl = 1;
			}
			if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
			    LEVEL_SHIFT(0))) {
				lvl = 1;
			}
		} else {
			lvl = 1;
		}
	}
	mmu.max_page_level = lvl;

	if ((lvl == 2) && (enable_1gpg == 0))
		mmu.umax_page_level = 1;
	else
		mmu.umax_page_level = lvl;
}

/*
 * Initialize hat data structures based on processor MMU information.
 */
void
mmu_init(void)
{
	uint_t max_htables;
	uint_t pa_bits;
	uint_t va_bits;
	int i;

	/*
	 * If the CPU enabled the page table global bit, use it for the kernel.
	 * This is bit 7 in CR4 (PGE - Page Global Enable).
	 */
	if (is_x86_feature(x86_featureset, X86FSET_PGE) &&
	    (getcr4() & CR4_PGE) != 0)
		mmu.pt_global = PT_GLOBAL;

	/*
	 * Detect NX and PAE usage.
	 */
	mmu.pae_hat = kbm_pae_support;
	if (kbm_nx_support)
		mmu.pt_nx = PT_NX;
	else
		mmu.pt_nx = 0;

	/*
	 * Use CPU info to set various MMU parameters
	 */
	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);

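	/*
	 * With fewer than 64 implemented VA bits, the "VA hole" covers the
	 * non-canonical addresses between 2^(va_bits - 1) and the
	 * sign-extended top of the address space (e.g. with va_bits == 48,
	 * addresses 0x0000800000000000 through 0xffff7fffffffffff are
	 * unusable).
	 */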
	if (va_bits < sizeof (void *) * NBBY) {
		mmu.hole_start = (1ul << (va_bits - 1));
		mmu.hole_end = 0ul - mmu.hole_start - 1;
	} else {
		mmu.hole_end = 0;
		mmu.hole_start = mmu.hole_end - 1;
	}
#if defined(OPTERON_ERRATUM_121)
	/*
	 * If erratum 121 has already been detected at this time, hole_start
	 * contains the value to be subtracted from mmu.hole_start.
	 */
	ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
	hole_start = mmu.hole_start - hole_start;
#else
	hole_start = mmu.hole_start;
#endif
	hole_end = mmu.hole_end;

	mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
	if (mmu.pae_hat == 0 && pa_bits > 32)
		mmu.highest_pfn = PFN_4G - 1;

	if (mmu.pae_hat) {
		mmu.pte_size = 8;	/* 8 byte PTEs */
		mmu.pte_size_shift = 3;
	} else {
		mmu.pte_size = 4;	/* 4 byte PTEs */
		mmu.pte_size_shift = 2;
	}

	if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE))
		panic("Processor does not support PAE");

	if (!is_x86_feature(x86_featureset, X86FSET_CX8))
		panic("Processor does not support cmpxchg8b instruction");

#if defined(__amd64)

	mmu.num_level = 4;
	mmu.max_level = 3;
	mmu.ptes_per_table = 512;
	mmu.top_level_count = 512;

	mmu.level_shift[0] = 12;
	mmu.level_shift[1] = 21;
	mmu.level_shift[2] = 30;
	mmu.level_shift[3] = 39;

#elif defined(__i386)

	if (mmu.pae_hat) {
		mmu.num_level = 3;
		mmu.max_level = 2;
		mmu.ptes_per_table = 512;
		mmu.top_level_count = 4;

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 21;
		mmu.level_shift[2] = 30;

	} else {
		mmu.num_level = 2;
		mmu.max_level = 1;
		mmu.ptes_per_table = 1024;
		mmu.top_level_count = 1024;

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 22;
	}

#endif	/* __i386 */

	for (i = 0; i < mmu.num_level; ++i) {
		mmu.level_size[i] = 1UL << mmu.level_shift[i];
		mmu.level_offset[i] = mmu.level_size[i] - 1;
		mmu.level_mask[i] = ~mmu.level_offset[i];
	}

	set_max_page_level();

	mmu_page_sizes = mmu.max_page_level + 1;
	mmu_exported_page_sizes = mmu.umax_page_level + 1;

	/* restrict legacy applications from using pagesizes 1g and above */
	mmu_legacy_page_sizes =
	    (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;


	for (i = 0; i <= mmu.max_page_level; ++i) {
		mmu.pte_bits[i] = PT_VALID | pt_kern;
		if (i > 0)
			mmu.pte_bits[i] |= PT_PAGESIZE;
	}

	/*
	 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
	 */
	for (i = 1; i < mmu.num_level; ++i)
		mmu.ptp_bits[i] = PT_PTPBITS;

#if defined(__i386)
	mmu.ptp_bits[2] = PT_VALID;
#endif

	/*
	 * Compute how many hash table entries to have per process for htables.
	 * We start with 1 page's worth of entries.
	 *
	 * If physical memory is small, reduce the amount needed to cover it.
	 */
	max_htables = physmax / mmu.ptes_per_table;
	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
	while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
		mmu.hash_cnt >>= 1;
	mmu.vlp_hash_cnt = mmu.hash_cnt;

#if defined(__amd64)
	/*
	 * If running in 64 bits and physical memory is large,
	 * increase the size of the cache to cover all of memory for
	 * a 64 bit process.
	 */
#define	HASH_MAX_LENGTH 4
	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
		mmu.hash_cnt <<= 1;
#endif
}


/*
 * initialize hat data structures
 */
void
hat_init()
{
#if defined(__i386)
	/*
	 * _userlimit must be aligned correctly
	 */
	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
		halt("hat_init(): Unable to continue");
	}
#endif

	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * initialize kmem caches
	 */
	htable_init();
	hment_init();

	hat_cache = kmem_cache_create("hat_t",
	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
	    NULL, 0, 0);

	hat_hash_cache = kmem_cache_create("HatHash",
	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
	    NULL, 0, 0);

	/*
	 * VLP hats can use a smaller hash table size on large memory machines
	 */
	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
		vlp_hash_cache = hat_hash_cache;
	} else {
		vlp_hash_cache = kmem_cache_create("HatVlpHash",
		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
		    NULL, 0, 0);
	}

	/*
	 * Set up the kernel's hat
	 */
	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	kas.a_hat->hat_as = &kas;
	kas.a_hat->hat_flags = 0;
	AS_LOCK_EXIT(&kas, &kas.a_lock);

	CPUSET_ZERO(khat_cpuset);
	CPUSET_ADD(khat_cpuset, CPU->cpu_id);

	/*
	 * The kernel hat's next pointer serves as the head of the hat list.
	 * The kernel hat's prev pointer tracks the last hat on the list for
	 * htable_steal() to use.
	 */
	kas.a_hat->hat_next = NULL;
	kas.a_hat->hat_prev = NULL;

	/*
	 * Allocate an htable hash bucket for the kernel
	 * XX64 - tune for 64 bit procs
	 */
	kas.a_hat->hat_num_hash = mmu.hash_cnt;
	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));

	/*
	 * zero out the top level and cached htable pointers
	 */
	kas.a_hat->hat_ht_cached = NULL;
	kas.a_hat->hat_htable = NULL;

	/*
	 * Pre-allocate hrm_hashtab before enabling the collection of
	 * refmod statistics.  Allocating on the fly would mean us
	 * running the risk of suffering recursive mutex enters or
	 * deadlocks.
	 */
	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
	    KM_SLEEP);
}

/*
 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 *
 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 */
/*ARGSUSED*/
static void
hat_vlp_setup(struct cpu *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci = cpu->cpu_hat_info;
	pfn_t pfn;

	/*
	 * allocate the level==2 page table for the bottom most
	 * 512Gig of address space (this is where 32 bit apps live)
	 */
	ASSERT(hci != NULL);
	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);

	/*
	 * Allocate a top level pagetable and copy the kernel's
	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
	 */
	hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
	hci->hci_vlp_pfn =
	    hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
	ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
	bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);

	pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
	ASSERT(pfn != PFN_INVALID);
	hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
#endif	/* __amd64 && !__xpv */
}

/*ARGSUSED*/
static void
hat_vlp_teardown(cpu_t *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci;

	if ((hci = cpu->cpu_hat_info) == NULL)
		return;
	if (hci->hci_vlp_l2ptes)
		kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
	if (hci->hci_vlp_l3ptes)
		kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
#endif
}

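/*
 * Record the next kernel address range that must be shared with user hats;
 * used below in hat_init_finish() to fill in kernel_ranges[].
 */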
#define	NEXT_HKR(r, l, s, e) {			\
	kernel_ranges[r].hkr_level = l;		\
	kernel_ranges[r].hkr_start_va = s;	\
	kernel_ranges[r].hkr_end_va = e;	\
	++r;					\
}

/*
 * Finish filling in the kernel hat.
 * Pre-fill in all top level kernel page table entries for the kernel's
 * part of the address range. From this point on we can't use any new
 * kernel large pages if they need PTE's at max_level
 *
 * create the kmap mappings.
 */
void
hat_init_finish(void)
{
	size_t		size;
	uint_t		r = 0;
	uintptr_t	va;
	hat_kernel_range_t *rp;


	/*
	 * We are now effectively running on the kernel hat.
	 * Clearing use_boot_reserve shuts off using the pre-allocated boot
	 * reserve for all HAT allocations.  From here on, the reserves are
	 * only used when avoiding recursion in kmem_alloc().
	 */
	use_boot_reserve = 0;
	htable_adjust_reserve();

	/*
	 * User HATs are initialized with copies of all kernel mappings in
	 * higher level page tables. Ensure that those entries exist.
	 */
#if defined(__amd64)

	NEXT_HKR(r, 3, kernelbase, 0);
#if defined(__xpv)
	NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
#endif

#elif defined(__i386)

#if !defined(__xpv)
	if (mmu.pae_hat) {
		va = kernelbase;
		if ((va & LEVEL_MASK(2)) != va) {
			va = P2ROUNDUP(va, LEVEL_SIZE(2));
			NEXT_HKR(r, 1, kernelbase, va);
		}
		if (va != 0)
			NEXT_HKR(r, 2, va, 0);
	} else
#endif	/* __xpv */
		NEXT_HKR(r, 1, kernelbase, 0);

#endif	/* __i386 */

	num_kernel_ranges = r;

	/*
	 * Create all the kernel pagetables that will have entries
	 * shared to user HATs.
	 */
	for (r = 0; r < num_kernel_ranges; ++r) {
		rp = &kernel_ranges[r];
		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
		    va += LEVEL_SIZE(rp->hkr_level)) {
			htable_t *ht;

			if (IN_HYPERVISOR_VA(va))
				continue;

			/* can/must skip if a page mapping already exists */
			if (rp->hkr_level <= mmu.max_page_level &&
			    (ht = htable_getpage(kas.a_hat, va, NULL)) !=
			    NULL) {
				htable_release(ht);
				continue;
			}

			(void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
			    NULL);
		}
	}

	/*
	 * 32 bit PAE metal kernels use only 4 of the 512 entries in the
	 * page holding the top level pagetable. We use the remainder for
	 * the "per CPU" page tables for VLP processes.
	 * Map the top level kernel pagetable into the kernel to make
	 * it easy to use bcopy to access these tables.
	 */
	if (mmu.pae_hat) {
		vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
		hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
		    kas.a_hat->hat_htable->ht_pfn,
#if !defined(__xpv)
		    PROT_WRITE |
#endif
		    PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
		    HAT_LOAD | HAT_LOAD_NOCONSIST);
	}
	hat_vlp_setup(CPU);

	/*
	 * Create kmap (cached mappings of kernel PTEs)
	 * for 32 bit we map from segmap_start .. ekernelheap
	 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
	 */
#if defined(__i386)
	size = (uintptr_t)ekernelheap - segmap_start;
#elif defined(__amd64)
	size = segmapsize;
#endif
	hat_kmap_init((uintptr_t)segmap_start, size);
}

/*
 * In 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
 * are 32 bit, so for safety we must use cas64() to install these.
 */
#ifdef __i386
static void
reload_pae32(hat_t *hat, cpu_t *cpu)
{
	x86pte_t *src;
	x86pte_t *dest;
	x86pte_t pte;
	int i;

	/*
	 * Load the 4 entries of the level 2 page table into this
	 * cpu's range of the vlp_page and point cr3 at them.
	 */
	ASSERT(mmu.pae_hat);
	src = hat->hat_vlp_ptes;
	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
	for (i = 0; i < VLP_NUM_PTES; ++i) {
		for (;;) {
			pte = dest[i];
			if (pte == src[i])
				break;
			if (cas64(dest + i, pte, src[i]) != src[i])
				break;
		}
	}
}
#endif

/*
 * Switch to a new active hat, maintaining bit masks to track active CPUs.
 *
 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
 * remains a 32-bit value.
 */
void
hat_switch(hat_t *hat)
{
	uint64_t	newcr3;
	cpu_t		*cpu = CPU;
	hat_t		*old = cpu->cpu_current_hat;

	/*
	 * set up this information first, so we don't miss any cross calls
	 */
	if (old != NULL) {
		if (old == hat)
			return;
		if (old != kas.a_hat)
			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
	}

	/*
	 * Add this CPU to the active set for this HAT.
	 */
	if (hat != kas.a_hat) {
		CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
	}
	cpu->cpu_current_hat = hat;

	/*
	 * now go ahead and load cr3
	 */
	if (hat->hat_flags & HAT_VLP) {
#if defined(__amd64)
		x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;

		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
		newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
#elif defined(__i386)
		reload_pae32(hat, cpu);
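		/*
		 * cr3 points at this CPU's 4-entry group within vlp_page
		 * (see the vlp_page layout comment at the top of this file).
		 */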
		newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
		    (cpu->cpu_id + 1) * VLP_SIZE;
#endif
	} else {
		newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
	}
#ifdef __xpv
	{
		struct mmuext_op t[2];
		uint_t retcnt;
		uint_t opcnt = 1;

		t[0].cmd = MMUEXT_NEW_BASEPTR;
		t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
#if defined(__amd64)
		/*
		 * There's an interesting problem here, as to what to
		 * actually specify when switching to the kernel hat.
		 * For now we'll reuse the kernel hat again.
		 */
		t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
		if (hat == kas.a_hat)
			t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
		else
			t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
		++opcnt;
#endif	/* __amd64 */
		if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmu_update() failed");
		ASSERT(retcnt == opcnt);

	}
#else
	setcr3(newcr3);
#endif
	ASSERT(cpu == CPU);
}

/*
 * Utility to return a valid x86pte_t from protections, pfn, and level number
 */
static x86pte_t
hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
{
	x86pte_t	pte;
	uint_t		cache_attr = attr & HAT_ORDER_MASK;

	pte = MAKEPTE(pfn, level);

	if (attr & PROT_WRITE)
		PTE_SET(pte, PT_WRITABLE);

	if (attr & PROT_USER)
		PTE_SET(pte, PT_USER);

	if (!(attr & PROT_EXEC))
		PTE_SET(pte, mmu.pt_nx);

	/*
	 * Set the software bits used to track ref/mod sync's and hments.
	 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
	 */
	if (flags & HAT_LOAD_NOCONSIST)
		PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
	else if (attr & HAT_NOSYNC)
		PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);

	/*
	 * Set the caching attributes in the PTE. The combination
	 * of attributes are poorly defined, so we pay attention
	 * to them in the given order.
	 *
	 * The test for HAT_STRICTORDER is different because it's defined
	 * as "0" - which was a stupid thing to do, but is too late to change!
	 */
	if (cache_attr == HAT_STRICTORDER) {
		PTE_SET(pte, PT_NOCACHE);
	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
		/* nothing to set */;
	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
		PTE_SET(pte, PT_NOCACHE);
		if (is_x86_feature(x86_featureset, X86FSET_PAT))
			PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
		else
			PTE_SET(pte, PT_WRITETHRU);
	} else {
		panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
	}

	return (pte);
}

/*
 * Duplicate address translations of the parent to the child.
 * This function really isn't used anymore.
 */
/*ARGSUSED*/
int
hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
{
	ASSERT((uintptr_t)addr < kernelbase);
	ASSERT(new != kas.a_hat);
	ASSERT(old != kas.a_hat);
	return (0);
}

/*
 * Allocate any hat resources required for a process being swapped in.
 */
/*ARGSUSED*/
void
hat_swapin(hat_t *hat)
{
	/* do nothing - we let everything fault back in */
}

/*
 * Unload all translations associated with an address space of a process
 * that is being swapped out.
 */
void
hat_swapout(hat_t *hat)
{
	uintptr_t	vaddr = (uintptr_t)0;
	uintptr_t	eaddr = _userlimit;
	htable_t	*ht = NULL;
	level_t		l;

	XPV_DISALLOW_MIGRATE();
	/*
	 * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
	 * seg_spt and shared pagetables can't be swapped out.
	 * Take a look at segspt_shmswapout() - it's a big no-op.
	 *
	 * Instead we'll walk through all the address space and unload
	 * any mappings which we are sure are not shared, not locked.
	 */
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
		eaddr = (uintptr_t)hat->hat_as->a_userlimit;

	while (vaddr < eaddr) {
		(void) htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;

		ASSERT(!IN_VA_HOLE(vaddr));

		/*
		 * If the page table is shared skip its entire range.
		 */
		l = ht->ht_level;
		if (ht->ht_flags & HTABLE_SHARED_PFN) {
			vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
			htable_release(ht);
			ht = NULL;
			continue;
		}

		/*
		 * If the page table has no locked entries, unload this one.
		 */
		if (ht->ht_lock_cnt == 0)
			hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
			    HAT_UNLOAD_UNMAP);

		/*
		 * If we have a level 0 page table with locked entries,
		 * skip the entire page table, otherwise skip just one entry.
		 */
		if (ht->ht_lock_cnt > 0 && l == 0)
			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
		else
			vaddr += LEVEL_SIZE(l);
	}
	if (ht)
		htable_release(ht);

	/*
	 * We're in swapout because the system is low on memory, so
	 * go back and flush all the htables off the cached list.
	 */
	htable_purge_hat(hat);
	XPV_ALLOW_MIGRATE();
}
12050Sstevel@tonic-gate
12060Sstevel@tonic-gate /*
12070Sstevel@tonic-gate * returns number of bytes that have valid mappings in hat.
12080Sstevel@tonic-gate */
12090Sstevel@tonic-gate size_t
hat_get_mapped_size(hat_t * hat)12100Sstevel@tonic-gate hat_get_mapped_size(hat_t *hat)
12110Sstevel@tonic-gate {
12120Sstevel@tonic-gate size_t total = 0;
12130Sstevel@tonic-gate int l;
12140Sstevel@tonic-gate
12150Sstevel@tonic-gate for (l = 0; l <= mmu.max_page_level; l++)
12160Sstevel@tonic-gate total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
12174381Sjosephb total += hat->hat_ism_pgcnt;
12180Sstevel@tonic-gate
12190Sstevel@tonic-gate return (total);
12200Sstevel@tonic-gate }
12210Sstevel@tonic-gate
12220Sstevel@tonic-gate /*
12230Sstevel@tonic-gate * enable/disable collection of stats for hat.
12240Sstevel@tonic-gate */
12250Sstevel@tonic-gate int
hat_stats_enable(hat_t * hat)12260Sstevel@tonic-gate hat_stats_enable(hat_t *hat)
12270Sstevel@tonic-gate {
12280Sstevel@tonic-gate atomic_add_32(&hat->hat_stats, 1);
12290Sstevel@tonic-gate return (1);
12300Sstevel@tonic-gate }
12310Sstevel@tonic-gate
12320Sstevel@tonic-gate void
hat_stats_disable(hat_t * hat)12330Sstevel@tonic-gate hat_stats_disable(hat_t *hat)
12340Sstevel@tonic-gate {
12350Sstevel@tonic-gate atomic_add_32(&hat->hat_stats, -1);
12360Sstevel@tonic-gate }
12370Sstevel@tonic-gate
12380Sstevel@tonic-gate /*
12390Sstevel@tonic-gate * Utility to sync the ref/mod bits from a page table entry to the page_t
12400Sstevel@tonic-gate * We must be holding the mapping list lock when this is called.
12410Sstevel@tonic-gate */
12420Sstevel@tonic-gate static void
hati_sync_pte_to_page(page_t * pp,x86pte_t pte,level_t level)12430Sstevel@tonic-gate hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
12440Sstevel@tonic-gate {
12450Sstevel@tonic-gate uint_t rm = 0;
12460Sstevel@tonic-gate pgcnt_t pgcnt;
12470Sstevel@tonic-gate
12483446Smrj if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
12490Sstevel@tonic-gate return;
12500Sstevel@tonic-gate
12510Sstevel@tonic-gate if (PTE_GET(pte, PT_REF))
12520Sstevel@tonic-gate rm |= P_REF;
12530Sstevel@tonic-gate
12540Sstevel@tonic-gate if (PTE_GET(pte, PT_MOD))
12550Sstevel@tonic-gate rm |= P_MOD;
12560Sstevel@tonic-gate
12570Sstevel@tonic-gate if (rm == 0)
12580Sstevel@tonic-gate return;
12590Sstevel@tonic-gate
12600Sstevel@tonic-gate /*
12610Sstevel@tonic-gate * sync to all constituent pages of a large page
12620Sstevel@tonic-gate */
12630Sstevel@tonic-gate ASSERT(x86_hm_held(pp));
12640Sstevel@tonic-gate pgcnt = page_get_pagecnt(level);
12650Sstevel@tonic-gate ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
12660Sstevel@tonic-gate for (; pgcnt > 0; --pgcnt) {
12670Sstevel@tonic-gate /*
12680Sstevel@tonic-gate * hat_page_demote() can't decrease
12690Sstevel@tonic-gate * pszc below this mapping size
12700Sstevel@tonic-gate * since this large mapping existed after we
12710Sstevel@tonic-gate * took the mlist lock.
12720Sstevel@tonic-gate */
12730Sstevel@tonic-gate ASSERT(pp->p_szc >= level);
12740Sstevel@tonic-gate hat_page_setattr(pp, rm);
12750Sstevel@tonic-gate ++pp;
12760Sstevel@tonic-gate }
12770Sstevel@tonic-gate }
12780Sstevel@tonic-gate
12790Sstevel@tonic-gate /*
12800Sstevel@tonic-gate * This is the set of PTE bits for PFN, permissions and caching
12815084Sjohnlev * that are allowed to change on a HAT_LOAD_REMAP
12820Sstevel@tonic-gate */
12830Sstevel@tonic-gate #define PT_REMAP_BITS \
12840Sstevel@tonic-gate (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU | \
12855084Sjohnlev PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
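/*
 * For example, PT_USER is not in PT_REMAP_BITS, so a remap that tried to
 * toggle user accessibility would hit the "remap bits changed" panic in
 * hati_pte_map() below.
 */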
12860Sstevel@tonic-gate
1287510Skchow #define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX)
12880Sstevel@tonic-gate /*
12890Sstevel@tonic-gate * Do the low-level work to get a mapping entered into a HAT's pagetables
12900Sstevel@tonic-gate * and in the mapping list of the associated page_t.
12910Sstevel@tonic-gate */
12923446Smrj static int
12930Sstevel@tonic-gate hati_pte_map(
12940Sstevel@tonic-gate htable_t *ht,
12950Sstevel@tonic-gate uint_t entry,
12960Sstevel@tonic-gate page_t *pp,
12970Sstevel@tonic-gate x86pte_t pte,
12980Sstevel@tonic-gate int flags,
12990Sstevel@tonic-gate void *pte_ptr)
13000Sstevel@tonic-gate {
13010Sstevel@tonic-gate hat_t *hat = ht->ht_hat;
13020Sstevel@tonic-gate x86pte_t old_pte;
13030Sstevel@tonic-gate level_t l = ht->ht_level;
13040Sstevel@tonic-gate hment_t *hm;
13050Sstevel@tonic-gate uint_t is_consist;
13068522SJakub.Jermar@Sun.COM uint_t is_locked;
13073446Smrj int rv = 0;
13080Sstevel@tonic-gate
13090Sstevel@tonic-gate /*
13109903SPavel.Tatashin@Sun.COM * Is this a consistent (i.e. needs the mapping list lock) mapping?
13110Sstevel@tonic-gate */
13120Sstevel@tonic-gate is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
13130Sstevel@tonic-gate
13140Sstevel@tonic-gate /*
13150Sstevel@tonic-gate * Track locked mapping count in the htable. Do this first,
13160Sstevel@tonic-gate * as we track locking even if there already is a mapping present.
13170Sstevel@tonic-gate */
13188522SJakub.Jermar@Sun.COM is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
13198522SJakub.Jermar@Sun.COM if (is_locked)
13200Sstevel@tonic-gate HTABLE_LOCK_INC(ht);
13210Sstevel@tonic-gate
13220Sstevel@tonic-gate /*
13230Sstevel@tonic-gate * Acquire the page's mapping list lock and get an hment to use.
13240Sstevel@tonic-gate * Note that hment_prepare() might return NULL.
13250Sstevel@tonic-gate */
13260Sstevel@tonic-gate if (is_consist) {
13270Sstevel@tonic-gate x86_hm_enter(pp);
13280Sstevel@tonic-gate hm = hment_prepare(ht, entry, pp);
13290Sstevel@tonic-gate }
13300Sstevel@tonic-gate
13310Sstevel@tonic-gate /*
13320Sstevel@tonic-gate * Set the new pte, retrieving the old one at the same time.
13330Sstevel@tonic-gate */
13340Sstevel@tonic-gate old_pte = x86pte_set(ht, entry, pte, pte_ptr);
13350Sstevel@tonic-gate
13360Sstevel@tonic-gate /*
13378522SJakub.Jermar@Sun.COM * Did we get a large page / page table collision?
13383446Smrj */
13393446Smrj if (old_pte == LPAGE_ERROR) {
13408522SJakub.Jermar@Sun.COM if (is_locked)
13418522SJakub.Jermar@Sun.COM HTABLE_LOCK_DEC(ht);
13423446Smrj rv = -1;
13433446Smrj goto done;
13443446Smrj }
13453446Smrj
13463446Smrj /*
13470Sstevel@tonic-gate * If the mapping didn't change there is nothing more to do.
13480Sstevel@tonic-gate */
13493446Smrj if (PTE_EQUIV(pte, old_pte))
13503446Smrj goto done;
13510Sstevel@tonic-gate
13520Sstevel@tonic-gate /*
13530Sstevel@tonic-gate * Install a new mapping in the page's mapping list
13540Sstevel@tonic-gate */
13550Sstevel@tonic-gate if (!PTE_ISVALID(old_pte)) {
13560Sstevel@tonic-gate if (is_consist) {
13570Sstevel@tonic-gate hment_assign(ht, entry, pp, hm);
13580Sstevel@tonic-gate x86_hm_exit(pp);
13590Sstevel@tonic-gate } else {
13600Sstevel@tonic-gate ASSERT(flags & HAT_LOAD_NOCONSIST);
13610Sstevel@tonic-gate }
13625349Skchow #if defined(__amd64)
13635349Skchow if (ht->ht_flags & HTABLE_VLP) {
13645349Skchow cpu_t *cpu = CPU;
13655349Skchow x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
13665349Skchow VLP_COPY(hat->hat_vlp_ptes, vlpptep);
13675349Skchow }
13685349Skchow #endif
13690Sstevel@tonic-gate HTABLE_INC(ht->ht_valid_cnt);
13700Sstevel@tonic-gate PGCNT_INC(hat, l);
13713446Smrj return (rv);
13720Sstevel@tonic-gate }
13730Sstevel@tonic-gate
13740Sstevel@tonic-gate /*
13750Sstevel@tonic-gate * Remap's are more complicated:
13760Sstevel@tonic-gate * - HAT_LOAD_REMAP must be specified if changing the pfn.
13770Sstevel@tonic-gate * We also require that NOCONSIST be specified.
13780Sstevel@tonic-gate * - Otherwise only permission or caching bits may change.
13790Sstevel@tonic-gate */
13800Sstevel@tonic-gate if (!PTE_ISPAGE(old_pte, l))
13810Sstevel@tonic-gate panic("non-null/page mapping pte=" FMT_PTE, old_pte);
13820Sstevel@tonic-gate
13830Sstevel@tonic-gate if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1384510Skchow REMAPASSERT(flags & HAT_LOAD_REMAP);
1385510Skchow REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
13863446Smrj REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1387510Skchow REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
13880Sstevel@tonic-gate pf_is_memory(PTE2PFN(pte, l)));
1389510Skchow REMAPASSERT(!is_consist);
13900Sstevel@tonic-gate }
13910Sstevel@tonic-gate
13920Sstevel@tonic-gate /*
13935084Sjohnlev * We only let remaps change certain bits in the PTE.
13940Sstevel@tonic-gate */
13955084Sjohnlev if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
13965084Sjohnlev panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
13975084Sjohnlev old_pte, pte);
13980Sstevel@tonic-gate
13990Sstevel@tonic-gate /*
14000Sstevel@tonic-gate * We don't create any mapping list entries on a remap, so release
14010Sstevel@tonic-gate * any allocated hment after we drop the mapping list lock.
14020Sstevel@tonic-gate */
14033446Smrj done:
14040Sstevel@tonic-gate if (is_consist) {
14050Sstevel@tonic-gate x86_hm_exit(pp);
14060Sstevel@tonic-gate if (hm != NULL)
14070Sstevel@tonic-gate hment_free(hm);
14080Sstevel@tonic-gate }
14093446Smrj return (rv);
14100Sstevel@tonic-gate }
14110Sstevel@tonic-gate
14120Sstevel@tonic-gate /*
14133446Smrj * Internal routine to load a single page table entry. This only fails if
14143446Smrj * we attempt to overwrite a page table link with a large page.
14150Sstevel@tonic-gate */
14163446Smrj static int
14170Sstevel@tonic-gate hati_load_common(
14180Sstevel@tonic-gate hat_t *hat,
14190Sstevel@tonic-gate uintptr_t va,
14200Sstevel@tonic-gate page_t *pp,
14210Sstevel@tonic-gate uint_t attr,
14220Sstevel@tonic-gate uint_t flags,
14230Sstevel@tonic-gate level_t level,
14240Sstevel@tonic-gate pfn_t pfn)
14250Sstevel@tonic-gate {
14260Sstevel@tonic-gate htable_t *ht;
14270Sstevel@tonic-gate uint_t entry;
14280Sstevel@tonic-gate x86pte_t pte;
14293446Smrj int rv = 0;
14300Sstevel@tonic-gate
14314004Sjosephb /*
14324004Sjosephb * The number 16 is arbitrary and here to catch a recursion problem
14334004Sjosephb * early before we blow out the kernel stack.
14344004Sjosephb */
14354004Sjosephb ++curthread->t_hatdepth;
14364004Sjosephb ASSERT(curthread->t_hatdepth < 16);
14374004Sjosephb
14380Sstevel@tonic-gate ASSERT(hat == kas.a_hat ||
14390Sstevel@tonic-gate AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
14400Sstevel@tonic-gate
14410Sstevel@tonic-gate if (flags & HAT_LOAD_SHARE)
14420Sstevel@tonic-gate hat->hat_flags |= HAT_SHARED;
14430Sstevel@tonic-gate
14440Sstevel@tonic-gate /*
14450Sstevel@tonic-gate * Find the page table that maps this page if it already exists.
14460Sstevel@tonic-gate */
14470Sstevel@tonic-gate ht = htable_lookup(hat, va, level);
14480Sstevel@tonic-gate
14490Sstevel@tonic-gate /*
14504004Sjosephb * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
14510Sstevel@tonic-gate */
14524004Sjosephb if (pp == NULL)
14530Sstevel@tonic-gate flags |= HAT_LOAD_NOCONSIST;
14540Sstevel@tonic-gate
14550Sstevel@tonic-gate if (ht == NULL) {
14560Sstevel@tonic-gate ht = htable_create(hat, va, level, NULL);
14570Sstevel@tonic-gate ASSERT(ht != NULL);
14580Sstevel@tonic-gate }
14590Sstevel@tonic-gate entry = htable_va2entry(va, ht);
14600Sstevel@tonic-gate
14610Sstevel@tonic-gate /*
14620Sstevel@tonic-gate * a bunch of paranoid error checking
14630Sstevel@tonic-gate */
14640Sstevel@tonic-gate ASSERT(ht->ht_busy > 0);
14650Sstevel@tonic-gate if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
14667240Srh87107 panic("hati_load_common: bad htable %p, va %p",
14677240Srh87107 (void *)ht, (void *)va);
14680Sstevel@tonic-gate ASSERT(ht->ht_level == level);
14690Sstevel@tonic-gate
14700Sstevel@tonic-gate /*
14710Sstevel@tonic-gate * construct the new PTE
14720Sstevel@tonic-gate */
14730Sstevel@tonic-gate if (hat == kas.a_hat)
14740Sstevel@tonic-gate attr &= ~PROT_USER;
14750Sstevel@tonic-gate pte = hati_mkpte(pfn, attr, level, flags);
14760Sstevel@tonic-gate if (hat == kas.a_hat && va >= kernelbase)
14770Sstevel@tonic-gate PTE_SET(pte, mmu.pt_global);
14780Sstevel@tonic-gate
14790Sstevel@tonic-gate /*
14800Sstevel@tonic-gate * establish the mapping
14810Sstevel@tonic-gate */
14823446Smrj rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
14830Sstevel@tonic-gate
14840Sstevel@tonic-gate /*
14850Sstevel@tonic-gate * release the htable and any reserves
14860Sstevel@tonic-gate */
14870Sstevel@tonic-gate htable_release(ht);
14884004Sjosephb --curthread->t_hatdepth;
14893446Smrj return (rv);
14900Sstevel@tonic-gate }
14910Sstevel@tonic-gate
14920Sstevel@tonic-gate /*
14930Sstevel@tonic-gate * special case of hat_memload to deal with some kernel addrs for performance
14940Sstevel@tonic-gate */
14950Sstevel@tonic-gate static void
14960Sstevel@tonic-gate hat_kmap_load(
14970Sstevel@tonic-gate caddr_t addr,
14980Sstevel@tonic-gate page_t *pp,
14990Sstevel@tonic-gate uint_t attr,
15000Sstevel@tonic-gate uint_t flags)
15010Sstevel@tonic-gate {
15020Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr;
15030Sstevel@tonic-gate x86pte_t pte;
15040Sstevel@tonic-gate pfn_t pfn = page_pptonum(pp);
15050Sstevel@tonic-gate pgcnt_t pg_off = mmu_btop(va - mmu.kmap_addr);
15060Sstevel@tonic-gate htable_t *ht;
15070Sstevel@tonic-gate uint_t entry;
15080Sstevel@tonic-gate void *pte_ptr;
15090Sstevel@tonic-gate
15100Sstevel@tonic-gate /*
15110Sstevel@tonic-gate * construct the requested PTE
15120Sstevel@tonic-gate */
15130Sstevel@tonic-gate attr &= ~PROT_USER;
15140Sstevel@tonic-gate attr |= HAT_STORECACHING_OK;
15150Sstevel@tonic-gate pte = hati_mkpte(pfn, attr, 0, flags);
15160Sstevel@tonic-gate PTE_SET(pte, mmu.pt_global);
15170Sstevel@tonic-gate
15180Sstevel@tonic-gate /*
15190Sstevel@tonic-gate * Figure out the pte_ptr and htable and use common code to finish up
15200Sstevel@tonic-gate */
15210Sstevel@tonic-gate if (mmu.pae_hat)
15220Sstevel@tonic-gate pte_ptr = mmu.kmap_ptes + pg_off;
15230Sstevel@tonic-gate else
15240Sstevel@tonic-gate pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
15250Sstevel@tonic-gate ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
15260Sstevel@tonic-gate LEVEL_SHIFT(1)];
15270Sstevel@tonic-gate entry = htable_va2entry(va, ht);
15284004Sjosephb ++curthread->t_hatdepth;
15294004Sjosephb ASSERT(curthread->t_hatdepth < 16);
15303446Smrj (void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
15314004Sjosephb --curthread->t_hatdepth;
15320Sstevel@tonic-gate }
15330Sstevel@tonic-gate
15340Sstevel@tonic-gate /*
15350Sstevel@tonic-gate * hat_memload() - load a translation to the given page struct
15360Sstevel@tonic-gate *
15370Sstevel@tonic-gate * Flags for hat_memload/hat_devload/hat_*attr.
15380Sstevel@tonic-gate *
15390Sstevel@tonic-gate * HAT_LOAD Default flags to load a translation to the page.
15400Sstevel@tonic-gate *
15410Sstevel@tonic-gate * HAT_LOAD_LOCK Lock down mapping resources; hat_map(), hat_memload(),
15420Sstevel@tonic-gate * and hat_devload().
15430Sstevel@tonic-gate *
15440Sstevel@tonic-gate * HAT_LOAD_NOCONSIST Do not add mapping to the page_t mapping list;
15453446Smrj * sets PT_NOCONSIST.
15460Sstevel@tonic-gate *
15470Sstevel@tonic-gate * HAT_LOAD_SHARE A flag to hat_memload() to indicate h/w page tables
15480Sstevel@tonic-gate * that map some user pages (not kas) are shared by more
15490Sstevel@tonic-gate * than one process (e.g. ISM).
15500Sstevel@tonic-gate *
15510Sstevel@tonic-gate * HAT_LOAD_REMAP Reload a valid pte with a different page frame.
15520Sstevel@tonic-gate *
15530Sstevel@tonic-gate * HAT_NO_KALLOC Do not kmem_alloc while creating the mapping; at this
15540Sstevel@tonic-gate * point, it's setting up a mapping to allocate internal
15550Sstevel@tonic-gate * hat layer data structures. This flag forces the hat layer
15560Sstevel@tonic-gate * to tap its reserves in order to prevent infinite
15570Sstevel@tonic-gate * recursion.
15580Sstevel@tonic-gate *
15590Sstevel@tonic-gate * The following is a protection attribute (like PROT_READ, etc.)
15600Sstevel@tonic-gate *
15613446Smrj * HAT_NOSYNC sets PT_NOSYNC - this mapping's ref/mod bits
15620Sstevel@tonic-gate * are never cleared.
15630Sstevel@tonic-gate *
15640Sstevel@tonic-gate * Installing new valid PTEs and creating the mapping list
15650Sstevel@tonic-gate * entry are controlled under the same lock. It's derived from the
15660Sstevel@tonic-gate * page_t being mapped.
15670Sstevel@tonic-gate */
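/*
 * A minimal sketch of typical use (illustrative, not a caller from this
 * file): load and lock a writable mapping for a locked page_t "pp" at
 * page aligned address "addr" in address space "as":
 *
 *	hat_memload(as->a_hat, addr, pp, PROT_READ | PROT_WRITE,
 *	    HAT_LOAD_LOCK);
 *
 * and later drop the lock with hat_unlock(as->a_hat, addr, MMU_PAGESIZE).
 */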
15680Sstevel@tonic-gate static uint_t supported_memload_flags =
15690Sstevel@tonic-gate HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
15700Sstevel@tonic-gate HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
15710Sstevel@tonic-gate
15720Sstevel@tonic-gate void
15730Sstevel@tonic-gate hat_memload(
15740Sstevel@tonic-gate hat_t *hat,
15750Sstevel@tonic-gate caddr_t addr,
15760Sstevel@tonic-gate page_t *pp,
15770Sstevel@tonic-gate uint_t attr,
15780Sstevel@tonic-gate uint_t flags)
15790Sstevel@tonic-gate {
15800Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr;
15810Sstevel@tonic-gate level_t level = 0;
15820Sstevel@tonic-gate pfn_t pfn = page_pptonum(pp);
15830Sstevel@tonic-gate
15845084Sjohnlev XPV_DISALLOW_MIGRATE();
15850Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(va));
15863446Smrj ASSERT(hat == kas.a_hat || va < _userlimit);
15870Sstevel@tonic-gate ASSERT(hat == kas.a_hat ||
15880Sstevel@tonic-gate AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
15890Sstevel@tonic-gate ASSERT((flags & supported_memload_flags) == flags);
15900Sstevel@tonic-gate
15910Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va));
15920Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp));
15930Sstevel@tonic-gate
15940Sstevel@tonic-gate /*
15950Sstevel@tonic-gate * kernel address special case for performance.
15960Sstevel@tonic-gate */
15970Sstevel@tonic-gate if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
15980Sstevel@tonic-gate ASSERT(hat == kas.a_hat);
15990Sstevel@tonic-gate hat_kmap_load(addr, pp, attr, flags);
16005084Sjohnlev XPV_ALLOW_MIGRATE();
16010Sstevel@tonic-gate return;
16020Sstevel@tonic-gate }
16030Sstevel@tonic-gate
16040Sstevel@tonic-gate /*
16050Sstevel@tonic-gate * This is used for memory with normal caching enabled, so
16060Sstevel@tonic-gate * always set HAT_STORECACHING_OK.
16070Sstevel@tonic-gate */
16080Sstevel@tonic-gate attr |= HAT_STORECACHING_OK;
16093446Smrj if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
16103446Smrj panic("unexpected hati_load_common() failure");
16115084Sjohnlev XPV_ALLOW_MIGRATE();
16120Sstevel@tonic-gate }
16130Sstevel@tonic-gate
16144528Spaulsan /* ARGSUSED */
16154528Spaulsan void
16164528Spaulsan hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
16174528Spaulsan uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
16184528Spaulsan {
16194528Spaulsan hat_memload(hat, addr, pp, attr, flags);
16204528Spaulsan }
16214528Spaulsan
16220Sstevel@tonic-gate /*
16230Sstevel@tonic-gate * Load the given array of page structs using large pages when possible
16240Sstevel@tonic-gate */
16250Sstevel@tonic-gate void
16260Sstevel@tonic-gate hat_memload_array(
16270Sstevel@tonic-gate hat_t *hat,
16280Sstevel@tonic-gate caddr_t addr,
16290Sstevel@tonic-gate size_t len,
16300Sstevel@tonic-gate page_t **pages,
16310Sstevel@tonic-gate uint_t attr,
16320Sstevel@tonic-gate uint_t flags)
16330Sstevel@tonic-gate {
16340Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr;
16350Sstevel@tonic-gate uintptr_t eaddr = va + len;
16360Sstevel@tonic-gate level_t level;
16370Sstevel@tonic-gate size_t pgsize;
16380Sstevel@tonic-gate pgcnt_t pgindx = 0;
16390Sstevel@tonic-gate pfn_t pfn;
16400Sstevel@tonic-gate pgcnt_t i;
16410Sstevel@tonic-gate
16425084Sjohnlev XPV_DISALLOW_MIGRATE();
16430Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(va));
16443446Smrj ASSERT(hat == kas.a_hat || va + len <= _userlimit);
16450Sstevel@tonic-gate ASSERT(hat == kas.a_hat ||
16460Sstevel@tonic-gate AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
16470Sstevel@tonic-gate ASSERT((flags & supported_memload_flags) == flags);
16480Sstevel@tonic-gate
16490Sstevel@tonic-gate /*
16500Sstevel@tonic-gate * memload is used for memory with full caching enabled, so
16510Sstevel@tonic-gate * set HAT_STORECACHING_OK.
16520Sstevel@tonic-gate */
16530Sstevel@tonic-gate attr |= HAT_STORECACHING_OK;
16540Sstevel@tonic-gate
16550Sstevel@tonic-gate /*
16560Sstevel@tonic-gate * handle all pages using the largest possible pagesize
16570Sstevel@tonic-gate */
16580Sstevel@tonic-gate while (va < eaddr) {
16590Sstevel@tonic-gate /*
16600Sstevel@tonic-gate * decide what level mapping to use (ie. pagesize)
16610Sstevel@tonic-gate */
16620Sstevel@tonic-gate pfn = page_pptonum(pages[pgindx]);
16630Sstevel@tonic-gate for (level = mmu.max_page_level; ; --level) {
16640Sstevel@tonic-gate pgsize = LEVEL_SIZE(level);
16650Sstevel@tonic-gate if (level == 0)
16660Sstevel@tonic-gate break;
16673446Smrj
16680Sstevel@tonic-gate if (!IS_P2ALIGNED(va, pgsize) ||
16690Sstevel@tonic-gate (eaddr - va) < pgsize ||
16703446Smrj !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
16710Sstevel@tonic-gate continue;
16720Sstevel@tonic-gate
16730Sstevel@tonic-gate /*
16740Sstevel@tonic-gate * To use a large mapping of this size, all the
16750Sstevel@tonic-gate * pages we are passed must be sequential subpages
16760Sstevel@tonic-gate * of the large page.
16770Sstevel@tonic-gate * hat_page_demote() can't change p_szc because
16780Sstevel@tonic-gate * all pages are locked.
16790Sstevel@tonic-gate */
16800Sstevel@tonic-gate if (pages[pgindx]->p_szc >= level) {
16810Sstevel@tonic-gate for (i = 0; i < mmu_btop(pgsize); ++i) {
16820Sstevel@tonic-gate if (pfn + i !=
16830Sstevel@tonic-gate page_pptonum(pages[pgindx + i]))
16840Sstevel@tonic-gate break;
16850Sstevel@tonic-gate ASSERT(pages[pgindx + i]->p_szc >=
16860Sstevel@tonic-gate level);
16870Sstevel@tonic-gate ASSERT(pages[pgindx] + i ==
16880Sstevel@tonic-gate pages[pgindx + i]);
16890Sstevel@tonic-gate }
16905349Skchow if (i == mmu_btop(pgsize)) {
16915349Skchow #ifdef DEBUG
16925349Skchow if (level == 2)
16935349Skchow map1gcnt++;
16945349Skchow #endif
16950Sstevel@tonic-gate break;
16965349Skchow }
16970Sstevel@tonic-gate }
16980Sstevel@tonic-gate }
16990Sstevel@tonic-gate
17000Sstevel@tonic-gate /*
17013446Smrj * Load this page mapping. If the load fails, try a smaller
17023446Smrj * pagesize.
17030Sstevel@tonic-gate */
17040Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va));
17053446Smrj while (hati_load_common(hat, va, pages[pgindx], attr,
17064381Sjosephb flags, level, pfn) != 0) {
17073446Smrj if (level == 0)
17083446Smrj panic("unexpected hati_load_common() failure");
17093446Smrj --level;
17103446Smrj pgsize = LEVEL_SIZE(level);
17113446Smrj }
17120Sstevel@tonic-gate
17130Sstevel@tonic-gate /*
17140Sstevel@tonic-gate * move to next page
17150Sstevel@tonic-gate */
17160Sstevel@tonic-gate va += pgsize;
17170Sstevel@tonic-gate pgindx += mmu_btop(pgsize);
17180Sstevel@tonic-gate }
17195084Sjohnlev XPV_ALLOW_MIGRATE();
17200Sstevel@tonic-gate }
17210Sstevel@tonic-gate
17224528Spaulsan /* ARGSUSED */
17234528Spaulsan void
17244528Spaulsan hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
17254528Spaulsan struct page **pps, uint_t attr, uint_t flags,
17264528Spaulsan hat_region_cookie_t rcookie)
17274528Spaulsan {
17284528Spaulsan hat_memload_array(hat, addr, len, pps, attr, flags);
17294528Spaulsan }
17304528Spaulsan
17310Sstevel@tonic-gate /*
17320Sstevel@tonic-gate * void hat_devload(hat, addr, len, pf, attr, flags)
17330Sstevel@tonic-gate * load/lock the given page frame number
17340Sstevel@tonic-gate *
17350Sstevel@tonic-gate * Advisory ordering attributes. Apply only to device mappings.
17360Sstevel@tonic-gate *
17370Sstevel@tonic-gate * HAT_STRICTORDER: the CPU must issue the references in order, as the
17380Sstevel@tonic-gate * programmer specified. This is the default.
17390Sstevel@tonic-gate * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
17400Sstevel@tonic-gate * of reordering; store or load with store or load).
17410Sstevel@tonic-gate * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
17420Sstevel@tonic-gate * to consecutive locations (for example, turn two consecutive byte
17430Sstevel@tonic-gate * stores into one halfword store), and it may batch individual loads
17440Sstevel@tonic-gate * (for example, turn two consecutive byte loads into one halfword load).
17450Sstevel@tonic-gate * This also implies re-ordering.
17460Sstevel@tonic-gate * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
17470Sstevel@tonic-gate * until another store occurs. The default is to fetch new data
17480Sstevel@tonic-gate * on every load. This also implies merging.
17490Sstevel@tonic-gate * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
17500Sstevel@tonic-gate * the device (perhaps with other data) at a later time. The default is
17510Sstevel@tonic-gate * to push the data right away. This also implies load caching.
17520Sstevel@tonic-gate *
17530Sstevel@tonic-gate * Equivalent of hat_memload(), but can be used for device memory where
17540Sstevel@tonic-gate * there are no page_t's and we support additional flags (write merging, etc).
17550Sstevel@tonic-gate * Note that we can have large page mappings with this interface.
17560Sstevel@tonic-gate */
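/*
 * A minimal sketch (illustrative only): a driver mapping one page of
 * device registers at kernel address "va" for page frame "pfn" might do:
 *
 *	hat_devload(kas.a_hat, va, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_STRICTORDER,
 *	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
 *
 * Since a non-memory pfn gets HAT_LOAD_NOCONSIST forced below anyway,
 * passing it explicitly just documents the intent.
 */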
17570Sstevel@tonic-gate int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
17580Sstevel@tonic-gate HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
17590Sstevel@tonic-gate HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
17600Sstevel@tonic-gate
17610Sstevel@tonic-gate void
17620Sstevel@tonic-gate hat_devload(
17630Sstevel@tonic-gate hat_t *hat,
17640Sstevel@tonic-gate caddr_t addr,
17650Sstevel@tonic-gate size_t len,
17660Sstevel@tonic-gate pfn_t pfn,
17670Sstevel@tonic-gate uint_t attr,
17680Sstevel@tonic-gate int flags)
17690Sstevel@tonic-gate {
17700Sstevel@tonic-gate uintptr_t va = ALIGN2PAGE(addr);
17710Sstevel@tonic-gate uintptr_t eva = va + len;
17720Sstevel@tonic-gate level_t level;
17730Sstevel@tonic-gate size_t pgsize;
17740Sstevel@tonic-gate page_t *pp;
17750Sstevel@tonic-gate int f; /* per PTE copy of flags - maybe modified */
17760Sstevel@tonic-gate uint_t a; /* per PTE copy of attr */
17770Sstevel@tonic-gate
17785084Sjohnlev XPV_DISALLOW_MIGRATE();
17790Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(va));
17803446Smrj ASSERT(hat == kas.a_hat || eva <= _userlimit);
17810Sstevel@tonic-gate ASSERT(hat == kas.a_hat ||
17820Sstevel@tonic-gate AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
17830Sstevel@tonic-gate ASSERT((flags & supported_devload_flags) == flags);
17840Sstevel@tonic-gate
17850Sstevel@tonic-gate /*
17860Sstevel@tonic-gate * handle all pages
17870Sstevel@tonic-gate */
17880Sstevel@tonic-gate while (va < eva) {
17890Sstevel@tonic-gate
17900Sstevel@tonic-gate /*
17910Sstevel@tonic-gate * decide what level mapping to use (ie. pagesize)
17920Sstevel@tonic-gate */
17930Sstevel@tonic-gate for (level = mmu.max_page_level; ; --level) {
17940Sstevel@tonic-gate pgsize = LEVEL_SIZE(level);
17950Sstevel@tonic-gate if (level == 0)
17960Sstevel@tonic-gate break;
17970Sstevel@tonic-gate if (IS_P2ALIGNED(va, pgsize) &&
17980Sstevel@tonic-gate (eva - va) >= pgsize &&
17995349Skchow IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
18005349Skchow #ifdef DEBUG
18015349Skchow if (level == 2)
18025349Skchow map1gcnt++;
18035349Skchow #endif
18040Sstevel@tonic-gate break;
18055349Skchow }
18060Sstevel@tonic-gate }
18070Sstevel@tonic-gate
18080Sstevel@tonic-gate /*
18093446Smrj * If this is just memory then allow caching (this happens
18100Sstevel@tonic-gate * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
18113446Smrj * to override that. If we don't have a page_t then make sure
18120Sstevel@tonic-gate * NOCONSIST is set.
18130Sstevel@tonic-gate */
18140Sstevel@tonic-gate a = attr;
18150Sstevel@tonic-gate f = flags;
18165084Sjohnlev if (!pf_is_memory(pfn))
18175084Sjohnlev f |= HAT_LOAD_NOCONSIST;
18185084Sjohnlev else if (!(a & HAT_PLAT_NOCACHE))
18195084Sjohnlev a |= HAT_STORECACHING_OK;
18205084Sjohnlev
18215084Sjohnlev if (f & HAT_LOAD_NOCONSIST)
18220Sstevel@tonic-gate pp = NULL;
18235084Sjohnlev else
18245084Sjohnlev pp = page_numtopp_nolock(pfn);
18250Sstevel@tonic-gate
18260Sstevel@tonic-gate /*
18279441SPrakash.Sangappa@Sun.COM * Check to make sure we are really trying to map a valid
18289441SPrakash.Sangappa@Sun.COM * memory page. A caller wishing to intentionally map
18299441SPrakash.Sangappa@Sun.COM * free memory pages will have passed the HAT_LOAD_NOCONSIST
18309441SPrakash.Sangappa@Sun.COM * flag, in which case pp will be NULL.
18319441SPrakash.Sangappa@Sun.COM */
18329441SPrakash.Sangappa@Sun.COM if (pp != NULL) {
18339441SPrakash.Sangappa@Sun.COM if (PP_ISFREE(pp)) {
18349441SPrakash.Sangappa@Sun.COM panic("hat_devload: loading "
18359441SPrakash.Sangappa@Sun.COM "a mapping to free page %p", (void *)pp);
18369441SPrakash.Sangappa@Sun.COM }
18379441SPrakash.Sangappa@Sun.COM
18389441SPrakash.Sangappa@Sun.COM if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
18399441SPrakash.Sangappa@Sun.COM panic("hat_devload: loading a mapping "
18409441SPrakash.Sangappa@Sun.COM "to an unlocked page %p",
18419441SPrakash.Sangappa@Sun.COM (void *)pp);
18429441SPrakash.Sangappa@Sun.COM }
18439441SPrakash.Sangappa@Sun.COM }
18449441SPrakash.Sangappa@Sun.COM
18459441SPrakash.Sangappa@Sun.COM /*
18460Sstevel@tonic-gate * load this page mapping
18470Sstevel@tonic-gate */
18480Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va));
18493446Smrj while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
18503446Smrj if (level == 0)
18513446Smrj panic("unexpected hati_load_common() failure");
18523446Smrj --level;
18533446Smrj pgsize = LEVEL_SIZE(level);
18543446Smrj }
18550Sstevel@tonic-gate
18560Sstevel@tonic-gate /*
18570Sstevel@tonic-gate * move to next page
18580Sstevel@tonic-gate */
18590Sstevel@tonic-gate va += pgsize;
18600Sstevel@tonic-gate pfn += mmu_btop(pgsize);
18610Sstevel@tonic-gate }
18625084Sjohnlev XPV_ALLOW_MIGRATE();
18630Sstevel@tonic-gate }
18640Sstevel@tonic-gate
18650Sstevel@tonic-gate /*
18660Sstevel@tonic-gate * void hat_unlock(hat, addr, len)
18670Sstevel@tonic-gate * unlock the mappings to a given range of addresses
18680Sstevel@tonic-gate *
18690Sstevel@tonic-gate * Locks are tracked by ht_lock_cnt in the htable.
18700Sstevel@tonic-gate */
18710Sstevel@tonic-gate void
18720Sstevel@tonic-gate hat_unlock(hat_t *hat, caddr_t addr, size_t len)
18730Sstevel@tonic-gate {
18740Sstevel@tonic-gate uintptr_t vaddr = (uintptr_t)addr;
18750Sstevel@tonic-gate uintptr_t eaddr = vaddr + len;
18760Sstevel@tonic-gate htable_t *ht = NULL;
18770Sstevel@tonic-gate
18780Sstevel@tonic-gate /*
18790Sstevel@tonic-gate * kernel entries are always locked; we don't track lock counts
18800Sstevel@tonic-gate */
18813446Smrj ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
18820Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr));
18830Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(eaddr));
18840Sstevel@tonic-gate if (hat == kas.a_hat)
18850Sstevel@tonic-gate return;
18860Sstevel@tonic-gate if (eaddr > _userlimit)
18870Sstevel@tonic-gate panic("hat_unlock() address out of range - above _userlimit");
18880Sstevel@tonic-gate
18895084Sjohnlev XPV_DISALLOW_MIGRATE();
18900Sstevel@tonic-gate ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
18910Sstevel@tonic-gate while (vaddr < eaddr) {
18920Sstevel@tonic-gate (void) htable_walk(hat, &ht, &vaddr, eaddr);
18930Sstevel@tonic-gate if (ht == NULL)
18940Sstevel@tonic-gate break;
18950Sstevel@tonic-gate
18960Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(vaddr));
18970Sstevel@tonic-gate
18980Sstevel@tonic-gate if (ht->ht_lock_cnt < 1)
18990Sstevel@tonic-gate panic("hat_unlock(): lock_cnt < 1, "
19007240Srh87107 "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
19010Sstevel@tonic-gate HTABLE_LOCK_DEC(ht);
19020Sstevel@tonic-gate
19030Sstevel@tonic-gate vaddr += LEVEL_SIZE(ht->ht_level);
19040Sstevel@tonic-gate }
19050Sstevel@tonic-gate if (ht)
19060Sstevel@tonic-gate htable_release(ht);
19075084Sjohnlev XPV_ALLOW_MIGRATE();
19080Sstevel@tonic-gate }
19090Sstevel@tonic-gate
19104528Spaulsan /* ARGSUSED */
19114528Spaulsan void
19125075Spaulsan hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
19134528Spaulsan hat_region_cookie_t rcookie)
19144528Spaulsan {
19154528Spaulsan panic("No shared region support on x86");
19164528Spaulsan }
19174528Spaulsan
19185084Sjohnlev #if !defined(__xpv)
19190Sstevel@tonic-gate /*
19200Sstevel@tonic-gate * Cross call service routine to demap a virtual page on
19210Sstevel@tonic-gate * the current CPU or flush all mappings in TLB.
19220Sstevel@tonic-gate */
19230Sstevel@tonic-gate /*ARGSUSED*/
19240Sstevel@tonic-gate static int
19250Sstevel@tonic-gate hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
19260Sstevel@tonic-gate {
19270Sstevel@tonic-gate hat_t *hat = (hat_t *)a1;
19280Sstevel@tonic-gate caddr_t addr = (caddr_t)a2;
19290Sstevel@tonic-gate
19300Sstevel@tonic-gate /*
19310Sstevel@tonic-gate * If the target hat isn't the kernel and this CPU isn't operating
19320Sstevel@tonic-gate * in the target hat, we can ignore the cross call.
19330Sstevel@tonic-gate */
19340Sstevel@tonic-gate if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
19350Sstevel@tonic-gate return (0);
19360Sstevel@tonic-gate
19370Sstevel@tonic-gate /*
19380Sstevel@tonic-gate * For a normal address, we just flush one page mapping
19390Sstevel@tonic-gate */
19400Sstevel@tonic-gate if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
19413446Smrj mmu_tlbflush_entry(addr);
19420Sstevel@tonic-gate return (0);
19430Sstevel@tonic-gate }
19440Sstevel@tonic-gate
19450Sstevel@tonic-gate /*
19460Sstevel@tonic-gate * Otherwise we reload cr3 to effect a complete TLB flush.
19470Sstevel@tonic-gate *
19480Sstevel@tonic-gate * A reload of cr3 on a VLP process also means we must recopy
19490Sstevel@tonic-gate * the pte values from the struct hat.
19500Sstevel@tonic-gate */
19510Sstevel@tonic-gate if (hat->hat_flags & HAT_VLP) {
19520Sstevel@tonic-gate #if defined(__amd64)
19530Sstevel@tonic-gate x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
19540Sstevel@tonic-gate
19550Sstevel@tonic-gate VLP_COPY(hat->hat_vlp_ptes, vlpptep);
19560Sstevel@tonic-gate #elif defined(__i386)
19570Sstevel@tonic-gate reload_pae32(hat, CPU);
19580Sstevel@tonic-gate #endif
19590Sstevel@tonic-gate }
19600Sstevel@tonic-gate reload_cr3();
19610Sstevel@tonic-gate return (0);
19620Sstevel@tonic-gate }
19630Sstevel@tonic-gate
19640Sstevel@tonic-gate /*
19654191Sjosephb * Flush all TLB entries, including global (ie. kernel) ones.
19664191Sjosephb */
19674191Sjosephb static void
19684191Sjosephb flush_all_tlb_entries(void)
19694191Sjosephb {
19704191Sjosephb ulong_t cr4 = getcr4();
19714191Sjosephb
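	/*
	 * Toggling CR4.PGE invalidates every TLB entry, including
	 * global entries (see mmu.pt_global) that a plain cr3 reload
	 * would leave in place.
	 */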
19724191Sjosephb if (cr4 & CR4_PGE) {
19734191Sjosephb setcr4(cr4 & ~(ulong_t)CR4_PGE);
19744191Sjosephb setcr4(cr4);
19754191Sjosephb
19764191Sjosephb /*
19774191Sjosephb * 32 bit PAE also needs to always reload_cr3()
19784191Sjosephb */
19794191Sjosephb if (mmu.max_level == 2)
19804191Sjosephb reload_cr3();
19814191Sjosephb } else {
19824191Sjosephb reload_cr3();
19834191Sjosephb }
19844191Sjosephb }
19854191Sjosephb
19864191Sjosephb #define TLB_CPU_HALTED (01ul)
19874191Sjosephb #define TLB_INVAL_ALL (02ul)
19884191Sjosephb #define CAS_TLB_INFO(cpu, old, new) \
19894191Sjosephb caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
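/*
 * The mcpu_tlb_info word implements a small handshake that lets idle CPUs
 * skip TLB shootdown cross calls:
 *
 *	0				CPU running normally
 *	TLB_CPU_HALTED			CPU idle, set by tlb_going_idle()
 *	TLB_CPU_HALTED|TLB_INVAL_ALL	a flush was posted while the CPU was
 *					idle; tlb_service() performs it when
 *					the CPU wakes up
 *
 * hat_tlb_inval() uses CAS_TLB_INFO() to post TLB_INVAL_ALL only while the
 * target CPU is still halted; otherwise it falls back to a cross call.
 */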
19904191Sjosephb
19914191Sjosephb /*
19924191Sjosephb * Record that a CPU is going idle
19934191Sjosephb */
19944191Sjosephb void
19954191Sjosephb tlb_going_idle(void)
19964191Sjosephb {
19974191Sjosephb atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
19984191Sjosephb }
19994191Sjosephb
20004191Sjosephb /*
20014191Sjosephb * Service a delayed TLB flush if coming out of being idle.
20029903SPavel.Tatashin@Sun.COM * It is called from the cpu idle notification with interrupts disabled.
20034191Sjosephb */
20044191Sjosephb void
20054191Sjosephb tlb_service(void)
20064191Sjosephb {
20074191Sjosephb ulong_t tlb_info;
20084191Sjosephb ulong_t found;
20094191Sjosephb
20104191Sjosephb /*
20114191Sjosephb * We only have to do something if coming out of being idle.
20124191Sjosephb */
20134191Sjosephb tlb_info = CPU->cpu_m.mcpu_tlb_info;
20144191Sjosephb if (tlb_info & TLB_CPU_HALTED) {
20154191Sjosephb ASSERT(CPU->cpu_current_hat == kas.a_hat);
20164191Sjosephb
20174191Sjosephb /*
20184191Sjosephb * Atomic clear and fetch of old state.
20194191Sjosephb */
20204191Sjosephb while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
20214191Sjosephb ASSERT(found & TLB_CPU_HALTED);
20224191Sjosephb tlb_info = found;
20234191Sjosephb SMT_PAUSE();
20244191Sjosephb }
20254191Sjosephb if (tlb_info & TLB_INVAL_ALL)
20264191Sjosephb flush_all_tlb_entries();
20274191Sjosephb }
20284191Sjosephb }
20295084Sjohnlev #endif /* !__xpv */
20304191Sjosephb
20314191Sjosephb /*
20320Sstevel@tonic-gate * Internal routine to do cross calls to invalidate a page (or, for
20330Sstevel@tonic-gate * DEMAP_ALL_ADDR, all user TLB entries) on all CPUs using a given hat.
20340Sstevel@tonic-gate */
20350Sstevel@tonic-gate void
20363446Smrj hat_tlb_inval(hat_t *hat, uintptr_t va)
20370Sstevel@tonic-gate {
20380Sstevel@tonic-gate extern int flushes_require_xcalls; /* from mp_startup.c */
20390Sstevel@tonic-gate cpuset_t justme;
20405084Sjohnlev cpuset_t cpus_to_shootdown;
20415084Sjohnlev #ifndef __xpv
20424191Sjosephb cpuset_t check_cpus;
20434191Sjosephb cpu_t *cpup;
20444191Sjosephb int c;
20455084Sjohnlev #endif
20460Sstevel@tonic-gate
20470Sstevel@tonic-gate /*
20480Sstevel@tonic-gate * If the hat is being destroyed, there are no more users, so
20490Sstevel@tonic-gate * demap need not do anything.
20500Sstevel@tonic-gate */
20510Sstevel@tonic-gate if (hat->hat_flags & HAT_FREEING)
20520Sstevel@tonic-gate return;
20530Sstevel@tonic-gate
20540Sstevel@tonic-gate /*
20550Sstevel@tonic-gate * If demapping from a shared pagetable, it is best to demap the
20560Sstevel@tonic-gate * entire set of user TLBs, since we don't know at what addresses
20570Sstevel@tonic-gate * these were shared.
20580Sstevel@tonic-gate */
20590Sstevel@tonic-gate if (hat->hat_flags & HAT_SHARED) {
20600Sstevel@tonic-gate hat = kas.a_hat;
20610Sstevel@tonic-gate va = DEMAP_ALL_ADDR;
20620Sstevel@tonic-gate }
20630Sstevel@tonic-gate
20640Sstevel@tonic-gate /*
20650Sstevel@tonic-gate * if not running with multiple CPUs, don't use cross calls
20660Sstevel@tonic-gate */
20670Sstevel@tonic-gate if (panicstr || !flushes_require_xcalls) {
20685084Sjohnlev #ifdef __xpv
20695084Sjohnlev if (va == DEMAP_ALL_ADDR)
20705084Sjohnlev xen_flush_tlb();
20715084Sjohnlev else
20725084Sjohnlev xen_flush_va((caddr_t)va);
20735084Sjohnlev #else
20740Sstevel@tonic-gate (void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
20755084Sjohnlev #endif
20760Sstevel@tonic-gate return;
20770Sstevel@tonic-gate }
20780Sstevel@tonic-gate
20790Sstevel@tonic-gate
20800Sstevel@tonic-gate /*
20813446Smrj * Determine which CPUs to shoot down. Kernel changes always do all CPUs.
20823446Smrj * Otherwise it's just CPUs currently executing in this hat.
20830Sstevel@tonic-gate */
20840Sstevel@tonic-gate kpreempt_disable();
20850Sstevel@tonic-gate CPUSET_ONLY(justme, CPU->cpu_id);
20863446Smrj if (hat == kas.a_hat)
20873446Smrj cpus_to_shootdown = khat_cpuset;
20880Sstevel@tonic-gate else
20893446Smrj cpus_to_shootdown = hat->hat_cpus;
20903446Smrj
20915084Sjohnlev #ifndef __xpv
20924191Sjosephb /*
20934191Sjosephb * If any CPUs in the set are idle, just request a delayed flush
20944191Sjosephb * and avoid waking them up.
20954191Sjosephb */
20964191Sjosephb check_cpus = cpus_to_shootdown;
20974191Sjosephb for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
20984191Sjosephb ulong_t tlb_info;
20994191Sjosephb
21004191Sjosephb if (!CPU_IN_SET(check_cpus, c))
21014191Sjosephb continue;
21024191Sjosephb CPUSET_DEL(check_cpus, c);
21034191Sjosephb cpup = cpu[c];
21044191Sjosephb if (cpup == NULL)
21054191Sjosephb continue;
21064191Sjosephb
21074191Sjosephb tlb_info = cpup->cpu_m.mcpu_tlb_info;
21084191Sjosephb while (tlb_info == TLB_CPU_HALTED) {
21094191Sjosephb (void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
21104381Sjosephb TLB_CPU_HALTED | TLB_INVAL_ALL);
21114191Sjosephb SMT_PAUSE();
21124191Sjosephb tlb_info = cpup->cpu_m.mcpu_tlb_info;
21134191Sjosephb }
21144191Sjosephb if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
21154191Sjosephb HATSTAT_INC(hs_tlb_inval_delayed);
21164191Sjosephb CPUSET_DEL(cpus_to_shootdown, c);
21174191Sjosephb }
21184191Sjosephb }
21195084Sjohnlev #endif
21204191Sjosephb
21213446Smrj if (CPUSET_ISNULL(cpus_to_shootdown) ||
21223446Smrj CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
21233446Smrj
21245084Sjohnlev #ifdef __xpv
21255084Sjohnlev if (va == DEMAP_ALL_ADDR)
21265084Sjohnlev xen_flush_tlb();
21275084Sjohnlev else
21285084Sjohnlev xen_flush_va((caddr_t)va);
21295084Sjohnlev #else
21303446Smrj (void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
21315084Sjohnlev #endif
21323446Smrj
21333446Smrj } else {
21343446Smrj
21353446Smrj CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
21365084Sjohnlev #ifdef __xpv
21375084Sjohnlev if (va == DEMAP_ALL_ADDR)
21385084Sjohnlev xen_gflush_tlb(cpus_to_shootdown);
21395084Sjohnlev else
21405084Sjohnlev xen_gflush_va((caddr_t)va, cpus_to_shootdown);
21415084Sjohnlev #else
21429489SJoe.Bonasera@sun.com xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL,
21439489SJoe.Bonasera@sun.com CPUSET2BV(cpus_to_shootdown), hati_demap_func);
21445084Sjohnlev #endif
21453446Smrj
21463446Smrj }
21470Sstevel@tonic-gate kpreempt_enable();
21480Sstevel@tonic-gate }
21490Sstevel@tonic-gate
21500Sstevel@tonic-gate /*
21510Sstevel@tonic-gate * Interior routine for HAT_UNLOADs from hat_unload_callback(),
21520Sstevel@tonic-gate * hat_kmap_unload() OR from hat_steal() code. This routine doesn't
21530Sstevel@tonic-gate * handle releasing of the htables.
21540Sstevel@tonic-gate */
21550Sstevel@tonic-gate void
21560Sstevel@tonic-gate hat_pte_unmap(
21570Sstevel@tonic-gate htable_t *ht,
21580Sstevel@tonic-gate uint_t entry,
21590Sstevel@tonic-gate uint_t flags,
21600Sstevel@tonic-gate x86pte_t old_pte,
21610Sstevel@tonic-gate void *pte_ptr)
21620Sstevel@tonic-gate {
21630Sstevel@tonic-gate hat_t *hat = ht->ht_hat;
21640Sstevel@tonic-gate hment_t *hm = NULL;
21650Sstevel@tonic-gate page_t *pp = NULL;
21660Sstevel@tonic-gate level_t l = ht->ht_level;
21670Sstevel@tonic-gate pfn_t pfn;
21680Sstevel@tonic-gate
21690Sstevel@tonic-gate /*
21700Sstevel@tonic-gate * We always track the locking counts, even if nothing is unmapped
21710Sstevel@tonic-gate */
21720Sstevel@tonic-gate if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
21730Sstevel@tonic-gate ASSERT(ht->ht_lock_cnt > 0);
21740Sstevel@tonic-gate HTABLE_LOCK_DEC(ht);
21750Sstevel@tonic-gate }
21760Sstevel@tonic-gate
21770Sstevel@tonic-gate /*
21780Sstevel@tonic-gate * Figure out which page's mapping list lock to acquire using the PFN
21790Sstevel@tonic-gate * passed in the "old" PTE. We then attempt to invalidate the PTE.
21800Sstevel@tonic-gate * If another thread, probably a hat_pageunload, has asynchronously
21810Sstevel@tonic-gate * unmapped/remapped this address we'll loop here.
21820Sstevel@tonic-gate */
21830Sstevel@tonic-gate ASSERT(ht->ht_busy > 0);
21840Sstevel@tonic-gate while (PTE_ISVALID(old_pte)) {
21850Sstevel@tonic-gate pfn = PTE2PFN(old_pte, l);
21863446Smrj if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
21870Sstevel@tonic-gate pp = NULL;
21880Sstevel@tonic-gate } else {
21895084Sjohnlev #ifdef __xpv
21905084Sjohnlev if (pfn == PFN_INVALID)
21915084Sjohnlev panic("Invalid PFN, but not PT_NOCONSIST");
21925084Sjohnlev #endif
21930Sstevel@tonic-gate pp = page_numtopp_nolock(pfn);
219447Sjosephb if (pp == NULL) {
219547Sjosephb panic("no page_t, not NOCONSIST: old_pte="
219647Sjosephb FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
219747Sjosephb old_pte, (uintptr_t)ht, entry,
219847Sjosephb (uintptr_t)pte_ptr);
219947Sjosephb }
22000Sstevel@tonic-gate x86_hm_enter(pp);
22010Sstevel@tonic-gate }
220247Sjosephb
220312532Sjoe.bonasera@oracle.com old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr);
22040Sstevel@tonic-gate
22050Sstevel@tonic-gate /*
22060Sstevel@tonic-gate * If the page hadn't changed we've unmapped it and can proceed
22070Sstevel@tonic-gate */
22080Sstevel@tonic-gate if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
22090Sstevel@tonic-gate break;
22100Sstevel@tonic-gate
22110Sstevel@tonic-gate /*
22120Sstevel@tonic-gate * Otherwise, we'll have to retry with the current old_pte.
22130Sstevel@tonic-gate * Drop the hment lock, since the pfn may have changed.
22140Sstevel@tonic-gate */
22150Sstevel@tonic-gate if (pp != NULL) {
22160Sstevel@tonic-gate x86_hm_exit(pp);
22170Sstevel@tonic-gate pp = NULL;
22180Sstevel@tonic-gate } else {
22193446Smrj ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
22200Sstevel@tonic-gate }
22210Sstevel@tonic-gate }
22220Sstevel@tonic-gate
22230Sstevel@tonic-gate /*
22240Sstevel@tonic-gate * If the old mapping wasn't valid, there's nothing more to do
22250Sstevel@tonic-gate */
22260Sstevel@tonic-gate if (!PTE_ISVALID(old_pte)) {
22270Sstevel@tonic-gate if (pp != NULL)
22280Sstevel@tonic-gate x86_hm_exit(pp);
22290Sstevel@tonic-gate return;
22300Sstevel@tonic-gate }
22310Sstevel@tonic-gate
22320Sstevel@tonic-gate /*
22330Sstevel@tonic-gate * Take care of syncing any MOD/REF bits and removing the hment.
22340Sstevel@tonic-gate */
22350Sstevel@tonic-gate if (pp != NULL) {
22360Sstevel@tonic-gate if (!(flags & HAT_UNLOAD_NOSYNC))
22370Sstevel@tonic-gate hati_sync_pte_to_page(pp, old_pte, l);
22380Sstevel@tonic-gate hm = hment_remove(pp, ht, entry);
22390Sstevel@tonic-gate x86_hm_exit(pp);
22400Sstevel@tonic-gate if (hm != NULL)
22410Sstevel@tonic-gate hment_free(hm);
22420Sstevel@tonic-gate }
22430Sstevel@tonic-gate
22440Sstevel@tonic-gate /*
22450Sstevel@tonic-gate * Handle bookkeeping in the htable and hat
22460Sstevel@tonic-gate */
22470Sstevel@tonic-gate ASSERT(ht->ht_valid_cnt > 0);
22480Sstevel@tonic-gate HTABLE_DEC(ht->ht_valid_cnt);
22490Sstevel@tonic-gate PGCNT_DEC(hat, l);
22500Sstevel@tonic-gate }
22510Sstevel@tonic-gate
22520Sstevel@tonic-gate /*
22530Sstevel@tonic-gate * very cheap unload implementation to special case some kernel addresses
22540Sstevel@tonic-gate */
22550Sstevel@tonic-gate static void
22560Sstevel@tonic-gate hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
22570Sstevel@tonic-gate {
22580Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr;
22590Sstevel@tonic-gate uintptr_t eva = va + len;
22603446Smrj pgcnt_t pg_index;
22610Sstevel@tonic-gate htable_t *ht;
22620Sstevel@tonic-gate uint_t entry;
22633446Smrj x86pte_t *pte_ptr;
22640Sstevel@tonic-gate x86pte_t old_pte;
22650Sstevel@tonic-gate
22660Sstevel@tonic-gate for (; va < eva; va += MMU_PAGESIZE) {
22670Sstevel@tonic-gate /*
22680Sstevel@tonic-gate * Get the PTE
22690Sstevel@tonic-gate */
22703446Smrj pg_index = mmu_btop(va - mmu.kmap_addr);
22713446Smrj pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
22723446Smrj old_pte = GET_PTE(pte_ptr);
22730Sstevel@tonic-gate
22740Sstevel@tonic-gate /*
22750Sstevel@tonic-gate * get the htable / entry
22760Sstevel@tonic-gate */
22770Sstevel@tonic-gate ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
22780Sstevel@tonic-gate >> LEVEL_SHIFT(1)];
22790Sstevel@tonic-gate entry = htable_va2entry(va, ht);
22800Sstevel@tonic-gate
22810Sstevel@tonic-gate /*
22820Sstevel@tonic-gate * use mostly common code to unmap it.
22830Sstevel@tonic-gate */
22840Sstevel@tonic-gate hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr);
22850Sstevel@tonic-gate }
22860Sstevel@tonic-gate }
22870Sstevel@tonic-gate
22880Sstevel@tonic-gate
22890Sstevel@tonic-gate /*
22900Sstevel@tonic-gate * unload a range of virtual address space (no callback)
22910Sstevel@tonic-gate */
22920Sstevel@tonic-gate void
22930Sstevel@tonic-gate hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
22940Sstevel@tonic-gate {
22950Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr;
22963446Smrj
22975084Sjohnlev XPV_DISALLOW_MIGRATE();
22983446Smrj ASSERT(hat == kas.a_hat || va + len <= _userlimit);
22990Sstevel@tonic-gate
23000Sstevel@tonic-gate /*
23010Sstevel@tonic-gate * special case for performance.
23020Sstevel@tonic-gate */
23030Sstevel@tonic-gate if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
23040Sstevel@tonic-gate ASSERT(hat == kas.a_hat);
23050Sstevel@tonic-gate hat_kmap_unload(addr, len, flags);
23063446Smrj } else {
23073446Smrj hat_unload_callback(hat, addr, len, flags, NULL);
23080Sstevel@tonic-gate }
23095084Sjohnlev XPV_ALLOW_MIGRATE();
23100Sstevel@tonic-gate }
23110Sstevel@tonic-gate
23120Sstevel@tonic-gate /*
23130Sstevel@tonic-gate * Do the callbacks for ranges being unloaded.
23140Sstevel@tonic-gate */
23150Sstevel@tonic-gate typedef struct range_info {
23160Sstevel@tonic-gate uintptr_t rng_va;
23170Sstevel@tonic-gate ulong_t rng_cnt;
23180Sstevel@tonic-gate level_t rng_level;
23190Sstevel@tonic-gate } range_info_t;
23200Sstevel@tonic-gate
23210Sstevel@tonic-gate static void
23220Sstevel@tonic-gate handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
23230Sstevel@tonic-gate {
23240Sstevel@tonic-gate /*
23250Sstevel@tonic-gate * do callbacks to the upper level VM system
23260Sstevel@tonic-gate */
23270Sstevel@tonic-gate while (cb != NULL && cnt > 0) {
23280Sstevel@tonic-gate --cnt;
23290Sstevel@tonic-gate cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
23300Sstevel@tonic-gate cb->hcb_end_addr = cb->hcb_start_addr;
23310Sstevel@tonic-gate cb->hcb_end_addr +=
23320Sstevel@tonic-gate range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
23330Sstevel@tonic-gate cb->hcb_function(cb);
23340Sstevel@tonic-gate }
23350Sstevel@tonic-gate }
23360Sstevel@tonic-gate
23370Sstevel@tonic-gate /*
23380Sstevel@tonic-gate * Unload a given range of addresses (has optional callback)
23390Sstevel@tonic-gate *
23400Sstevel@tonic-gate * Flags:
23410Sstevel@tonic-gate * define HAT_UNLOAD 0x00
23420Sstevel@tonic-gate * define HAT_UNLOAD_NOSYNC 0x02
23430Sstevel@tonic-gate * define HAT_UNLOAD_UNLOCK 0x04
23440Sstevel@tonic-gate * define HAT_UNLOAD_OTHER 0x08 - not used
23450Sstevel@tonic-gate * define HAT_UNLOAD_UNMAP 0x10 - same as HAT_UNLOAD
23460Sstevel@tonic-gate */
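/*
 * A minimal sketch of callback use (illustrative; my_unload_cb is a
 * hypothetical function). For each contiguous run it unloads, the HAT
 * fills in hcb_start_addr/hcb_end_addr and invokes hcb_function:
 *
 *	hat_callback_t cb;
 *
 *	cb.hcb_function = my_unload_cb;
 *	hat_unload_callback(as->a_hat, addr, len, HAT_UNLOAD_UNLOCK, &cb);
 */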
23470Sstevel@tonic-gate #define MAX_UNLOAD_CNT (8)
23480Sstevel@tonic-gate void
23490Sstevel@tonic-gate hat_unload_callback(
23500Sstevel@tonic-gate hat_t *hat,
23510Sstevel@tonic-gate caddr_t addr,
23520Sstevel@tonic-gate size_t len,
23530Sstevel@tonic-gate uint_t flags,
23540Sstevel@tonic-gate hat_callback_t *cb)
23550Sstevel@tonic-gate {
23560Sstevel@tonic-gate uintptr_t vaddr = (uintptr_t)addr;
23570Sstevel@tonic-gate uintptr_t eaddr = vaddr + len;
23580Sstevel@tonic-gate htable_t *ht = NULL;
23590Sstevel@tonic-gate uint_t entry;
236047Sjosephb uintptr_t contig_va = (uintptr_t)-1L;
23610Sstevel@tonic-gate range_info_t r[MAX_UNLOAD_CNT];
23620Sstevel@tonic-gate uint_t r_cnt = 0;
23630Sstevel@tonic-gate x86pte_t old_pte;
23640Sstevel@tonic-gate
23655084Sjohnlev XPV_DISALLOW_MIGRATE();
23663446Smrj ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
23670Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr));
23680Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(eaddr));
23690Sstevel@tonic-gate
23703446Smrj /*
23713446Smrj * Special case a single page being unloaded for speed. This happens
23723446Smrj * quite frequently; COW faults after a fork(), for example.
23733446Smrj */
23743446Smrj if (cb == NULL && len == MMU_PAGESIZE) {
23753446Smrj ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
23763446Smrj if (ht != NULL) {
23773446Smrj if (PTE_ISVALID(old_pte))
23783446Smrj hat_pte_unmap(ht, entry, flags, old_pte, NULL);
23793446Smrj htable_release(ht);
23803446Smrj }
23815084Sjohnlev XPV_ALLOW_MIGRATE();
23823446Smrj return;
23833446Smrj }
23843446Smrj
23850Sstevel@tonic-gate while (vaddr < eaddr) {
23860Sstevel@tonic-gate old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
23870Sstevel@tonic-gate if (ht == NULL)
23880Sstevel@tonic-gate break;
23890Sstevel@tonic-gate
23900Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(vaddr));
23910Sstevel@tonic-gate
23920Sstevel@tonic-gate if (vaddr < (uintptr_t)addr)
23930Sstevel@tonic-gate panic("hat_unload_callback(): unmap inside large page");
23940Sstevel@tonic-gate
23950Sstevel@tonic-gate /*
23960Sstevel@tonic-gate * We'll do the callbacks for contiguous ranges
23970Sstevel@tonic-gate */
239847Sjosephb if (vaddr != contig_va ||
23990Sstevel@tonic-gate (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
24000Sstevel@tonic-gate if (r_cnt == MAX_UNLOAD_CNT) {
24010Sstevel@tonic-gate handle_ranges(cb, r_cnt, r);
24020Sstevel@tonic-gate r_cnt = 0;
24030Sstevel@tonic-gate }
24040Sstevel@tonic-gate r[r_cnt].rng_va = vaddr;
24050Sstevel@tonic-gate r[r_cnt].rng_cnt = 0;
24060Sstevel@tonic-gate r[r_cnt].rng_level = ht->ht_level;
24070Sstevel@tonic-gate ++r_cnt;
24080Sstevel@tonic-gate }
24090Sstevel@tonic-gate
24100Sstevel@tonic-gate /*
24110Sstevel@tonic-gate * Unload one mapping from the page tables.
24120Sstevel@tonic-gate */
24130Sstevel@tonic-gate entry = htable_va2entry(vaddr, ht);
24140Sstevel@tonic-gate hat_pte_unmap(ht, entry, flags, old_pte, NULL);
24150Sstevel@tonic-gate ASSERT(ht->ht_level <= mmu.max_page_level);
24160Sstevel@tonic-gate vaddr += LEVEL_SIZE(ht->ht_level);
241747Sjosephb contig_va = vaddr;
24180Sstevel@tonic-gate ++r[r_cnt - 1].rng_cnt;
24190Sstevel@tonic-gate }
24200Sstevel@tonic-gate if (ht)
24210Sstevel@tonic-gate htable_release(ht);
24220Sstevel@tonic-gate
24230Sstevel@tonic-gate /*
24240Sstevel@tonic-gate * handle last range for callbacks
24250Sstevel@tonic-gate */
24260Sstevel@tonic-gate if (r_cnt > 0)
24270Sstevel@tonic-gate handle_ranges(cb, r_cnt, r);
24285084Sjohnlev XPV_ALLOW_MIGRATE();
24290Sstevel@tonic-gate }
24300Sstevel@tonic-gate
24310Sstevel@tonic-gate /*
243211079SDave.Plauger@Sun.COM * Invalidate a virtual address translation on a slave CPU during
243311079SDave.Plauger@Sun.COM * panic() dumps.
243410843SDave.Plauger@Sun.COM */
243510843SDave.Plauger@Sun.COM void
243610843SDave.Plauger@Sun.COM hat_flush_range(hat_t *hat, caddr_t va, size_t size)
243710843SDave.Plauger@Sun.COM {
243810843SDave.Plauger@Sun.COM ssize_t sz;
243910843SDave.Plauger@Sun.COM caddr_t endva = va + size;
244010843SDave.Plauger@Sun.COM
244110843SDave.Plauger@Sun.COM while (va < endva) {
244210843SDave.Plauger@Sun.COM sz = hat_getpagesize(hat, va);
244311079SDave.Plauger@Sun.COM if (sz < 0) {
244410843SDave.Plauger@Sun.COM #ifdef __xpv
244510843SDave.Plauger@Sun.COM xen_flush_tlb();
244610843SDave.Plauger@Sun.COM #else
244711079SDave.Plauger@Sun.COM flush_all_tlb_entries();
244810843SDave.Plauger@Sun.COM #endif
244910843SDave.Plauger@Sun.COM break;
245011079SDave.Plauger@Sun.COM }
245111079SDave.Plauger@Sun.COM #ifdef __xpv
245211079SDave.Plauger@Sun.COM xen_flush_va(va);
245311079SDave.Plauger@Sun.COM #else
245411079SDave.Plauger@Sun.COM mmu_tlbflush_entry(va);
245511079SDave.Plauger@Sun.COM #endif
245610843SDave.Plauger@Sun.COM va += sz;
245710843SDave.Plauger@Sun.COM }
245810843SDave.Plauger@Sun.COM }
245910843SDave.Plauger@Sun.COM
246010843SDave.Plauger@Sun.COM /*
24610Sstevel@tonic-gate * synchronize mapping with software data structures
24620Sstevel@tonic-gate *
24630Sstevel@tonic-gate * This interface is currently only used by the working set monitor
24640Sstevel@tonic-gate * driver.
24650Sstevel@tonic-gate */
24660Sstevel@tonic-gate /*ARGSUSED*/
24670Sstevel@tonic-gate void
24680Sstevel@tonic-gate hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
24690Sstevel@tonic-gate {
24700Sstevel@tonic-gate uintptr_t vaddr = (uintptr_t)addr;
24710Sstevel@tonic-gate uintptr_t eaddr = vaddr + len;
24720Sstevel@tonic-gate htable_t *ht = NULL;
24730Sstevel@tonic-gate uint_t entry;
24740Sstevel@tonic-gate x86pte_t pte;
24750Sstevel@tonic-gate x86pte_t save_pte;
24760Sstevel@tonic-gate x86pte_t new;
24770Sstevel@tonic-gate page_t *pp;
24780Sstevel@tonic-gate
24790Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(vaddr));
24800Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr));
24810Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(eaddr));
24823446Smrj ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
24830Sstevel@tonic-gate
24845084Sjohnlev XPV_DISALLOW_MIGRATE();
24850Sstevel@tonic-gate for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
24860Sstevel@tonic-gate try_again:
24870Sstevel@tonic-gate pte = htable_walk(hat, &ht, &vaddr, eaddr);
24880Sstevel@tonic-gate if (ht == NULL)
24890Sstevel@tonic-gate break;
24900Sstevel@tonic-gate entry = htable_va2entry(vaddr, ht);
24910Sstevel@tonic-gate
24923446Smrj if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
24930Sstevel@tonic-gate PTE_GET(pte, PT_REF | PT_MOD) == 0)
24940Sstevel@tonic-gate continue;
24950Sstevel@tonic-gate
24960Sstevel@tonic-gate /*
24970Sstevel@tonic-gate * We need to acquire the mapping list lock to protect
24980Sstevel@tonic-gate * against hat_pageunload(), hat_unload(), etc.
24990Sstevel@tonic-gate */
25000Sstevel@tonic-gate pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
25010Sstevel@tonic-gate if (pp == NULL)
25020Sstevel@tonic-gate break;
25030Sstevel@tonic-gate x86_hm_enter(pp);
25040Sstevel@tonic-gate save_pte = pte;
25050Sstevel@tonic-gate pte = x86pte_get(ht, entry);
25060Sstevel@tonic-gate if (pte != save_pte) {
25070Sstevel@tonic-gate x86_hm_exit(pp);
25080Sstevel@tonic-gate goto try_again;
25090Sstevel@tonic-gate }
25103446Smrj if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
25110Sstevel@tonic-gate PTE_GET(pte, PT_REF | PT_MOD) == 0) {
25120Sstevel@tonic-gate x86_hm_exit(pp);
25130Sstevel@tonic-gate continue;
25140Sstevel@tonic-gate }
25150Sstevel@tonic-gate
25160Sstevel@tonic-gate /*
25170Sstevel@tonic-gate * Need to clear ref or mod bits. We may compete with
25180Sstevel@tonic-gate * hardware updating the R/M bits and have to try again.
25190Sstevel@tonic-gate */
25200Sstevel@tonic-gate if (flags == HAT_SYNC_ZERORM) {
25210Sstevel@tonic-gate new = pte;
25220Sstevel@tonic-gate PTE_CLR(new, PT_REF | PT_MOD);
25230Sstevel@tonic-gate pte = hati_update_pte(ht, entry, pte, new);
25240Sstevel@tonic-gate if (pte != 0) {
25250Sstevel@tonic-gate x86_hm_exit(pp);
25260Sstevel@tonic-gate goto try_again;
25270Sstevel@tonic-gate }
25280Sstevel@tonic-gate } else {
25290Sstevel@tonic-gate /*
25300Sstevel@tonic-gate * sync the PTE to the page_t
25310Sstevel@tonic-gate */
25320Sstevel@tonic-gate hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
25330Sstevel@tonic-gate }
25340Sstevel@tonic-gate x86_hm_exit(pp);
25350Sstevel@tonic-gate }
25360Sstevel@tonic-gate if (ht)
25370Sstevel@tonic-gate htable_release(ht);
25385084Sjohnlev XPV_ALLOW_MIGRATE();
25390Sstevel@tonic-gate }
25400Sstevel@tonic-gate
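/*
 * Example use of hat_sync() (an illustrative sketch only; the helper name
 * and sampling scheme are hypothetical, not part of this file).  A working
 * set monitor can sample by first folding the current hardware ref/mod
 * state into the page_t's, then passing HAT_SYNC_ZERORM to atomically
 * clear PT_REF/PT_MOD so the next sample starts fresh.  Both addr and
 * len must be page aligned, per the ASSERTs above.
 */
static void
example_ws_sample(struct as *as, caddr_t addr, size_t len)
{
	/* push the current ref/mod bits from the PTEs into the page_t's */
	hat_sync(as->a_hat, addr, len, 0);

	/* clear the hardware bits so the next interval is measured anew */
	hat_sync(as->a_hat, addr, len, HAT_SYNC_ZERORM);
}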
25410Sstevel@tonic-gate /*
25420Sstevel@tonic-gate * void hat_map(hat, addr, len, flags)
25430Sstevel@tonic-gate */
25440Sstevel@tonic-gate /*ARGSUSED*/
25450Sstevel@tonic-gate void
25460Sstevel@tonic-gate hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
25470Sstevel@tonic-gate {
25480Sstevel@tonic-gate /* does nothing */
25490Sstevel@tonic-gate }
25500Sstevel@tonic-gate
25510Sstevel@tonic-gate /*
25520Sstevel@tonic-gate * uint_t hat_getattr(hat, addr, *attr)
25530Sstevel@tonic-gate * returns attr for <hat,addr> in *attr. returns 0 if there was a
25540Sstevel@tonic-gate * mapping and *attr is valid, nonzero if there was no mapping and
25550Sstevel@tonic-gate * *attr is not valid.
25560Sstevel@tonic-gate */
25570Sstevel@tonic-gate uint_t
25580Sstevel@tonic-gate hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
25590Sstevel@tonic-gate {
25600Sstevel@tonic-gate uintptr_t vaddr = ALIGN2PAGE(addr);
25610Sstevel@tonic-gate htable_t *ht = NULL;
25620Sstevel@tonic-gate x86pte_t pte;
25630Sstevel@tonic-gate
25643446Smrj ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
25650Sstevel@tonic-gate
25660Sstevel@tonic-gate if (IN_VA_HOLE(vaddr))
25670Sstevel@tonic-gate return ((uint_t)-1);
25680Sstevel@tonic-gate
25693446Smrj ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
25700Sstevel@tonic-gate if (ht == NULL)
25710Sstevel@tonic-gate return ((uint_t)-1);
25720Sstevel@tonic-gate
25730Sstevel@tonic-gate if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
25740Sstevel@tonic-gate htable_release(ht);
25750Sstevel@tonic-gate return ((uint_t)-1);
25760Sstevel@tonic-gate }
25770Sstevel@tonic-gate
25780Sstevel@tonic-gate *attr = PROT_READ;
25790Sstevel@tonic-gate if (PTE_GET(pte, PT_WRITABLE))
25800Sstevel@tonic-gate *attr |= PROT_WRITE;
25810Sstevel@tonic-gate if (PTE_GET(pte, PT_USER))
25820Sstevel@tonic-gate *attr |= PROT_USER;
25830Sstevel@tonic-gate if (!PTE_GET(pte, mmu.pt_nx))
25840Sstevel@tonic-gate *attr |= PROT_EXEC;
25853446Smrj if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
25860Sstevel@tonic-gate *attr |= HAT_NOSYNC;
25870Sstevel@tonic-gate htable_release(ht);
25880Sstevel@tonic-gate return (0);
25890Sstevel@tonic-gate }
25900Sstevel@tonic-gate
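/*
 * Example use of hat_getattr() (a sketch; the helper name is hypothetical).
 * Since 0 means success here, callers must check the return value before
 * trusting *attr.
 */
static int
example_is_user_writable(hat_t *hat, caddr_t addr)
{
	uint_t attr;

	if (hat_getattr(hat, addr, &attr) != 0)
		return (0);		/* no mapping present */
	return ((attr & (PROT_USER | PROT_WRITE)) ==
	    (PROT_USER | PROT_WRITE));
}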
25910Sstevel@tonic-gate /*
25920Sstevel@tonic-gate * hat_updateattr() applies the given attribute change to an existing mapping
25930Sstevel@tonic-gate */
25940Sstevel@tonic-gate #define HAT_LOAD_ATTR 1
25950Sstevel@tonic-gate #define HAT_SET_ATTR 2
25960Sstevel@tonic-gate #define HAT_CLR_ATTR 3
25970Sstevel@tonic-gate
25980Sstevel@tonic-gate static void
25990Sstevel@tonic-gate hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
26000Sstevel@tonic-gate {
26010Sstevel@tonic-gate uintptr_t vaddr = (uintptr_t)addr;
26020Sstevel@tonic-gate uintptr_t eaddr = (uintptr_t)addr + len;
26030Sstevel@tonic-gate htable_t *ht = NULL;
26040Sstevel@tonic-gate uint_t entry;
26050Sstevel@tonic-gate x86pte_t oldpte, newpte;
26060Sstevel@tonic-gate page_t *pp;
26070Sstevel@tonic-gate
26085084Sjohnlev XPV_DISALLOW_MIGRATE();
26090Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr));
26100Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(eaddr));
26110Sstevel@tonic-gate ASSERT(hat == kas.a_hat ||
26120Sstevel@tonic-gate AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
26130Sstevel@tonic-gate for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
26140Sstevel@tonic-gate try_again:
26150Sstevel@tonic-gate oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
26160Sstevel@tonic-gate if (ht == NULL)
26170Sstevel@tonic-gate break;
26183446Smrj if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
26190Sstevel@tonic-gate continue;
26200Sstevel@tonic-gate
26210Sstevel@tonic-gate pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
26220Sstevel@tonic-gate if (pp == NULL)
26230Sstevel@tonic-gate continue;
26240Sstevel@tonic-gate x86_hm_enter(pp);
26250Sstevel@tonic-gate
26260Sstevel@tonic-gate newpte = oldpte;
26270Sstevel@tonic-gate /*
26280Sstevel@tonic-gate * We found a page table entry in the desired range,
26290Sstevel@tonic-gate * figure out the new attributes.
26300Sstevel@tonic-gate */
26310Sstevel@tonic-gate if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
26320Sstevel@tonic-gate if ((attr & PROT_WRITE) &&
26330Sstevel@tonic-gate !PTE_GET(oldpte, PT_WRITABLE))
26340Sstevel@tonic-gate newpte |= PT_WRITABLE;
26350Sstevel@tonic-gate
26363446Smrj if ((attr & HAT_NOSYNC) &&
26373446Smrj PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
26380Sstevel@tonic-gate newpte |= PT_NOSYNC;
26390Sstevel@tonic-gate
26400Sstevel@tonic-gate if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
26410Sstevel@tonic-gate newpte &= ~mmu.pt_nx;
26420Sstevel@tonic-gate }
26430Sstevel@tonic-gate
26440Sstevel@tonic-gate if (what == HAT_LOAD_ATTR) {
26450Sstevel@tonic-gate if (!(attr & PROT_WRITE) &&
26460Sstevel@tonic-gate PTE_GET(oldpte, PT_WRITABLE))
26470Sstevel@tonic-gate newpte &= ~PT_WRITABLE;
26480Sstevel@tonic-gate
26493446Smrj if (!(attr & HAT_NOSYNC) &&
26503446Smrj PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
26513446Smrj newpte &= ~PT_SOFTWARE;
26520Sstevel@tonic-gate
26530Sstevel@tonic-gate if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
26540Sstevel@tonic-gate newpte |= mmu.pt_nx;
26550Sstevel@tonic-gate }
26560Sstevel@tonic-gate
26570Sstevel@tonic-gate if (what == HAT_CLR_ATTR) {
26580Sstevel@tonic-gate if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
26590Sstevel@tonic-gate newpte &= ~PT_WRITABLE;
26600Sstevel@tonic-gate
26613446Smrj if ((attr & HAT_NOSYNC) &&
26623446Smrj PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
26633446Smrj newpte &= ~PT_SOFTWARE;
26640Sstevel@tonic-gate
26650Sstevel@tonic-gate if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
26660Sstevel@tonic-gate newpte |= mmu.pt_nx;
26670Sstevel@tonic-gate }
26680Sstevel@tonic-gate
26690Sstevel@tonic-gate /*
26703446Smrj * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
26713446Smrj * x86pte_set() depends on this.
26723446Smrj */
26733446Smrj if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
26743446Smrj newpte |= PT_REF | PT_MOD;
26753446Smrj
26763446Smrj /*
26770Sstevel@tonic-gate * what about PROT_READ or others? this code only handles:
26780Sstevel@tonic-gate * EXEC, WRITE, NOSYNC
26790Sstevel@tonic-gate */
26800Sstevel@tonic-gate
26810Sstevel@tonic-gate /*
26820Sstevel@tonic-gate * If new PTE really changed, update the table.
26830Sstevel@tonic-gate */
26840Sstevel@tonic-gate if (newpte != oldpte) {
26850Sstevel@tonic-gate entry = htable_va2entry(vaddr, ht);
26860Sstevel@tonic-gate oldpte = hati_update_pte(ht, entry, oldpte, newpte);
26870Sstevel@tonic-gate if (oldpte != 0) {
26880Sstevel@tonic-gate x86_hm_exit(pp);
26890Sstevel@tonic-gate goto try_again;
26900Sstevel@tonic-gate }
26910Sstevel@tonic-gate }
26920Sstevel@tonic-gate x86_hm_exit(pp);
26930Sstevel@tonic-gate }
26940Sstevel@tonic-gate if (ht)
26950Sstevel@tonic-gate htable_release(ht);
26965084Sjohnlev XPV_ALLOW_MIGRATE();
26970Sstevel@tonic-gate }
26980Sstevel@tonic-gate
26990Sstevel@tonic-gate /*
27000Sstevel@tonic-gate * Various wrappers for hat_updateattr()
27010Sstevel@tonic-gate */
27020Sstevel@tonic-gate void
27030Sstevel@tonic-gate hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
27040Sstevel@tonic-gate {
27053446Smrj ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
27060Sstevel@tonic-gate hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
27070Sstevel@tonic-gate }
27080Sstevel@tonic-gate
27090Sstevel@tonic-gate void
27100Sstevel@tonic-gate hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
27110Sstevel@tonic-gate {
27123446Smrj ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
27130Sstevel@tonic-gate hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
27140Sstevel@tonic-gate }
27150Sstevel@tonic-gate
27160Sstevel@tonic-gate void
27170Sstevel@tonic-gate hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
27180Sstevel@tonic-gate {
27193446Smrj ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
27200Sstevel@tonic-gate hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
27210Sstevel@tonic-gate }
27220Sstevel@tonic-gate
27230Sstevel@tonic-gate void
27240Sstevel@tonic-gate hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
27250Sstevel@tonic-gate {
27263446Smrj ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
27270Sstevel@tonic-gate hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
27280Sstevel@tonic-gate }
27290Sstevel@tonic-gate
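/*
 * Sketch of how the wrappers above differ (a hypothetical example, not
 * original code): hat_setattr() only turns attributes on, hat_clrattr()
 * only turns them off, while hat_chgattr()/hat_chgprot() load the new
 * attribute set, turning bits both on and off as needed.
 */
static void
example_make_readonly(hat_t *hat, caddr_t addr, size_t len)
{
	/* remove write permission but leave the other attributes alone */
	hat_clrattr(hat, addr, len, PROT_WRITE);
}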
27300Sstevel@tonic-gate /*
27310Sstevel@tonic-gate * size_t hat_getpagesize(hat, addr)
27320Sstevel@tonic-gate * returns pagesize in bytes for <hat, addr>. returns -1 if there is
27330Sstevel@tonic-gate * no mapping. This is an advisory call.
27340Sstevel@tonic-gate */
27350Sstevel@tonic-gate ssize_t
27360Sstevel@tonic-gate hat_getpagesize(hat_t *hat, caddr_t addr)
27370Sstevel@tonic-gate {
27380Sstevel@tonic-gate uintptr_t vaddr = ALIGN2PAGE(addr);
27390Sstevel@tonic-gate htable_t *ht;
27400Sstevel@tonic-gate size_t pagesize;
27410Sstevel@tonic-gate
27423446Smrj ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
27430Sstevel@tonic-gate if (IN_VA_HOLE(vaddr))
27440Sstevel@tonic-gate return (-1);
27450Sstevel@tonic-gate ht = htable_getpage(hat, vaddr, NULL);
27460Sstevel@tonic-gate if (ht == NULL)
27470Sstevel@tonic-gate return (-1);
27480Sstevel@tonic-gate pagesize = LEVEL_SIZE(ht->ht_level);
27490Sstevel@tonic-gate htable_release(ht);
27500Sstevel@tonic-gate return (pagesize);
27510Sstevel@tonic-gate }
27520Sstevel@tonic-gate
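/*
 * Example use of hat_getpagesize() (a sketch; the helper name is
 * hypothetical): compute how many bytes remain in the mapping that covers
 * addr, e.g. to advance a scan by whole mappings at a time.  The mask
 * arithmetic relies on page sizes being powers of two.
 */
static size_t
example_bytes_left_in_mapping(hat_t *hat, caddr_t addr)
{
	ssize_t sz = hat_getpagesize(hat, addr);

	if (sz == -1)
		return (0);		/* no mapping at addr */
	return ((size_t)sz - ((uintptr_t)addr & ((size_t)sz - 1)));
}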
27530Sstevel@tonic-gate
27540Sstevel@tonic-gate
27550Sstevel@tonic-gate /*
27560Sstevel@tonic-gate * pfn_t hat_getpfnum(hat, addr)
27570Sstevel@tonic-gate * returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
27580Sstevel@tonic-gate */
27590Sstevel@tonic-gate pfn_t
27600Sstevel@tonic-gate hat_getpfnum(hat_t *hat, caddr_t addr)
27610Sstevel@tonic-gate {
27620Sstevel@tonic-gate uintptr_t vaddr = ALIGN2PAGE(addr);
27630Sstevel@tonic-gate htable_t *ht;
27640Sstevel@tonic-gate uint_t entry;
27650Sstevel@tonic-gate pfn_t pfn = PFN_INVALID;
27660Sstevel@tonic-gate
27673446Smrj ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
27680Sstevel@tonic-gate if (khat_running == 0)
27693446Smrj return (PFN_INVALID);
27700Sstevel@tonic-gate
27710Sstevel@tonic-gate if (IN_VA_HOLE(vaddr))
27720Sstevel@tonic-gate return (PFN_INVALID);
27730Sstevel@tonic-gate
27745084Sjohnlev XPV_DISALLOW_MIGRATE();
27750Sstevel@tonic-gate /*
27760Sstevel@tonic-gate * A very common use of hat_getpfnum() is from the DDI for kernel pages.
27770Sstevel@tonic-gate * Use the kmap_ptes (which also covers the 32 bit heap) to speed
27780Sstevel@tonic-gate * this up.
27790Sstevel@tonic-gate */
27800Sstevel@tonic-gate if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
27810Sstevel@tonic-gate x86pte_t pte;
27823446Smrj pgcnt_t pg_index;
27833446Smrj
27843446Smrj pg_index = mmu_btop(vaddr - mmu.kmap_addr);
27853446Smrj pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
27865084Sjohnlev if (PTE_ISVALID(pte))
27875084Sjohnlev /*LINTED [use of constant 0 causes a lint warning] */
27885084Sjohnlev pfn = PTE2PFN(pte, 0);
27895084Sjohnlev XPV_ALLOW_MIGRATE();
27905084Sjohnlev return (pfn);
27910Sstevel@tonic-gate }
27920Sstevel@tonic-gate
27930Sstevel@tonic-gate ht = htable_getpage(hat, vaddr, &entry);
27945084Sjohnlev if (ht == NULL) {
27955084Sjohnlev XPV_ALLOW_MIGRATE();
27960Sstevel@tonic-gate return (PFN_INVALID);
27975084Sjohnlev }
27980Sstevel@tonic-gate ASSERT(vaddr >= ht->ht_vaddr);
27990Sstevel@tonic-gate ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
28000Sstevel@tonic-gate pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
28010Sstevel@tonic-gate if (ht->ht_level > 0)
28020Sstevel@tonic-gate pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
28030Sstevel@tonic-gate htable_release(ht);
28045084Sjohnlev XPV_ALLOW_MIGRATE();
28050Sstevel@tonic-gate return (pfn);
28060Sstevel@tonic-gate }
28070Sstevel@tonic-gate
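/*
 * Example use of hat_getpfnum() (a sketch; the helper name is
 * hypothetical): translate a kernel virtual address to a physical
 * address.  This is also the supported replacement for the obsolete
 * hat_getkpfnum() below.
 */
static uint64_t
example_kva_to_pa(caddr_t addr)
{
	pfn_t pfn = hat_getpfnum(kas.a_hat, addr);

	if (pfn == PFN_INVALID)
		return ((uint64_t)-1);
	return (mmu_ptob((uint64_t)pfn) | ((uintptr_t)addr & MMU_PAGEOFFSET));
}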
28080Sstevel@tonic-gate /*
28090Sstevel@tonic-gate * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
28100Sstevel@tonic-gate * Use hat_getpfnum(kas.a_hat, ...) instead.
28110Sstevel@tonic-gate *
28120Sstevel@tonic-gate * We'd like to return PFN_INVALID if the mappings have underlying page_t's
28130Sstevel@tonic-gate * but can't right now because some software has grown to use
28140Sstevel@tonic-gate * this interface incorrectly. So for now when the interface is misused,
28150Sstevel@tonic-gate * return a warning to the user that in the future it won't work in the
28160Sstevel@tonic-gate * way they're abusing it, and carry on.
28170Sstevel@tonic-gate *
28180Sstevel@tonic-gate * Note that hat_getkpfnum() is never supported on amd64.
28190Sstevel@tonic-gate */
28200Sstevel@tonic-gate #if !defined(__amd64)
28210Sstevel@tonic-gate pfn_t
28220Sstevel@tonic-gate hat_getkpfnum(caddr_t addr)
28230Sstevel@tonic-gate {
28240Sstevel@tonic-gate pfn_t pfn;
28250Sstevel@tonic-gate int badcaller = 0;
28260Sstevel@tonic-gate
28270Sstevel@tonic-gate if (khat_running == 0)
28280Sstevel@tonic-gate panic("hat_getkpfnum(): called too early\n");
28290Sstevel@tonic-gate if ((uintptr_t)addr < kernelbase)
28300Sstevel@tonic-gate return (PFN_INVALID);
28310Sstevel@tonic-gate
28325084Sjohnlev XPV_DISALLOW_MIGRATE();
28330Sstevel@tonic-gate if (segkpm && IS_KPM_ADDR(addr)) {
28340Sstevel@tonic-gate badcaller = 1;
28350Sstevel@tonic-gate pfn = hat_kpm_va2pfn(addr);
28360Sstevel@tonic-gate } else {
28370Sstevel@tonic-gate pfn = hat_getpfnum(kas.a_hat, addr);
28380Sstevel@tonic-gate badcaller = pf_is_memory(pfn);
28390Sstevel@tonic-gate }
28400Sstevel@tonic-gate
28410Sstevel@tonic-gate if (badcaller)
28420Sstevel@tonic-gate hat_getkpfnum_badcall(caller());
28435084Sjohnlev XPV_ALLOW_MIGRATE();
28440Sstevel@tonic-gate return (pfn);
28450Sstevel@tonic-gate }
28460Sstevel@tonic-gate #endif /* __amd64 */
28470Sstevel@tonic-gate
28480Sstevel@tonic-gate /*
28490Sstevel@tonic-gate * int hat_probe(hat, addr)
28500Sstevel@tonic-gate * return 0 if no valid mapping is present. A faster version
28510Sstevel@tonic-gate * of hat_getattr() on certain architectures.
28520Sstevel@tonic-gate */
28530Sstevel@tonic-gate int
28540Sstevel@tonic-gate hat_probe(hat_t *hat, caddr_t addr)
28550Sstevel@tonic-gate {
28560Sstevel@tonic-gate uintptr_t vaddr = ALIGN2PAGE(addr);
28570Sstevel@tonic-gate uint_t entry;
28580Sstevel@tonic-gate htable_t *ht;
28590Sstevel@tonic-gate pgcnt_t pg_off;
28600Sstevel@tonic-gate
28613446Smrj ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
28620Sstevel@tonic-gate ASSERT(hat == kas.a_hat ||
28630Sstevel@tonic-gate AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
28640Sstevel@tonic-gate if (IN_VA_HOLE(vaddr))
28650Sstevel@tonic-gate return (0);
28660Sstevel@tonic-gate
28670Sstevel@tonic-gate /*
28680Sstevel@tonic-gate * Most common use of hat_probe is from segmap. We special case it
28690Sstevel@tonic-gate * for performance.
28700Sstevel@tonic-gate */
28710Sstevel@tonic-gate if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
28720Sstevel@tonic-gate pg_off = mmu_btop(vaddr - mmu.kmap_addr);
28730Sstevel@tonic-gate if (mmu.pae_hat)
28740Sstevel@tonic-gate return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
28750Sstevel@tonic-gate else
28760Sstevel@tonic-gate return (PTE_ISVALID(
28770Sstevel@tonic-gate ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
28780Sstevel@tonic-gate }
28790Sstevel@tonic-gate
28800Sstevel@tonic-gate ht = htable_getpage(hat, vaddr, &entry);
28810Sstevel@tonic-gate htable_release(ht);
28825084Sjohnlev return (ht != NULL);
28830Sstevel@tonic-gate }
28840Sstevel@tonic-gate
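/*
 * Example use of hat_probe() (a sketch; the helper name is hypothetical):
 * a segment driver can cheaply skip addresses that already have a valid
 * mapping before taking the more expensive load path.  The caller must
 * hold the address space lock, per the ASSERT in hat_probe().
 */
static int
example_needs_fault(struct as *as, caddr_t addr)
{
	return (hat_probe(as->a_hat, addr) == 0);
}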
28850Sstevel@tonic-gate /*
28864381Sjosephb * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
28874381Sjosephb */
28884381Sjosephb static int
28894381Sjosephb is_it_dism(hat_t *hat, caddr_t va)
28904381Sjosephb {
28914381Sjosephb struct seg *seg;
28924381Sjosephb struct shm_data *shmd;
28934381Sjosephb struct spt_data *sptd;
28944381Sjosephb
28954381Sjosephb seg = as_findseg(hat->hat_as, va, 0);
28964381Sjosephb ASSERT(seg != NULL);
28974381Sjosephb ASSERT(seg->s_base <= va);
28984381Sjosephb shmd = (struct shm_data *)seg->s_data;
28994381Sjosephb ASSERT(shmd != NULL);
29004381Sjosephb sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
29014381Sjosephb ASSERT(sptd != NULL);
29024381Sjosephb if (sptd->spt_flags & SHM_PAGEABLE)
29034381Sjosephb return (1);
29044381Sjosephb return (0);
29054381Sjosephb }
29064381Sjosephb
29074381Sjosephb /*
29084381Sjosephb * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
29090Sstevel@tonic-gate * except that we use the ism_hat's existing mappings to determine the pages
29104381Sjosephb * and protections to use for this hat. If we find a full properly aligned
29114381Sjosephb * and sized pagetable, we will attempt to share the pagetable itself.
29120Sstevel@tonic-gate */
29130Sstevel@tonic-gate /*ARGSUSED*/
29140Sstevel@tonic-gate int
29150Sstevel@tonic-gate hat_share(
29160Sstevel@tonic-gate hat_t *hat,
29170Sstevel@tonic-gate caddr_t addr,
29180Sstevel@tonic-gate hat_t *ism_hat,
29190Sstevel@tonic-gate caddr_t src_addr,
29200Sstevel@tonic-gate size_t len, /* almost useless value, see below. */
29210Sstevel@tonic-gate uint_t ismszc)
29220Sstevel@tonic-gate {
29230Sstevel@tonic-gate uintptr_t vaddr_start = (uintptr_t)addr;
29240Sstevel@tonic-gate uintptr_t vaddr;
29250Sstevel@tonic-gate uintptr_t eaddr = vaddr_start + len;
29260Sstevel@tonic-gate uintptr_t ism_addr_start = (uintptr_t)src_addr;
29270Sstevel@tonic-gate uintptr_t ism_addr = ism_addr_start;
29280Sstevel@tonic-gate uintptr_t e_ism_addr = ism_addr + len;
29290Sstevel@tonic-gate htable_t *ism_ht = NULL;
29300Sstevel@tonic-gate htable_t *ht;
29310Sstevel@tonic-gate x86pte_t pte;
29320Sstevel@tonic-gate page_t *pp;
29330Sstevel@tonic-gate pfn_t pfn;
29340Sstevel@tonic-gate level_t l;
29350Sstevel@tonic-gate pgcnt_t pgcnt;
29360Sstevel@tonic-gate uint_t prot;
29374381Sjosephb int is_dism;
29384381Sjosephb int flags;
29390Sstevel@tonic-gate
29400Sstevel@tonic-gate /*
29410Sstevel@tonic-gate * We might be asked to share an empty DISM hat by as_dup()
29420Sstevel@tonic-gate */
29430Sstevel@tonic-gate ASSERT(hat != kas.a_hat);
29443446Smrj ASSERT(eaddr <= _userlimit);
29450Sstevel@tonic-gate if (!(ism_hat->hat_flags & HAT_SHARED)) {
29460Sstevel@tonic-gate ASSERT(hat_get_mapped_size(ism_hat) == 0);
29470Sstevel@tonic-gate return (0);
29480Sstevel@tonic-gate }
29495084Sjohnlev XPV_DISALLOW_MIGRATE();
29500Sstevel@tonic-gate
29510Sstevel@tonic-gate /*
29520Sstevel@tonic-gate * The SPT segment driver often passes us a size larger than there are
29530Sstevel@tonic-gate * valid mappings. That's because it rounds the segment size up to a
29540Sstevel@tonic-gate * large pagesize, even if the actual memory mapped by ism_hat is less.
29550Sstevel@tonic-gate */
29560Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr_start));
29570Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(ism_addr_start));
29580Sstevel@tonic-gate ASSERT(ism_hat->hat_flags & HAT_SHARED);
29594381Sjosephb is_dism = is_it_dism(hat, addr);
29600Sstevel@tonic-gate while (ism_addr < e_ism_addr) {
29610Sstevel@tonic-gate /*
29620Sstevel@tonic-gate * use htable_walk to get the next valid ISM mapping
29630Sstevel@tonic-gate */
29640Sstevel@tonic-gate pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
29650Sstevel@tonic-gate if (ism_ht == NULL)
29660Sstevel@tonic-gate break;
29670Sstevel@tonic-gate
29680Sstevel@tonic-gate /*
29694381Sjosephb * First check to see if we already share the page table.
29704381Sjosephb */
29714381Sjosephb l = ism_ht->ht_level;
29724381Sjosephb vaddr = vaddr_start + (ism_addr - ism_addr_start);
29734381Sjosephb ht = htable_lookup(hat, vaddr, l);
29744381Sjosephb if (ht != NULL) {
29754381Sjosephb if (ht->ht_flags & HTABLE_SHARED_PFN)
29764381Sjosephb goto shared;
29774381Sjosephb htable_release(ht);
29784381Sjosephb goto not_shared;
29794381Sjosephb }
29804381Sjosephb
29814381Sjosephb /*
29824381Sjosephb * Can't ever share top table.
29834381Sjosephb */
29844381Sjosephb if (l == mmu.max_level)
29854381Sjosephb goto not_shared;
29864381Sjosephb
29874381Sjosephb /*
29884381Sjosephb * Avoid level mismatches later due to DISM faults.
29894381Sjosephb */
29904381Sjosephb if (is_dism && l > 0)
29914381Sjosephb goto not_shared;
29924381Sjosephb
29934381Sjosephb /*
29944381Sjosephb * addresses and lengths must align
29954381Sjosephb * table must be fully populated
29964381Sjosephb * no lower level page tables
29974381Sjosephb */
29984381Sjosephb if (ism_addr != ism_ht->ht_vaddr ||
29994381Sjosephb (vaddr & LEVEL_OFFSET(l + 1)) != 0)
30004381Sjosephb goto not_shared;
30014381Sjosephb
30024381Sjosephb /*
30034381Sjosephb * The range of address space must cover a full table.
30040Sstevel@tonic-gate */
30055159Sjohnlev if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
30064381Sjosephb goto not_shared;
30074381Sjosephb
30084381Sjosephb /*
30094381Sjosephb * All entries in the ISM page table must be leaf PTEs.
30104381Sjosephb */
30114381Sjosephb if (l > 0) {
30124381Sjosephb int e;
30134381Sjosephb
30144381Sjosephb /*
30154381Sjosephb * We know the 0th is from htable_walk() above.
30164381Sjosephb */
30174381Sjosephb for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
30184381Sjosephb x86pte_t pte;
30194381Sjosephb pte = x86pte_get(ism_ht, e);
30204381Sjosephb if (!PTE_ISPAGE(pte, l))
30214381Sjosephb goto not_shared;
30224381Sjosephb }
30234381Sjosephb }
30244381Sjosephb
30254381Sjosephb /*
30264381Sjosephb * share the page table
30274381Sjosephb */
30284381Sjosephb ht = htable_create(hat, vaddr, l, ism_ht);
30294381Sjosephb shared:
30304381Sjosephb ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
30314381Sjosephb ASSERT(ht->ht_shares == ism_ht);
30324381Sjosephb hat->hat_ism_pgcnt +=
30334381Sjosephb (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
30344381Sjosephb (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
30354381Sjosephb ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
30364381Sjosephb htable_release(ht);
30374381Sjosephb ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
30384381Sjosephb htable_release(ism_ht);
30394381Sjosephb ism_ht = NULL;
30404381Sjosephb continue;
30414381Sjosephb
30424381Sjosephb not_shared:
30434381Sjosephb /*
30444381Sjosephb * Unable to share the page table. Instead we will
30454381Sjosephb * create new mappings from the values in the ISM mappings.
30464381Sjosephb * Figure out what level size mappings to use;
30474381Sjosephb * Figure out what size (level) of mappings to use.
30480Sstevel@tonic-gate for (l = ism_ht->ht_level; l > 0; --l) {
30490Sstevel@tonic-gate if (LEVEL_SIZE(l) <= eaddr - vaddr &&
30500Sstevel@tonic-gate (vaddr & LEVEL_OFFSET(l)) == 0)
30510Sstevel@tonic-gate break;
30520Sstevel@tonic-gate }
30530Sstevel@tonic-gate
30540Sstevel@tonic-gate /*
30550Sstevel@tonic-gate * The ISM mapping might be larger than the share area,
30564381Sjosephb * be careful to truncate it if needed.
30570Sstevel@tonic-gate */
30580Sstevel@tonic-gate if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
30590Sstevel@tonic-gate pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
30600Sstevel@tonic-gate } else {
30610Sstevel@tonic-gate pgcnt = mmu_btop(eaddr - vaddr);
30620Sstevel@tonic-gate l = 0;
30630Sstevel@tonic-gate }
30640Sstevel@tonic-gate
30650Sstevel@tonic-gate pfn = PTE2PFN(pte, ism_ht->ht_level);
30660Sstevel@tonic-gate ASSERT(pfn != PFN_INVALID);
30670Sstevel@tonic-gate while (pgcnt > 0) {
30680Sstevel@tonic-gate /*
30690Sstevel@tonic-gate * Make a new pte for the PFN for this level.
30700Sstevel@tonic-gate * Copy protections for the pte from the ISM pte.
30710Sstevel@tonic-gate */
30720Sstevel@tonic-gate pp = page_numtopp_nolock(pfn);
30730Sstevel@tonic-gate ASSERT(pp != NULL);
30740Sstevel@tonic-gate
30750Sstevel@tonic-gate prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
30760Sstevel@tonic-gate if (PTE_GET(pte, PT_WRITABLE))
30770Sstevel@tonic-gate prot |= PROT_WRITE;
30780Sstevel@tonic-gate if (!PTE_GET(pte, PT_NX))
30790Sstevel@tonic-gate prot |= PROT_EXEC;
30800Sstevel@tonic-gate
30814381Sjosephb flags = HAT_LOAD;
30824381Sjosephb if (!is_dism)
30834381Sjosephb flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
30844381Sjosephb while (hati_load_common(hat, vaddr, pp, prot, flags,
30853446Smrj l, pfn) != 0) {
30863446Smrj if (l == 0)
30873446Smrj panic("hati_load_common() failure");
30883446Smrj --l;
30893446Smrj }
30900Sstevel@tonic-gate
30910Sstevel@tonic-gate vaddr += LEVEL_SIZE(l);
30920Sstevel@tonic-gate ism_addr += LEVEL_SIZE(l);
30930Sstevel@tonic-gate pfn += mmu_btop(LEVEL_SIZE(l));
30940Sstevel@tonic-gate pgcnt -= mmu_btop(LEVEL_SIZE(l));
30950Sstevel@tonic-gate }
30960Sstevel@tonic-gate }
30970Sstevel@tonic-gate if (ism_ht != NULL)
30980Sstevel@tonic-gate htable_release(ism_ht);
30995084Sjohnlev XPV_ALLOW_MIGRATE();
31000Sstevel@tonic-gate return (0);
31010Sstevel@tonic-gate }
31020Sstevel@tonic-gate
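/*
 * Sketch (a hypothetical helper, not original code) of the alignment
 * precondition hat_share() applies before sharing a level-l pagetable:
 * both the target address and the ISM address must sit on an (l + 1)
 * table boundary, and the remaining range must cover the whole table.
 */
static int
example_can_share_table(uintptr_t vaddr, uintptr_t ism_addr,
    uintptr_t e_ism_addr, level_t l)
{
	return ((vaddr & LEVEL_OFFSET(l + 1)) == 0 &&
	    (ism_addr & LEVEL_OFFSET(l + 1)) == 0 &&
	    e_ism_addr - ism_addr >= LEVEL_SIZE(l + 1));
}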
31030Sstevel@tonic-gate
31040Sstevel@tonic-gate /*
31050Sstevel@tonic-gate * hat_unshare() is similar to hat_unload_callback(), but
31060Sstevel@tonic-gate * we have to look for empty shared pagetables. Note that
31070Sstevel@tonic-gate * hat_unshare() is always invoked against an entire segment.
31080Sstevel@tonic-gate */
31090Sstevel@tonic-gate /*ARGSUSED*/
31100Sstevel@tonic-gate void
31110Sstevel@tonic-gate hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
31120Sstevel@tonic-gate {
31134654Sjosephb uint64_t vaddr = (uintptr_t)addr;
31140Sstevel@tonic-gate uintptr_t eaddr = vaddr + len;
31150Sstevel@tonic-gate htable_t *ht = NULL;
31160Sstevel@tonic-gate uint_t need_demaps = 0;
31174381Sjosephb int flags = HAT_UNLOAD_UNMAP;
31184381Sjosephb level_t l;
31190Sstevel@tonic-gate
31200Sstevel@tonic-gate ASSERT(hat != kas.a_hat);
31213446Smrj ASSERT(eaddr <= _userlimit);
31220Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr));
31230Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(eaddr));
31245084Sjohnlev XPV_DISALLOW_MIGRATE();
31250Sstevel@tonic-gate
31260Sstevel@tonic-gate /*
31270Sstevel@tonic-gate * First go through and remove any shared pagetables.
31280Sstevel@tonic-gate *
31293446Smrj * Note that it's ok to delay the TLB shootdown till the entire range is
31300Sstevel@tonic-gate * finished, because if hat_pageunload() were to unload a shared
31313446Smrj * pagetable page, its hat_tlb_inval() would do a global TLB invalidate.
31320Sstevel@tonic-gate */
31334381Sjosephb l = mmu.max_page_level;
31344381Sjosephb if (l == mmu.max_level)
31354381Sjosephb --l;
31364381Sjosephb for (; l >= 0; --l) {
31374381Sjosephb for (vaddr = (uintptr_t)addr; vaddr < eaddr;
31384381Sjosephb vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
31394381Sjosephb ASSERT(!IN_VA_HOLE(vaddr));
31404381Sjosephb /*
31414381Sjosephb * find a pagetable that maps the current address
31424381Sjosephb */
31434381Sjosephb ht = htable_lookup(hat, vaddr, l);
31444381Sjosephb if (ht == NULL)
31454381Sjosephb continue;
31460Sstevel@tonic-gate if (ht->ht_flags & HTABLE_SHARED_PFN) {
31470Sstevel@tonic-gate /*
31484381Sjosephb * clear page count, set valid_cnt to 0,
31494381Sjosephb * let htable_release() finish the job
31500Sstevel@tonic-gate */
31514381Sjosephb hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
31524381Sjosephb (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
31530Sstevel@tonic-gate ht->ht_valid_cnt = 0;
31540Sstevel@tonic-gate need_demaps = 1;
31550Sstevel@tonic-gate }
31560Sstevel@tonic-gate htable_release(ht);
31570Sstevel@tonic-gate }
31580Sstevel@tonic-gate }
31590Sstevel@tonic-gate
31600Sstevel@tonic-gate /*
31610Sstevel@tonic-gate * flush the TLBs - since we're probably dealing with MANY mappings
31620Sstevel@tonic-gate * we do just one CR3 reload.
31630Sstevel@tonic-gate */
31640Sstevel@tonic-gate if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
31653446Smrj hat_tlb_inval(hat, DEMAP_ALL_ADDR);
31660Sstevel@tonic-gate
31670Sstevel@tonic-gate /*
31680Sstevel@tonic-gate * Now go back and clean up any unaligned mappings that
31690Sstevel@tonic-gate * couldn't share pagetables.
31700Sstevel@tonic-gate */
31714381Sjosephb if (!is_it_dism(hat, addr))
31724381Sjosephb flags |= HAT_UNLOAD_UNLOCK;
31734381Sjosephb hat_unload(hat, addr, len, flags);
31745084Sjohnlev XPV_ALLOW_MIGRATE();
31750Sstevel@tonic-gate }
31760Sstevel@tonic-gate
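/*
 * Example pairing of hat_share()/hat_unshare() (a sketch; the helper name
 * and call sequence are hypothetical).  The SPT segment driver attaches a
 * shared segment with hat_share() and must later detach the entire
 * segment, since hat_unshare() is always invoked on a whole segment.
 */
static int
example_attach_detach(hat_t *hat, caddr_t addr, hat_t *ism_hat,
    caddr_t src_addr, size_t len)
{
	int err = hat_share(hat, addr, ism_hat, src_addr, len, 0);

	if (err != 0)
		return (err);
	/* ... use the shared mappings ... */
	hat_unshare(hat, addr, len, 0);
	return (0);
}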
31770Sstevel@tonic-gate
31780Sstevel@tonic-gate /*
31790Sstevel@tonic-gate * hat_reserve() does nothing
31800Sstevel@tonic-gate */
31810Sstevel@tonic-gate /*ARGSUSED*/
31820Sstevel@tonic-gate void
31830Sstevel@tonic-gate hat_reserve(struct as *as, caddr_t addr, size_t len)
31840Sstevel@tonic-gate {
31850Sstevel@tonic-gate }
31860Sstevel@tonic-gate
31870Sstevel@tonic-gate
31880Sstevel@tonic-gate /*
31890Sstevel@tonic-gate * Called when all mappings to a page should have write permission removed.
31909903SPavel.Tatashin@Sun.COM * Mostly stolen from hat_pagesync()
31910Sstevel@tonic-gate */
31920Sstevel@tonic-gate static void
31930Sstevel@tonic-gate hati_page_clrwrt(struct page *pp)
31940Sstevel@tonic-gate {
31950Sstevel@tonic-gate hment_t *hm = NULL;
31960Sstevel@tonic-gate htable_t *ht;
31970Sstevel@tonic-gate uint_t entry;
31980Sstevel@tonic-gate x86pte_t old;
31990Sstevel@tonic-gate x86pte_t new;
32000Sstevel@tonic-gate uint_t pszc = 0;
32010Sstevel@tonic-gate
32025084Sjohnlev XPV_DISALLOW_MIGRATE();
32030Sstevel@tonic-gate next_size:
32040Sstevel@tonic-gate /*
32050Sstevel@tonic-gate * walk thru the mapping list clearing write permission
32060Sstevel@tonic-gate */
32070Sstevel@tonic-gate x86_hm_enter(pp);
32080Sstevel@tonic-gate while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
32090Sstevel@tonic-gate if (ht->ht_level < pszc)
32100Sstevel@tonic-gate continue;
32110Sstevel@tonic-gate old = x86pte_get(ht, entry);
32120Sstevel@tonic-gate
32130Sstevel@tonic-gate for (;;) {
32140Sstevel@tonic-gate /*
32150Sstevel@tonic-gate * Is this mapping of interest?
32160Sstevel@tonic-gate */
32170Sstevel@tonic-gate if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
32180Sstevel@tonic-gate PTE_GET(old, PT_WRITABLE) == 0)
32190Sstevel@tonic-gate break;
32200Sstevel@tonic-gate
32210Sstevel@tonic-gate /*
32220Sstevel@tonic-gate * Clear ref/mod writable bits. This requires cross
32230Sstevel@tonic-gate * calls to ensure any executing TLBs see cleared bits.
32240Sstevel@tonic-gate */
32250Sstevel@tonic-gate new = old;
32260Sstevel@tonic-gate PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
32270Sstevel@tonic-gate old = hati_update_pte(ht, entry, old, new);
32280Sstevel@tonic-gate if (old != 0)
32290Sstevel@tonic-gate continue;
32300Sstevel@tonic-gate
32310Sstevel@tonic-gate break;
32320Sstevel@tonic-gate }
32330Sstevel@tonic-gate }
32340Sstevel@tonic-gate x86_hm_exit(pp);
32350Sstevel@tonic-gate while (pszc < pp->p_szc) {
32360Sstevel@tonic-gate page_t *tpp;
32370Sstevel@tonic-gate pszc++;
32380Sstevel@tonic-gate tpp = PP_GROUPLEADER(pp, pszc);
32390Sstevel@tonic-gate if (pp != tpp) {
32400Sstevel@tonic-gate pp = tpp;
32410Sstevel@tonic-gate goto next_size;
32420Sstevel@tonic-gate }
32430Sstevel@tonic-gate }
32445084Sjohnlev XPV_ALLOW_MIGRATE();
32450Sstevel@tonic-gate }
32460Sstevel@tonic-gate
32470Sstevel@tonic-gate /*
32480Sstevel@tonic-gate * void hat_page_setattr(pp, flag)
32490Sstevel@tonic-gate * void hat_page_clrattr(pp, flag)
32500Sstevel@tonic-gate * used to set/clr ref/mod bits.
32510Sstevel@tonic-gate */
32520Sstevel@tonic-gate void
32530Sstevel@tonic-gate hat_page_setattr(struct page *pp, uint_t flag)
32540Sstevel@tonic-gate {
32550Sstevel@tonic-gate vnode_t *vp = pp->p_vnode;
32560Sstevel@tonic-gate kmutex_t *vphm = NULL;
32570Sstevel@tonic-gate page_t **listp;
32584324Sqiao int noshuffle;
32594324Sqiao
32604324Sqiao noshuffle = flag & P_NSH;
32614324Sqiao flag &= ~P_NSH;
32620Sstevel@tonic-gate
32630Sstevel@tonic-gate if (PP_GETRM(pp, flag) == flag)
32640Sstevel@tonic-gate return;
32650Sstevel@tonic-gate
32664324Sqiao if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
32674324Sqiao !noshuffle) {
32680Sstevel@tonic-gate vphm = page_vnode_mutex(vp);
32690Sstevel@tonic-gate mutex_enter(vphm);
32700Sstevel@tonic-gate }
32710Sstevel@tonic-gate
32720Sstevel@tonic-gate PP_SETRM(pp, flag);
32730Sstevel@tonic-gate
32740Sstevel@tonic-gate if (vphm != NULL) {
32750Sstevel@tonic-gate
32760Sstevel@tonic-gate /*
32770Sstevel@tonic-gate * Some File Systems examine v_pages for NULL w/o
32780Sstevel@tonic-gate * grabbing the vphm mutex. Must not let it become NULL when
32790Sstevel@tonic-gate * pp is the only page on the list.
32800Sstevel@tonic-gate */
32810Sstevel@tonic-gate if (pp->p_vpnext != pp) {
32820Sstevel@tonic-gate page_vpsub(&vp->v_pages, pp);
32830Sstevel@tonic-gate if (vp->v_pages != NULL)
32840Sstevel@tonic-gate listp = &vp->v_pages->p_vpprev->p_vpnext;
32850Sstevel@tonic-gate else
32860Sstevel@tonic-gate listp = &vp->v_pages;
32870Sstevel@tonic-gate page_vpadd(listp, pp);
32880Sstevel@tonic-gate }
32890Sstevel@tonic-gate mutex_exit(vphm);
32900Sstevel@tonic-gate }
32910Sstevel@tonic-gate }
32920Sstevel@tonic-gate
32930Sstevel@tonic-gate void
32940Sstevel@tonic-gate hat_page_clrattr(struct page *pp, uint_t flag)
32950Sstevel@tonic-gate {
32960Sstevel@tonic-gate vnode_t *vp = pp->p_vnode;
32970Sstevel@tonic-gate ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
32980Sstevel@tonic-gate
32990Sstevel@tonic-gate /*
33002999Sstans * Caller is expected to hold page's io lock for VMODSORT to work
33012999Sstans * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
33022999Sstans * bit is cleared.
33032999Sstans * We don't assert this, to avoid tripping some existing third party
33042999Sstans * code. The dirty page is moved back to the top of the v_pages list
33052999Sstans * after IO is done in pvn_write_done().
33060Sstevel@tonic-gate */
33070Sstevel@tonic-gate PP_CLRRM(pp, flag);
33080Sstevel@tonic-gate
33092999Sstans if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
33100Sstevel@tonic-gate
33110Sstevel@tonic-gate /*
33120Sstevel@tonic-gate * VMODSORT works by removing write permissions and getting
33130Sstevel@tonic-gate * a fault when a page is made dirty. At this point
33140Sstevel@tonic-gate * we need to remove write permission from all mappings
33150Sstevel@tonic-gate * to this page.
33160Sstevel@tonic-gate */
33170Sstevel@tonic-gate hati_page_clrwrt(pp);
33180Sstevel@tonic-gate }
33190Sstevel@tonic-gate }
33200Sstevel@tonic-gate
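/*
 * Sketch of the VMODSORT protocol described above (a hypothetical
 * example): clearing P_MOD write-protects every mapping of the page, so
 * the next store takes a fault that both sets P_MOD again and moves the
 * page to the dirty end of the vnode's page list.
 */
static void
example_mark_clean(page_t *pp)
{
	/* the caller holds the page's io lock, per the comment above */
	hat_page_clrattr(pp, P_MOD);
}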
33210Sstevel@tonic-gate /*
33220Sstevel@tonic-gate * If flag is specified, returns 0 if attribute is disabled
33239903SPavel.Tatashin@Sun.COM * and nonzero if enabled. If flag specifies multiple attributes
33249903SPavel.Tatashin@Sun.COM * then returns 0 if ALL attributes are disabled. This is an advisory
33250Sstevel@tonic-gate * call.
33260Sstevel@tonic-gate */
33270Sstevel@tonic-gate uint_t
33280Sstevel@tonic-gate hat_page_getattr(struct page *pp, uint_t flag)
33290Sstevel@tonic-gate {
33300Sstevel@tonic-gate return (PP_GETRM(pp, flag));
33310Sstevel@tonic-gate }
33320Sstevel@tonic-gate
33330Sstevel@tonic-gate
33340Sstevel@tonic-gate /*
33350Sstevel@tonic-gate * common code used by hat_pageunload() and hment_steal()
33360Sstevel@tonic-gate */
33370Sstevel@tonic-gate hment_t *
33380Sstevel@tonic-gate hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
33390Sstevel@tonic-gate {
33400Sstevel@tonic-gate x86pte_t old_pte;
33410Sstevel@tonic-gate pfn_t pfn = pp->p_pagenum;
33420Sstevel@tonic-gate hment_t *hm;
33430Sstevel@tonic-gate
33440Sstevel@tonic-gate /*
33450Sstevel@tonic-gate * We need to acquire a hold on the htable in order to
33460Sstevel@tonic-gate * do the invalidate. We know the htable must exist, since
33470Sstevel@tonic-gate * unmaps don't release the htable until after removing any
33480Sstevel@tonic-gate * hment. Having x86_hm_enter() keeps that from proceeding.
33490Sstevel@tonic-gate */
33500Sstevel@tonic-gate htable_acquire(ht);
33510Sstevel@tonic-gate
33520Sstevel@tonic-gate /*
33530Sstevel@tonic-gate * Invalidate the PTE and remove the hment.
33540Sstevel@tonic-gate */
33553446Smrj old_pte = x86pte_inval(ht, entry, 0, NULL);
335647Sjosephb if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
33573446Smrj panic("x86pte_inval() failure found PTE = " FMT_PTE
335847Sjosephb " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
335947Sjosephb old_pte, pfn, (uintptr_t)ht, entry);
336047Sjosephb }
33610Sstevel@tonic-gate
33620Sstevel@tonic-gate /*
33630Sstevel@tonic-gate * Clean up all the htable information for this mapping
33640Sstevel@tonic-gate */
33650Sstevel@tonic-gate ASSERT(ht->ht_valid_cnt > 0);
33660Sstevel@tonic-gate HTABLE_DEC(ht->ht_valid_cnt);
33670Sstevel@tonic-gate PGCNT_DEC(ht->ht_hat, ht->ht_level);
33680Sstevel@tonic-gate
33690Sstevel@tonic-gate /*
33700Sstevel@tonic-gate * sync ref/mod bits to the page_t
33710Sstevel@tonic-gate */
33723446Smrj if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
33730Sstevel@tonic-gate hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
33740Sstevel@tonic-gate
33750Sstevel@tonic-gate /*
33760Sstevel@tonic-gate * Remove the mapping list entry for this page.
33770Sstevel@tonic-gate */
33780Sstevel@tonic-gate hm = hment_remove(pp, ht, entry);
33790Sstevel@tonic-gate
33800Sstevel@tonic-gate /*
33810Sstevel@tonic-gate * drop the mapping list lock so that we might free the
33820Sstevel@tonic-gate * hment and htable.
33830Sstevel@tonic-gate */
33840Sstevel@tonic-gate x86_hm_exit(pp);
33850Sstevel@tonic-gate htable_release(ht);
33860Sstevel@tonic-gate return (hm);
33870Sstevel@tonic-gate }
33880Sstevel@tonic-gate
33891841Spraks extern int vpm_enable;
33900Sstevel@tonic-gate /*
33910Sstevel@tonic-gate * Unload all translations to a page. If the page is a subpage of a large
33920Sstevel@tonic-gate * page, the large page mappings are also removed.
33930Sstevel@tonic-gate *
33940Sstevel@tonic-gate * The forceflags are unused.
33950Sstevel@tonic-gate */
33960Sstevel@tonic-gate
33970Sstevel@tonic-gate /*ARGSUSED*/
33980Sstevel@tonic-gate static int
33990Sstevel@tonic-gate hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
34000Sstevel@tonic-gate {
34010Sstevel@tonic-gate page_t *cur_pp = pp;
34020Sstevel@tonic-gate hment_t *hm;
34030Sstevel@tonic-gate hment_t *prev;
34040Sstevel@tonic-gate htable_t *ht;
34050Sstevel@tonic-gate uint_t entry;
34060Sstevel@tonic-gate level_t level;
34070Sstevel@tonic-gate
34085084Sjohnlev XPV_DISALLOW_MIGRATE();
340912532Sjoe.bonasera@oracle.com
341012532Sjoe.bonasera@oracle.com /*
341112532Sjoe.bonasera@oracle.com * prevent recursion due to kmem_free()
341212532Sjoe.bonasera@oracle.com */
341312532Sjoe.bonasera@oracle.com ++curthread->t_hatdepth;
341412532Sjoe.bonasera@oracle.com ASSERT(curthread->t_hatdepth < 16);
341512532Sjoe.bonasera@oracle.com
34161841Spraks #if defined(__amd64)
34171841Spraks /*
34181841Spraks * clear the vpm ref.
34191841Spraks */
34201841Spraks if (vpm_enable) {
34211841Spraks pp->p_vpmref = 0;
34221841Spraks }
34231841Spraks #endif
34240Sstevel@tonic-gate /*
34250Sstevel@tonic-gate * The loop with next_size handles pages with multiple pagesize mappings
34260Sstevel@tonic-gate */
34270Sstevel@tonic-gate next_size:
34280Sstevel@tonic-gate for (;;) {
34290Sstevel@tonic-gate
34300Sstevel@tonic-gate /*
34310Sstevel@tonic-gate * Get a mapping list entry
34320Sstevel@tonic-gate */
34330Sstevel@tonic-gate x86_hm_enter(cur_pp);
34340Sstevel@tonic-gate for (prev = NULL; ; prev = hm) {
34350Sstevel@tonic-gate hm = hment_walk(cur_pp, &ht, &entry, prev);
34360Sstevel@tonic-gate if (hm == NULL) {
34370Sstevel@tonic-gate x86_hm_exit(cur_pp);
34380Sstevel@tonic-gate
34390Sstevel@tonic-gate /*
34400Sstevel@tonic-gate * If not part of a larger page, we're done.
34410Sstevel@tonic-gate */
34423446Smrj if (cur_pp->p_szc <= pg_szcd) {
344312532Sjoe.bonasera@oracle.com ASSERT(curthread->t_hatdepth > 0);
344412532Sjoe.bonasera@oracle.com --curthread->t_hatdepth;
34455084Sjohnlev XPV_ALLOW_MIGRATE();
34460Sstevel@tonic-gate return (0);
34473446Smrj }
34480Sstevel@tonic-gate
34490Sstevel@tonic-gate /*
34500Sstevel@tonic-gate * Else check the next larger page size.
34510Sstevel@tonic-gate * hat_page_demote() may decrease p_szc
34520Sstevel@tonic-gate * but that's ok; we'll just take an extra
34530Sstevel@tonic-gate * trip to discover there are no larger mappings
34540Sstevel@tonic-gate * and return.
34550Sstevel@tonic-gate */
34560Sstevel@tonic-gate ++pg_szcd;
34570Sstevel@tonic-gate cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
34580Sstevel@tonic-gate goto next_size;
34590Sstevel@tonic-gate }
34600Sstevel@tonic-gate
34610Sstevel@tonic-gate /*
34620Sstevel@tonic-gate * If this mapping size matches, remove it.
34630Sstevel@tonic-gate */
34640Sstevel@tonic-gate level = ht->ht_level;
34650Sstevel@tonic-gate if (level == pg_szcd)
34660Sstevel@tonic-gate break;
34670Sstevel@tonic-gate }
34680Sstevel@tonic-gate
34690Sstevel@tonic-gate /*
34700Sstevel@tonic-gate * Remove the mapping list entry for this page.
34710Sstevel@tonic-gate * Note this does the x86_hm_exit() for us.
34720Sstevel@tonic-gate */
34730Sstevel@tonic-gate hm = hati_page_unmap(cur_pp, ht, entry);
34740Sstevel@tonic-gate if (hm != NULL)
34750Sstevel@tonic-gate hment_free(hm);
34760Sstevel@tonic-gate }
34770Sstevel@tonic-gate }
34780Sstevel@tonic-gate
34790Sstevel@tonic-gate int
34800Sstevel@tonic-gate hat_pageunload(struct page *pp, uint_t forceflag)
34810Sstevel@tonic-gate {
34820Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp));
34830Sstevel@tonic-gate return (hati_pageunload(pp, 0, forceflag));
34840Sstevel@tonic-gate }
34850Sstevel@tonic-gate
34860Sstevel@tonic-gate /*
34870Sstevel@tonic-gate * Unload all large mappings to pp and reduce by 1 p_szc field of every large
34880Sstevel@tonic-gate * page level that included pp.
34890Sstevel@tonic-gate *
34900Sstevel@tonic-gate * pp must be locked EXCL. Even though no other constituent pages are locked
34910Sstevel@tonic-gate * it's legal to unload large mappings to pp because all constituent pages of
34920Sstevel@tonic-gate * large locked mappings have to be locked SHARED. Therefore if we have an EXCL
34930Sstevel@tonic-gate * lock on one of the constituent pages, none of the large mappings to pp are
34940Sstevel@tonic-gate * locked.
34950Sstevel@tonic-gate *
34960Sstevel@tonic-gate * Change (always decrease) p_szc field starting from the last constituent
34970Sstevel@tonic-gate * page and ending with root constituent page so that root's pszc always shows
34980Sstevel@tonic-gate * the area where hat_page_demote() may be active.
34990Sstevel@tonic-gate *
35000Sstevel@tonic-gate * This mechanism is only used for file system pages where it's not always
35010Sstevel@tonic-gate * possible to get EXCL locks on all constituent pages to demote the size code
35020Sstevel@tonic-gate * (as is done for anonymous or kernel large pages).
35030Sstevel@tonic-gate */
35040Sstevel@tonic-gate void
35050Sstevel@tonic-gate hat_page_demote(page_t *pp)
35060Sstevel@tonic-gate {
35070Sstevel@tonic-gate uint_t pszc;
35080Sstevel@tonic-gate uint_t rszc;
35090Sstevel@tonic-gate uint_t szc;
35100Sstevel@tonic-gate page_t *rootpp;
35110Sstevel@tonic-gate page_t *firstpp;
35120Sstevel@tonic-gate page_t *lastpp;
35130Sstevel@tonic-gate pgcnt_t pgcnt;
35140Sstevel@tonic-gate
35150Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp));
35160Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp));
35170Sstevel@tonic-gate ASSERT(page_szc_lock_assert(pp));
35180Sstevel@tonic-gate
35190Sstevel@tonic-gate if (pp->p_szc == 0)
35200Sstevel@tonic-gate return;
35210Sstevel@tonic-gate
35220Sstevel@tonic-gate rootpp = PP_GROUPLEADER(pp, 1);
35230Sstevel@tonic-gate (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
35240Sstevel@tonic-gate
35250Sstevel@tonic-gate /*
35260Sstevel@tonic-gate * all large mappings to pp are gone
35270Sstevel@tonic-gate * and no new ones can be set up since pp is locked exclusively.
35280Sstevel@tonic-gate *
35290Sstevel@tonic-gate * Lock the root to make sure there's only one hat_page_demote()
35300Sstevel@tonic-gate * outstanding within the area of this root's pszc.
35310Sstevel@tonic-gate *
35320Sstevel@tonic-gate * Second potential hat_page_demote() is already eliminated by upper
35330Sstevel@tonic-gate * VM layer via page_szc_lock() but we don't rely on it and use our
35340Sstevel@tonic-gate * own locking (so that upper layer locking can be changed without
35350Sstevel@tonic-gate * assumptions that hat depends on upper layer VM to prevent multiple
35360Sstevel@tonic-gate * hat_page_demote() to be issued simultaneously to the same large
35370Sstevel@tonic-gate * page).
35380Sstevel@tonic-gate */
35390Sstevel@tonic-gate again:
35400Sstevel@tonic-gate pszc = pp->p_szc;
35410Sstevel@tonic-gate if (pszc == 0)
35420Sstevel@tonic-gate return;
35430Sstevel@tonic-gate rootpp = PP_GROUPLEADER(pp, pszc);
35440Sstevel@tonic-gate x86_hm_enter(rootpp);
35450Sstevel@tonic-gate /*
35460Sstevel@tonic-gate * If root's p_szc is different from pszc we raced with another
35470Sstevel@tonic-gate * hat_page_demote(). Drop the lock and try to find the root again.
35480Sstevel@tonic-gate * If root's p_szc is greater than pszc previous hat_page_demote() is
35490Sstevel@tonic-gate * not done yet. Take and release the mlist lock of root's root to wait
35500Sstevel@tonic-gate * for previous hat_page_demote() to complete.
35510Sstevel@tonic-gate */
35520Sstevel@tonic-gate if ((rszc = rootpp->p_szc) != pszc) {
35530Sstevel@tonic-gate x86_hm_exit(rootpp);
35540Sstevel@tonic-gate if (rszc > pszc) {
35550Sstevel@tonic-gate /* p_szc of a locked non free page can't increase */
35560Sstevel@tonic-gate ASSERT(pp != rootpp);
35570Sstevel@tonic-gate
35580Sstevel@tonic-gate rootpp = PP_GROUPLEADER(rootpp, rszc);
35590Sstevel@tonic-gate x86_hm_enter(rootpp);
35600Sstevel@tonic-gate x86_hm_exit(rootpp);
35610Sstevel@tonic-gate }
35620Sstevel@tonic-gate goto again;
35630Sstevel@tonic-gate }
35640Sstevel@tonic-gate ASSERT(pp->p_szc == pszc);
35650Sstevel@tonic-gate
35660Sstevel@tonic-gate /*
35670Sstevel@tonic-gate * Decrement by 1 p_szc of every constituent page of a region that
35680Sstevel@tonic-gate * covered pp. For example if original szc is 3 it gets changed to 2
35690Sstevel@tonic-gate * everywhere except in region 2 that covered pp. Region 2 that
35700Sstevel@tonic-gate * covered pp gets demoted to 1 everywhere except in region 1 that
35710Sstevel@tonic-gate * covered pp. The region 1 that covered pp is demoted to region
35720Sstevel@tonic-gate * 0. It's done this way because from region 3 we removed level 3
35730Sstevel@tonic-gate * mappings, from region 2 that covered pp we removed level 2 mappings
35740Sstevel@tonic-gate * and from region 1 that covered pp we removed level 1 mappings. All
35750Sstevel@tonic-gate * changes are done from high pfn's to low pfn's so that roots
35760Sstevel@tonic-gate * are changed last allowing one to know the largest region where
35770Sstevel@tonic-gate * hat_page_demote() is still active by looking only at the root page.
35780Sstevel@tonic-gate *
35790Sstevel@tonic-gate * This algorithm is implemented in 2 while loops. First loop changes
35800Sstevel@tonic-gate * p_szc of pages to the right of pp's level 1 region and second
35810Sstevel@tonic-gate * loop changes p_szc of pages of level 1 region that covers pp
35820Sstevel@tonic-gate * and all pages to the left of level 1 region that covers pp.
35830Sstevel@tonic-gate * In the first loop p_szc keeps dropping with every iteration
35840Sstevel@tonic-gate * and in the second loop it keeps increasing with every iteration.
35850Sstevel@tonic-gate *
35860Sstevel@tonic-gate * First loop description: Demote pages to the right of pp outside of
35870Sstevel@tonic-gate * level 1 region that covers pp. In every iteration of the while
35880Sstevel@tonic-gate * loop below find the last page of szc region and the first page of
35890Sstevel@tonic-gate * (szc - 1) region that is immediately to the right of (szc - 1)
35900Sstevel@tonic-gate * region that covers pp. From last such page to first such page
35910Sstevel@tonic-gate * change every page's szc to szc - 1. Decrement szc and continue
35920Sstevel@tonic-gate * looping until szc is 1. If pp belongs to the last (szc - 1) region
35930Sstevel@tonic-gate * of szc region skip to the next iteration.
35940Sstevel@tonic-gate */
35950Sstevel@tonic-gate szc = pszc;
35960Sstevel@tonic-gate while (szc > 1) {
35970Sstevel@tonic-gate lastpp = PP_GROUPLEADER(pp, szc);
35980Sstevel@tonic-gate pgcnt = page_get_pagecnt(szc);
35990Sstevel@tonic-gate lastpp += pgcnt - 1;
36000Sstevel@tonic-gate firstpp = PP_GROUPLEADER(pp, (szc - 1));
36010Sstevel@tonic-gate pgcnt = page_get_pagecnt(szc - 1);
36020Sstevel@tonic-gate if (lastpp - firstpp < pgcnt) {
36030Sstevel@tonic-gate szc--;
36040Sstevel@tonic-gate continue;
36050Sstevel@tonic-gate }
36060Sstevel@tonic-gate firstpp += pgcnt;
36070Sstevel@tonic-gate while (lastpp != firstpp) {
36080Sstevel@tonic-gate ASSERT(lastpp->p_szc == pszc);
36090Sstevel@tonic-gate lastpp->p_szc = szc - 1;
36100Sstevel@tonic-gate lastpp--;
36110Sstevel@tonic-gate }
36120Sstevel@tonic-gate firstpp->p_szc = szc - 1;
36130Sstevel@tonic-gate szc--;
36140Sstevel@tonic-gate }
36150Sstevel@tonic-gate
36160Sstevel@tonic-gate /*
36170Sstevel@tonic-gate * Second loop description:
36180Sstevel@tonic-gate * First iteration changes p_szc to 0 of every
36190Sstevel@tonic-gate * page of level 1 region that covers pp.
36200Sstevel@tonic-gate * Subsequent iterations find last page of szc region
36210Sstevel@tonic-gate * immediately to the left of szc region that covered pp
36220Sstevel@tonic-gate * and first page of (szc + 1) region that covers pp.
36230Sstevel@tonic-gate * From last to first page change p_szc of every page to szc.
36240Sstevel@tonic-gate * Increment szc and continue looping until szc is pszc.
36250Sstevel@tonic-gate * If pp belongs to the first szc region of the (szc + 1) region
36260Sstevel@tonic-gate * skip to the next iteration.
36270Sstevel@tonic-gate *
36280Sstevel@tonic-gate */
36290Sstevel@tonic-gate szc = 0;
36300Sstevel@tonic-gate while (szc < pszc) {
36310Sstevel@tonic-gate firstpp = PP_GROUPLEADER(pp, (szc + 1));
36320Sstevel@tonic-gate if (szc == 0) {
36330Sstevel@tonic-gate pgcnt = page_get_pagecnt(1);
36340Sstevel@tonic-gate lastpp = firstpp + (pgcnt - 1);
36350Sstevel@tonic-gate } else {
36360Sstevel@tonic-gate lastpp = PP_GROUPLEADER(pp, szc);
36370Sstevel@tonic-gate if (firstpp == lastpp) {
36380Sstevel@tonic-gate szc++;
36390Sstevel@tonic-gate continue;
36400Sstevel@tonic-gate }
36410Sstevel@tonic-gate lastpp--;
36420Sstevel@tonic-gate pgcnt = page_get_pagecnt(szc);
36430Sstevel@tonic-gate }
36440Sstevel@tonic-gate while (lastpp != firstpp) {
36450Sstevel@tonic-gate ASSERT(lastpp->p_szc == pszc);
36460Sstevel@tonic-gate lastpp->p_szc = szc;
36470Sstevel@tonic-gate lastpp--;
36480Sstevel@tonic-gate }
36490Sstevel@tonic-gate firstpp->p_szc = szc;
36500Sstevel@tonic-gate if (firstpp == rootpp)
36510Sstevel@tonic-gate break;
36520Sstevel@tonic-gate szc++;
36530Sstevel@tonic-gate }
36540Sstevel@tonic-gate x86_hm_exit(rootpp);
36550Sstevel@tonic-gate }
36560Sstevel@tonic-gate
36570Sstevel@tonic-gate /*
36580Sstevel@tonic-gate * get hw stats from hardware into page struct and reset hw stats
36590Sstevel@tonic-gate * returns attributes of page
36600Sstevel@tonic-gate * Flags for hat_pagesync, hat_getstat, hat_sync
36610Sstevel@tonic-gate *
36620Sstevel@tonic-gate * define HAT_SYNC_ZERORM 0x01
36630Sstevel@tonic-gate *
36640Sstevel@tonic-gate * Additional flags for hat_pagesync
36650Sstevel@tonic-gate *
36660Sstevel@tonic-gate * define HAT_SYNC_STOPON_REF 0x02
36670Sstevel@tonic-gate * define HAT_SYNC_STOPON_MOD 0x04
36680Sstevel@tonic-gate * define HAT_SYNC_STOPON_RM 0x06
36690Sstevel@tonic-gate * define HAT_SYNC_STOPON_SHARED 0x08
36700Sstevel@tonic-gate */
36710Sstevel@tonic-gate uint_t
36720Sstevel@tonic-gate hat_pagesync(struct page *pp, uint_t flags)
36730Sstevel@tonic-gate {
36740Sstevel@tonic-gate hment_t *hm = NULL;
36750Sstevel@tonic-gate htable_t *ht;
36760Sstevel@tonic-gate uint_t entry;
36770Sstevel@tonic-gate x86pte_t old, save_old;
36780Sstevel@tonic-gate x86pte_t new;
36790Sstevel@tonic-gate uchar_t nrmbits = P_REF|P_MOD|P_RO;
36800Sstevel@tonic-gate extern ulong_t po_share;
36810Sstevel@tonic-gate page_t *save_pp = pp;
36820Sstevel@tonic-gate uint_t pszc = 0;
36830Sstevel@tonic-gate
36840Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp) || panicstr);
36850Sstevel@tonic-gate
36860Sstevel@tonic-gate if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
36870Sstevel@tonic-gate return (pp->p_nrm & nrmbits);
36880Sstevel@tonic-gate
36890Sstevel@tonic-gate if ((flags & HAT_SYNC_ZERORM) == 0) {
36900Sstevel@tonic-gate
36910Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
36920Sstevel@tonic-gate return (pp->p_nrm & nrmbits);
36930Sstevel@tonic-gate
36940Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
36950Sstevel@tonic-gate return (pp->p_nrm & nrmbits);
36960Sstevel@tonic-gate
36970Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
36980Sstevel@tonic-gate hat_page_getshare(pp) > po_share) {
36990Sstevel@tonic-gate if (PP_ISRO(pp))
37000Sstevel@tonic-gate PP_SETREF(pp);
37010Sstevel@tonic-gate return (pp->p_nrm & nrmbits);
37020Sstevel@tonic-gate }
37030Sstevel@tonic-gate }
37040Sstevel@tonic-gate
37055084Sjohnlev XPV_DISALLOW_MIGRATE();
37060Sstevel@tonic-gate next_size:
37070Sstevel@tonic-gate /*
37080Sstevel@tonic-gate * walk thru the mapping list syncing (and clearing) ref/mod bits.
37090Sstevel@tonic-gate */
37100Sstevel@tonic-gate x86_hm_enter(pp);
37110Sstevel@tonic-gate while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
37120Sstevel@tonic-gate if (ht->ht_level < pszc)
37130Sstevel@tonic-gate continue;
37140Sstevel@tonic-gate old = x86pte_get(ht, entry);
37150Sstevel@tonic-gate try_again:
37160Sstevel@tonic-gate
37170Sstevel@tonic-gate ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
37180Sstevel@tonic-gate
37190Sstevel@tonic-gate if (PTE_GET(old, PT_REF | PT_MOD) == 0)
37200Sstevel@tonic-gate continue;
37210Sstevel@tonic-gate
37220Sstevel@tonic-gate save_old = old;
37230Sstevel@tonic-gate if ((flags & HAT_SYNC_ZERORM) != 0) {
37240Sstevel@tonic-gate
37250Sstevel@tonic-gate /*
37260Sstevel@tonic-gate * Need to clear ref or mod bits. Need to demap
37270Sstevel@tonic-gate * to make sure any executing TLBs see cleared bits.
37280Sstevel@tonic-gate */
37290Sstevel@tonic-gate new = old;
37300Sstevel@tonic-gate PTE_CLR(new, PT_REF | PT_MOD);
37310Sstevel@tonic-gate old = hati_update_pte(ht, entry, old, new);
37320Sstevel@tonic-gate if (old != 0)
37330Sstevel@tonic-gate goto try_again;
37340Sstevel@tonic-gate
37350Sstevel@tonic-gate old = save_old;
37360Sstevel@tonic-gate }
37370Sstevel@tonic-gate
37380Sstevel@tonic-gate /*
37390Sstevel@tonic-gate * Sync the PTE
37400Sstevel@tonic-gate */
37413446Smrj if (!(flags & HAT_SYNC_ZERORM) &&
37423446Smrj PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
37430Sstevel@tonic-gate hati_sync_pte_to_page(pp, old, ht->ht_level);
37440Sstevel@tonic-gate
37450Sstevel@tonic-gate /*
37460Sstevel@tonic-gate * can stop short if we found a ref'd or mod'd page
37470Sstevel@tonic-gate */
37480Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
37490Sstevel@tonic-gate (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
37500Sstevel@tonic-gate x86_hm_exit(pp);
37513446Smrj goto done;
37520Sstevel@tonic-gate }
37530Sstevel@tonic-gate }
37540Sstevel@tonic-gate x86_hm_exit(pp);
37550Sstevel@tonic-gate while (pszc < pp->p_szc) {
37560Sstevel@tonic-gate page_t *tpp;
37570Sstevel@tonic-gate pszc++;
37580Sstevel@tonic-gate tpp = PP_GROUPLEADER(pp, pszc);
37590Sstevel@tonic-gate if (pp != tpp) {
37600Sstevel@tonic-gate pp = tpp;
37610Sstevel@tonic-gate goto next_size;
37620Sstevel@tonic-gate }
37630Sstevel@tonic-gate }
37643446Smrj done:
37655084Sjohnlev XPV_ALLOW_MIGRATE();
37660Sstevel@tonic-gate return (save_pp->p_nrm & nrmbits);
37670Sstevel@tonic-gate }
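/*
 * Example usage (a minimal sketch; the pageout-style caller is
 * hypothetical): poll the ref/mod state of a locked page and stop as
 * soon as a modified mapping is seen.
 *
 *	uint_t nrm;
 *
 *	ASSERT(PAGE_LOCKED(pp));
 *	nrm = hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
 *	if (nrm & P_MOD) {
 *		... the page has a dirty mapping, push it to backing store ...
 *	}
 */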
37680Sstevel@tonic-gate
37690Sstevel@tonic-gate /*
37700Sstevel@tonic-gate  * Return the approximate number of mappings to this pp. A return of 0 implies
37710Sstevel@tonic-gate * there are no mappings to the page.
37720Sstevel@tonic-gate */
37730Sstevel@tonic-gate ulong_t
37740Sstevel@tonic-gate hat_page_getshare(page_t *pp)
37750Sstevel@tonic-gate {
37760Sstevel@tonic-gate uint_t cnt;
37770Sstevel@tonic-gate cnt = hment_mapcnt(pp);
37781841Spraks #if defined(__amd64)
37791841Spraks if (vpm_enable && pp->p_vpmref) {
37801841Spraks cnt += 1;
37811841Spraks }
37821841Spraks #endif
37830Sstevel@tonic-gate return (cnt);
37840Sstevel@tonic-gate }
37850Sstevel@tonic-gate
37860Sstevel@tonic-gate /*
37874528Spaulsan  * Return 1 if the number of mappings exceeds sh_thresh. Return 0
37884528Spaulsan * otherwise.
37894528Spaulsan */
37904528Spaulsan int
37914528Spaulsan hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
37924528Spaulsan {
37934528Spaulsan return (hat_page_getshare(pp) > sh_thresh);
37944528Spaulsan }
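/*
 * Example usage (a minimal sketch; the scanning caller is hypothetical):
 * compare the share count against the po_share threshold referenced in
 * hat_pagesync() above, rather than reading an exact count.
 *
 *	if (hat_page_checkshare(pp, po_share)) {
 *		... heavily shared page, skip it ...
 *	}
 */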
37954528Spaulsan
37964528Spaulsan /*
37970Sstevel@tonic-gate * hat_softlock isn't supported anymore
37980Sstevel@tonic-gate */
37990Sstevel@tonic-gate /*ARGSUSED*/
38000Sstevel@tonic-gate faultcode_t
38010Sstevel@tonic-gate hat_softlock(
38020Sstevel@tonic-gate hat_t *hat,
38030Sstevel@tonic-gate caddr_t addr,
38040Sstevel@tonic-gate size_t *len,
38050Sstevel@tonic-gate struct page **page_array,
38060Sstevel@tonic-gate uint_t flags)
38070Sstevel@tonic-gate {
38080Sstevel@tonic-gate return (FC_NOSUPPORT);
38090Sstevel@tonic-gate }
38100Sstevel@tonic-gate
38110Sstevel@tonic-gate
38120Sstevel@tonic-gate
38130Sstevel@tonic-gate /*
38140Sstevel@tonic-gate * Routine to expose supported HAT features to platform independent code.
38150Sstevel@tonic-gate */
38160Sstevel@tonic-gate /*ARGSUSED*/
38170Sstevel@tonic-gate int
38180Sstevel@tonic-gate hat_supported(enum hat_features feature, void *arg)
38190Sstevel@tonic-gate {
38200Sstevel@tonic-gate switch (feature) {
38210Sstevel@tonic-gate
38220Sstevel@tonic-gate case HAT_SHARED_PT: /* this is really ISM */
38230Sstevel@tonic-gate return (1);
38240Sstevel@tonic-gate
38250Sstevel@tonic-gate case HAT_DYNAMIC_ISM_UNMAP:
38260Sstevel@tonic-gate return (0);
38270Sstevel@tonic-gate
38280Sstevel@tonic-gate case HAT_VMODSORT:
38290Sstevel@tonic-gate return (1);
38300Sstevel@tonic-gate
38314528Spaulsan case HAT_SHARED_REGIONS:
38324528Spaulsan return (0);
38334528Spaulsan
38340Sstevel@tonic-gate default:
38350Sstevel@tonic-gate panic("hat_supported() - unknown feature");
38360Sstevel@tonic-gate }
38370Sstevel@tonic-gate return (0);
38380Sstevel@tonic-gate }
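/*
 * Example usage (a minimal sketch): platform independent code probes a
 * feature before depending on it, e.g. whether individual ISM pages can
 * be unmapped.
 *
 *	if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) {
 *		... unmap single ISM pages ...
 *	} else {
 *		... unshare the whole ISM segment (the x86 case) ...
 *	}
 */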
38390Sstevel@tonic-gate
38400Sstevel@tonic-gate /*
38410Sstevel@tonic-gate * Called when a thread is exiting and has been switched to the kernel AS
38420Sstevel@tonic-gate */
38430Sstevel@tonic-gate void
38440Sstevel@tonic-gate hat_thread_exit(kthread_t *thd)
38450Sstevel@tonic-gate {
38460Sstevel@tonic-gate ASSERT(thd->t_procp->p_as == &kas);
38475084Sjohnlev XPV_DISALLOW_MIGRATE();
38480Sstevel@tonic-gate hat_switch(thd->t_procp->p_as->a_hat);
38495084Sjohnlev XPV_ALLOW_MIGRATE();
38500Sstevel@tonic-gate }
38510Sstevel@tonic-gate
38520Sstevel@tonic-gate /*
38530Sstevel@tonic-gate  * Set up the given brand new hat structure as the new HAT on this cpu's mmu.
38540Sstevel@tonic-gate */
38550Sstevel@tonic-gate /*ARGSUSED*/
38560Sstevel@tonic-gate void
38570Sstevel@tonic-gate hat_setup(hat_t *hat, int flags)
38580Sstevel@tonic-gate {
38595084Sjohnlev XPV_DISALLOW_MIGRATE();
38600Sstevel@tonic-gate kpreempt_disable();
38610Sstevel@tonic-gate
38620Sstevel@tonic-gate hat_switch(hat);
38630Sstevel@tonic-gate
38640Sstevel@tonic-gate kpreempt_enable();
38655084Sjohnlev XPV_ALLOW_MIGRATE();
38660Sstevel@tonic-gate }
38670Sstevel@tonic-gate
38680Sstevel@tonic-gate /*
38690Sstevel@tonic-gate * Prepare for a CPU private mapping for the given address.
38700Sstevel@tonic-gate *
38710Sstevel@tonic-gate * The address can only be used from a single CPU and can be remapped
38720Sstevel@tonic-gate * using hat_mempte_remap(). Return the address of the PTE.
38730Sstevel@tonic-gate *
38740Sstevel@tonic-gate * We do the htable_create() if necessary and increment the valid count so
38750Sstevel@tonic-gate * the htable can't disappear. We also hat_devload() the page table into
38760Sstevel@tonic-gate * kernel so that the PTE is quickly accessed.
38770Sstevel@tonic-gate */
38783446Smrj hat_mempte_t
38793446Smrj hat_mempte_setup(caddr_t addr)
38800Sstevel@tonic-gate {
38810Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr;
38820Sstevel@tonic-gate htable_t *ht;
38830Sstevel@tonic-gate uint_t entry;
38840Sstevel@tonic-gate x86pte_t oldpte;
38853446Smrj hat_mempte_t p;
38860Sstevel@tonic-gate
38870Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(va));
38880Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va));
38894004Sjosephb ++curthread->t_hatdepth;
38905741Smrj XPV_DISALLOW_MIGRATE();
38910Sstevel@tonic-gate ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
38920Sstevel@tonic-gate if (ht == NULL) {
38930Sstevel@tonic-gate ht = htable_create(kas.a_hat, va, 0, NULL);
38940Sstevel@tonic-gate entry = htable_va2entry(va, ht);
38950Sstevel@tonic-gate ASSERT(ht->ht_level == 0);
38960Sstevel@tonic-gate oldpte = x86pte_get(ht, entry);
38970Sstevel@tonic-gate }
38980Sstevel@tonic-gate if (PTE_ISVALID(oldpte))
38990Sstevel@tonic-gate panic("hat_mempte_setup(): address already mapped"
39007240Srh87107 	    " ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
39010Sstevel@tonic-gate
39020Sstevel@tonic-gate /*
39030Sstevel@tonic-gate * increment ht_valid_cnt so that the pagetable can't disappear
39040Sstevel@tonic-gate */
39050Sstevel@tonic-gate HTABLE_INC(ht->ht_valid_cnt);
39060Sstevel@tonic-gate
39070Sstevel@tonic-gate /*
39083446Smrj * return the PTE physical address to the caller.
39090Sstevel@tonic-gate */
39100Sstevel@tonic-gate htable_release(ht);
39115741Smrj XPV_ALLOW_MIGRATE();
39123446Smrj p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
39134004Sjosephb --curthread->t_hatdepth;
39143446Smrj return (p);
39150Sstevel@tonic-gate }
39160Sstevel@tonic-gate
39170Sstevel@tonic-gate /*
39180Sstevel@tonic-gate * Release a CPU private mapping for the given address.
39190Sstevel@tonic-gate * We decrement the htable valid count so it might be destroyed.
39200Sstevel@tonic-gate */
39213446Smrj /*ARGSUSED1*/
39220Sstevel@tonic-gate void
39233446Smrj hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
39240Sstevel@tonic-gate {
39250Sstevel@tonic-gate htable_t *ht;
39260Sstevel@tonic-gate
39275741Smrj XPV_DISALLOW_MIGRATE();
39280Sstevel@tonic-gate /*
39293446Smrj 	 * invalidate any leftover mapping and decrement the htable valid count
39300Sstevel@tonic-gate */
39315084Sjohnlev #ifdef __xpv
39325084Sjohnlev if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
39335084Sjohnlev UVMF_INVLPG | UVMF_LOCAL))
39345084Sjohnlev panic("HYPERVISOR_update_va_mapping() failed");
39355084Sjohnlev #else
39363446Smrj {
39373446Smrj x86pte_t *pteptr;
39383446Smrj
39393446Smrj pteptr = x86pte_mapin(mmu_btop(pte_pa),
39403446Smrj (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
39413446Smrj if (mmu.pae_hat)
39423446Smrj *pteptr = 0;
39433446Smrj else
39443446Smrj *(x86pte32_t *)pteptr = 0;
39453446Smrj mmu_tlbflush_entry(addr);
39463446Smrj x86pte_mapout();
39473446Smrj }
39485084Sjohnlev #endif
39493446Smrj
39500Sstevel@tonic-gate ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
39510Sstevel@tonic-gate if (ht == NULL)
39520Sstevel@tonic-gate panic("hat_mempte_release(): invalid address");
39530Sstevel@tonic-gate ASSERT(ht->ht_level == 0);
39540Sstevel@tonic-gate HTABLE_DEC(ht->ht_valid_cnt);
39550Sstevel@tonic-gate htable_release(ht);
39565741Smrj XPV_ALLOW_MIGRATE();
39570Sstevel@tonic-gate }
39580Sstevel@tonic-gate
39590Sstevel@tonic-gate /*
39600Sstevel@tonic-gate * Apply a temporary CPU private mapping to a page. We flush the TLB only
39610Sstevel@tonic-gate * on this CPU, so this ought to have been called with preemption disabled.
39620Sstevel@tonic-gate */
39630Sstevel@tonic-gate void
39640Sstevel@tonic-gate hat_mempte_remap(
39653446Smrj pfn_t pfn,
39663446Smrj caddr_t addr,
39673446Smrj hat_mempte_t pte_pa,
39683446Smrj uint_t attr,
39693446Smrj uint_t flags)
39700Sstevel@tonic-gate {
39710Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr;
39720Sstevel@tonic-gate x86pte_t pte;
39730Sstevel@tonic-gate
39740Sstevel@tonic-gate /*
39750Sstevel@tonic-gate * Remap the given PTE to the new page's PFN. Invalidate only
39760Sstevel@tonic-gate * on this CPU.
39770Sstevel@tonic-gate */
39780Sstevel@tonic-gate #ifdef DEBUG
39790Sstevel@tonic-gate htable_t *ht;
39800Sstevel@tonic-gate uint_t entry;
39810Sstevel@tonic-gate
39820Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(va));
39830Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va));
39840Sstevel@tonic-gate ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
39850Sstevel@tonic-gate ASSERT(ht != NULL);
39860Sstevel@tonic-gate ASSERT(ht->ht_level == 0);
39870Sstevel@tonic-gate ASSERT(ht->ht_valid_cnt > 0);
39883446Smrj ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
39890Sstevel@tonic-gate htable_release(ht);
39900Sstevel@tonic-gate #endif
39915084Sjohnlev XPV_DISALLOW_MIGRATE();
39920Sstevel@tonic-gate pte = hati_mkpte(pfn, attr, 0, flags);
39935084Sjohnlev #ifdef __xpv
39945084Sjohnlev if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
39955084Sjohnlev panic("HYPERVISOR_update_va_mapping() failed");
39965084Sjohnlev #else
39973446Smrj {
39983446Smrj x86pte_t *pteptr;
39993446Smrj
40003446Smrj pteptr = x86pte_mapin(mmu_btop(pte_pa),
40013446Smrj (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
40023446Smrj if (mmu.pae_hat)
40033446Smrj *(x86pte_t *)pteptr = pte;
40043446Smrj else
40053446Smrj *(x86pte32_t *)pteptr = (x86pte32_t)pte;
40063446Smrj mmu_tlbflush_entry(addr);
40073446Smrj x86pte_mapout();
40083446Smrj }
40095084Sjohnlev #endif
40105084Sjohnlev XPV_ALLOW_MIGRATE();
40110Sstevel@tonic-gate }
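/*
 * Example lifecycle for the hat_mempte_*() interfaces (a minimal sketch;
 * cpu_private_va is a hypothetical page-aligned kernel VA reserved by
 * the caller). Preemption stays disabled across the remap and the
 * access, since hat_mempte_remap() only invalidates the local TLB.
 *
 *	hat_mempte_t pte_pa = hat_mempte_setup(cpu_private_va);
 *
 *	kpreempt_disable();
 *	hat_mempte_remap(pfn, cpu_private_va, pte_pa,
 *	    PROT_READ | PROT_WRITE, 0);
 *	... access the frame through cpu_private_va ...
 *	kpreempt_enable();
 *
 *	hat_mempte_release(cpu_private_va, pte_pa);
 */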
40120Sstevel@tonic-gate
40130Sstevel@tonic-gate
40140Sstevel@tonic-gate
40150Sstevel@tonic-gate /*
40160Sstevel@tonic-gate * Hat locking functions
40170Sstevel@tonic-gate  * XXX - these two functions are currently being used by hatstats;
40180Sstevel@tonic-gate * they can be removed by using a per-as mutex for hatstats.
40190Sstevel@tonic-gate */
40200Sstevel@tonic-gate void
40210Sstevel@tonic-gate hat_enter(hat_t *hat)
40220Sstevel@tonic-gate {
40230Sstevel@tonic-gate mutex_enter(&hat->hat_mutex);
40240Sstevel@tonic-gate }
40250Sstevel@tonic-gate
40260Sstevel@tonic-gate void
40270Sstevel@tonic-gate hat_exit(hat_t *hat)
40280Sstevel@tonic-gate {
40290Sstevel@tonic-gate mutex_exit(&hat->hat_mutex);
40300Sstevel@tonic-gate }
40310Sstevel@tonic-gate
40320Sstevel@tonic-gate /*
40333446Smrj * HAT part of cpu initialization.
40340Sstevel@tonic-gate */
40350Sstevel@tonic-gate void
40360Sstevel@tonic-gate hat_cpu_online(struct cpu *cpup)
40370Sstevel@tonic-gate {
40380Sstevel@tonic-gate if (cpup != CPU) {
40393446Smrj x86pte_cpu_init(cpup);
40400Sstevel@tonic-gate hat_vlp_setup(cpup);
40410Sstevel@tonic-gate }
40420Sstevel@tonic-gate CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
40430Sstevel@tonic-gate }
40440Sstevel@tonic-gate
40450Sstevel@tonic-gate /*
40463446Smrj * HAT part of cpu deletion.
40473446Smrj * (currently, we only call this after the cpu is safely passivated.)
40483446Smrj */
40493446Smrj void
40503446Smrj hat_cpu_offline(struct cpu *cpup)
40513446Smrj {
40523446Smrj ASSERT(cpup != CPU);
40533446Smrj
40543446Smrj CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
405512004Sjiang.liu@intel.com hat_vlp_teardown(cpup);
40563446Smrj x86pte_cpu_fini(cpup);
40573446Smrj }
40583446Smrj
40593446Smrj /*
40600Sstevel@tonic-gate * Function called after all CPUs are brought online.
40610Sstevel@tonic-gate * Used to remove low address boot mappings.
40620Sstevel@tonic-gate */
40630Sstevel@tonic-gate void
40640Sstevel@tonic-gate clear_boot_mappings(uintptr_t low, uintptr_t high)
40650Sstevel@tonic-gate {
40660Sstevel@tonic-gate uintptr_t vaddr = low;
40670Sstevel@tonic-gate htable_t *ht = NULL;
40680Sstevel@tonic-gate level_t level;
40690Sstevel@tonic-gate uint_t entry;
40700Sstevel@tonic-gate x86pte_t pte;
40710Sstevel@tonic-gate
40720Sstevel@tonic-gate /*
40730Sstevel@tonic-gate 	 * On the 1st CPU we can unload the prom mappings; basically we blow away
40743446Smrj * all virtual mappings under _userlimit.
40750Sstevel@tonic-gate */
40760Sstevel@tonic-gate while (vaddr < high) {
40770Sstevel@tonic-gate pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
40780Sstevel@tonic-gate if (ht == NULL)
40790Sstevel@tonic-gate break;
40800Sstevel@tonic-gate
40810Sstevel@tonic-gate level = ht->ht_level;
40820Sstevel@tonic-gate entry = htable_va2entry(vaddr, ht);
40830Sstevel@tonic-gate ASSERT(level <= mmu.max_page_level);
40840Sstevel@tonic-gate ASSERT(PTE_ISPAGE(pte, level));
40850Sstevel@tonic-gate
40860Sstevel@tonic-gate /*
40870Sstevel@tonic-gate * Unload the mapping from the page tables.
40880Sstevel@tonic-gate */
40893446Smrj (void) x86pte_inval(ht, entry, 0, NULL);
40900Sstevel@tonic-gate ASSERT(ht->ht_valid_cnt > 0);
40910Sstevel@tonic-gate HTABLE_DEC(ht->ht_valid_cnt);
40920Sstevel@tonic-gate PGCNT_DEC(ht->ht_hat, ht->ht_level);
40930Sstevel@tonic-gate
40940Sstevel@tonic-gate vaddr += LEVEL_SIZE(ht->ht_level);
40950Sstevel@tonic-gate }
40960Sstevel@tonic-gate if (ht)
40970Sstevel@tonic-gate htable_release(ht);
40980Sstevel@tonic-gate }
40990Sstevel@tonic-gate
41000Sstevel@tonic-gate /*
41010Sstevel@tonic-gate  * Atomically install a new translation for a single page. If the
41020Sstevel@tonic-gate * currently installed PTE doesn't match the value we expect to find,
41030Sstevel@tonic-gate * it's not updated and we return the PTE we found.
41040Sstevel@tonic-gate *
41050Sstevel@tonic-gate * If activating nosync or NOWRITE and the page was modified we need to sync
41060Sstevel@tonic-gate * with the page_t. Also sync with page_t if clearing ref/mod bits.
41070Sstevel@tonic-gate */
41080Sstevel@tonic-gate static x86pte_t
41090Sstevel@tonic-gate hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
41100Sstevel@tonic-gate {
41110Sstevel@tonic-gate page_t *pp;
41120Sstevel@tonic-gate uint_t rm = 0;
41130Sstevel@tonic-gate x86pte_t replaced;
41140Sstevel@tonic-gate
41153446Smrj if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
41160Sstevel@tonic-gate PTE_GET(expected, PT_MOD | PT_REF) &&
41170Sstevel@tonic-gate (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
41184381Sjosephb !PTE_GET(new, PT_MOD | PT_REF))) {
41190Sstevel@tonic-gate
41203446Smrj ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
41210Sstevel@tonic-gate pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
41220Sstevel@tonic-gate ASSERT(pp != NULL);
41230Sstevel@tonic-gate if (PTE_GET(expected, PT_MOD))
41240Sstevel@tonic-gate rm |= P_MOD;
41250Sstevel@tonic-gate if (PTE_GET(expected, PT_REF))
41260Sstevel@tonic-gate rm |= P_REF;
41270Sstevel@tonic-gate PTE_CLR(new, PT_MOD | PT_REF);
41280Sstevel@tonic-gate }
41290Sstevel@tonic-gate
41300Sstevel@tonic-gate replaced = x86pte_update(ht, entry, expected, new);
41310Sstevel@tonic-gate if (replaced != expected)
41320Sstevel@tonic-gate return (replaced);
41330Sstevel@tonic-gate
41340Sstevel@tonic-gate if (rm) {
41350Sstevel@tonic-gate /*
41360Sstevel@tonic-gate * sync to all constituent pages of a large page
41370Sstevel@tonic-gate */
41380Sstevel@tonic-gate pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
41390Sstevel@tonic-gate ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
41400Sstevel@tonic-gate while (pgcnt-- > 0) {
41410Sstevel@tonic-gate /*
41420Sstevel@tonic-gate * hat_page_demote() can't decrease
41430Sstevel@tonic-gate * pszc below this mapping size
41440Sstevel@tonic-gate * since large mapping existed after we
41450Sstevel@tonic-gate * took mlist lock.
41460Sstevel@tonic-gate */
41470Sstevel@tonic-gate ASSERT(pp->p_szc >= ht->ht_level);
41480Sstevel@tonic-gate hat_page_setattr(pp, rm);
41490Sstevel@tonic-gate ++pp;
41500Sstevel@tonic-gate }
41510Sstevel@tonic-gate }
41520Sstevel@tonic-gate
41530Sstevel@tonic-gate return (0);
41540Sstevel@tonic-gate }
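/*
 * Caller pattern for hati_update_pte() (a minimal sketch restating the
 * try_again loop in hat_pagesync() above): on a compare failure the
 * routine returns the PTE it found instead, so the caller recomputes
 * the new value and retries until the swap succeeds.
 *
 *	old = x86pte_get(ht, entry);
 *	do {
 *		new = old;
 *		PTE_CLR(new, PT_REF | PT_MOD);
 *	} while ((old = hati_update_pte(ht, entry, old, new)) != 0);
 */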
41550Sstevel@tonic-gate
41564528Spaulsan /* ARGSUSED */
41574528Spaulsan void
41585075Spaulsan hat_join_srd(struct hat *hat, vnode_t *evp)
41594528Spaulsan {
41604528Spaulsan }
41614528Spaulsan
41624528Spaulsan /* ARGSUSED */
41634528Spaulsan hat_region_cookie_t
41645075Spaulsan hat_join_region(struct hat *hat,
41654528Spaulsan caddr_t r_saddr,
41664528Spaulsan size_t r_size,
41674528Spaulsan void *r_obj,
41684528Spaulsan u_offset_t r_objoff,
41694528Spaulsan uchar_t r_perm,
41704528Spaulsan uchar_t r_pgszc,
41714528Spaulsan hat_rgn_cb_func_t r_cb_function,
41724528Spaulsan uint_t flags)
41734528Spaulsan {
41744528Spaulsan panic("No shared region support on x86");
41754528Spaulsan return (HAT_INVALID_REGION_COOKIE);
41764528Spaulsan }
41774528Spaulsan
41784528Spaulsan /* ARGSUSED */
41794528Spaulsan void
41805075Spaulsan hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
41814528Spaulsan {
41824528Spaulsan panic("No shared region support on x86");
41834528Spaulsan }
41844528Spaulsan
41854528Spaulsan /* ARGSUSED */
41864528Spaulsan void
41875075Spaulsan hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
41884528Spaulsan {
41894528Spaulsan panic("No shared region support on x86");
41904528Spaulsan }
41914528Spaulsan
41924528Spaulsan
41930Sstevel@tonic-gate /*
41940Sstevel@tonic-gate * Kernel Physical Mapping (kpm) facility
41950Sstevel@tonic-gate *
41960Sstevel@tonic-gate * Most of the routines needed to support segkpm are almost no-ops on the
41970Sstevel@tonic-gate * x86 platform. We map in the entire segment when it is created and leave
41980Sstevel@tonic-gate * it mapped in, so there is no additional work required to set up and tear
41990Sstevel@tonic-gate * down individual mappings. All of these routines were created to support
42000Sstevel@tonic-gate * SPARC platforms that have to avoid aliasing in their virtually indexed
42010Sstevel@tonic-gate * caches.
42020Sstevel@tonic-gate *
42030Sstevel@tonic-gate * Most of the routines have sanity checks in them (e.g. verifying that the
42040Sstevel@tonic-gate * passed-in page is locked). We don't actually care about most of these
42050Sstevel@tonic-gate * checks on x86, but we leave them in place to identify problems in the
42060Sstevel@tonic-gate * upper levels.
42070Sstevel@tonic-gate */
42080Sstevel@tonic-gate
42090Sstevel@tonic-gate /*
42100Sstevel@tonic-gate * Map in a locked page and return the vaddr.
42110Sstevel@tonic-gate */
42120Sstevel@tonic-gate /*ARGSUSED*/
42130Sstevel@tonic-gate caddr_t
42140Sstevel@tonic-gate hat_kpm_mapin(struct page *pp, struct kpme *kpme)
42150Sstevel@tonic-gate {
42160Sstevel@tonic-gate caddr_t vaddr;
42170Sstevel@tonic-gate
42180Sstevel@tonic-gate #ifdef DEBUG
42190Sstevel@tonic-gate if (kpm_enable == 0) {
42200Sstevel@tonic-gate cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
42210Sstevel@tonic-gate return ((caddr_t)NULL);
42220Sstevel@tonic-gate }
42230Sstevel@tonic-gate
42240Sstevel@tonic-gate if (pp == NULL || PAGE_LOCKED(pp) == 0) {
42250Sstevel@tonic-gate cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
42260Sstevel@tonic-gate return ((caddr_t)NULL);
42270Sstevel@tonic-gate }
42280Sstevel@tonic-gate #endif
42290Sstevel@tonic-gate
42300Sstevel@tonic-gate vaddr = hat_kpm_page2va(pp, 1);
42310Sstevel@tonic-gate
42320Sstevel@tonic-gate return (vaddr);
42330Sstevel@tonic-gate }
42340Sstevel@tonic-gate
42350Sstevel@tonic-gate /*
42360Sstevel@tonic-gate * Mapout a locked page.
42370Sstevel@tonic-gate */
42380Sstevel@tonic-gate /*ARGSUSED*/
42390Sstevel@tonic-gate void
42400Sstevel@tonic-gate hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
42410Sstevel@tonic-gate {
42420Sstevel@tonic-gate #ifdef DEBUG
42430Sstevel@tonic-gate if (kpm_enable == 0) {
42440Sstevel@tonic-gate cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
42450Sstevel@tonic-gate return;
42460Sstevel@tonic-gate }
42470Sstevel@tonic-gate
42480Sstevel@tonic-gate if (IS_KPM_ADDR(vaddr) == 0) {
42490Sstevel@tonic-gate cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
42500Sstevel@tonic-gate return;
42510Sstevel@tonic-gate }
42520Sstevel@tonic-gate
42530Sstevel@tonic-gate if (pp == NULL || PAGE_LOCKED(pp) == 0) {
42540Sstevel@tonic-gate cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
42550Sstevel@tonic-gate return;
42560Sstevel@tonic-gate }
42570Sstevel@tonic-gate #endif
42580Sstevel@tonic-gate }
42590Sstevel@tonic-gate
42600Sstevel@tonic-gate /*
42619894SPavel.Tatashin@Sun.COM * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
42629894SPavel.Tatashin@Sun.COM * memory addresses that are not described by a page_t. It can
42639894SPavel.Tatashin@Sun.COM * also be used for normal pages that are not locked, but beware
42649894SPavel.Tatashin@Sun.COM * this is dangerous - no locking is performed, so the identity of
42659894SPavel.Tatashin@Sun.COM * the page could change. hat_kpm_mapin_pfn is not supported when
42669894SPavel.Tatashin@Sun.COM * vac_colors > 1, because the chosen va depends on the page identity,
42679894SPavel.Tatashin@Sun.COM * which could change.
42689894SPavel.Tatashin@Sun.COM * The caller must only pass pfn's for valid physical addresses; violation
42699894SPavel.Tatashin@Sun.COM * of this rule will cause panic.
42709894SPavel.Tatashin@Sun.COM */
42719894SPavel.Tatashin@Sun.COM caddr_t
42729894SPavel.Tatashin@Sun.COM hat_kpm_mapin_pfn(pfn_t pfn)
42739894SPavel.Tatashin@Sun.COM {
42749894SPavel.Tatashin@Sun.COM caddr_t paddr, vaddr;
42759894SPavel.Tatashin@Sun.COM
42769894SPavel.Tatashin@Sun.COM if (kpm_enable == 0)
42779894SPavel.Tatashin@Sun.COM return ((caddr_t)NULL);
42789894SPavel.Tatashin@Sun.COM
42799894SPavel.Tatashin@Sun.COM paddr = (caddr_t)ptob(pfn);
42809894SPavel.Tatashin@Sun.COM vaddr = (uintptr_t)kpm_vbase + paddr;
42819894SPavel.Tatashin@Sun.COM
42829894SPavel.Tatashin@Sun.COM return ((caddr_t)vaddr);
42839894SPavel.Tatashin@Sun.COM }
42849894SPavel.Tatashin@Sun.COM
42859894SPavel.Tatashin@Sun.COM /*ARGSUSED*/
42869894SPavel.Tatashin@Sun.COM void
42879894SPavel.Tatashin@Sun.COM hat_kpm_mapout_pfn(pfn_t pfn)
42889894SPavel.Tatashin@Sun.COM {
42899894SPavel.Tatashin@Sun.COM /* empty */
42909894SPavel.Tatashin@Sun.COM }
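/*
 * Example usage (a minimal sketch; physaddr is a hypothetical physical
 * address already known to be valid):
 *
 *	caddr_t va = hat_kpm_mapin_pfn(mmu_btop(physaddr));
 *	if (va != NULL) {
 *		... access the frame through va ...
 *		hat_kpm_mapout_pfn(mmu_btop(physaddr));
 *	}
 */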
42919894SPavel.Tatashin@Sun.COM
42929894SPavel.Tatashin@Sun.COM /*
42930Sstevel@tonic-gate * Return the kpm virtual address for a specific pfn
42940Sstevel@tonic-gate */
42950Sstevel@tonic-gate caddr_t
42960Sstevel@tonic-gate hat_kpm_pfn2va(pfn_t pfn)
42970Sstevel@tonic-gate {
42983446Smrj uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
42990Sstevel@tonic-gate
43005262Srscott ASSERT(!pfn_is_foreign(pfn));
43010Sstevel@tonic-gate return ((caddr_t)vaddr);
43020Sstevel@tonic-gate }
43030Sstevel@tonic-gate
43040Sstevel@tonic-gate /*
43050Sstevel@tonic-gate * Return the kpm virtual address for the page at pp.
43060Sstevel@tonic-gate */
43070Sstevel@tonic-gate /*ARGSUSED*/
43080Sstevel@tonic-gate caddr_t
43090Sstevel@tonic-gate hat_kpm_page2va(struct page *pp, int checkswap)
43100Sstevel@tonic-gate {
43110Sstevel@tonic-gate return (hat_kpm_pfn2va(pp->p_pagenum));
43120Sstevel@tonic-gate }
43130Sstevel@tonic-gate
43140Sstevel@tonic-gate /*
43150Sstevel@tonic-gate * Return the page frame number for the kpm virtual address vaddr.
43160Sstevel@tonic-gate */
43170Sstevel@tonic-gate pfn_t
43180Sstevel@tonic-gate hat_kpm_va2pfn(caddr_t vaddr)
43190Sstevel@tonic-gate {
43200Sstevel@tonic-gate pfn_t pfn;
43210Sstevel@tonic-gate
43220Sstevel@tonic-gate ASSERT(IS_KPM_ADDR(vaddr));
43230Sstevel@tonic-gate
43240Sstevel@tonic-gate pfn = (pfn_t)btop(vaddr - kpm_vbase);
43250Sstevel@tonic-gate
43260Sstevel@tonic-gate return (pfn);
43270Sstevel@tonic-gate }
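/*
 * Since the x86 kpm translation is pure arithmetic, the two conversions
 * above are exact inverses (a minimal sketch):
 *
 *	caddr_t va = hat_kpm_pfn2va(pfn);
 *	ASSERT(hat_kpm_va2pfn(va) == pfn);
 */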
43280Sstevel@tonic-gate
43290Sstevel@tonic-gate
43300Sstevel@tonic-gate /*
43310Sstevel@tonic-gate * Return the page for the kpm virtual address vaddr.
43320Sstevel@tonic-gate */
43330Sstevel@tonic-gate page_t *
43340Sstevel@tonic-gate hat_kpm_vaddr2page(caddr_t vaddr)
43350Sstevel@tonic-gate {
43360Sstevel@tonic-gate pfn_t pfn;
43370Sstevel@tonic-gate
43380Sstevel@tonic-gate ASSERT(IS_KPM_ADDR(vaddr));
43390Sstevel@tonic-gate
43400Sstevel@tonic-gate pfn = hat_kpm_va2pfn(vaddr);
43410Sstevel@tonic-gate
43420Sstevel@tonic-gate return (page_numtopp_nolock(pfn));
43430Sstevel@tonic-gate }
43440Sstevel@tonic-gate
43450Sstevel@tonic-gate /*
43460Sstevel@tonic-gate * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
43470Sstevel@tonic-gate  * KPM page. This should never happen on x86.
43480Sstevel@tonic-gate */
43490Sstevel@tonic-gate int
43500Sstevel@tonic-gate hat_kpm_fault(hat_t *hat, caddr_t vaddr)
43510Sstevel@tonic-gate {
43527240Srh87107 panic("pagefault in seg_kpm. hat: 0x%p vaddr: 0x%p",
43537240Srh87107 (void *)hat, (void *)vaddr);
43540Sstevel@tonic-gate
43550Sstevel@tonic-gate return (0);
43560Sstevel@tonic-gate }
43570Sstevel@tonic-gate
43580Sstevel@tonic-gate /*ARGSUSED*/
43590Sstevel@tonic-gate void
43600Sstevel@tonic-gate hat_kpm_mseghash_clear(int nentries)
43610Sstevel@tonic-gate {}
43620Sstevel@tonic-gate
43630Sstevel@tonic-gate /*ARGSUSED*/
43640Sstevel@tonic-gate void
43650Sstevel@tonic-gate hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
43660Sstevel@tonic-gate {}
43675084Sjohnlev
436812004Sjiang.liu@intel.com #ifndef __xpv
436912004Sjiang.liu@intel.com void
437012004Sjiang.liu@intel.com hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
437112004Sjiang.liu@intel.com offset_t kpm_pages_off)
437212004Sjiang.liu@intel.com {
437312004Sjiang.liu@intel.com _NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off));
437412004Sjiang.liu@intel.com pfn_t base, end;
437512004Sjiang.liu@intel.com
437612004Sjiang.liu@intel.com /*
437712004Sjiang.liu@intel.com * kphysm_add_memory_dynamic() does not set nkpmpgs
437812004Sjiang.liu@intel.com * when page_t memory is externally allocated. That
437912004Sjiang.liu@intel.com * code must properly calculate nkpmpgs in all cases
438012004Sjiang.liu@intel.com * if nkpmpgs needs to be used at some point.
438112004Sjiang.liu@intel.com */
438212004Sjiang.liu@intel.com
438312004Sjiang.liu@intel.com /*
438412004Sjiang.liu@intel.com * The meta (page_t) pages for dynamically added memory are allocated
438512004Sjiang.liu@intel.com * either from the incoming memory itself or from existing memory.
438612004Sjiang.liu@intel.com * In the former case the base of the incoming pages will be different
438712004Sjiang.liu@intel.com * than the base of the dynamic segment so call memseg_get_start() to
438812004Sjiang.liu@intel.com * get the actual base of the incoming memory for each case.
438912004Sjiang.liu@intel.com */
439012004Sjiang.liu@intel.com
439112004Sjiang.liu@intel.com base = memseg_get_start(msp);
439212004Sjiang.liu@intel.com end = msp->pages_end;
439312004Sjiang.liu@intel.com
439412004Sjiang.liu@intel.com hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
439512004Sjiang.liu@intel.com mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
439612004Sjiang.liu@intel.com HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
439712004Sjiang.liu@intel.com }
439812004Sjiang.liu@intel.com
439912004Sjiang.liu@intel.com void
440012004Sjiang.liu@intel.com hat_kpm_addmem_mseg_insert(struct memseg *msp)
440112004Sjiang.liu@intel.com {
440212004Sjiang.liu@intel.com _NOTE(ARGUNUSED(msp));
440312004Sjiang.liu@intel.com }
440412004Sjiang.liu@intel.com
440512004Sjiang.liu@intel.com void
440612004Sjiang.liu@intel.com hat_kpm_addmem_memsegs_update(struct memseg *msp)
440712004Sjiang.liu@intel.com {
440812004Sjiang.liu@intel.com _NOTE(ARGUNUSED(msp));
440912004Sjiang.liu@intel.com }
441012004Sjiang.liu@intel.com
441112004Sjiang.liu@intel.com /*
441212004Sjiang.liu@intel.com * Return end of metadata for an already setup memseg.
441312004Sjiang.liu@intel.com * X86 platforms don't need per-page meta data to support kpm.
441412004Sjiang.liu@intel.com */
441512004Sjiang.liu@intel.com caddr_t
441612004Sjiang.liu@intel.com hat_kpm_mseg_reuse(struct memseg *msp)
441712004Sjiang.liu@intel.com {
441812004Sjiang.liu@intel.com return ((caddr_t)msp->epages);
441912004Sjiang.liu@intel.com }
442012004Sjiang.liu@intel.com
442112004Sjiang.liu@intel.com void
442212004Sjiang.liu@intel.com hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
442312004Sjiang.liu@intel.com {
442412004Sjiang.liu@intel.com _NOTE(ARGUNUSED(msp, mspp));
442512004Sjiang.liu@intel.com ASSERT(0);
442612004Sjiang.liu@intel.com }
442712004Sjiang.liu@intel.com
442812004Sjiang.liu@intel.com void
442912004Sjiang.liu@intel.com hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
443012004Sjiang.liu@intel.com struct memseg *lo, struct memseg *mid, struct memseg *hi)
443112004Sjiang.liu@intel.com {
443212004Sjiang.liu@intel.com _NOTE(ARGUNUSED(msp, mspp, lo, mid, hi));
443312004Sjiang.liu@intel.com ASSERT(0);
443412004Sjiang.liu@intel.com }
443512004Sjiang.liu@intel.com
443612004Sjiang.liu@intel.com /*
443712004Sjiang.liu@intel.com * Walk the memsegs chain, applying func to each memseg span.
443812004Sjiang.liu@intel.com */
443912004Sjiang.liu@intel.com void
444012004Sjiang.liu@intel.com hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
444112004Sjiang.liu@intel.com {
444212004Sjiang.liu@intel.com pfn_t pbase, pend;
444312004Sjiang.liu@intel.com void *base;
444412004Sjiang.liu@intel.com size_t size;
444512004Sjiang.liu@intel.com struct memseg *msp;
444612004Sjiang.liu@intel.com
444712004Sjiang.liu@intel.com for (msp = memsegs; msp; msp = msp->next) {
444812004Sjiang.liu@intel.com pbase = msp->pages_base;
444912004Sjiang.liu@intel.com pend = msp->pages_end;
445012004Sjiang.liu@intel.com base = ptob(pbase) + kpm_vbase;
445112004Sjiang.liu@intel.com size = ptob(pend - pbase);
445212004Sjiang.liu@intel.com func(arg, base, size);
445312004Sjiang.liu@intel.com }
445412004Sjiang.liu@intel.com }
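/*
 * Example usage (a minimal sketch; count_span() is a hypothetical
 * callback): sum the sizes of all memseg spans visible through kpm.
 *
 *	static void
 *	count_span(void *arg, void *base, size_t size)
 *	{
 *		*(size_t *)arg += size;
 *	}
 *
 *	size_t total = 0;
 *	hat_kpm_walk(count_span, &total);
 */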
445512004Sjiang.liu@intel.com
445612004Sjiang.liu@intel.com #else /* __xpv */
445712004Sjiang.liu@intel.com
44585084Sjohnlev /*
44595084Sjohnlev * There are specific Hypervisor calls to establish and remove mappings
44605084Sjohnlev * to grant table references and the privcmd driver. We have to ensure
44615084Sjohnlev * that a page table actually exists.
44625084Sjohnlev */
44635084Sjohnlev void
44647756SMark.Johnson@Sun.COM hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
44655084Sjohnlev {
44667756SMark.Johnson@Sun.COM maddr_t base_ma;
44677756SMark.Johnson@Sun.COM htable_t *ht;
44687756SMark.Johnson@Sun.COM uint_t entry;
44697756SMark.Johnson@Sun.COM
44705084Sjohnlev ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
44715741Smrj XPV_DISALLOW_MIGRATE();
44727756SMark.Johnson@Sun.COM ht = htable_create(hat, (uintptr_t)addr, 0, NULL);
44737756SMark.Johnson@Sun.COM
44747756SMark.Johnson@Sun.COM /*
44757756SMark.Johnson@Sun.COM * if an address for pte_ma is passed in, return the MA of the pte
44767756SMark.Johnson@Sun.COM * for this specific address. This address is only valid as long
44777756SMark.Johnson@Sun.COM * as the htable stays locked.
44787756SMark.Johnson@Sun.COM */
44797756SMark.Johnson@Sun.COM if (pte_ma != NULL) {
44807756SMark.Johnson@Sun.COM entry = htable_va2entry((uintptr_t)addr, ht);
44817756SMark.Johnson@Sun.COM base_ma = pa_to_ma(ptob(ht->ht_pfn));
44827756SMark.Johnson@Sun.COM *pte_ma = base_ma + (entry << mmu.pte_size_shift);
44837756SMark.Johnson@Sun.COM }
44845741Smrj XPV_ALLOW_MIGRATE();
44855084Sjohnlev }
44865084Sjohnlev
44875084Sjohnlev void
44885084Sjohnlev hat_release_mapping(hat_t *hat, caddr_t addr)
44895084Sjohnlev {
44905084Sjohnlev htable_t *ht;
44915084Sjohnlev
44925084Sjohnlev ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
44935741Smrj XPV_DISALLOW_MIGRATE();
44945084Sjohnlev ht = htable_lookup(hat, (uintptr_t)addr, 0);
44955084Sjohnlev ASSERT(ht != NULL);
44965084Sjohnlev ASSERT(ht->ht_busy >= 2);
44975084Sjohnlev htable_release(ht);
44985084Sjohnlev htable_release(ht);
44995741Smrj XPV_ALLOW_MIGRATE();
450012004Sjiang.liu@intel.com }
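/*
 * Example usage (a minimal sketch; the hypervisor mapping call in the
 * middle is elided, as its exact form depends on the driver):
 *
 *	uint64_t pte_ma;
 *
 *	hat_prepare_mapping(kas.a_hat, addr, &pte_ma);
 *	... have the hypervisor install the foreign frame at pte_ma ...
 *	... use the mapping, then have the hypervisor remove it ...
 *	hat_release_mapping(kas.a_hat, addr);
 */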
450112004Sjiang.liu@intel.com #endif /* __xpv */