/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Kernel Physical Mapping (segkpm) hat interface routines for sun4u.
 */

#include <sys/types.h>
#include <vm/hat.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/machsystm.h>
#include <vm/seg_kpm.h>
#include <sys/cpu_module.h>
#include <vm/mach_kpm.h>

/* kpm prototypes */
static caddr_t	sfmmu_kpm_mapin(page_t *);
static void	sfmmu_kpm_mapout(page_t *, caddr_t);
static int	sfmmu_kpme_lookup(struct kpme *, page_t *);
static void	sfmmu_kpme_add(struct kpme *, page_t *);
static void	sfmmu_kpme_sub(struct kpme *, page_t *);
static caddr_t	sfmmu_kpm_getvaddr(page_t *, int *);
static int	sfmmu_kpm_fault(caddr_t, struct memseg *, page_t *);
static int	sfmmu_kpm_fault_small(caddr_t, struct memseg *, page_t *);
static void	sfmmu_kpm_vac_conflict(page_t *, caddr_t);
void	sfmmu_kpm_pageunload(page_t *);
void	sfmmu_kpm_vac_unload(page_t *, caddr_t);
static void	sfmmu_kpm_demap_large(caddr_t);
static void	sfmmu_kpm_demap_small(caddr_t);
static void	sfmmu_kpm_demap_tlbs(caddr_t);
void	sfmmu_kpm_hme_unload(page_t *);
kpm_hlk_t *sfmmu_kpm_kpmp_enter(page_t *, pgcnt_t);
void	sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp);
void	sfmmu_kpm_page_cache(page_t *, int, int);

/*
 * Kernel Physical Mapping (kpm) facility
 */

/* Platform kpm initialization hook; nothing to do on this platform. */
void
mach_kpm_init()
{}

/* -- hat_kpm interface section -- */

/*
 * Mapin a locked page and return the vaddr.
 * When a kpme is provided by the caller it is added to
 * the page p_kpmelist. The page to be mapped in must
 * be at least read locked (p_selock).
 */
caddr_t
hat_kpm_mapin(struct page *pp, struct kpme *kpme)
{
	kmutex_t	*pml;
	caddr_t		vaddr;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set");
		return ((caddr_t)NULL);
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked");
		return ((caddr_t)NULL);
	}

	/* The mlist lock serializes p_kpmref and p_kpmelist updates. */
	pml = sfmmu_mlist_enter(pp);
	ASSERT(pp->p_kpmref >= 0);

	/*
	 * First mapin establishes the mapping; later callers only
	 * derive the already established kpm virtual address.
	 */
	vaddr = (pp->p_kpmref == 0) ?
	    sfmmu_kpm_mapin(pp) : hat_kpm_page2va(pp, 1);

	if (kpme != NULL) {
		/*
		 * Tolerate multiple mapins for the same kpme to avoid
		 * the need for an extra serialization.
		 */
		if ((sfmmu_kpme_lookup(kpme, pp)) == 0)
			sfmmu_kpme_add(kpme, pp);	/* bumps p_kpmref */

		ASSERT(pp->p_kpmref > 0);

	} else {
		pp->p_kpmref++;
	}

	sfmmu_mlist_exit(pml);
	return (vaddr);
}

/*
 * Mapout a locked page.
 * When a kpme is provided by the caller it is removed from
 * the page p_kpmelist. The page to be mapped out must be at
 * least read locked (p_selock).
 * Note: The seg_kpm layer provides a mapout interface for the
 * case that a kpme is used and the underlying page is unlocked.
 * This can be used instead of calling this function directly.
 */
void
hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
{
	kmutex_t	*pml;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set");
		return;
	}

	if (IS_KPM_ADDR(vaddr) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address");
		return;
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked");
		return;
	}

	if (kpme != NULL) {
		ASSERT(pp == kpme->kpe_page);
		pp = kpme->kpe_page;
		pml = sfmmu_mlist_enter(pp);

		if (sfmmu_kpme_lookup(kpme, pp) == 0)
			panic("hat_kpm_mapout: kpme not found pp=%p",
			    (void *)pp);

		ASSERT(pp->p_kpmref > 0);
		/* kpme removal drops the per page kpm reference count */
		sfmmu_kpme_sub(kpme, pp);

	} else {
		pml = sfmmu_mlist_enter(pp);
		pp->p_kpmref--;
	}

	ASSERT(pp->p_kpmref >= 0);
	/* Tear down the kpm mapping once the last reference is gone. */
	if (pp->p_kpmref == 0)
		sfmmu_kpm_mapout(pp, vaddr);

	sfmmu_mlist_exit(pml);
}

/*
 * Return the kpm virtual address for the page at pp.
 * If checkswap is non zero and the page is backed by a
 * swap vnode the physical address is used rather than
 * p_offset to determine the kpm region.
 * Note: The function has to be used w/ extreme care. The
 * stability of the page identity is in the responsibility
 * of the caller.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_page2va(struct page *pp, int checkswap)
{
	int		vcolor, vcolor_pa;
	uintptr_t	paddr, vaddr;

	ASSERT(kpm_enable);

	paddr = ptob(pp->p_pagenum);
	vcolor_pa = addr_to_vcolor(paddr);

	/*
	 * Cached swapfs pages keep their assigned virtual color;
	 * everything else derives the color from the vnode offset
	 * (resp. the physical address for uncached swapfs pages).
	 */
	if (checkswap && pp->p_vnode && IS_SWAPFSVP(pp->p_vnode))
		vcolor = (PP_ISNC(pp)) ? vcolor_pa : PP_GET_VCOLOR(pp);
	else
		vcolor = addr_to_vcolor(pp->p_offset);

	vaddr = (uintptr_t)kpm_vbase + paddr;

	if (vcolor_pa != vcolor) {
		/*
		 * Wanted color differs from the physical color: the
		 * address must be relocated into the kpm region that
		 * matches the wanted virtual color (VAC alias range).
		 */
		vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT);
		vaddr += (vcolor_pa > vcolor) ?
		    ((uintptr_t)vcolor_pa << kpm_size_shift) :
		    ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);
	}

	return ((caddr_t)vaddr);
}

/*
 * Return the page for the kpm virtual address vaddr.
 * Caller is responsible for the kpm mapping and lock
 */
page_t *
hat_kpm_vaddr2page(caddr_t vaddr)
{
	uintptr_t	paddr;
	pfn_t		pfn;

	ASSERT(IS_KPM_ADDR(vaddr));

	/* kpm vaddr -> paddr is a simple linear relation */
	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);

	return (page_numtopp_nolock(pfn));
}

/*
 * page to kpm_page: locate the kpm_page_t that controls the
 * large-page-sized kpm range the page belongs to.
 */
#define	PP2KPMPG(pp, kp) {						\
	struct memseg	*mseg;						\
	pgcnt_t		inx;						\
	pfn_t		pfn;						\
									\
	pfn = pp->p_pagenum;						\
	mseg = page_numtomemseg_nolock(pfn);				\
	ASSERT(mseg);							\
	inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase);		\
	ASSERT(inx < mseg->kpm_nkpmpgs);				\
	kp = &mseg->kpm_pages[inx];					\
}

/* page to kpm_spage: the same lookup for the kpm_smallpages case */
#define	PP2KPMSPG(pp, ksp) {						\
	struct memseg	*mseg;						\
	pgcnt_t		inx;						\
	pfn_t		pfn;						\
									\
	pfn = pp->p_pagenum;						\
	mseg = page_numtomemseg_nolock(pfn);				\
	ASSERT(mseg);							\
	inx = pfn - mseg->kpm_pbase;					\
	ksp = &mseg->kpm_spages[inx];					\
}

/*
 * hat_kpm_fault is called from segkpm_fault when a kpm tsbmiss occurred
 * which could not be resolved by the trap level tsbmiss handler for the
 * following reasons:
 * . The vaddr is in VAC alias range (always PAGESIZE mapping size).
 * . The kpm (s)page range of vaddr is in a VAC alias prevention state.
 * .
tsbmiss handling at trap level is not desired (DEBUG kernel only,
 *   kpm_tsbmtl == 0).
 *
 * Returns 0 on success; ENOTSUP when kpm is disabled; EFAULT when
 * vaddr does not resolve to a locked page within a known memseg.
 */
int
hat_kpm_fault(struct hat *hat, caddr_t vaddr)
{
	int		error;
	uintptr_t	paddr;
	pfn_t		pfn;
	struct memseg	*mseg;
	page_t		*pp;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_fault: kpm_enable not set");
		return (ENOTSUP);
	}

	/* kpm faults are always against the kernel hat */
	ASSERT(hat == ksfmmup);
	ASSERT(IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);
	mseg = page_numtomemseg_nolock(pfn);
	if (mseg == NULL)
		return (EFAULT);

	pp = &mseg->pages[(pgcnt_t)(pfn - mseg->pages_base)];
	ASSERT((pfn_t)pp->p_pagenum == pfn);

	/* Only locked pages may have a kpm mapping established. */
	if (!PAGE_LOCKED(pp))
		return (EFAULT);

	if (kpm_smallpages == 0)
		error = sfmmu_kpm_fault(vaddr, mseg, pp);
	else
		error = sfmmu_kpm_fault_small(vaddr, mseg, pp);

	return (error);
}

/*
 * memseg_hash[] was cleared, need to clear memseg_phash[] too.
 */
void
hat_kpm_mseghash_clear(int nentries)
{
	pgcnt_t i;

	if (kpm_enable == 0)
		return;

	for (i = 0; i < nentries; i++)
		memseg_phash[i] = MSEG_NULLPTR_PA;
}

/*
 * Update memseg_phash[inx] when memseg_hash[inx] was changed.
 */
void
hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
{
	if (kpm_enable == 0)
		return;

	/* A NULL msp clears the slot in the physical mirror of the hash. */
	memseg_phash[inx] = (msp) ? va_to_pa(msp) : MSEG_NULLPTR_PA;
}

/*
 * Update kpm memseg members from basic memseg info.
 */
void
hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
	offset_t kpm_pages_off)
{
	if (kpm_enable == 0)
		return;

	/* kpm metadata lives kpm_pages_off bytes behind the page_t array */
	msp->kpm_pages = (kpm_page_t *)((caddr_t)msp->pages + kpm_pages_off);
	msp->kpm_nkpmpgs = nkpmpgs;
	msp->kpm_pbase = kpmptop(ptokpmp(msp->pages_base));
	/* physical addresses for the trap level tsbmiss handler */
	msp->pagespa = va_to_pa(msp->pages);
	msp->epagespa = va_to_pa(msp->epages);
	msp->kpm_pagespa = va_to_pa(msp->kpm_pages);
}

/*
 * Setup nextpa when a memseg is inserted.
 * Assumes that the memsegslock is already held.
 */
void
hat_kpm_addmem_mseg_insert(struct memseg *msp)
{
	if (kpm_enable == 0)
		return;

	ASSERT(memsegs_lock_held());
	msp->nextpa = (memsegs) ? va_to_pa(memsegs) : MSEG_NULLPTR_PA;
}

/*
 * Setup memsegspa when a memseg is (head) inserted.
 * Called before memsegs is updated to complete a
 * memseg insert operation.
 * Assumes that the memsegslock is already held.
 */
void
hat_kpm_addmem_memsegs_update(struct memseg *msp)
{
	if (kpm_enable == 0)
		return;

	ASSERT(memsegs_lock_held());
	ASSERT(memsegs);
	memsegspa = va_to_pa(msp);
}

/*
 * Return end of metadata for an already setup memseg.
 *
 * Note: kpm_pages and kpm_spages are aliases and the underlying
 * member of struct memseg is a union, therefore they always have
 * the same address within a memseg. They must be differentiated
 * when pointer arithmetic is used with them.
 */
caddr_t
hat_kpm_mseg_reuse(struct memseg *msp)
{
	caddr_t end;

	/* element sizes differ, so the arithmetic must pick the right type */
	if (kpm_smallpages == 0)
		end = (caddr_t)(msp->kpm_pages + msp->kpm_nkpmpgs);
	else
		end = (caddr_t)(msp->kpm_spages + msp->kpm_nkpmpgs);

	return (end);
}

/*
 * Update memsegspa (when first memseg in list
 * is deleted) or nextpa when a memseg deleted.
 * Assumes that the memsegslock is already held.
 */
void
hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
{
	struct memseg *lmsp;

	if (kpm_enable == 0)
		return;

	ASSERT(memsegs_lock_held());

	if (mspp == &memsegs) {
		/* deleting the list head: repoint the physical anchor */
		memsegspa = (msp->next) ?
		    va_to_pa(msp->next) : MSEG_NULLPTR_PA;
	} else {
		/*
		 * mspp points at the `next' member inside the previous
		 * memseg; recover that memseg and fix its nextpa.
		 */
		lmsp = (struct memseg *)
		    ((uint64_t)mspp - offsetof(struct memseg, next));
		lmsp->nextpa = (msp->next) ?
		    va_to_pa(msp->next) : MSEG_NULLPTR_PA;
	}
}

/*
 * Update kpm members for all memseg's involved in a split operation
 * and do the atomic update of the physical memseg chain.
 *
 * Note: kpm_pages and kpm_spages are aliases and the underlying member
 * of struct memseg is a union, therefore they always have the same
 * address within a memseg. With that the direct assignments and
 * va_to_pa conversions below don't have to be distinguished wrt. to
 * kpm_smallpages. They must be differentiated when pointer arithmetic
 * is used with them.
 *
 * Assumes that the memsegslock is already held.
 */
void
hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
	struct memseg *lo, struct memseg *mid, struct memseg *hi)
{
	pgcnt_t start, end, kbase, kstart, num;
	struct memseg *lmsp;

	if (kpm_enable == 0)
		return;

	ASSERT(memsegs_lock_held());
	ASSERT(msp && mid && msp->kpm_pages);

	kbase = ptokpmp(msp->kpm_pbase);

	if (lo) {
		num = lo->pages_end - lo->pages_base;
		start = kpmptop(ptokpmp(lo->pages_base));
		/* align end to kpm page size granularity */
		end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
		lo->kpm_pbase = start;
		lo->kpm_nkpmpgs = ptokpmp(end - start);
		/* lo reuses the start of the original kpm metadata */
		lo->kpm_pages = msp->kpm_pages;
		lo->kpm_pagespa = va_to_pa(lo->kpm_pages);
		lo->pagespa = va_to_pa(lo->pages);
		lo->epagespa = va_to_pa(lo->epages);
		lo->nextpa = va_to_pa(lo->next);
	}

	/* mid */
	num = mid->pages_end - mid->pages_base;
	kstart = ptokpmp(mid->pages_base);
	start = kpmptop(kstart);
	/* align end to kpm page size granularity */
	end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
	mid->kpm_pbase = start;
	mid->kpm_nkpmpgs = ptokpmp(end - start);
	/* pointer arithmetic must match the metadata element type */
	if (kpm_smallpages == 0) {
		mid->kpm_pages = msp->kpm_pages + (kstart - kbase);
	} else {
		mid->kpm_spages = msp->kpm_spages + (kstart - kbase);
	}
	mid->kpm_pagespa = va_to_pa(mid->kpm_pages);
	mid->pagespa = va_to_pa(mid->pages);
	mid->epagespa = va_to_pa(mid->epages);
	mid->nextpa = (mid->next) ? va_to_pa(mid->next) : MSEG_NULLPTR_PA;

	if (hi) {
		num = hi->pages_end - hi->pages_base;
		kstart = ptokpmp(hi->pages_base);
		start = kpmptop(kstart);
		/* align end to kpm page size granularity */
		end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
		hi->kpm_pbase = start;
		hi->kpm_nkpmpgs = ptokpmp(end - start);
		if (kpm_smallpages == 0) {
			hi->kpm_pages = msp->kpm_pages + (kstart - kbase);
		} else {
			hi->kpm_spages = msp->kpm_spages + (kstart - kbase);
		}
		hi->kpm_pagespa = va_to_pa(hi->kpm_pages);
		hi->pagespa = va_to_pa(hi->pages);
		hi->epagespa = va_to_pa(hi->epages);
		hi->nextpa = (hi->next) ? va_to_pa(hi->next) : MSEG_NULLPTR_PA;
	}

	/*
	 * Atomic update of the physical memseg chain
	 */
	if (mspp == &memsegs) {
		memsegspa = (lo) ? va_to_pa(lo) : va_to_pa(mid);
	} else {
		/* recover the previous memseg from its `next' member */
		lmsp = (struct memseg *)
		    ((uint64_t)mspp - offsetof(struct memseg, next));
		lmsp->nextpa = (lo) ? va_to_pa(lo) : va_to_pa(mid);
	}
}

/*
 * Walk the memsegs chain, applying func to each memseg span and vcolor.
 */
void
hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
{
	pfn_t		pbase, pend;
	int		vcolor;
	void		*base;
	size_t		size;
	struct memseg	*msp;
	extern uint_t	vac_colors;

	for (msp = memsegs; msp; msp = msp->next) {
		pbase = msp->pages_base;
		pend = msp->pages_end;
		/* visit the span once per virtual color alias region */
		for (vcolor = 0; vcolor < vac_colors; vcolor++) {
			base = ptob(pbase) + kpm_vbase + kpm_size * vcolor;
			size = ptob(pend - pbase);
			func(arg, base, size);
		}
	}
}


/* -- sfmmu_kpm internal section -- */

/*
 * Return the page frame number if a valid segkpm mapping exists
 * for vaddr, otherwise return PFN_INVALID. No locks are grabbed.
 * Should only be used by other sfmmu routines.
 */
pfn_t
sfmmu_kpm_vatopfn(caddr_t vaddr)
{
	uintptr_t	paddr;
	pfn_t		pfn;
	page_t	*pp;

	ASSERT(kpm_enable && IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);
	pp = page_numtopp_nolock(pfn);
	/* a non-zero p_kpmref indicates an established kpm mapping */
	if (pp && pp->p_kpmref)
		return (pfn);
	else
		return ((pfn_t)PFN_INVALID);
}

/*
 * Lookup a kpme in the p_kpmelist.
5682296Sae112802 */ 5692296Sae112802 static int 5702296Sae112802 sfmmu_kpme_lookup(struct kpme *kpme, page_t *pp) 5712296Sae112802 { 5722296Sae112802 struct kpme *p; 5732296Sae112802 5742296Sae112802 for (p = pp->p_kpmelist; p; p = p->kpe_next) { 5752296Sae112802 if (p == kpme) 5762296Sae112802 return (1); 5772296Sae112802 } 5782296Sae112802 return (0); 5792296Sae112802 } 5802296Sae112802 5812296Sae112802 /* 5822296Sae112802 * Insert a kpme into the p_kpmelist and increment 5832296Sae112802 * the per page kpm reference count. 5842296Sae112802 */ 5852296Sae112802 static void 5862296Sae112802 sfmmu_kpme_add(struct kpme *kpme, page_t *pp) 5872296Sae112802 { 5882296Sae112802 ASSERT(pp->p_kpmref >= 0); 5892296Sae112802 5902296Sae112802 /* head insert */ 5912296Sae112802 kpme->kpe_prev = NULL; 5922296Sae112802 kpme->kpe_next = pp->p_kpmelist; 5932296Sae112802 5942296Sae112802 if (pp->p_kpmelist) 5952296Sae112802 pp->p_kpmelist->kpe_prev = kpme; 5962296Sae112802 5972296Sae112802 pp->p_kpmelist = kpme; 5982296Sae112802 kpme->kpe_page = pp; 5992296Sae112802 pp->p_kpmref++; 6002296Sae112802 } 6012296Sae112802 6022296Sae112802 /* 6032296Sae112802 * Remove a kpme from the p_kpmelist and decrement 6042296Sae112802 * the per page kpm reference count. 
6052296Sae112802 */ 6062296Sae112802 static void 6072296Sae112802 sfmmu_kpme_sub(struct kpme *kpme, page_t *pp) 6082296Sae112802 { 6092296Sae112802 ASSERT(pp->p_kpmref > 0); 6102296Sae112802 6112296Sae112802 if (kpme->kpe_prev) { 6122296Sae112802 ASSERT(pp->p_kpmelist != kpme); 6132296Sae112802 ASSERT(kpme->kpe_prev->kpe_page == pp); 6142296Sae112802 kpme->kpe_prev->kpe_next = kpme->kpe_next; 6152296Sae112802 } else { 6162296Sae112802 ASSERT(pp->p_kpmelist == kpme); 6172296Sae112802 pp->p_kpmelist = kpme->kpe_next; 6182296Sae112802 } 6192296Sae112802 6202296Sae112802 if (kpme->kpe_next) { 6212296Sae112802 ASSERT(kpme->kpe_next->kpe_page == pp); 6222296Sae112802 kpme->kpe_next->kpe_prev = kpme->kpe_prev; 6232296Sae112802 } 6242296Sae112802 6252296Sae112802 kpme->kpe_next = kpme->kpe_prev = NULL; 6262296Sae112802 kpme->kpe_page = NULL; 6272296Sae112802 pp->p_kpmref--; 6282296Sae112802 } 6292296Sae112802 6302296Sae112802 /* 6312296Sae112802 * Mapin a single page, it is called every time a page changes it's state 6322296Sae112802 * from kpm-unmapped to kpm-mapped. It may not be called, when only a new 6332296Sae112802 * kpm instance does a mapin and wants to share the mapping. 6342296Sae112802 * Assumes that the mlist mutex is already grabbed. 
 */
static caddr_t
sfmmu_kpm_mapin(page_t *pp)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	caddr_t		vaddr;
	int		kpm_vac_range;
	pfn_t		pfn;
	tte_t		tte;
	kmutex_t	*pmtx;
	int		uncached;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;
	int		oldval;

	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(pp->p_kpmref == 0);

	/* also tells us whether the vaddr falls into the VAC alias range */
	vaddr = sfmmu_kpm_getvaddr(pp, &kpm_vac_range);

	ASSERT(IS_KPM_ADDR(vaddr));
	uncached = PP_ISNC(pp);
	pfn = pp->p_pagenum;

	if (kpm_smallpages)
		goto smallpages_mapin;

	PP2KPMPG(pp, kp);

	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	ASSERT(PP_ISKPMC(pp) == 0);
	ASSERT(PP_ISKPMS(pp) == 0);

	if (uncached) {
		/* ASSERT(pp->p_share); XXX use hat_page_getshare */
		if (kpm_vac_range == 0) {
			if (kp->kp_refcnts == 0) {
				/*
				 * Must remove large page mapping if it exists.
				 * Pages in uncached state can only be mapped
				 * small (PAGESIZE) within the regular kpm
				 * range.
				 */
				if (kp->kp_refcntc == -1) {
					/* remove go indication */
					sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
					    &kpmp->khl_lock, KPMTSBM_STOP);
				}
				if (kp->kp_refcnt > 0 && kp->kp_refcntc == 0)
					sfmmu_kpm_demap_large(vaddr);
			}
			ASSERT(kp->kp_refcntc >= 0);
			kp->kp_refcntc++;
		}
		/* mark the page as kpm-mapped in VAC conflict mode */
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
	}

	if ((kp->kp_refcntc > 0 || kp->kp_refcnts > 0) && kpm_vac_range == 0) {
		/*
		 * Have to do a small (PAGESIZE) mapin within this kpm_page
		 * range since it is marked to be in VAC conflict mode or
		 * when there are still other small mappings around.
		 */

		/* tte assembly */
		if (uncached == 0)
			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
		else
			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);

		/* tsb dropin */
		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMS(pp);
		sfmmu_page_exit(pmtx);

		kp->kp_refcnts++;
		ASSERT(kp->kp_refcnts > 0);
		goto exit;
	}

	if (kpm_vac_range == 0) {
		/*
		 * Fast path / regular case, no VAC conflict handling
		 * in progress within this kpm_page range.
		 */
		if (kp->kp_refcnt == 0) {

			/* tte assembly */
			KPM_TTE_VCACHED(tte.ll, pfn, TTE4M);

			/* tsb dropin */
			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M);

			/* Set go flag for TL tsbmiss handler */
			if (kp->kp_refcntc == 0)
				sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
				    &kpmp->khl_lock, KPMTSBM_START);

			ASSERT(kp->kp_refcntc == -1);
		}
		kp->kp_refcnt++;
		ASSERT(kp->kp_refcnt);

	} else {
		/*
		 * The page is not setup according to the common VAC
		 * prevention rules for the regular and kpm mapping layer
		 * E.g. the page layer was not able to deliver a right
		 * vcolor'ed page for a given vaddr corresponding to
		 * the wanted p_offset. It has to be mapped in small in
		 * within the corresponding kpm vac range in order to
		 * prevent VAC alias conflicts.
		 */

		/* tte assembly */
		if (uncached == 0) {
			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
		} else {
			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
		}

		/* tsb dropin */
		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

		kp->kp_refcnta++;
		if (kp->kp_refcntc == -1) {
			ASSERT(kp->kp_refcnt > 0);

			/* remove go indication */
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock,
			    KPMTSBM_STOP);
		}
		ASSERT(kp->kp_refcntc >= 0);
	}
exit:
	mutex_exit(&kpmp->khl_mutex);
	return (vaddr);

smallpages_mapin:
	/* kpm_smallpages: every page is mapped with its own PAGESIZE tte */
	if (uncached == 0) {
		/* tte assembly */
		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
	} else {
		/* ASSERT(pp->p_share); XXX use hat_page_getshare */
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
		/* tte assembly */
		KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
	}

	/* tsb dropin */
	sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped, &kpmsp->kshl_lock,
	    (uncached) ? KPM_MAPPEDSC : KPM_MAPPEDS);

	/* the page must not have been mapped before */
	if (oldval != 0)
		panic("sfmmu_kpm_mapin: stale smallpages mapping");

	return (vaddr);
}

/*
 * Mapout a single page, it is called every time a page changes it's state
 * from kpm-mapped to kpm-unmapped. It may not be called, when only a kpm
 * instance calls mapout and there are still other instances mapping the
 * page. Assumes that the mlist mutex is already grabbed.
 *
 * Note: In normal mode (no VAC conflict prevention pending) TLB's are
 * not flushed. This is the core segkpm behavior to avoid xcalls. It is
 * no problem because a translation from a segkpm virtual address to a
 * physical address is always the same. The only downside is a slighty
 * increased window of vulnerability for misbehaving _kernel_ modules.
 */
static void
sfmmu_kpm_mapout(page_t *pp, caddr_t vaddr)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	int		alias_range;
	kmutex_t	*pmtx;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;
	int		oldval;

	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(pp->p_kpmref == 0);

	alias_range = IS_KPM_ALIAS_RANGE(vaddr);

	if (kpm_smallpages)
		goto smallpages_mapout;

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	if (alias_range) {
		/*
		 * vaddr lies within a kpm alias range; such addresses are
		 * only ever mapped with small (8K) ttes, never as part of
		 * a large kpm mapping (see the demap_small calls below).
		 */
		ASSERT(PP_ISKPMS(pp) == 0);
		if (kp->kp_refcnta <= 0) {
			panic("sfmmu_kpm_mapout: bad refcnta kp=%p",
			    (void *)kp);
		}

		if (PP_ISTNC(pp)) {
			if (PP_ISKPMC(pp) == 0) {
				/*
				 * Uncached kpm mappings must always have
				 * forced "small page" mode.
				 */
				panic("sfmmu_kpm_mapout: uncached page not "
				    "kpm marked");
			}
			sfmmu_kpm_demap_small(vaddr);

			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);

			/*
			 * Check if we can resume cached mode. This might
			 * be the case if the kpm mapping was the only
			 * mapping in conflict with other non rule
			 * compliant mappings. The page is no more marked
			 * as kpm mapped, so the conv_tnc path will not
			 * change kpm state.
			 */
			conv_tnc(pp, TTE8K);

		} else if (PP_ISKPMC(pp) == 0) {
			/* remove TSB entry only */
			sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);

		} else {
			/* already demapped */
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);
		}
		kp->kp_refcnta--;
		goto exit;
	}

	if (kp->kp_refcntc <= 0 && kp->kp_refcnts == 0) {
		/*
		 * Fast path / regular case: no conflicts and no forced
		 * small mappings within this kpm_page.
		 */
		ASSERT(kp->kp_refcntc >= -1);
		ASSERT(!(pp->p_nrm & (P_KPMC | P_KPMS | P_TNC | P_PNC)));

		if (kp->kp_refcnt <= 0)
			panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp);

		if (--kp->kp_refcnt == 0) {
			/* remove go indication for the TL tsbmiss handler */
			if (kp->kp_refcntc == -1) {
				sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
				    &kpmp->khl_lock, KPMTSBM_STOP);
			}
			ASSERT(kp->kp_refcntc == 0);

			/* remove TSB entry */
			sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M);
#ifdef	DEBUG
			if (kpm_tlb_flush)
				sfmmu_kpm_demap_tlbs(vaddr);
#endif
		}

	} else {
		/*
		 * The VAC alias path.
		 * We come here if the kpm vaddr is not in any alias_range
		 * and we are unmapping a page within the regular kpm_page
		 * range. The kpm_page either holds conflict pages and/or
		 * is in "small page" mode. If the page is not marked
		 * P_KPMS it couldn't have a valid PAGESIZE sized TSB
		 * entry. Dcache flushing is done lazy and follows the
		 * rules of the regular virtual page coloring scheme.
		 *
		 * Per page states and required actions:
		 * P_KPMC: remove a kpm mapping that is conflicting.
		 * P_KPMS: remove a small kpm mapping within a kpm_page.
		 * P_TNC: check if we can re-cache the page.
		 * P_PNC: we cannot re-cache, sorry.
		 * Per kpm_page:
		 * kp_refcntc > 0: page is part of a kpm_page with conflicts.
		 * kp_refcnts > 0: rm a small mapped page within a kpm_page.
		 */

		if (PP_ISKPMS(pp)) {
			if (kp->kp_refcnts < 1) {
				panic("sfmmu_kpm_mapout: bad refcnts kp=%p",
				    (void *)kp);
			}
			sfmmu_kpm_demap_small(vaddr);

			/*
			 * Check if we can resume cached mode. This might
			 * be the case if the kpm mapping was the only
			 * mapping in conflict with other non rule
			 * compliant mappings. The page is no more marked
			 * as kpm mapped, so the conv_tnc path will not
			 * change kpm state.
			 */
			if (PP_ISTNC(pp)) {
				if (!PP_ISKPMC(pp)) {
					/*
					 * Uncached kpm mappings must always
					 * have forced "small page" mode.
					 */
					panic("sfmmu_kpm_mapout: uncached "
					    "page not kpm marked");
				}
				conv_tnc(pp, TTE8K);
			}
			kp->kp_refcnts--;
			kp->kp_refcnt++;
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMS(pp);
			sfmmu_page_exit(pmtx);
		}

		if (PP_ISKPMC(pp)) {
			if (kp->kp_refcntc < 1) {
				panic("sfmmu_kpm_mapout: bad refcntc kp=%p",
				    (void *)kp);
			}
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);
			kp->kp_refcntc--;
		}

		if (kp->kp_refcnt-- < 1)
			panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp);
	}
exit:
	mutex_exit(&kpmp->khl_mutex);
	return;

smallpages_mapout:
	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	if (PP_ISKPMC(pp) == 0) {
		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
		    &kpmsp->kshl_lock, 0);

		if (oldval != KPM_MAPPEDS) {
			/*
			 * When we're called after sfmmu_kpm_hme_unload,
			 * KPM_MAPPEDSC is valid too.
			 */
			if (oldval != KPM_MAPPEDSC)
				panic("sfmmu_kpm_mapout: incorrect mapping");
		}

		/* remove TSB entry */
		sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
#ifdef	DEBUG
		if (kpm_tlb_flush)
			sfmmu_kpm_demap_tlbs(vaddr);
#endif

	} else if (PP_ISTNC(pp)) {
		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
		    &kpmsp->kshl_lock, 0);

		if (oldval != KPM_MAPPEDSC || PP_ISKPMC(pp) == 0)
			panic("sfmmu_kpm_mapout: inconsistent TNC mapping");

		sfmmu_kpm_demap_small(vaddr);

		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMC(pp);
		sfmmu_page_exit(pmtx);

		/*
		 * Check if we can resume cached mode. This might be
		 * the case if the kpm mapping was the only mapping
		 * in conflict with other non rule compliant mappings.
		 * The page is no more marked as kpm mapped, so the
		 * conv_tnc path will not change the kpm state.
		 */
		conv_tnc(pp, TTE8K);

	} else {
		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
		    &kpmsp->kshl_lock, 0);

		if (oldval != KPM_MAPPEDSC)
			panic("sfmmu_kpm_mapout: inconsistent mapping");

		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMC(pp);
		sfmmu_page_exit(pmtx);
	}
}

/* absolute value, used below for the vcolor distance */
#define	abs(x)	((x) < 0 ? -(x) : (x))

/*
 * Determine appropriate kpm mapping address and handle any kpm/hme
 * conflicts. Page mapping list and its vcolor parts must be protected.
 */
static caddr_t
sfmmu_kpm_getvaddr(page_t *pp, int *kpm_vac_rangep)
{
	int		vcolor, vcolor_pa;
	caddr_t		vaddr;
	uintptr_t	paddr;


	ASSERT(sfmmu_mlist_held(pp));

	paddr = ptob(pp->p_pagenum);
	vcolor_pa = addr_to_vcolor(paddr);

	if (pp->p_vnode && IS_SWAPFSVP(pp->p_vnode)) {
		vcolor = (PP_NEWPAGE(pp) || PP_ISNC(pp)) ?
		    vcolor_pa : PP_GET_VCOLOR(pp);
	} else {
		vcolor = addr_to_vcolor(pp->p_offset);
	}

	/* default: identity mapping within the regular kpm range */
	vaddr = kpm_vbase + paddr;
	*kpm_vac_rangep = 0;

	if (vcolor_pa != vcolor) {
		/*
		 * Wanted vcolor differs from the physical one; redirect
		 * vaddr into the matching kpm alias range.
		 */
		*kpm_vac_rangep = abs(vcolor - vcolor_pa);
		vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT);
		vaddr += (vcolor_pa > vcolor) ?
		    ((uintptr_t)vcolor_pa << kpm_size_shift) :
		    ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);

		ASSERT(!PP_ISMAPPED_LARGE(pp));
	}

	if (PP_ISNC(pp))
		return (vaddr);

	if (PP_NEWPAGE(pp)) {
		PP_SET_VCOLOR(pp, vcolor);
		return (vaddr);
	}

	if (PP_GET_VCOLOR(pp) == vcolor)
		return (vaddr);

	ASSERT(!PP_ISMAPPED_KPM(pp));
	sfmmu_kpm_vac_conflict(pp, vaddr);

	return (vaddr);
}

/*
 * VAC conflict state bit values.
 * The following defines are used to make the handling of the
 * various input states more concise. For that the kpm states
 * per kpm_page and per page are combined in a summary state.
 * Each single state has a corresponding bit value in the
 * summary state. These defines only apply for kpm large page
 * mappings. Within comments the abbreviations "kc, c, ks, s"
 * are used as short form of the actual state, e.g. "kc" for
 * "kp_refcntc > 0", etc.
 */
#define	KPM_KC	0x00000008	/* kpm_page: kp_refcntc > 0 */
#define	KPM_C	0x00000004	/* page: P_KPMC set */
#define	KPM_KS	0x00000002	/* kpm_page: kp_refcnts > 0 */
#define	KPM_S	0x00000001	/* page: P_KPMS set */

/*
 * Summary states used in sfmmu_kpm_fault (KPM_TSBM_*).
 * See also more detailed comments within the sfmmu_kpm_fault switch.
 * Abbreviations used:
 * CONFL: VAC conflict(s) within a kpm_page.
 * MAPS: Mapped small: Page mapped in using a regular page size kpm mapping.
 * RASM: Re-assembling of a large page mapping possible.
 * RPLS: Replace: TSB miss due to TSB replacement only.
 * BRKO: Breakup Other: A large kpm mapping has to be broken because another
 *	page within the kpm_page is already involved in a VAC conflict.
 * BRKT: Breakup This: A large kpm mapping has to be broken, this page
 *	is involved in a VAC conflict.
 */
#define	KPM_TSBM_CONFL_GONE	(0)
#define	KPM_TSBM_MAPS_RASM	(KPM_KS)
#define	KPM_TSBM_RPLS_RASM	(KPM_KS | KPM_S)
#define	KPM_TSBM_MAPS_BRKO	(KPM_KC)
#define	KPM_TSBM_MAPS		(KPM_KC | KPM_KS)
#define	KPM_TSBM_RPLS		(KPM_KC | KPM_KS | KPM_S)
#define	KPM_TSBM_MAPS_BRKT	(KPM_KC | KPM_C)
#define	KPM_TSBM_MAPS_CONFL	(KPM_KC | KPM_C | KPM_KS)
#define	KPM_TSBM_RPLS_CONFL	(KPM_KC | KPM_C | KPM_KS | KPM_S)

/*
 * kpm fault handler for mappings with large page size.
 */
int
sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
{
	int		error;
	pgcnt_t		inx;
	kpm_page_t	*kp;
	tte_t		tte;
	pfn_t		pfn = pp->p_pagenum;
	kpm_hlk_t	*kpmp;
	kmutex_t	*pml;
	int		alias_range;
	int		uncached = 0;
	kmutex_t	*pmtx;
	int		badstate;
	uint_t		tsbmcase;

	alias_range = IS_KPM_ALIAS_RANGE(vaddr);

	inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase);
	if (inx >= mseg->kpm_nkpmpgs) {
		cmn_err(CE_PANIC, "sfmmu_kpm_fault: kpm overflow in memseg "
		    "0x%p pp 0x%p", (void *)mseg, (void *)pp);
	}

	kp = &mseg->kpm_pages[inx];
	kpmp = KPMP_HASH(kp);

	pml = sfmmu_mlist_enter(pp);

	if (!PP_ISMAPPED_KPM(pp)) {
		sfmmu_mlist_exit(pml);
		return (EFAULT);
	}

	mutex_enter(&kpmp->khl_mutex);

	if (alias_range) {
		ASSERT(!PP_ISMAPPED_LARGE(pp));
		if (kp->kp_refcnta > 0) {
			if (PP_ISKPMC(pp)) {
				pmtx = sfmmu_page_enter(pp);
				PP_CLRKPMC(pp);
				sfmmu_page_exit(pmtx);
			}
			/*
			 * Check for vcolor conflicts. Return here
			 * w/ either no conflict (fast path), removed hme
			 * mapping chains (unload conflict) or uncached
			 * (uncache conflict). VACaches are cleaned and
			 * p_vcolor and PP_TNC are set accordingly for the
			 * conflict cases. Drop kpmp for uncache conflict
			 * cases since it will be grabbed within
			 * sfmmu_kpm_page_cache in case of an uncache
			 * conflict.
			 */
			mutex_exit(&kpmp->khl_mutex);
			sfmmu_kpm_vac_conflict(pp, vaddr);
			mutex_enter(&kpmp->khl_mutex);

			if (PP_ISNC(pp)) {
				uncached = 1;
				pmtx = sfmmu_page_enter(pp);
				PP_SETKPMC(pp);
				sfmmu_page_exit(pmtx);
			}
			goto smallexit;

		} else {
			/*
			 * We got a tsbmiss on a not active kpm_page range.
			 * Let segkpm_fault decide how to panic.
			 */
			error = EFAULT;
		}
		goto exit;
	}

	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
	if (kp->kp_refcntc == -1) {
		/*
		 * We should come here only if trap level tsb miss
		 * handler is disabled.
		 */
		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
		    PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));

		if (badstate == 0)
			goto largeexit;
	}

	if (badstate || kp->kp_refcntc < 0)
		goto badstate_exit;

	/*
	 * Combine the per kpm_page and per page kpm VAC states to
	 * a summary state in order to make the kpm fault handling
	 * more concise.
	 */
	tsbmcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
	    ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
	    (PP_ISKPMC(pp) ? KPM_C : 0) |
	    (PP_ISKPMS(pp) ? KPM_S : 0));

	switch (tsbmcase) {
	case KPM_TSBM_CONFL_GONE:		/* - - - - */
		/*
		 * That's fine, we either have no more vac conflict in
		 * this kpm page or someone raced in and has solved the
		 * vac conflict for us -- call sfmmu_kpm_vac_conflict
		 * to take care for correcting the vcolor and flushing
		 * the dcache if required.
		 */
		mutex_exit(&kpmp->khl_mutex);
		sfmmu_kpm_vac_conflict(pp, vaddr);
		mutex_enter(&kpmp->khl_mutex);

		if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
			panic("sfmmu_kpm_fault: inconsistent CONFL_GONE "
			    "state, pp=%p", (void *)pp);
		}
		goto largeexit;

	case KPM_TSBM_MAPS_RASM:		/* - - ks - */
		/*
		 * All conflicts in this kpm page are gone but there are
		 * already small mappings around, so we also map this
		 * page small. This could be the trigger case for a
		 * small mapping reaper, if this is really needed.
		 * For now fall thru to the KPM_TSBM_MAPS handling.
		 */

	case KPM_TSBM_MAPS:			/* kc - ks - */
		/*
		 * Large page mapping is already broken, this page is not
		 * conflicting, so map it small. Call sfmmu_kpm_vac_conflict
		 * to take care for correcting the vcolor and flushing
		 * the dcache if required.
		 */
		mutex_exit(&kpmp->khl_mutex);
		sfmmu_kpm_vac_conflict(pp, vaddr);
		mutex_enter(&kpmp->khl_mutex);

		if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
			panic("sfmmu_kpm_fault: inconsistent MAPS state, "
			    "pp=%p", (void *)pp);
		}
		kp->kp_refcnt--;
		kp->kp_refcnts++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMS(pp);
		sfmmu_page_exit(pmtx);
		goto smallexit;

	case KPM_TSBM_RPLS_RASM:		/* - - ks s */
		/*
		 * All conflicts in this kpm page are gone but this page
		 * is mapped small. This could be the trigger case for a
		 * small mapping reaper, if this is really needed.
		 * For now we drop it in small again. Fall thru to the
		 * KPM_TSBM_RPLS handling.
		 */

	case KPM_TSBM_RPLS:			/* kc - ks s */
		/*
		 * Large page mapping is already broken, this page is not
		 * conflicting but already mapped small, so drop it in
		 * small again.
		 */
		if (PP_ISNC(pp) ||
		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
			panic("sfmmu_kpm_fault: inconsistent RPLS state, "
			    "pp=%p", (void *)pp);
		}
		goto smallexit;

	case KPM_TSBM_MAPS_BRKO:		/* kc - - - */
		/*
		 * The kpm page where we live in is marked conflicting
		 * but this page is not conflicting. So we have to map it
		 * in small. Call sfmmu_kpm_vac_conflict to take care for
		 * correcting the vcolor and flushing the dcache if required.
		 */
		mutex_exit(&kpmp->khl_mutex);
		sfmmu_kpm_vac_conflict(pp, vaddr);
		mutex_enter(&kpmp->khl_mutex);

		if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
			panic("sfmmu_kpm_fault: inconsistent MAPS_BRKO state, "
			    "pp=%p", (void *)pp);
		}
		kp->kp_refcnt--;
		kp->kp_refcnts++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMS(pp);
		sfmmu_page_exit(pmtx);
		goto smallexit;

	case KPM_TSBM_MAPS_BRKT:		/* kc c - - */
	case KPM_TSBM_MAPS_CONFL:		/* kc c ks - */
		if (!PP_ISMAPPED(pp)) {
			/*
			 * We got a tsbmiss on kpm large page range that is
			 * marked to contain vac conflicting pages introduced
			 * by hme mappings. The hme mappings are all gone and
			 * must have bypassed the kpm alias prevention logic.
			 */
			panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
			    (void *)pp);
		}

		/*
		 * Check for vcolor conflicts. Return here w/ either no
		 * conflict (fast path), removed hme mapping chains
		 * (unload conflict) or uncached (uncache conflict).
		 * Dcache is cleaned and p_vcolor and P_TNC are set
		 * accordingly. Drop kpmp for uncache conflict cases
		 * since it will be grabbed within sfmmu_kpm_page_cache
		 * in case of an uncache conflict.
		 */
		mutex_exit(&kpmp->khl_mutex);
		sfmmu_kpm_vac_conflict(pp, vaddr);
		mutex_enter(&kpmp->khl_mutex);

		if (kp->kp_refcnt <= 0)
			panic("sfmmu_kpm_fault: bad refcnt kp=%p", (void *)kp);

		if (PP_ISNC(pp)) {
			uncached = 1;
		} else {
			/*
			 * When an unload conflict is solved and there are
			 * no other small mappings around, we can resume
			 * largepage mode. Otherwise we have to map or drop
			 * in small. This could be a trigger for a small
			 * mapping reaper when this was the last conflict
			 * within the kpm page and when there are only
			 * other small mappings around.
			 */
			ASSERT(addr_to_vcolor(vaddr) == PP_GET_VCOLOR(pp));
			ASSERT(kp->kp_refcntc > 0);
			kp->kp_refcntc--;
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);
			ASSERT(PP_ISKPMS(pp) == 0);
			if (kp->kp_refcntc == 0 && kp->kp_refcnts == 0)
				goto largeexit;
		}

		kp->kp_refcnt--;
		kp->kp_refcnts++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMS(pp);
		sfmmu_page_exit(pmtx);
		goto smallexit;

	case KPM_TSBM_RPLS_CONFL:		/* kc c ks s */
		if (!PP_ISMAPPED(pp)) {
			/*
			 * We got a tsbmiss on kpm large page range that is
			 * marked to contain vac conflicting pages introduced
			 * by hme mappings. They are all gone and must have
			 * somehow bypassed the kpm alias prevention logic.
			 */
			panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
			    (void *)pp);
		}

		/*
		 * This state is only possible for an uncached mapping.
		 */
		if (!PP_ISNC(pp)) {
			panic("sfmmu_kpm_fault: page not uncached, pp=%p",
			    (void *)pp);
		}
		uncached = 1;
		goto smallexit;

	default:
	badstate_exit:
		panic("sfmmu_kpm_fault: inconsistent VAC state, vaddr=%p kp=%p "
		    "pp=%p", (void *)vaddr, (void *)kp, (void *)pp);
	}

smallexit:
	/* tte assembly */
	if (uncached == 0)
		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
	else
		KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);

	/* tsb dropin */
	sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

	error = 0;
	goto exit;

largeexit:
	if (kp->kp_refcnt > 0) {

		/* tte assembly */
		KPM_TTE_VCACHED(tte.ll, pfn, TTE4M);

		/* tsb dropin */
		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M);

		if (kp->kp_refcntc == 0) {
			/* Set "go" flag for TL tsbmiss handler */
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock,
			    KPMTSBM_START);
		}
		ASSERT(kp->kp_refcntc == -1);
		error = 0;

	} else
		error = EFAULT;
exit:
	mutex_exit(&kpmp->khl_mutex);
	sfmmu_mlist_exit(pml);
	return (error);
}

/*
 * kpm fault handler for mappings with small page size.
 */
int
sfmmu_kpm_fault_small(caddr_t vaddr, struct memseg *mseg, page_t *pp)
{
	int		error = 0;
	pgcnt_t		inx;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;
	kmutex_t	*pml;
	pfn_t		pfn = pp->p_pagenum;
	tte_t		tte;
	kmutex_t	*pmtx;
	int		oldval;

	inx = pfn - mseg->kpm_pbase;
	ksp = &mseg->kpm_spages[inx];
	kpmsp = KPMP_SHASH(ksp);

	pml = sfmmu_mlist_enter(pp);

	if (!PP_ISMAPPED_KPM(pp)) {
		sfmmu_mlist_exit(pml);
		return (EFAULT);
	}

	/*
	 * kp_mapped lookup protected by mlist mutex
	 */
	if (ksp->kp_mapped == KPM_MAPPEDS) {
		/*
		 * Fast path tsbmiss
		 */
		ASSERT(!PP_ISKPMC(pp));
		ASSERT(!PP_ISNC(pp));

		/* tte assembly */
		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);

		/* tsb dropin */
		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

	} else if (ksp->kp_mapped == KPM_MAPPEDSC) {
		/*
		 * Got here due to existing or gone kpm/hme VAC conflict.
		 * Recheck for vcolor conflicts. Return here w/ either
		 * no conflict, removed hme mapping chain (unload
		 * conflict) or uncached (uncache conflict). VACaches
		 * are cleaned and p_vcolor and PP_TNC are set accordingly
		 * for the conflict cases.
		 */
		sfmmu_kpm_vac_conflict(pp, vaddr);

		if (PP_ISNC(pp)) {
			/* ASSERT(pp->p_share); XXX use hat_page_getshare */

			/* tte assembly */
			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);

			/* tsb dropin */
			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

		} else {
			if (PP_ISKPMC(pp)) {
				pmtx = sfmmu_page_enter(pp);
				PP_CLRKPMC(pp);
				sfmmu_page_exit(pmtx);
			}

			/* tte assembly */
			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);

			/* tsb dropin */
			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

			oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
			    &kpmsp->kshl_lock, KPM_MAPPEDS);

			if (oldval != KPM_MAPPEDSC)
				panic("sfmmu_kpm_fault_small: "
				    "stale smallpages mapping");
		}

	} else {
		/*
		 * We got a tsbmiss on a not active kpm_page range.
		 * Let segkpm_fault decide how to panic.
		 */
		error = EFAULT;
	}

	sfmmu_mlist_exit(pml);
	return (error);
}

/*
 * Check/handle potential hme/kpm mapping conflicts
 */
static void
sfmmu_kpm_vac_conflict(page_t *pp, caddr_t vaddr)
{
	int		vcolor;
	struct sf_hment	*sfhmep;
	struct hat	*tmphat;
	struct sf_hment	*tmphme = NULL;
	struct hme_blk	*hmeblkp;
	tte_t		tte;

	ASSERT(sfmmu_mlist_held(pp));

	/* an already uncached page can't have a vcolor conflict */
	if (PP_ISNC(pp))
		return;

	vcolor = addr_to_vcolor(vaddr);
	if (PP_GET_VCOLOR(pp) == vcolor)
		return;

	/*
	 * There could be no vcolor conflict between a large cached
	 * hme page and a non alias range kpm page (neither large nor
	 * small mapped). So if a hme conflict already exists between
	 * a constituent page of a large hme mapping and a shared small
	 * conflicting hme mapping, both mappings must be already
	 * uncached at this point.
	 */
	ASSERT(!PP_ISMAPPED_LARGE(pp));

	if (!PP_ISMAPPED(pp)) {
		/*
		 * Previous hme user of page had a different color
		 * but since there are no current users
		 * we just flush the cache and change the color.
16022296Sae112802 */ 16032296Sae112802 SFMMU_STAT(sf_pgcolor_conflict); 16042296Sae112802 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 16052296Sae112802 PP_SET_VCOLOR(pp, vcolor); 16062296Sae112802 return; 16072296Sae112802 } 16082296Sae112802 16092296Sae112802 /* 16102296Sae112802 * If we get here we have a vac conflict with a current hme 16112296Sae112802 * mapping. This must have been established by forcing a wrong 16122296Sae112802 * colored mapping, e.g. by using mmap(2) with MAP_FIXED. 16132296Sae112802 */ 16142296Sae112802 16152296Sae112802 /* 16162296Sae112802 * Check if any mapping is in same as or if it is locked 16172296Sae112802 * since in that case we need to uncache. 16182296Sae112802 */ 16192296Sae112802 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 16202296Sae112802 tmphme = sfhmep->hme_next; 1621*5075Spaulsan if (IS_PAHME(sfhmep)) 1622*5075Spaulsan continue; 16232296Sae112802 hmeblkp = sfmmu_hmetohblk(sfhmep); 16242296Sae112802 if (hmeblkp->hblk_xhat_bit) 16252296Sae112802 continue; 16262296Sae112802 tmphat = hblktosfmmu(hmeblkp); 16272296Sae112802 sfmmu_copytte(&sfhmep->hme_tte, &tte); 16282296Sae112802 ASSERT(TTE_IS_VALID(&tte)); 16292296Sae112802 if ((tmphat == ksfmmup) || hmeblkp->hblk_lckcnt) { 16302296Sae112802 /* 16312296Sae112802 * We have an uncache conflict 16322296Sae112802 */ 16332296Sae112802 SFMMU_STAT(sf_uncache_conflict); 16342296Sae112802 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 16352296Sae112802 return; 16362296Sae112802 } 16372296Sae112802 } 16382296Sae112802 16392296Sae112802 /* 16402296Sae112802 * We have an unload conflict 16412296Sae112802 */ 16422296Sae112802 SFMMU_STAT(sf_unload_conflict); 16432296Sae112802 16442296Sae112802 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 16452296Sae112802 tmphme = sfhmep->hme_next; 1646*5075Spaulsan if (IS_PAHME(sfhmep)) 1647*5075Spaulsan continue; 16482296Sae112802 hmeblkp = sfmmu_hmetohblk(sfhmep); 16492296Sae112802 if (hmeblkp->hblk_xhat_bit) 
16502296Sae112802 continue; 16512296Sae112802 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 16522296Sae112802 } 16532296Sae112802 16542296Sae112802 /* 16552296Sae112802 * Unloads only does tlb flushes so we need to flush the 16562296Sae112802 * dcache vcolor here. 16572296Sae112802 */ 16582296Sae112802 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 16592296Sae112802 PP_SET_VCOLOR(pp, vcolor); 16602296Sae112802 } 16612296Sae112802 16622296Sae112802 /* 16632296Sae112802 * Remove all kpm mappings using kpme's for pp and check that 16642296Sae112802 * all kpm mappings (w/ and w/o kpme's) are gone. 16652296Sae112802 */ 16662296Sae112802 void 16672296Sae112802 sfmmu_kpm_pageunload(page_t *pp) 16682296Sae112802 { 16692296Sae112802 caddr_t vaddr; 16702296Sae112802 struct kpme *kpme, *nkpme; 16712296Sae112802 16722296Sae112802 ASSERT(pp != NULL); 16732296Sae112802 ASSERT(pp->p_kpmref); 16742296Sae112802 ASSERT(sfmmu_mlist_held(pp)); 16752296Sae112802 16762296Sae112802 vaddr = hat_kpm_page2va(pp, 1); 16772296Sae112802 16782296Sae112802 for (kpme = pp->p_kpmelist; kpme; kpme = nkpme) { 16792296Sae112802 ASSERT(kpme->kpe_page == pp); 16802296Sae112802 16812296Sae112802 if (pp->p_kpmref == 0) 16822296Sae112802 panic("sfmmu_kpm_pageunload: stale p_kpmref pp=%p " 16832296Sae112802 "kpme=%p", (void *)pp, (void *)kpme); 16842296Sae112802 16852296Sae112802 nkpme = kpme->kpe_next; 16862296Sae112802 16872296Sae112802 /* Add instance callback here here if needed later */ 16882296Sae112802 sfmmu_kpme_sub(kpme, pp); 16892296Sae112802 } 16902296Sae112802 16912296Sae112802 /* 16922296Sae112802 * Also correct after mixed kpme/nonkpme mappings. If nonkpme 16932296Sae112802 * segkpm clients have unlocked the page and forgot to mapout 16942296Sae112802 * we panic here. 
16952296Sae112802 */ 16962296Sae112802 if (pp->p_kpmref != 0) 16972296Sae112802 panic("sfmmu_kpm_pageunload: bad refcnt pp=%p", (void *)pp); 16982296Sae112802 16992296Sae112802 sfmmu_kpm_mapout(pp, vaddr); 17002296Sae112802 } 17012296Sae112802 17022296Sae112802 /* 17032296Sae112802 * Remove a large kpm mapping from kernel TSB and all TLB's. 17042296Sae112802 */ 17052296Sae112802 static void 17062296Sae112802 sfmmu_kpm_demap_large(caddr_t vaddr) 17072296Sae112802 { 17082296Sae112802 sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M); 17092296Sae112802 sfmmu_kpm_demap_tlbs(vaddr); 17102296Sae112802 } 17112296Sae112802 17122296Sae112802 /* 17132296Sae112802 * Remove a small kpm mapping from kernel TSB and all TLB's. 17142296Sae112802 */ 17152296Sae112802 static void 17162296Sae112802 sfmmu_kpm_demap_small(caddr_t vaddr) 17172296Sae112802 { 17182296Sae112802 sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT); 17192296Sae112802 sfmmu_kpm_demap_tlbs(vaddr); 17202296Sae112802 } 17212296Sae112802 17222296Sae112802 /* 17232296Sae112802 * Demap a kpm mapping in all TLB's. 17242296Sae112802 */ 17252296Sae112802 static void 17262296Sae112802 sfmmu_kpm_demap_tlbs(caddr_t vaddr) 17272296Sae112802 { 17282296Sae112802 cpuset_t cpuset; 17292296Sae112802 17302296Sae112802 kpreempt_disable(); 17312296Sae112802 cpuset = ksfmmup->sfmmu_cpusran; 17322296Sae112802 CPUSET_AND(cpuset, cpu_ready_set); 17332296Sae112802 CPUSET_DEL(cpuset, CPU->cpu_id); 17342296Sae112802 SFMMU_XCALL_STATS(ksfmmup); 17352296Sae112802 17362296Sae112802 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)vaddr, 17372296Sae112802 (uint64_t)ksfmmup); 17382296Sae112802 vtag_flushpage(vaddr, (uint64_t)ksfmmup); 17392296Sae112802 17402296Sae112802 kpreempt_enable(); 17412296Sae112802 } 17422296Sae112802 17432296Sae112802 /* 17442296Sae112802 * Summary states used in sfmmu_kpm_vac_unload (KPM_VUL__*). 17452296Sae112802 * See also more detailed comments within in the sfmmu_kpm_vac_unload switch. 
17462296Sae112802 * Abbreviations used: 17472296Sae112802 * BIG: Large page kpm mapping in use. 17482296Sae112802 * CONFL: VAC conflict(s) within a kpm_page. 17492296Sae112802 * INCR: Count of conflicts within a kpm_page is going to be incremented. 17502296Sae112802 * DECR: Count of conflicts within a kpm_page is going to be decremented. 17512296Sae112802 * UNMAP_SMALL: A small (regular page size) mapping is going to be unmapped. 17522296Sae112802 * TNC: Temporary non cached: a kpm mapped page is mapped in TNC state. 17532296Sae112802 */ 17542296Sae112802 #define KPM_VUL_BIG (0) 17552296Sae112802 #define KPM_VUL_CONFL_INCR1 (KPM_KS) 17562296Sae112802 #define KPM_VUL_UNMAP_SMALL1 (KPM_KS | KPM_S) 17572296Sae112802 #define KPM_VUL_CONFL_INCR2 (KPM_KC) 17582296Sae112802 #define KPM_VUL_CONFL_INCR3 (KPM_KC | KPM_KS) 17592296Sae112802 #define KPM_VUL_UNMAP_SMALL2 (KPM_KC | KPM_KS | KPM_S) 17602296Sae112802 #define KPM_VUL_CONFL_DECR1 (KPM_KC | KPM_C) 17612296Sae112802 #define KPM_VUL_CONFL_DECR2 (KPM_KC | KPM_C | KPM_KS) 17622296Sae112802 #define KPM_VUL_TNC (KPM_KC | KPM_C | KPM_KS | KPM_S) 17632296Sae112802 17642296Sae112802 /* 17652296Sae112802 * Handle VAC unload conflicts introduced by hme mappings or vice 17662296Sae112802 * versa when a hme conflict mapping is replaced by a non conflict 17672296Sae112802 * one. Perform actions and state transitions according to the 17682296Sae112802 * various page and kpm_page entry states. VACache flushes are in 17692296Sae112802 * the responsibiliy of the caller. We still hold the mlist lock. 
17702296Sae112802 */ 17712296Sae112802 void 17722296Sae112802 sfmmu_kpm_vac_unload(page_t *pp, caddr_t vaddr) 17732296Sae112802 { 17742296Sae112802 kpm_page_t *kp; 17752296Sae112802 kpm_hlk_t *kpmp; 17762296Sae112802 caddr_t kpmvaddr = hat_kpm_page2va(pp, 1); 17772296Sae112802 int newcolor; 17782296Sae112802 kmutex_t *pmtx; 17792296Sae112802 uint_t vacunlcase; 17802296Sae112802 int badstate = 0; 17812296Sae112802 kpm_spage_t *ksp; 17822296Sae112802 kpm_shlk_t *kpmsp; 17832296Sae112802 17842296Sae112802 ASSERT(PAGE_LOCKED(pp)); 17852296Sae112802 ASSERT(sfmmu_mlist_held(pp)); 17862296Sae112802 ASSERT(!PP_ISNC(pp)); 17872296Sae112802 17882296Sae112802 newcolor = addr_to_vcolor(kpmvaddr) != addr_to_vcolor(vaddr); 17892296Sae112802 if (kpm_smallpages) 17902296Sae112802 goto smallpages_vac_unload; 17912296Sae112802 17922296Sae112802 PP2KPMPG(pp, kp); 17932296Sae112802 kpmp = KPMP_HASH(kp); 17942296Sae112802 mutex_enter(&kpmp->khl_mutex); 17952296Sae112802 17962296Sae112802 if (IS_KPM_ALIAS_RANGE(kpmvaddr)) { 17972296Sae112802 if (kp->kp_refcnta < 1) { 17982296Sae112802 panic("sfmmu_kpm_vac_unload: bad refcnta kpm_page=%p\n", 17992296Sae112802 (void *)kp); 18002296Sae112802 } 18012296Sae112802 18022296Sae112802 if (PP_ISKPMC(pp) == 0) { 18032296Sae112802 if (newcolor == 0) 18042296Sae112802 goto exit; 18052296Sae112802 sfmmu_kpm_demap_small(kpmvaddr); 18062296Sae112802 pmtx = sfmmu_page_enter(pp); 18072296Sae112802 PP_SETKPMC(pp); 18082296Sae112802 sfmmu_page_exit(pmtx); 18092296Sae112802 18102296Sae112802 } else if (newcolor == 0) { 18112296Sae112802 pmtx = sfmmu_page_enter(pp); 18122296Sae112802 PP_CLRKPMC(pp); 18132296Sae112802 sfmmu_page_exit(pmtx); 18142296Sae112802 18152296Sae112802 } else { 18162296Sae112802 badstate++; 18172296Sae112802 } 18182296Sae112802 18192296Sae112802 goto exit; 18202296Sae112802 } 18212296Sae112802 18222296Sae112802 badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0); 18232296Sae112802 if (kp->kp_refcntc == -1) { 18242296Sae112802 /* 
18252296Sae112802 * We should come here only if trap level tsb miss 18262296Sae112802 * handler is disabled. 18272296Sae112802 */ 18282296Sae112802 badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 || 18292296Sae112802 PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp)); 18302296Sae112802 } else { 18312296Sae112802 badstate |= (kp->kp_refcntc < 0); 18322296Sae112802 } 18332296Sae112802 18342296Sae112802 if (badstate) 18352296Sae112802 goto exit; 18362296Sae112802 18372296Sae112802 if (PP_ISKPMC(pp) == 0 && newcolor == 0) { 18382296Sae112802 ASSERT(PP_ISKPMS(pp) == 0); 18392296Sae112802 goto exit; 18402296Sae112802 } 18412296Sae112802 18422296Sae112802 /* 18432296Sae112802 * Combine the per kpm_page and per page kpm VAC states 18442296Sae112802 * to a summary state in order to make the vac unload 18452296Sae112802 * handling more concise. 18462296Sae112802 */ 18472296Sae112802 vacunlcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) | 18482296Sae112802 ((kp->kp_refcnts > 0) ? KPM_KS : 0) | 18492296Sae112802 (PP_ISKPMC(pp) ? KPM_C : 0) | 18502296Sae112802 (PP_ISKPMS(pp) ? KPM_S : 0)); 18512296Sae112802 18522296Sae112802 switch (vacunlcase) { 18532296Sae112802 case KPM_VUL_BIG: /* - - - - */ 18542296Sae112802 /* 18552296Sae112802 * Have to breakup the large page mapping to be 18562296Sae112802 * able to handle the conflicting hme vaddr. 
18572296Sae112802 */ 18582296Sae112802 if (kp->kp_refcntc == -1) { 18592296Sae112802 /* remove go indication */ 18602296Sae112802 sfmmu_kpm_tsbmtl(&kp->kp_refcntc, 18612296Sae112802 &kpmp->khl_lock, KPMTSBM_STOP); 18622296Sae112802 } 18632296Sae112802 sfmmu_kpm_demap_large(kpmvaddr); 18642296Sae112802 18652296Sae112802 ASSERT(kp->kp_refcntc == 0); 18662296Sae112802 kp->kp_refcntc++; 18672296Sae112802 pmtx = sfmmu_page_enter(pp); 18682296Sae112802 PP_SETKPMC(pp); 18692296Sae112802 sfmmu_page_exit(pmtx); 18702296Sae112802 break; 18712296Sae112802 18722296Sae112802 case KPM_VUL_UNMAP_SMALL1: /* - - ks s */ 18732296Sae112802 case KPM_VUL_UNMAP_SMALL2: /* kc - ks s */ 18742296Sae112802 /* 18752296Sae112802 * New conflict w/ an active kpm page, actually mapped 18762296Sae112802 * in by small TSB/TLB entries. Remove the mapping and 18772296Sae112802 * update states. 18782296Sae112802 */ 18792296Sae112802 ASSERT(newcolor); 18802296Sae112802 sfmmu_kpm_demap_small(kpmvaddr); 18812296Sae112802 kp->kp_refcnts--; 18822296Sae112802 kp->kp_refcnt++; 18832296Sae112802 kp->kp_refcntc++; 18842296Sae112802 pmtx = sfmmu_page_enter(pp); 18852296Sae112802 PP_CLRKPMS(pp); 18862296Sae112802 PP_SETKPMC(pp); 18872296Sae112802 sfmmu_page_exit(pmtx); 18882296Sae112802 break; 18892296Sae112802 18902296Sae112802 case KPM_VUL_CONFL_INCR1: /* - - ks - */ 18912296Sae112802 case KPM_VUL_CONFL_INCR2: /* kc - - - */ 18922296Sae112802 case KPM_VUL_CONFL_INCR3: /* kc - ks - */ 18932296Sae112802 /* 18942296Sae112802 * New conflict on a active kpm mapped page not yet in 18952296Sae112802 * TSB/TLB. Mark page and increment the kpm_page conflict 18962296Sae112802 * count. 
18972296Sae112802 */ 18982296Sae112802 ASSERT(newcolor); 18992296Sae112802 kp->kp_refcntc++; 19002296Sae112802 pmtx = sfmmu_page_enter(pp); 19012296Sae112802 PP_SETKPMC(pp); 19022296Sae112802 sfmmu_page_exit(pmtx); 19032296Sae112802 break; 19042296Sae112802 19052296Sae112802 case KPM_VUL_CONFL_DECR1: /* kc c - - */ 19062296Sae112802 case KPM_VUL_CONFL_DECR2: /* kc c ks - */ 19072296Sae112802 /* 19082296Sae112802 * A conflicting hme mapping is removed for an active 19092296Sae112802 * kpm page not yet in TSB/TLB. Unmark page and decrement 19102296Sae112802 * the kpm_page conflict count. 19112296Sae112802 */ 19122296Sae112802 ASSERT(newcolor == 0); 19132296Sae112802 kp->kp_refcntc--; 19142296Sae112802 pmtx = sfmmu_page_enter(pp); 19152296Sae112802 PP_CLRKPMC(pp); 19162296Sae112802 sfmmu_page_exit(pmtx); 19172296Sae112802 break; 19182296Sae112802 19192296Sae112802 case KPM_VUL_TNC: /* kc c ks s */ 19202296Sae112802 cmn_err(CE_NOTE, "sfmmu_kpm_vac_unload: " 19212296Sae112802 "page not in NC state"); 19222296Sae112802 /* FALLTHRU */ 19232296Sae112802 19242296Sae112802 default: 19252296Sae112802 badstate++; 19262296Sae112802 } 19272296Sae112802 exit: 19282296Sae112802 if (badstate) { 19292296Sae112802 panic("sfmmu_kpm_vac_unload: inconsistent VAC state, " 19302296Sae112802 "kpmvaddr=%p kp=%p pp=%p", 19312296Sae112802 (void *)kpmvaddr, (void *)kp, (void *)pp); 19322296Sae112802 } 19332296Sae112802 mutex_exit(&kpmp->khl_mutex); 19342296Sae112802 19352296Sae112802 return; 19362296Sae112802 19372296Sae112802 smallpages_vac_unload: 19382296Sae112802 if (newcolor == 0) 19392296Sae112802 return; 19402296Sae112802 19412296Sae112802 PP2KPMSPG(pp, ksp); 19422296Sae112802 kpmsp = KPMP_SHASH(ksp); 19432296Sae112802 19442296Sae112802 if (PP_ISKPMC(pp) == 0) { 19452296Sae112802 if (ksp->kp_mapped == KPM_MAPPEDS) { 19462296Sae112802 /* 19472296Sae112802 * Stop TL tsbmiss handling 19482296Sae112802 */ 19492296Sae112802 (void) sfmmu_kpm_stsbmtl(&ksp->kp_mapped, 19502296Sae112802 
&kpmsp->kshl_lock, KPM_MAPPEDSC); 19512296Sae112802 19522296Sae112802 sfmmu_kpm_demap_small(kpmvaddr); 19532296Sae112802 19542296Sae112802 } else if (ksp->kp_mapped != KPM_MAPPEDSC) { 19552296Sae112802 panic("sfmmu_kpm_vac_unload: inconsistent mapping"); 19562296Sae112802 } 19572296Sae112802 19582296Sae112802 pmtx = sfmmu_page_enter(pp); 19592296Sae112802 PP_SETKPMC(pp); 19602296Sae112802 sfmmu_page_exit(pmtx); 19612296Sae112802 19622296Sae112802 } else { 19632296Sae112802 if (ksp->kp_mapped != KPM_MAPPEDSC) 19642296Sae112802 panic("sfmmu_kpm_vac_unload: inconsistent mapping"); 19652296Sae112802 } 19662296Sae112802 } 19672296Sae112802 19682296Sae112802 /* 19692296Sae112802 * Page is marked to be in VAC conflict to an existing kpm mapping 19702296Sae112802 * or is kpm mapped using only the regular pagesize. Called from 19712296Sae112802 * sfmmu_hblk_unload when a mlist is completely removed. 19722296Sae112802 */ 19732296Sae112802 void 19742296Sae112802 sfmmu_kpm_hme_unload(page_t *pp) 19752296Sae112802 { 19762296Sae112802 /* tte assembly */ 19772296Sae112802 kpm_page_t *kp; 19782296Sae112802 kpm_hlk_t *kpmp; 19792296Sae112802 caddr_t vaddr; 19802296Sae112802 kmutex_t *pmtx; 19812296Sae112802 uint_t flags; 19822296Sae112802 kpm_spage_t *ksp; 19832296Sae112802 19842296Sae112802 ASSERT(sfmmu_mlist_held(pp)); 19852296Sae112802 ASSERT(PP_ISMAPPED_KPM(pp)); 19862296Sae112802 19872296Sae112802 flags = pp->p_nrm & (P_KPMC | P_KPMS); 19882296Sae112802 if (kpm_smallpages) 19892296Sae112802 goto smallpages_hme_unload; 19902296Sae112802 19912296Sae112802 if (flags == (P_KPMC | P_KPMS)) { 19922296Sae112802 panic("sfmmu_kpm_hme_unload: page should be uncached"); 19932296Sae112802 19942296Sae112802 } else if (flags == P_KPMS) { 19952296Sae112802 /* 19962296Sae112802 * Page mapped small but not involved in VAC conflict 19972296Sae112802 */ 19982296Sae112802 return; 19992296Sae112802 } 20002296Sae112802 20012296Sae112802 vaddr = hat_kpm_page2va(pp, 1); 20022296Sae112802 
20032296Sae112802 PP2KPMPG(pp, kp); 20042296Sae112802 kpmp = KPMP_HASH(kp); 20052296Sae112802 mutex_enter(&kpmp->khl_mutex); 20062296Sae112802 20072296Sae112802 if (IS_KPM_ALIAS_RANGE(vaddr)) { 20082296Sae112802 if (kp->kp_refcnta < 1) { 20092296Sae112802 panic("sfmmu_kpm_hme_unload: bad refcnta kpm_page=%p\n", 20102296Sae112802 (void *)kp); 20112296Sae112802 } 20122296Sae112802 } else { 20132296Sae112802 if (kp->kp_refcntc < 1) { 20142296Sae112802 panic("sfmmu_kpm_hme_unload: bad refcntc kpm_page=%p\n", 20152296Sae112802 (void *)kp); 20162296Sae112802 } 20172296Sae112802 kp->kp_refcntc--; 20182296Sae112802 } 20192296Sae112802 20202296Sae112802 pmtx = sfmmu_page_enter(pp); 20212296Sae112802 PP_CLRKPMC(pp); 20222296Sae112802 sfmmu_page_exit(pmtx); 20232296Sae112802 20242296Sae112802 mutex_exit(&kpmp->khl_mutex); 20252296Sae112802 return; 20262296Sae112802 20272296Sae112802 smallpages_hme_unload: 20282296Sae112802 if (flags != P_KPMC) 20292296Sae112802 panic("sfmmu_kpm_hme_unload: page should be uncached"); 20302296Sae112802 20312296Sae112802 vaddr = hat_kpm_page2va(pp, 1); 20322296Sae112802 PP2KPMSPG(pp, ksp); 20332296Sae112802 20342296Sae112802 if (ksp->kp_mapped != KPM_MAPPEDSC) 20352296Sae112802 panic("sfmmu_kpm_hme_unload: inconsistent mapping"); 20362296Sae112802 20372296Sae112802 /* 20382296Sae112802 * Keep KPM_MAPPEDSC until the next kpm tsbmiss where it 20392296Sae112802 * prevents TL tsbmiss handling and force a hat_kpm_fault. 20402296Sae112802 * There we can start over again. 20412296Sae112802 */ 20422296Sae112802 20432296Sae112802 pmtx = sfmmu_page_enter(pp); 20442296Sae112802 PP_CLRKPMC(pp); 20452296Sae112802 sfmmu_page_exit(pmtx); 20462296Sae112802 } 20472296Sae112802 20482296Sae112802 /* 20492296Sae112802 * Special hooks for sfmmu_page_cache_array() when changing the 20502296Sae112802 * cacheability of a page. It is used to obey the hat_kpm lock 20512296Sae112802 * ordering (mlist -> kpmp -> spl, and back). 
20522296Sae112802 */ 20532296Sae112802 kpm_hlk_t * 20542296Sae112802 sfmmu_kpm_kpmp_enter(page_t *pp, pgcnt_t npages) 20552296Sae112802 { 20562296Sae112802 kpm_page_t *kp; 20572296Sae112802 kpm_hlk_t *kpmp; 20582296Sae112802 20592296Sae112802 ASSERT(sfmmu_mlist_held(pp)); 20602296Sae112802 20612296Sae112802 if (kpm_smallpages || PP_ISMAPPED_KPM(pp) == 0) 20622296Sae112802 return (NULL); 20632296Sae112802 20642296Sae112802 ASSERT(npages <= kpmpnpgs); 20652296Sae112802 20662296Sae112802 PP2KPMPG(pp, kp); 20672296Sae112802 kpmp = KPMP_HASH(kp); 20682296Sae112802 mutex_enter(&kpmp->khl_mutex); 20692296Sae112802 20702296Sae112802 return (kpmp); 20712296Sae112802 } 20722296Sae112802 20732296Sae112802 void 20742296Sae112802 sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp) 20752296Sae112802 { 20762296Sae112802 if (kpm_smallpages || kpmp == NULL) 20772296Sae112802 return; 20782296Sae112802 20792296Sae112802 mutex_exit(&kpmp->khl_mutex); 20802296Sae112802 } 20812296Sae112802 20822296Sae112802 /* 20832296Sae112802 * Summary states used in sfmmu_kpm_page_cache (KPM_*). 20842296Sae112802 * See also more detailed comments within in the sfmmu_kpm_page_cache switch. 20852296Sae112802 * Abbreviations used: 20862296Sae112802 * UNC: Input state for an uncache request. 20872296Sae112802 * BIG: Large page kpm mapping in use. 20882296Sae112802 * SMALL: Page has a small kpm mapping within a kpm_page range. 20892296Sae112802 * NODEMAP: No demap needed. 20902296Sae112802 * NOP: No operation needed on this input state. 20912296Sae112802 * CACHE: Input state for a re-cache request. 20922296Sae112802 * MAPS: Page is in TNC and kpm VAC conflict state and kpm mapped small. 20932296Sae112802 * NOMAP: Page is in TNC and kpm VAC conflict state, but not small kpm 20942296Sae112802 * mapped. 20952296Sae112802 * NOMAPO: Page is in TNC and kpm VAC conflict state, but not small kpm 20962296Sae112802 * mapped. There are also other small kpm mappings within this 20972296Sae112802 * kpm_page. 
20982296Sae112802 */ 20992296Sae112802 #define KPM_UNC_BIG (0) 21002296Sae112802 #define KPM_UNC_NODEMAP1 (KPM_KS) 21012296Sae112802 #define KPM_UNC_SMALL1 (KPM_KS | KPM_S) 21022296Sae112802 #define KPM_UNC_NODEMAP2 (KPM_KC) 21032296Sae112802 #define KPM_UNC_NODEMAP3 (KPM_KC | KPM_KS) 21042296Sae112802 #define KPM_UNC_SMALL2 (KPM_KC | KPM_KS | KPM_S) 21052296Sae112802 #define KPM_UNC_NOP1 (KPM_KC | KPM_C) 21062296Sae112802 #define KPM_UNC_NOP2 (KPM_KC | KPM_C | KPM_KS) 21072296Sae112802 #define KPM_CACHE_NOMAP (KPM_KC | KPM_C) 21082296Sae112802 #define KPM_CACHE_NOMAPO (KPM_KC | KPM_C | KPM_KS) 21092296Sae112802 #define KPM_CACHE_MAPS (KPM_KC | KPM_C | KPM_KS | KPM_S) 21102296Sae112802 21112296Sae112802 /* 21122296Sae112802 * This function is called when the virtual cacheability of a page 21132296Sae112802 * is changed and the page has an actice kpm mapping. The mlist mutex, 21142296Sae112802 * the spl hash lock and the kpmp mutex (if needed) are already grabbed. 21152296Sae112802 */ 21162296Sae112802 /*ARGSUSED2*/ 21172296Sae112802 void 21182296Sae112802 sfmmu_kpm_page_cache(page_t *pp, int flags, int cache_flush_tag) 21192296Sae112802 { 21202296Sae112802 kpm_page_t *kp; 21212296Sae112802 kpm_hlk_t *kpmp; 21222296Sae112802 caddr_t kpmvaddr; 21232296Sae112802 int badstate = 0; 21242296Sae112802 uint_t pgcacase; 21252296Sae112802 kpm_spage_t *ksp; 21262296Sae112802 kpm_shlk_t *kpmsp; 21272296Sae112802 int oldval; 21282296Sae112802 21292296Sae112802 ASSERT(PP_ISMAPPED_KPM(pp)); 21302296Sae112802 ASSERT(sfmmu_mlist_held(pp)); 21312296Sae112802 ASSERT(sfmmu_page_spl_held(pp)); 21322296Sae112802 21332296Sae112802 if (flags != HAT_TMPNC && flags != HAT_CACHE) 21342296Sae112802 panic("sfmmu_kpm_page_cache: bad flags"); 21352296Sae112802 21362296Sae112802 kpmvaddr = hat_kpm_page2va(pp, 1); 21372296Sae112802 21382296Sae112802 if (flags == HAT_TMPNC && cache_flush_tag == CACHE_FLUSH) { 21392296Sae112802 pfn_t pfn = pp->p_pagenum; 21402296Sae112802 int vcolor = 
addr_to_vcolor(kpmvaddr); 21412296Sae112802 cpuset_t cpuset = cpu_ready_set; 21422296Sae112802 21432296Sae112802 /* Flush vcolor in DCache */ 21442296Sae112802 CPUSET_DEL(cpuset, CPU->cpu_id); 21452296Sae112802 SFMMU_XCALL_STATS(ksfmmup); 21462296Sae112802 xt_some(cpuset, vac_flushpage_tl1, pfn, vcolor); 21472296Sae112802 vac_flushpage(pfn, vcolor); 21482296Sae112802 } 21492296Sae112802 21502296Sae112802 if (kpm_smallpages) 21512296Sae112802 goto smallpages_page_cache; 21522296Sae112802 21532296Sae112802 PP2KPMPG(pp, kp); 21542296Sae112802 kpmp = KPMP_HASH(kp); 21552296Sae112802 ASSERT(MUTEX_HELD(&kpmp->khl_mutex)); 21562296Sae112802 21572296Sae112802 if (IS_KPM_ALIAS_RANGE(kpmvaddr)) { 21582296Sae112802 if (kp->kp_refcnta < 1) { 21592296Sae112802 panic("sfmmu_kpm_page_cache: bad refcnta " 21602296Sae112802 "kpm_page=%p\n", (void *)kp); 21612296Sae112802 } 21622296Sae112802 sfmmu_kpm_demap_small(kpmvaddr); 21632296Sae112802 if (flags == HAT_TMPNC) { 21642296Sae112802 PP_SETKPMC(pp); 21652296Sae112802 ASSERT(!PP_ISKPMS(pp)); 21662296Sae112802 } else { 21672296Sae112802 ASSERT(PP_ISKPMC(pp)); 21682296Sae112802 PP_CLRKPMC(pp); 21692296Sae112802 } 21702296Sae112802 goto exit; 21712296Sae112802 } 21722296Sae112802 21732296Sae112802 badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0); 21742296Sae112802 if (kp->kp_refcntc == -1) { 21752296Sae112802 /* 21762296Sae112802 * We should come here only if trap level tsb miss 21772296Sae112802 * handler is disabled. 
21782296Sae112802 */ 21792296Sae112802 badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 || 21802296Sae112802 PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp)); 21812296Sae112802 } else { 21822296Sae112802 badstate |= (kp->kp_refcntc < 0); 21832296Sae112802 } 21842296Sae112802 21852296Sae112802 if (badstate) 21862296Sae112802 goto exit; 21872296Sae112802 21882296Sae112802 /* 21892296Sae112802 * Combine the per kpm_page and per page kpm VAC states to 21902296Sae112802 * a summary state in order to make the VAC cache/uncache 21912296Sae112802 * handling more concise. 21922296Sae112802 */ 21932296Sae112802 pgcacase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) | 21942296Sae112802 ((kp->kp_refcnts > 0) ? KPM_KS : 0) | 21952296Sae112802 (PP_ISKPMC(pp) ? KPM_C : 0) | 21962296Sae112802 (PP_ISKPMS(pp) ? KPM_S : 0)); 21972296Sae112802 21982296Sae112802 if (flags == HAT_CACHE) { 21992296Sae112802 switch (pgcacase) { 22002296Sae112802 case KPM_CACHE_MAPS: /* kc c ks s */ 22012296Sae112802 sfmmu_kpm_demap_small(kpmvaddr); 22022296Sae112802 if (kp->kp_refcnts < 1) { 22032296Sae112802 panic("sfmmu_kpm_page_cache: bad refcnts " 22042296Sae112802 "kpm_page=%p\n", (void *)kp); 22052296Sae112802 } 22062296Sae112802 kp->kp_refcnts--; 22072296Sae112802 kp->kp_refcnt++; 22082296Sae112802 PP_CLRKPMS(pp); 22092296Sae112802 /* FALLTHRU */ 22102296Sae112802 22112296Sae112802 case KPM_CACHE_NOMAP: /* kc c - - */ 22122296Sae112802 case KPM_CACHE_NOMAPO: /* kc c ks - */ 22132296Sae112802 kp->kp_refcntc--; 22142296Sae112802 PP_CLRKPMC(pp); 22152296Sae112802 break; 22162296Sae112802 22172296Sae112802 default: 22182296Sae112802 badstate++; 22192296Sae112802 } 22202296Sae112802 goto exit; 22212296Sae112802 } 22222296Sae112802 22232296Sae112802 switch (pgcacase) { 22242296Sae112802 case KPM_UNC_BIG: /* - - - - */ 22252296Sae112802 if (kp->kp_refcnt < 1) { 22262296Sae112802 panic("sfmmu_kpm_page_cache: bad refcnt " 22272296Sae112802 "kpm_page=%p\n", (void *)kp); 22282296Sae112802 } 22292296Sae112802 
22302296Sae112802 /* 22312296Sae112802 * Have to breakup the large page mapping in preparation 22322296Sae112802 * to the upcoming TNC mode handled by small mappings. 22332296Sae112802 * The demap can already be done due to another conflict 22342296Sae112802 * within the kpm_page. 22352296Sae112802 */ 22362296Sae112802 if (kp->kp_refcntc == -1) { 22372296Sae112802 /* remove go indication */ 22382296Sae112802 sfmmu_kpm_tsbmtl(&kp->kp_refcntc, 22392296Sae112802 &kpmp->khl_lock, KPMTSBM_STOP); 22402296Sae112802 } 22412296Sae112802 ASSERT(kp->kp_refcntc == 0); 22422296Sae112802 sfmmu_kpm_demap_large(kpmvaddr); 22432296Sae112802 kp->kp_refcntc++; 22442296Sae112802 PP_SETKPMC(pp); 22452296Sae112802 break; 22462296Sae112802 22472296Sae112802 case KPM_UNC_SMALL1: /* - - ks s */ 22482296Sae112802 case KPM_UNC_SMALL2: /* kc - ks s */ 22492296Sae112802 /* 22502296Sae112802 * Have to demap an already small kpm mapping in preparation 22512296Sae112802 * to the upcoming TNC mode. The demap can already be done 22522296Sae112802 * due to another conflict within the kpm_page. 
22532296Sae112802 */ 22542296Sae112802 sfmmu_kpm_demap_small(kpmvaddr); 22552296Sae112802 kp->kp_refcntc++; 22562296Sae112802 kp->kp_refcnts--; 22572296Sae112802 kp->kp_refcnt++; 22582296Sae112802 PP_CLRKPMS(pp); 22592296Sae112802 PP_SETKPMC(pp); 22602296Sae112802 break; 22612296Sae112802 22622296Sae112802 case KPM_UNC_NODEMAP1: /* - - ks - */ 22632296Sae112802 /* fallthru */ 22642296Sae112802 22652296Sae112802 case KPM_UNC_NODEMAP2: /* kc - - - */ 22662296Sae112802 case KPM_UNC_NODEMAP3: /* kc - ks - */ 22672296Sae112802 kp->kp_refcntc++; 22682296Sae112802 PP_SETKPMC(pp); 22692296Sae112802 break; 22702296Sae112802 22712296Sae112802 case KPM_UNC_NOP1: /* kc c - - */ 22722296Sae112802 case KPM_UNC_NOP2: /* kc c ks - */ 22732296Sae112802 break; 22742296Sae112802 22752296Sae112802 default: 22762296Sae112802 badstate++; 22772296Sae112802 } 22782296Sae112802 exit: 22792296Sae112802 if (badstate) { 22802296Sae112802 panic("sfmmu_kpm_page_cache: inconsistent VAC state " 22812296Sae112802 "kpmvaddr=%p kp=%p pp=%p", (void *)kpmvaddr, 22822296Sae112802 (void *)kp, (void *)pp); 22832296Sae112802 } 22842296Sae112802 return; 22852296Sae112802 22862296Sae112802 smallpages_page_cache: 22872296Sae112802 PP2KPMSPG(pp, ksp); 22882296Sae112802 kpmsp = KPMP_SHASH(ksp); 22892296Sae112802 22902296Sae112802 oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped, 22912296Sae112802 &kpmsp->kshl_lock, KPM_MAPPEDSC); 22922296Sae112802 22932296Sae112802 if (!(oldval == KPM_MAPPEDS || oldval == KPM_MAPPEDSC)) 22942296Sae112802 panic("smallpages_page_cache: inconsistent mapping"); 22952296Sae112802 22962296Sae112802 sfmmu_kpm_demap_small(kpmvaddr); 22972296Sae112802 22982296Sae112802 if (flags == HAT_TMPNC) { 22992296Sae112802 PP_SETKPMC(pp); 23002296Sae112802 ASSERT(!PP_ISKPMS(pp)); 23012296Sae112802 23022296Sae112802 } else { 23032296Sae112802 ASSERT(PP_ISKPMC(pp)); 23042296Sae112802 PP_CLRKPMC(pp); 23052296Sae112802 } 23062296Sae112802 23072296Sae112802 /* 23082296Sae112802 * Keep KPM_MAPPEDSC until the 
next kpm tsbmiss where it 23092296Sae112802 * prevents TL tsbmiss handling and force a hat_kpm_fault. 23102296Sae112802 * There we can start over again. 23112296Sae112802 */ 23122296Sae112802 } 2313