/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
25*0Sstevel@tonic-gate */ 26*0Sstevel@tonic-gate 27*0Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 28*0Sstevel@tonic-gate 29*0Sstevel@tonic-gate #include <sys/types.h> 30*0Sstevel@tonic-gate #include <vm/hat.h> 31*0Sstevel@tonic-gate #include <vm/hat_sfmmu.h> 32*0Sstevel@tonic-gate #include <vm/page.h> 33*0Sstevel@tonic-gate #include <sys/pte.h> 34*0Sstevel@tonic-gate #include <sys/systm.h> 35*0Sstevel@tonic-gate #include <sys/mman.h> 36*0Sstevel@tonic-gate #include <sys/sysmacros.h> 37*0Sstevel@tonic-gate #include <sys/machparam.h> 38*0Sstevel@tonic-gate #include <sys/vtrace.h> 39*0Sstevel@tonic-gate #include <sys/kmem.h> 40*0Sstevel@tonic-gate #include <sys/mmu.h> 41*0Sstevel@tonic-gate #include <sys/cmn_err.h> 42*0Sstevel@tonic-gate #include <sys/cpu.h> 43*0Sstevel@tonic-gate #include <sys/cpuvar.h> 44*0Sstevel@tonic-gate #include <sys/debug.h> 45*0Sstevel@tonic-gate #include <sys/lgrp.h> 46*0Sstevel@tonic-gate #include <sys/archsystm.h> 47*0Sstevel@tonic-gate #include <sys/machsystm.h> 48*0Sstevel@tonic-gate #include <sys/vmsystm.h> 49*0Sstevel@tonic-gate #include <sys/bitmap.h> 50*0Sstevel@tonic-gate #include <vm/rm.h> 51*0Sstevel@tonic-gate #include <sys/t_lock.h> 52*0Sstevel@tonic-gate #include <sys/vm_machparam.h> 53*0Sstevel@tonic-gate #include <sys/promif.h> 54*0Sstevel@tonic-gate #include <sys/prom_isa.h> 55*0Sstevel@tonic-gate #include <sys/prom_plat.h> 56*0Sstevel@tonic-gate #include <sys/prom_debug.h> 57*0Sstevel@tonic-gate #include <sys/privregs.h> 58*0Sstevel@tonic-gate #include <sys/bootconf.h> 59*0Sstevel@tonic-gate #include <sys/memlist.h> 60*0Sstevel@tonic-gate #include <sys/memlist_plat.h> 61*0Sstevel@tonic-gate #include <sys/cpu_module.h> 62*0Sstevel@tonic-gate #include <sys/reboot.h> 63*0Sstevel@tonic-gate #include <sys/kdi.h> 64*0Sstevel@tonic-gate #include <sys/fpu/fpusystm.h> 65*0Sstevel@tonic-gate 66*0Sstevel@tonic-gate /* 67*0Sstevel@tonic-gate * External routines and data structures 68*0Sstevel@tonic-gate */ 
69*0Sstevel@tonic-gate extern void sfmmu_cache_flushcolor(int, pfn_t); 70*0Sstevel@tonic-gate 71*0Sstevel@tonic-gate /* 72*0Sstevel@tonic-gate * Static routines 73*0Sstevel@tonic-gate */ 74*0Sstevel@tonic-gate static void sfmmu_set_tlb(void); 75*0Sstevel@tonic-gate 76*0Sstevel@tonic-gate /* 77*0Sstevel@tonic-gate * Global Data: 78*0Sstevel@tonic-gate */ 79*0Sstevel@tonic-gate caddr_t textva, datava; 80*0Sstevel@tonic-gate tte_t ktext_tte, kdata_tte; /* ttes for kernel text and data */ 81*0Sstevel@tonic-gate 82*0Sstevel@tonic-gate int enable_bigktsb = 1; 83*0Sstevel@tonic-gate 84*0Sstevel@tonic-gate tte_t bigktsb_ttes[MAX_BIGKTSB_TTES]; 85*0Sstevel@tonic-gate int bigktsb_nttes = 0; 86*0Sstevel@tonic-gate 87*0Sstevel@tonic-gate 88*0Sstevel@tonic-gate /* 89*0Sstevel@tonic-gate * Controls the logic which enables the use of the 90*0Sstevel@tonic-gate * QUAD_LDD_PHYS ASI for TSB accesses. 91*0Sstevel@tonic-gate */ 92*0Sstevel@tonic-gate int ktsb_phys = 0; 93*0Sstevel@tonic-gate 94*0Sstevel@tonic-gate 95*0Sstevel@tonic-gate 96*0Sstevel@tonic-gate /* 97*0Sstevel@tonic-gate * This routine remaps the kernel using large ttes 98*0Sstevel@tonic-gate * All entries except locked ones will be removed from the tlb. 99*0Sstevel@tonic-gate * It assumes that both the text and data segments reside in a separate 100*0Sstevel@tonic-gate * 4mb virtual and physical contigous memory chunk. This routine 101*0Sstevel@tonic-gate * is only executed by the first cpu. The remaining cpus execute 102*0Sstevel@tonic-gate * sfmmu_mp_startup() instead. 103*0Sstevel@tonic-gate * XXX It assumes that the start of the text segment is KERNELBASE. It should 104*0Sstevel@tonic-gate * actually be based on start. 
105*0Sstevel@tonic-gate */ 106*0Sstevel@tonic-gate void 107*0Sstevel@tonic-gate sfmmu_remap_kernel(void) 108*0Sstevel@tonic-gate { 109*0Sstevel@tonic-gate pfn_t pfn; 110*0Sstevel@tonic-gate uint_t attr; 111*0Sstevel@tonic-gate int flags; 112*0Sstevel@tonic-gate 113*0Sstevel@tonic-gate extern char end[]; 114*0Sstevel@tonic-gate extern struct as kas; 115*0Sstevel@tonic-gate 116*0Sstevel@tonic-gate textva = (caddr_t)(KERNELBASE & MMU_PAGEMASK4M); 117*0Sstevel@tonic-gate pfn = va_to_pfn(textva); 118*0Sstevel@tonic-gate if (pfn == PFN_INVALID) 119*0Sstevel@tonic-gate prom_panic("can't find kernel text pfn"); 120*0Sstevel@tonic-gate pfn &= TTE_PFNMASK(TTE4M); 121*0Sstevel@tonic-gate 122*0Sstevel@tonic-gate attr = PROC_TEXT | HAT_NOSYNC; 123*0Sstevel@tonic-gate flags = HAT_LOAD_LOCK | SFMMU_NO_TSBLOAD; 124*0Sstevel@tonic-gate sfmmu_memtte(&ktext_tte, pfn, attr, TTE4M); 125*0Sstevel@tonic-gate /* 126*0Sstevel@tonic-gate * We set the lock bit in the tte to lock the translation in 127*0Sstevel@tonic-gate * the tlb. Note we cannot lock Panther 32M/256M pages into the tlb. 128*0Sstevel@tonic-gate * This note is here to make sure that no one tries to remap the 129*0Sstevel@tonic-gate * kernel using 32M or 256M tte's on Panther cpus. 
130*0Sstevel@tonic-gate */ 131*0Sstevel@tonic-gate TTE_SET_LOCKED(&ktext_tte); 132*0Sstevel@tonic-gate sfmmu_tteload(kas.a_hat, &ktext_tte, textva, NULL, flags); 133*0Sstevel@tonic-gate 134*0Sstevel@tonic-gate datava = (caddr_t)((uintptr_t)end & MMU_PAGEMASK4M); 135*0Sstevel@tonic-gate pfn = va_to_pfn(datava); 136*0Sstevel@tonic-gate if (pfn == PFN_INVALID) 137*0Sstevel@tonic-gate prom_panic("can't find kernel data pfn"); 138*0Sstevel@tonic-gate pfn &= TTE_PFNMASK(TTE4M); 139*0Sstevel@tonic-gate 140*0Sstevel@tonic-gate attr = PROC_DATA | HAT_NOSYNC; 141*0Sstevel@tonic-gate sfmmu_memtte(&kdata_tte, pfn, attr, TTE4M); 142*0Sstevel@tonic-gate /* 143*0Sstevel@tonic-gate * We set the lock bit in the tte to lock the translation in 144*0Sstevel@tonic-gate * the tlb. We also set the mod bit to avoid taking dirty bit 145*0Sstevel@tonic-gate * traps on kernel data. 146*0Sstevel@tonic-gate */ 147*0Sstevel@tonic-gate TTE_SET_LOCKED(&kdata_tte); 148*0Sstevel@tonic-gate TTE_SET_LOFLAGS(&kdata_tte, 0, TTE_HWWR_INT); 149*0Sstevel@tonic-gate sfmmu_tteload(kas.a_hat, &kdata_tte, datava, 150*0Sstevel@tonic-gate (struct page *)NULL, flags); 151*0Sstevel@tonic-gate 152*0Sstevel@tonic-gate /* 153*0Sstevel@tonic-gate * create bigktsb ttes if necessary. 
154*0Sstevel@tonic-gate */ 155*0Sstevel@tonic-gate if (enable_bigktsb) { 156*0Sstevel@tonic-gate int i = 0; 157*0Sstevel@tonic-gate caddr_t va = ktsb_base; 158*0Sstevel@tonic-gate size_t tsbsz = ktsb_sz; 159*0Sstevel@tonic-gate tte_t tte; 160*0Sstevel@tonic-gate 161*0Sstevel@tonic-gate ASSERT(va >= datava + MMU_PAGESIZE4M); 162*0Sstevel@tonic-gate ASSERT(tsbsz >= MMU_PAGESIZE4M); 163*0Sstevel@tonic-gate ASSERT(IS_P2ALIGNED(tsbsz, tsbsz)); 164*0Sstevel@tonic-gate ASSERT(IS_P2ALIGNED(va, tsbsz)); 165*0Sstevel@tonic-gate attr = PROC_DATA | HAT_NOSYNC; 166*0Sstevel@tonic-gate while (tsbsz != 0) { 167*0Sstevel@tonic-gate ASSERT(i < MAX_BIGKTSB_TTES); 168*0Sstevel@tonic-gate pfn = va_to_pfn(va); 169*0Sstevel@tonic-gate ASSERT(pfn != PFN_INVALID); 170*0Sstevel@tonic-gate ASSERT((pfn & ~TTE_PFNMASK(TTE4M)) == 0); 171*0Sstevel@tonic-gate sfmmu_memtte(&tte, pfn, attr, TTE4M); 172*0Sstevel@tonic-gate ASSERT(TTE_IS_MOD(&tte)); 173*0Sstevel@tonic-gate /* 174*0Sstevel@tonic-gate * No need to lock if we use physical addresses. 175*0Sstevel@tonic-gate * Since we invalidate the kernel TSB using virtual 176*0Sstevel@tonic-gate * addresses, it's an optimization to load them now 177*0Sstevel@tonic-gate * so that we won't have to load them later. 178*0Sstevel@tonic-gate */ 179*0Sstevel@tonic-gate if (!ktsb_phys) { 180*0Sstevel@tonic-gate TTE_SET_LOCKED(&tte); 181*0Sstevel@tonic-gate } 182*0Sstevel@tonic-gate sfmmu_tteload(kas.a_hat, &tte, va, NULL, flags); 183*0Sstevel@tonic-gate bigktsb_ttes[i] = tte; 184*0Sstevel@tonic-gate va += MMU_PAGESIZE4M; 185*0Sstevel@tonic-gate tsbsz -= MMU_PAGESIZE4M; 186*0Sstevel@tonic-gate i++; 187*0Sstevel@tonic-gate } 188*0Sstevel@tonic-gate bigktsb_nttes = i; 189*0Sstevel@tonic-gate } 190*0Sstevel@tonic-gate 191*0Sstevel@tonic-gate sfmmu_set_tlb(); 192*0Sstevel@tonic-gate } 193*0Sstevel@tonic-gate 194*0Sstevel@tonic-gate /* 195*0Sstevel@tonic-gate * Unmap all references to user TSBs from the TLB of the current processor. 
196*0Sstevel@tonic-gate */ 197*0Sstevel@tonic-gate static void 198*0Sstevel@tonic-gate sfmmu_clear_user_tsbs() 199*0Sstevel@tonic-gate { 200*0Sstevel@tonic-gate caddr_t va; 201*0Sstevel@tonic-gate caddr_t end_va; 202*0Sstevel@tonic-gate 203*0Sstevel@tonic-gate /* Demap all pages in the VA range for the first user TSB */ 204*0Sstevel@tonic-gate va = utsb_vabase; 205*0Sstevel@tonic-gate end_va = va + tsb_slab_size; 206*0Sstevel@tonic-gate while (va < end_va) { 207*0Sstevel@tonic-gate vtag_flushpage(va, KCONTEXT); 208*0Sstevel@tonic-gate va += MMU_PAGESIZE; 209*0Sstevel@tonic-gate } 210*0Sstevel@tonic-gate 211*0Sstevel@tonic-gate /* Demap all pages in the VA range for the second user TSB */ 212*0Sstevel@tonic-gate va = utsb4m_vabase; 213*0Sstevel@tonic-gate end_va = va + tsb_slab_size; 214*0Sstevel@tonic-gate while (va < end_va) { 215*0Sstevel@tonic-gate vtag_flushpage(va, KCONTEXT); 216*0Sstevel@tonic-gate va += MMU_PAGESIZE; 217*0Sstevel@tonic-gate } 218*0Sstevel@tonic-gate } 219*0Sstevel@tonic-gate 220*0Sstevel@tonic-gate /* 221*0Sstevel@tonic-gate * Setup the kernel's locked tte's 222*0Sstevel@tonic-gate */ 223*0Sstevel@tonic-gate void 224*0Sstevel@tonic-gate sfmmu_set_tlb(void) 225*0Sstevel@tonic-gate { 226*0Sstevel@tonic-gate uint_t index; 227*0Sstevel@tonic-gate struct cpu_node *cpunode; 228*0Sstevel@tonic-gate 229*0Sstevel@tonic-gate cpunode = &cpunodes[getprocessorid()]; 230*0Sstevel@tonic-gate index = cpunode->itlb_size; 231*0Sstevel@tonic-gate 232*0Sstevel@tonic-gate /* 233*0Sstevel@tonic-gate * NOTE: the prom will do an explicit unmap of the VAs from the TLBs 234*0Sstevel@tonic-gate * in the following functions before loading the new value into the 235*0Sstevel@tonic-gate * TLB. Thus if there was an entry already in the TLB at a different 236*0Sstevel@tonic-gate * location, it will get unmapped before we load the entry at the 237*0Sstevel@tonic-gate * specified location. 
238*0Sstevel@tonic-gate */ 239*0Sstevel@tonic-gate (void) prom_itlb_load(index - 1, *(uint64_t *)&ktext_tte, textva); 240*0Sstevel@tonic-gate index = cpunode->dtlb_size; 241*0Sstevel@tonic-gate (void) prom_dtlb_load(index - 1, *(uint64_t *)&kdata_tte, datava); 242*0Sstevel@tonic-gate (void) prom_dtlb_load(index - 2, *(uint64_t *)&ktext_tte, textva); 243*0Sstevel@tonic-gate index -= 3; 244*0Sstevel@tonic-gate 245*0Sstevel@tonic-gate utsb_dtlb_ttenum = index--; 246*0Sstevel@tonic-gate utsb4m_dtlb_ttenum = index--; 247*0Sstevel@tonic-gate sfmmu_clear_user_tsbs(); 248*0Sstevel@tonic-gate 249*0Sstevel@tonic-gate if (!ktsb_phys && enable_bigktsb) { 250*0Sstevel@tonic-gate int i; 251*0Sstevel@tonic-gate caddr_t va = ktsb_base; 252*0Sstevel@tonic-gate uint64_t tte; 253*0Sstevel@tonic-gate 254*0Sstevel@tonic-gate ASSERT(bigktsb_nttes <= MAX_BIGKTSB_TTES); 255*0Sstevel@tonic-gate for (i = 0; i < bigktsb_nttes; i++) { 256*0Sstevel@tonic-gate tte = *(uint64_t *)&bigktsb_ttes[i]; 257*0Sstevel@tonic-gate (void) prom_dtlb_load(index, tte, va); 258*0Sstevel@tonic-gate va += MMU_PAGESIZE4M; 259*0Sstevel@tonic-gate index--; 260*0Sstevel@tonic-gate } 261*0Sstevel@tonic-gate } 262*0Sstevel@tonic-gate 263*0Sstevel@tonic-gate dtlb_resv_ttenum = index + 1; 264*0Sstevel@tonic-gate } 265*0Sstevel@tonic-gate 266*0Sstevel@tonic-gate /* 267*0Sstevel@tonic-gate * This routine is executed by all other cpus except the first one 268*0Sstevel@tonic-gate * at initialization time. It is responsible for taking over the 269*0Sstevel@tonic-gate * mmu from the prom. We follow these steps. 
270*0Sstevel@tonic-gate * Lock the kernel's ttes in the TLB 271*0Sstevel@tonic-gate * Initialize the tsb hardware registers 272*0Sstevel@tonic-gate * Take over the trap table 273*0Sstevel@tonic-gate * Flush the prom's locked entries from the TLB 274*0Sstevel@tonic-gate */ 275*0Sstevel@tonic-gate void 276*0Sstevel@tonic-gate sfmmu_mp_startup(void) 277*0Sstevel@tonic-gate { 278*0Sstevel@tonic-gate sfmmu_set_tlb(); 279*0Sstevel@tonic-gate setwstate(WSTATE_KERN); 280*0Sstevel@tonic-gate prom_set_traptable(&trap_table); 281*0Sstevel@tonic-gate install_va_to_tte(); 282*0Sstevel@tonic-gate } 283*0Sstevel@tonic-gate 284*0Sstevel@tonic-gate void 285*0Sstevel@tonic-gate kdi_tlb_page_lock(caddr_t va, int do_dtlb) 286*0Sstevel@tonic-gate { 287*0Sstevel@tonic-gate tte_t tte; 288*0Sstevel@tonic-gate pfn_t pfn = va_to_pfn(va); 289*0Sstevel@tonic-gate 290*0Sstevel@tonic-gate tte.tte_inthi = TTE_VALID_INT | TTE_SZ_INT(TTE8K) | TTE_PFN_INTHI(pfn); 291*0Sstevel@tonic-gate tte.tte_intlo = TTE_PFN_INTLO(pfn) | TTE_LCK_INT | TTE_CP_INT | 292*0Sstevel@tonic-gate TTE_PRIV_INT | TTE_HWWR_INT; 293*0Sstevel@tonic-gate 294*0Sstevel@tonic-gate vtag_flushpage(va, KCONTEXT); 295*0Sstevel@tonic-gate 296*0Sstevel@tonic-gate sfmmu_itlb_ld(va, KCONTEXT, &tte); 297*0Sstevel@tonic-gate if (do_dtlb) 298*0Sstevel@tonic-gate sfmmu_dtlb_ld(va, KCONTEXT, &tte); 299*0Sstevel@tonic-gate } 300*0Sstevel@tonic-gate 301*0Sstevel@tonic-gate /*ARGSUSED*/ 302*0Sstevel@tonic-gate void 303*0Sstevel@tonic-gate kdi_tlb_page_unlock(caddr_t va, int do_dtlb) 304*0Sstevel@tonic-gate { 305*0Sstevel@tonic-gate vtag_flushpage(va, KCONTEXT); 306*0Sstevel@tonic-gate } 307*0Sstevel@tonic-gate 308*0Sstevel@tonic-gate /* clear user TSB information (applicable to hardware TSB walkers) */ 309*0Sstevel@tonic-gate void 310*0Sstevel@tonic-gate sfmmu_clear_utsbinfo() 311*0Sstevel@tonic-gate { 312*0Sstevel@tonic-gate } 313*0Sstevel@tonic-gate 314*0Sstevel@tonic-gate /*ARGSUSED*/ 315*0Sstevel@tonic-gate void 316*0Sstevel@tonic-gate 
sfmmu_setup_tsbinfo(sfmmu_t *sfmmup) 317*0Sstevel@tonic-gate { 318*0Sstevel@tonic-gate } 319*0Sstevel@tonic-gate 320*0Sstevel@tonic-gate /* 321*0Sstevel@tonic-gate * Invalidate a TSB. If floating point is enabled we use 322*0Sstevel@tonic-gate * a fast block-store routine, otherwise we use the old method 323*0Sstevel@tonic-gate * of walking the TSB setting each tag to TSBTAG_INVALID. 324*0Sstevel@tonic-gate */ 325*0Sstevel@tonic-gate void 326*0Sstevel@tonic-gate sfmmu_inv_tsb(caddr_t tsb_base, uint_t tsb_bytes) 327*0Sstevel@tonic-gate { 328*0Sstevel@tonic-gate extern void sfmmu_inv_tsb_fast(caddr_t, uint_t); 329*0Sstevel@tonic-gate struct tsbe *tsbaddr; 330*0Sstevel@tonic-gate 331*0Sstevel@tonic-gate /* CONSTCOND */ 332*0Sstevel@tonic-gate if (fpu_exists) { 333*0Sstevel@tonic-gate sfmmu_inv_tsb_fast(tsb_base, tsb_bytes); 334*0Sstevel@tonic-gate return; 335*0Sstevel@tonic-gate } 336*0Sstevel@tonic-gate 337*0Sstevel@tonic-gate for (tsbaddr = (struct tsbe *)tsb_base; 338*0Sstevel@tonic-gate (uintptr_t)tsbaddr < (uintptr_t)(tsb_base + tsb_bytes); 339*0Sstevel@tonic-gate tsbaddr++) { 340*0Sstevel@tonic-gate tsbaddr->tte_tag.tag_inthi = TSBTAG_INVALID; 341*0Sstevel@tonic-gate } 342*0Sstevel@tonic-gate 343*0Sstevel@tonic-gate if (ktsb_phys && tsb_base == ktsb_base) 344*0Sstevel@tonic-gate dcache_flushall(); 345*0Sstevel@tonic-gate } 346*0Sstevel@tonic-gate 347*0Sstevel@tonic-gate /* 348*0Sstevel@tonic-gate * Completely flush the D-cache on all cpus. 349*0Sstevel@tonic-gate */ 350*0Sstevel@tonic-gate void 351*0Sstevel@tonic-gate sfmmu_cache_flushall() 352*0Sstevel@tonic-gate { 353*0Sstevel@tonic-gate int i; 354*0Sstevel@tonic-gate 355*0Sstevel@tonic-gate for (i = 0; i < CACHE_NUM_COLOR; i++) 356*0Sstevel@tonic-gate sfmmu_cache_flushcolor(i, 0); 357*0Sstevel@tonic-gate } 358