/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/vm.h>
#include <sys/cpu.h>
#include <sys/cpupart.h>
#include <sys/atomic.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/bootconf.h>
#include <sys/memlist_plat.h>
#include <sys/memlist_impl.h>
#include <sys/prom_plat.h>
#include <sys/prom_isa.h>
#include <sys/autoconf.h>
#include <sys/intreg.h>
#include <sys/ivintr.h>
#include <sys/fpu/fpusystm.h>
#include <sys/iommutsb.h>
#include <vm/vm_dep.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/seg_map.h>
#include <vm/seg_kp.h>
#include <sys/sysconf.h>
#include <vm/hat_sfmmu.h>
#include <sys/kobj.h>
#include <sys/sun4asi.h>
#include <sys/clconf.h>
#include <sys/platform_module.h>
#include <sys/panic.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/clock.h>
#include <sys/fpras_impl.h>
#include <sys/prom_debug.h>
#include <sys/traptrace.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>

/*
 * fpRAS implementation structures.
 */
struct fpras_chkfn *fpras_chkfnaddrs[FPRAS_NCOPYOPS];
struct fpras_chkfngrp *fpras_chkfngrps;
struct fpras_chkfngrp *fpras_chkfngrps_base;
int fpras_frequency = -1;
int64_t fpras_interval = -1;
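
/*
 * Background, roughly: fpras (floating-point RAS) periodically runs a
 * known floating-point check function during block copy operations so
 * that a misbehaving FPU can be detected.  fpras_chkfngrps points to a
 * per-CPU array of check function groups, one check function per copy
 * operation type (FPRAS_NCOPYOPS), and fpras_interval, once it becomes
 * non-negative, rate-limits how often the checks run.  See mach_fpras()
 * below for the details.
 */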

/*
 * Halt idling cpus optimization
 *
 * This optimization is only enabled on platforms that have
 * CPU halt support.  The cpu_halt_cpu() support is provided
 * in the cpu module and is referenced here with a weak pragma.
 * The presence of this routine automatically enables the halt
 * idling cpus functionality if the global switch
 * enable_halt_idle_cpus is set (it is set by default).
 */
#pragma weak cpu_halt_cpu
extern void cpu_halt_cpu();

int enable_halt_idle_cpus = 1;	/* global switch */

void
setup_trap_table(void)
{
	intr_init(CPU);			/* init interrupt request free list */
	setwstate(WSTATE_KERN);
	prom_set_traptable(&trap_table);
}

void
mach_fpras()
{
	if (fpras_implemented && !fpras_disable) {
		int i;
		struct fpras_chkfngrp *fcgp;
		size_t chkfngrpsallocsz;

		/*
		 * Note that we size off of NCPU and set up for
		 * all those possibilities regardless of whether
		 * the cpu id is present or not.  We do this so that
		 * we don't have any construction or destruction
		 * activity to perform at DR time, and it's not
		 * costly in memory.  We require block alignment.
		 */
		chkfngrpsallocsz = NCPU * sizeof (struct fpras_chkfngrp);
		fpras_chkfngrps_base = kmem_alloc(chkfngrpsallocsz, KM_SLEEP);
		if (IS_P2ALIGNED((uintptr_t)fpras_chkfngrps_base, 64)) {
			fpras_chkfngrps = fpras_chkfngrps_base;
		} else {
			kmem_free(fpras_chkfngrps_base, chkfngrpsallocsz);
			chkfngrpsallocsz += 64;
			fpras_chkfngrps_base = kmem_alloc(chkfngrpsallocsz,
			    KM_SLEEP);
			fpras_chkfngrps = (struct fpras_chkfngrp *)
			    P2ROUNDUP((uintptr_t)fpras_chkfngrps_base, 64);
		}
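
		/*
		 * Worked example of the alignment fallback above, with
		 * made-up addresses: IS_P2ALIGNED(addr, 64) tests
		 * (addr & 0x3f) == 0.  If the first kmem_alloc() returned
		 * 0x30001028, the test fails (0x30001028 & 0x3f == 0x28),
		 * so we allocate 64 extra bytes instead;
		 * P2ROUNDUP(0x30001028, 64) == 0x30001040 is then 64-byte
		 * aligned, with at least the original
		 * NCPU * sizeof (struct fpras_chkfngrp) bytes still
		 * available past the aligned address within the padded
		 * allocation.
		 */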

		/*
		 * Copy our check function into place for each copy operation
		 * and each cpu id.
		 */
		fcgp = &fpras_chkfngrps[0];
		for (i = 0; i < FPRAS_NCOPYOPS; ++i)
			bcopy((void *)fpras_chkfn_type1, &fcgp->fpras_fn[i],
			    sizeof (struct fpras_chkfn));
		for (i = 1; i < NCPU; ++i)
			*(&fpras_chkfngrps[i]) = *fcgp;

		/*
		 * At definition fpras_frequency is set to -1, and it will
		 * still have that value unless changed in /etc/system (not
		 * strictly supported, but not preventable).  The following
		 * both sets the default and sanity-checks anything from
		 * /etc/system.
		 */
		if (fpras_frequency < 0)
			fpras_frequency = FPRAS_DEFAULT_FREQUENCY;

		/*
		 * Now calculate fpras_interval.  When fpras_interval
		 * becomes non-negative, fpras checks will commence
		 * (copies before this point in boot will bypass fpras).
		 * Our stores of instructions must be visible; no need
		 * to flush as they have never been executed before.
		 */
		membar_producer();
		fpras_interval = (fpras_frequency == 0) ?
		    0 : sys_tick_freq / fpras_frequency;
	}
}
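
/*
 * Illustration with made-up numbers: fpras_interval is in %tick units.
 * If sys_tick_freq were 150000000 (a 150 MHz tick) and fpras_frequency
 * were 100, fpras_interval would be 150000000 / 100 = 1500000 ticks,
 * i.e. a given check can fire at most 100 times per second.  An
 * fpras_frequency of 0 yields fpras_interval == 0, which removes the
 * rate limiting altogether.
 */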

void
mach_hw_copy_limit(void)
{
	if (!fpu_exists) {
		use_hw_bcopy = 0;
		hw_copy_limit_1 = 0;
		hw_copy_limit_2 = 0;
		hw_copy_limit_4 = 0;
		hw_copy_limit_8 = 0;
		use_hw_bzero = 0;
	}
}

void
load_tod_module()
{
	/*
	 * Load the tod driver module for the tod part found on this system.
	 * Recompute the cpu frequency/delays based on tod, as the tod part
	 * tends to keep time more accurately.
	 */
	if (tod_module_name == NULL || modload("tod", tod_module_name) == -1)
		halt("Can't load tod module");
}

void
mach_memscrub(void)
{
	/*
	 * Start up the memory scrubber, if not running fpu emulation code.
	 */

#ifndef _HW_MEMSCRUB_SUPPORT
	if (fpu_exists) {
		if (memscrub_init()) {
			cmn_err(CE_WARN,
			    "Memory scrubber failed to initialize");
		}
	}
#endif /* _HW_MEMSCRUB_SUPPORT */
}

/*
 * Halt the calling CPU until awoken via an interrupt.
 * This routine should only be invoked if cpu_halt_cpu()
 * exists and is supported; see mach_cpu_halt_idle().
 */
static void
cpu_halt(void)
{
	cpu_t *cpup = CPU;
	processorid_t cpun = cpup->cpu_id;
	cpupart_t *cp = cpup->cpu_part;
	int hset_update = 1;
	uint_t pstate;
	extern uint_t getpstate(void);
	extern void setpstate(uint_t);

	/*
	 * If this CPU is online, and there are multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap.  This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (CPU->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awoken.  We therefore need to first
	 * add ourselves to the halted cpuset, and then check if there
	 * is any work available.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		membar_producer();
		CPUSET_ATOMIC_ADD(cp->cp_haltset, cpun);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check.  We'll be notified through the clearing of our
	 * bit in the halted CPU bitmask, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return.  If the bit is cleared after
	 * we check, then the poke will pop us out of the halted state.
	 *
	 * The ordering of the poke and the clearing of the bit by cpu_wakeup
	 * is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_halt() must disable interrupts, then check for the bit.
	 */
	pstate = getpstate();
	setpstate(pstate & ~PSTATE_IE);

	if (hset_update && !CPU_IN_SET(cp->cp_haltset, cpun)) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		setpstate(pstate);
		return;
	}
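
	/*
	 * Illustrative interleaving of why cpu_wakeup() must clear the
	 * bit before poking; a hypothetical, buggy poke-then-clear order
	 * is shown:
	 *
	 *	cpu_halt()			cpu_wakeup() (buggy order)
	 *	----------			--------------------------
	 *					poke_cpu()  (delivered and
	 *					  ignored; not halted yet)
	 *	setpstate(~PSTATE_IE)
	 *	CPU_IN_SET() != 0, so halt	CPUSET_ATOMIC_DEL(our bit)
	 *
	 * The poke was consumed before we halted and no further poke is
	 * sent, so we would sleep with a runnable thread queued.  With
	 * the required clear-then-poke order, seeing our bit still set
	 * guarantees the poke has not yet been issued; since interrupts
	 * are already disabled, it will pend and pop us out of the halt.
	 */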

	/*
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness.  disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	if (cpup->cpu_disp->disp_nrunnable != 0) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
		}
		setpstate(pstate);
		return;
	}

	/*
	 * Halt the strand.
	 */
	if (&cpu_halt_cpu)
		cpu_halt_cpu();

	/*
	 * We're no longer halted.
	 */
	setpstate(pstate);
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
	}
}

/*
 * If "cpu" is halted, then wake it up, clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 * This function should only be invoked if cpu_halt_cpu()
 * exists and is supported; see mach_cpu_halt_idle().
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t cpu_found;
	int result;
	cpupart_t *cp;

	cp = cpu->cpu_part;
	if (CPU_IN_SET(cp->cp_haltset, cpu->cpu_id)) {
		/*
		 * Clear the halted bit for that CPU, since it will be
		 * poked in a moment.
		 */
		CPUSET_ATOMIC_DEL(cp->cp_haltset, cpu->cpu_id);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_halt().
		 * Poking ourselves is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch.  No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;

	/*
	 * See if there are any other halted CPUs.  If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		CPUSET_FIND(cp->cp_haltset, cpu_found);
		if (cpu_found == CPUSET_NOTINSET)
			return;

		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
		CPUSET_ATOMIC_XDEL(cp->cp_haltset, cpu_found, result);
	} while (result < 0);

	if (cpu_found != CPU->cpu_id)
		poke_cpu(cpu_found);
}
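
/*
 * A note on the retry loop above: CPUSET_ATOMIC_XDEL atomically clears
 * the found CPU's bit and reports failure in "result" if the bit was
 * already clear, meaning somebody else woke that CPU first.  In that
 * case we search the halt set again rather than poke a CPU that is no
 * longer halted.
 */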

void
mach_cpu_halt_idle()
{
	if (enable_halt_idle_cpus) {
		if (&cpu_halt_cpu) {
			idle_cpu = cpu_halt;
			disp_enq_thread = cpu_wakeup;
		}
	}
}

/*ARGSUSED*/
void
cpu_intrq_setup(struct cpu *cp)
{
	/* Interrupt mondo queues not applicable to sun4u */
}

/*ARGSUSED*/
void
cpu_intrq_register(struct cpu *cp)
{
	/* Interrupt/error queues not applicable to sun4u */
}

/*ARGSUSED*/
void
mach_htraptrace_setup(int cpuid)
{
	/* Set up hypervisor traptrace buffer, not applicable to sun4u */
}

/*ARGSUSED*/
void
mach_htraptrace_configure(int cpuid)
{
	/* enable/disable hypervisor traptracing, not applicable to sun4u */
}

/*ARGSUSED*/
void
mach_htraptrace_init(void)
{
	/* allocate hypervisor traptrace buffer, not applicable to sun4u */
}

/*ARGSUSED*/
void
mach_htraptrace_cleanup(int cpuid)
{
	/* clean up hypervisor traptrace buffer, not applicable to sun4u */
}

void
mach_descrip_init(void)
{
	/* Obtain Machine description - only for sun4v */
}

void
hsvc_setup(void)
{
	/* Set up hypervisor services, not applicable to sun4u */
}

/*
 * Return true if the machine we're running on is a Positron.
 * (Positron is an unsupported developer platform.)
 */
int
iam_positron(void)
{
	char model[32];
	const char proto_model[] = "SUNW,501-2732";
	pnode_t root = prom_rootnode();

	if (prom_getproplen(root, "model") != sizeof (proto_model))
		return (0);

	(void) prom_getprop(root, "model", model);
	if (strcmp(model, proto_model) == 0)
		return (1);
	return (0);
}

/*
 * Find a physically contiguous area of twice the largest ecache size
 * to be used while doing displacement flush of ecaches.
 */
uint64_t
ecache_flush_address(void)
{
	struct memlist *pmem;
	uint64_t flush_size;
	uint64_t ret_val;

	flush_size = ecache_size * 2;
	for (pmem = phys_install; pmem; pmem = pmem->next) {
		ret_val = P2ROUNDUP(pmem->address, ecache_size);
		if (ret_val + flush_size <= pmem->address + pmem->size)
			return (ret_val);
	}
	return ((uint64_t)-1);
}
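
/*
 * Worked example with a made-up memlist: if ecache_size is 0x800000
 * (8MB), flush_size is 0x1000000.  For a segment at address 0x00fff000
 * of size 0x02000000, P2ROUNDUP(0x00fff000, 0x800000) == 0x01000000,
 * and 0x01000000 + 0x1000000 <= 0x02fff000 (the end of the segment),
 * so 0x01000000 is returned: an ecache-aligned range with room for two
 * full ecache sizes within the segment.
 */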

/*
 * Called with the memlist lock held to say that phys_install has
 * changed.
 */
void
phys_install_has_changed(void)
{
	/*
	 * Get the new address into a temporary just in case panicking
	 * involves use of ecache_flushaddr.
	 */
	uint64_t new_addr;

	new_addr = ecache_flush_address();
	if (new_addr == (uint64_t)-1) {
		cmn_err(CE_PANIC,
		    "ecache_flush_address(): failed, ecache_size=%x",
		    ecache_size);
		/*NOTREACHED*/
	}
	ecache_flushaddr = new_addr;
	membar_producer();
}