xref: /onnv-gate/usr/src/uts/sun4u/os/mach_startup.c (revision 8931:56a00eaca3d2)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51592Sgirish  * Common Development and Distribution License (the "License").
61592Sgirish  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
228906SEric.Saxe@Sun.COM  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate #include <sys/machsystm.h>
270Sstevel@tonic-gate #include <sys/archsystm.h>
280Sstevel@tonic-gate #include <sys/vm.h>
290Sstevel@tonic-gate #include <sys/cpu.h>
301772Sjl139090 #include <sys/cpupart.h>
318408SEric.Saxe@Sun.COM #include <sys/cmt.h>
328408SEric.Saxe@Sun.COM #include <sys/bitset.h>
330Sstevel@tonic-gate #include <sys/reboot.h>
340Sstevel@tonic-gate #include <sys/kdi.h>
350Sstevel@tonic-gate #include <sys/bootconf.h>
360Sstevel@tonic-gate #include <sys/memlist_plat.h>
370Sstevel@tonic-gate #include <sys/memlist_impl.h>
380Sstevel@tonic-gate #include <sys/prom_plat.h>
390Sstevel@tonic-gate #include <sys/prom_isa.h>
400Sstevel@tonic-gate #include <sys/autoconf.h>
410Sstevel@tonic-gate #include <sys/intreg.h>
420Sstevel@tonic-gate #include <sys/ivintr.h>
430Sstevel@tonic-gate #include <sys/fpu/fpusystm.h>
440Sstevel@tonic-gate #include <sys/iommutsb.h>
450Sstevel@tonic-gate #include <vm/vm_dep.h>
460Sstevel@tonic-gate #include <vm/seg_kmem.h>
470Sstevel@tonic-gate #include <vm/seg_kpm.h>
480Sstevel@tonic-gate #include <vm/seg_map.h>
490Sstevel@tonic-gate #include <vm/seg_kp.h>
500Sstevel@tonic-gate #include <sys/sysconf.h>
510Sstevel@tonic-gate #include <vm/hat_sfmmu.h>
520Sstevel@tonic-gate #include <sys/kobj.h>
530Sstevel@tonic-gate #include <sys/sun4asi.h>
540Sstevel@tonic-gate #include <sys/clconf.h>
550Sstevel@tonic-gate #include <sys/platform_module.h>
560Sstevel@tonic-gate #include <sys/panic.h>
570Sstevel@tonic-gate #include <sys/cpu_sgnblk_defs.h>
580Sstevel@tonic-gate #include <sys/clock.h>
590Sstevel@tonic-gate #include <sys/fpras_impl.h>
600Sstevel@tonic-gate #include <sys/prom_debug.h>
610Sstevel@tonic-gate #include <sys/traptrace.h>
620Sstevel@tonic-gate #include <sys/memnode.h>
630Sstevel@tonic-gate #include <sys/mem_cage.h>
640Sstevel@tonic-gate 
/*
 * fpRAS implementation structures.
 */
struct fpras_chkfn *fpras_chkfnaddrs[FPRAS_NCOPYOPS];
struct fpras_chkfngrp *fpras_chkfngrps;		/* 64-byte aligned view of base */
struct fpras_chkfngrp *fpras_chkfngrps_base;	/* raw allocation backing fpras_chkfngrps */
int fpras_frequency = -1;	/* checks per sec; default applied in mach_fpras() */
int64_t fpras_interval = -1;	/* tick interval; stays negative until fpras enabled */
730Sstevel@tonic-gate 
/*
 * Halt idling cpus optimization
 *
 * This optimization is only enabled on platforms that have
 * CPU halt support. The cpu_halt_cpu() support is provided
 * in the cpu module and it is referenced here with a pragma weak.
 * The presence of this routine automatically enables the halt idling
 * cpus functionality if the global switch enable_halt_idle_cpus
 * is set (default is set).
 *
 */
#pragma weak	cpu_halt_cpu
extern void	cpu_halt_cpu();
871772Sjl139090 
/*
 * Defines for the idle_state_transition DTrace probe
 *
 * The probe fires when the CPU undergoes an idle state change (e.g. halting)
 * The argument passed is the state to which the CPU is transitioning.
 *
 * The states are defined here.
 */
#define	IDLE_STATE_NORMAL 0
#define	IDLE_STATE_HALTED 1

int		enable_halt_idle_cpus = 1; /* global switch */
1001772Sjl139090 
/*
 * Prepare this CPU for kernel trap handling: initialize its interrupt
 * request free list, switch to the kernel window state, and install the
 * kernel trap table via the PROM.  Order matters: interrupts structures
 * must exist before the trap table can dispatch into them.
 */
void
setup_trap_table(void)
{
	intr_init(CPU);			/* init interrupt request free list */
	setwstate(WSTATE_KERN);		/* kernel register-window state */
	prom_set_traptable(&trap_table);	/* hand %tba over from PROM */
}
1080Sstevel@tonic-gate 
/*
 * Set up fpRAS: allocate and populate the per-cpu check function groups,
 * then enable periodic checking by computing fpras_interval.  No-op when
 * fpras is not implemented on this platform or has been disabled.
 */
void
mach_fpras()
{
	if (fpras_implemented && !fpras_disable) {
		int i;
		struct fpras_chkfngrp *fcgp;
		size_t chkfngrpsallocsz;

		/*
		 * Note that we size off of NCPU and setup for
		 * all those possibilities regardless of whether
		 * the cpu id is present or not.  We do this so that
		 * we don't have any construction or destruction
		 * activity to perform at DR time, and it's not
		 * costly in memory.  We require block alignment.
		 */
		chkfngrpsallocsz = NCPU * sizeof (struct fpras_chkfngrp);
		fpras_chkfngrps_base = kmem_alloc(chkfngrpsallocsz, KM_SLEEP);
		if (IS_P2ALIGNED((uintptr_t)fpras_chkfngrps_base, 64)) {
			fpras_chkfngrps = fpras_chkfngrps_base;
		} else {
			/*
			 * Not 64-byte aligned: over-allocate by one block
			 * and round the usable pointer up; base keeps the
			 * raw address needed to free the memory later.
			 */
			kmem_free(fpras_chkfngrps_base, chkfngrpsallocsz);
			chkfngrpsallocsz += 64;
			fpras_chkfngrps_base = kmem_alloc(chkfngrpsallocsz,
			    KM_SLEEP);
			fpras_chkfngrps = (struct fpras_chkfngrp *)
			    P2ROUNDUP((uintptr_t)fpras_chkfngrps_base, 64);
		}

		/*
		 * Copy our check function into place for each copy operation
		 * and each cpu id.
		 */
		fcgp = &fpras_chkfngrps[0];
		for (i = 0; i < FPRAS_NCOPYOPS; ++i)
			bcopy((void *)fpras_chkfn_type1, &fcgp->fpras_fn[i],
			    sizeof (struct fpras_chkfn));
		for (i = 1; i < NCPU; ++i)
			*(&fpras_chkfngrps[i]) = *fcgp;	/* struct copy */

		/*
		 * At definition fpras_frequency is set to -1, and it will
		 * still have that value unless changed in /etc/system (not
		 * strictly supported, but not preventable).  The following
		 * both sets the default and sanity checks anything from
		 * /etc/system.
		 */
		if (fpras_frequency < 0)
			fpras_frequency = FPRAS_DEFAULT_FREQUENCY;

		/*
		 * Now calculate fpras_interval.  When fpras_interval
		 * becomes non-negative fpras checks will commence
		 * (copies before this point in boot will bypass fpras).
		 * Our stores of instructions must be visible; no need
		 * to flush as they're never been executed before.
		 */
		membar_producer();
		fpras_interval = (fpras_frequency == 0) ?
		    0 : sys_tick_freq / fpras_frequency;
	}
}
1710Sstevel@tonic-gate 
1720Sstevel@tonic-gate void
1730Sstevel@tonic-gate mach_hw_copy_limit(void)
1740Sstevel@tonic-gate {
1750Sstevel@tonic-gate 	if (!fpu_exists) {
1760Sstevel@tonic-gate 		use_hw_bcopy = 0;
1770Sstevel@tonic-gate 		hw_copy_limit_1 = 0;
1780Sstevel@tonic-gate 		hw_copy_limit_2 = 0;
1790Sstevel@tonic-gate 		hw_copy_limit_4 = 0;
1800Sstevel@tonic-gate 		hw_copy_limit_8 = 0;
1810Sstevel@tonic-gate 		use_hw_bzero = 0;
1820Sstevel@tonic-gate 	}
1830Sstevel@tonic-gate }
1840Sstevel@tonic-gate 
1850Sstevel@tonic-gate void
1860Sstevel@tonic-gate load_tod_module()
1870Sstevel@tonic-gate {
1880Sstevel@tonic-gate 	/*
1890Sstevel@tonic-gate 	 * Load tod driver module for the tod part found on this system.
1900Sstevel@tonic-gate 	 * Recompute the cpu frequency/delays based on tod as tod part
1910Sstevel@tonic-gate 	 * tends to keep time more accurately.
1920Sstevel@tonic-gate 	 */
1930Sstevel@tonic-gate 	if (tod_module_name == NULL || modload("tod", tod_module_name) == -1)
1940Sstevel@tonic-gate 		halt("Can't load tod module");
1950Sstevel@tonic-gate }
1960Sstevel@tonic-gate 
void
mach_memscrub(void)
{
	/*
	 * Startup memory scrubber, if not running fpu emulation code.
	 */

#ifndef _HW_MEMSCRUB_SUPPORT
	/*
	 * The software scrubber is skipped entirely on platforms that
	 * define _HW_MEMSCRUB_SUPPORT (hardware does the scrubbing),
	 * and is only started when a real FPU exists.
	 */
	if (fpu_exists) {
		if (memscrub_init()) {
			cmn_err(CE_WARN,
			    "Memory scrubber failed to initialize");
		}
	}
#endif /* _HW_MEMSCRUB_SUPPORT */
}
2131772Sjl139090 
/*
 * Halt the present CPU until awoken via an interrupt.
 * This routine should only be invoked if cpu_halt_cpu()
 * exists and is supported, see mach_cpu_halt_idle()
 */
void
cpu_halt(void)
{
	cpu_t *cpup = CPU;
	processorid_t cpu_sid = cpup->cpu_seqid;
	cpupart_t *cp = cpup->cpu_part;
	int hset_update = 1;
	/* local dispatch queue length; polled in the halt loop below */
	volatile int *p = &cpup->cpu_disp->disp_nrunnable;
	uint_t s;

	/*
	 * If this CPU is online then we should notate our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitset. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (CPU->cpu_flags & CPU_OFFLINE)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitset
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpu bitset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted bitset, and then check if there
	 * is any work available.  The order is important to prevent a race
	 * that can lead to work languishing on a run queue somewhere while
	 * this CPU remains halted.
	 *
	 * Either the producing CPU will see we're halted and will awaken us,
	 * or this CPU will see the work available in disp_anywork()
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		membar_producer();
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitset, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.  Wait until something becomes
	 * runnable locally or we are awakened (i.e. removed from the halt
	 * set).  Note that cpu_halt_cpu() can return even if we have
	 * nothing to do.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check then the poke will pop us out of the halted state.
	 * Also, if the offlined CPU has been brought back on-line, then
	 * we return as well.
	 *
	 * The ordering of the poke and the clearing of the bit by cpu_wakeup
	 * is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_halt() must disable interrupts, then check for the bit.
	 *
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 *
	 * Any interrupt will awaken the cpu from halt. Looping here
	 * will filter spurious interrupts that wake us up, but don't
	 * represent a need for us to head back out to idle().  This
	 * will enable the idle loop to be more efficient and sleep in
	 * the processor pipeline for a larger percent of the time,
	 * which returns useful cycles to the peer hardware strand
	 * that shares the pipeline.
	 */
	s = disable_vec_intr();
	while (*p == 0 &&
	    ((hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid)) ||
	    (!hset_update && (CPU->cpu_flags & CPU_OFFLINE)))) {

		DTRACE_PROBE1(idle__state__transition,
		    uint_t, IDLE_STATE_HALTED);
		(void) cpu_halt_cpu();
		DTRACE_PROBE1(idle__state__transition,
		    uint_t, IDLE_STATE_NORMAL);

		/* briefly open the interrupt window so a pending poke lands */
		enable_vec_intr(s);
		s = disable_vec_intr();
	}

	/*
	 * We're no longer halted
	 */
	enable_vec_intr(s);
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}
3311772Sjl139090 
/*
 * If "cpu" is halted, then wake it up clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 * This function should only be invoked if cpu_halt_cpu()
 * exists and is supported, see mach_cpu_halt_idle()
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	processorid_t	cpu_sid;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	cpu_sid = cpu->cpu_seqid;
	if (bitset_in_set(&cp->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		/*
		 * We may find the current CPU present in the halted cpu bitset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_halt().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if this is for a bound thread.
	 */
	if (bound)
		return;

	/*
	 * The CPU specified for wakeup isn't currently halted, so check
	 * to see if there are any other halted CPUs in the partition,
	 * and if there are then awaken one.
	 *
	 * If possible, try to select a CPU close to the target, since this
	 * will likely trigger a migration.
	 */
	do {
		cpu_found = bitset_find(&cp->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;
		/* retry if another CPU raced us and claimed this bit first */
	} while (bitset_atomic_test_and_del(&cp->cp_haltset, cpu_found) < 0);

	if (cpu_found != CPU->cpu_seqid)
		poke_cpu(cpu_seq[cpu_found]->cpu_id);
}
3980Sstevel@tonic-gate 
3990Sstevel@tonic-gate void
4008906SEric.Saxe@Sun.COM mach_cpu_halt_idle(void)
4010Sstevel@tonic-gate {
4021772Sjl139090 	if (enable_halt_idle_cpus) {
4031772Sjl139090 		if (&cpu_halt_cpu) {
4041772Sjl139090 			idle_cpu = cpu_halt;
4051772Sjl139090 			disp_enq_thread = cpu_wakeup;
4061772Sjl139090 		}
4071772Sjl139090 	}
4080Sstevel@tonic-gate }
4090Sstevel@tonic-gate 
/*ARGSUSED*/
int
cpu_intrq_setup(struct cpu *cp)
{
	/* Interrupt mondo queues are a sun4v construct; nothing to set up */
	return (0);
}
4170Sstevel@tonic-gate 
/*ARGSUSED*/
void
cpu_intrq_cleanup(struct cpu *cp)
{
	/* Nothing to tear down: sun4u has no interrupt mondo queues */
}
4241991Sheppo 
/*ARGSUSED*/
void
cpu_intrq_register(struct cpu *cp)
{
	/* Nothing to register: sun4u has no interrupt/error queues */
}
4310Sstevel@tonic-gate 
/*ARGSUSED*/
void
mach_htraptrace_setup(int cpuid)
{
	/* No hypervisor traptrace buffer on sun4u; nothing to set up */
}
4380Sstevel@tonic-gate 
/*ARGSUSED*/
void
mach_htraptrace_configure(int cpuid)
{
	/* No hypervisor traptracing on sun4u; nothing to enable/disable */
}
4451077Ssvemuri 
/*ARGSUSED*/
void
mach_htraptrace_cleanup(int cpuid)
{
	/* No hypervisor traptrace buffer on sun4u; nothing to clean up */
}
4520Sstevel@tonic-gate 
void
mach_descrip_startup_init(void)
{
	/*
	 * sun4v only: startup-time initialization of the Machine
	 * Description framework.  Not applicable to sun4u.
	 */
}
void
mach_descrip_startup_fini(void)
{
	/*
	 * sun4v only: startup-time teardown of the Machine Description
	 * framework.  Not applicable to sun4u.
	 */
}
4691991Sheppo 
void
mach_descrip_init(void)
{
	/*
	 * sun4v only: initialization of the Machine Description
	 * framework.  Not applicable to sun4u.
	 */
}
4780Sstevel@tonic-gate 
void
hsvc_setup(void)
{
	/* Hypervisor services are a sun4v concept; nothing to do on sun4u */
}
4841592Sgirish 
void
load_mach_drivers(void)
{
	/* No machine class (sun4u) specific drivers to load at present */
}
4901991Sheppo 
4910Sstevel@tonic-gate /*
4920Sstevel@tonic-gate  * Return true if the machine we're running on is a Positron.
4930Sstevel@tonic-gate  * (Positron is an unsupported developers platform.)
4940Sstevel@tonic-gate  */
4950Sstevel@tonic-gate int
4960Sstevel@tonic-gate iam_positron(void)
4970Sstevel@tonic-gate {
4980Sstevel@tonic-gate 	char model[32];
4990Sstevel@tonic-gate 	const char proto_model[] = "SUNW,501-2732";
500789Sahrens 	pnode_t root = prom_rootnode();
5010Sstevel@tonic-gate 
5020Sstevel@tonic-gate 	if (prom_getproplen(root, "model") != sizeof (proto_model))
5030Sstevel@tonic-gate 		return (0);
5040Sstevel@tonic-gate 
5050Sstevel@tonic-gate 	(void) prom_getprop(root, "model", model);
5060Sstevel@tonic-gate 	if (strcmp(model, proto_model) == 0)
5070Sstevel@tonic-gate 		return (1);
5080Sstevel@tonic-gate 	return (0);
5090Sstevel@tonic-gate }
5100Sstevel@tonic-gate 
5110Sstevel@tonic-gate /*
5120Sstevel@tonic-gate  * Find a physically contiguous area of twice the largest ecache size
5130Sstevel@tonic-gate  * to be used while doing displacement flush of ecaches.
5140Sstevel@tonic-gate  */
5150Sstevel@tonic-gate uint64_t
5160Sstevel@tonic-gate ecache_flush_address(void)
5170Sstevel@tonic-gate {
5180Sstevel@tonic-gate 	struct memlist *pmem;
5190Sstevel@tonic-gate 	uint64_t flush_size;
5200Sstevel@tonic-gate 	uint64_t ret_val;
5210Sstevel@tonic-gate 
5220Sstevel@tonic-gate 	flush_size = ecache_size * 2;
5230Sstevel@tonic-gate 	for (pmem = phys_install; pmem; pmem = pmem->next) {
5240Sstevel@tonic-gate 		ret_val = P2ROUNDUP(pmem->address, ecache_size);
5250Sstevel@tonic-gate 		if (ret_val + flush_size <= pmem->address + pmem->size)
5260Sstevel@tonic-gate 			return (ret_val);
5270Sstevel@tonic-gate 	}
5280Sstevel@tonic-gate 	return ((uint64_t)-1);
5290Sstevel@tonic-gate }
5300Sstevel@tonic-gate 
5310Sstevel@tonic-gate /*
5320Sstevel@tonic-gate  * Called with the memlist lock held to say that phys_install has
5330Sstevel@tonic-gate  * changed.
5340Sstevel@tonic-gate  */
5350Sstevel@tonic-gate void
5360Sstevel@tonic-gate phys_install_has_changed(void)
5370Sstevel@tonic-gate {
5380Sstevel@tonic-gate 	/*
5390Sstevel@tonic-gate 	 * Get the new address into a temporary just in case panicking
5400Sstevel@tonic-gate 	 * involves use of ecache_flushaddr.
5410Sstevel@tonic-gate 	 */
5420Sstevel@tonic-gate 	uint64_t new_addr;
5430Sstevel@tonic-gate 
5440Sstevel@tonic-gate 	new_addr = ecache_flush_address();
5450Sstevel@tonic-gate 	if (new_addr == (uint64_t)-1) {
5460Sstevel@tonic-gate 		cmn_err(CE_PANIC,
5470Sstevel@tonic-gate 		    "ecache_flush_address(): failed, ecache_size=%x",
5480Sstevel@tonic-gate 		    ecache_size);
5490Sstevel@tonic-gate 		/*NOTREACHED*/
5500Sstevel@tonic-gate 	}
5510Sstevel@tonic-gate 	ecache_flushaddr = new_addr;
5520Sstevel@tonic-gate 	membar_producer();
5530Sstevel@tonic-gate }
554