/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */
240Sstevel@tonic-gate
250Sstevel@tonic-gate #include <sys/systm.h>
260Sstevel@tonic-gate #include <sys/membar.h>
270Sstevel@tonic-gate #include <sys/machsystm.h>
280Sstevel@tonic-gate #include <sys/x_call.h>
290Sstevel@tonic-gate #include <sys/platform_module.h>
300Sstevel@tonic-gate #include <sys/cpuvar.h>
310Sstevel@tonic-gate #include <sys/cpu_module.h>
320Sstevel@tonic-gate #include <sys/cmp.h>
33*12640SDave.Plauger@Sun.COM #include <sys/dumphdr.h>
340Sstevel@tonic-gate
350Sstevel@tonic-gate #include <sys/cpu_sgnblk_defs.h>
360Sstevel@tonic-gate
/*
 * Set of cpus currently parked in cpu_idle_self(); written by
 * idle_other_cpus() and drained by resume_other_cpus().  Protected
 * by cpu_idle_lock.
 */
static cpuset_t cpu_idle_set;
/* Spin lock serializing the idle/resume handshake below */
static kmutex_t cpu_idle_lock;
/* Function-name string type, used for diagnostic messages */
typedef const char *fn_t;

/*
 * flags to determine if the PROM routines
 * should be used to idle/resume/stop cpus
 */
static int kern_idle[NCPU]; /* kernel's idle loop */
static int cpu_are_paused;  /* nonzero once stop_other_cpus() has paused cpus */
extern void debug_flush_windows(); /* flush SPARC register windows */
480Sstevel@tonic-gate
/*
 * Initialize the idlestop mutex
 */
void
idlestop_init(void)
{
	/*
	 * Spin lock with an interrupt-block cookie at PIL 15, so the
	 * lock can be taken safely while cross-trapping other cpus.
	 */
	mutex_init(&cpu_idle_lock, NULL, MUTEX_SPIN, (void *)ipltospl(PIL_15));
}
570Sstevel@tonic-gate
/*
 * Park the calling cpu in a kernel spin loop at raised SPL until
 * resume_other_cpus() clears this cpu's kern_idle[] slot.  Delivered
 * to the target cpu via idle_stop_xcall() from idle_other_cpus().
 * The cpu_m.in_prom field doubles as the handshake flag polled by
 * idle_other_cpus()/resume_other_cpus().
 */
static void
cpu_idle_self(void)
{
	uint_t s;
	label_t save;

	/* Raise SPL so normal interrupt traffic cannot pull us out */
	s = spl8();
	debug_flush_windows();

	/* Signal the initiating cpu that we have parked */
	CPU->cpu_m.in_prom = 1;
	membar_stld();

	/*
	 * Save and re-seed the thread's jump buffer so any longjmp
	 * taken while parked lands here rather than in stale state.
	 */
	save = curthread->t_pcb;
	(void) setjmp(&curthread->t_pcb);

	kern_idle[CPU->cpu_id] = 1;
	while (kern_idle[CPU->cpu_id])
		/* presumably lets a parked cpu assist a crash dump — see dumphdr.h */
		dumpsys_helper_nw();

	/* Tell the waiter in resume_other_cpus() that we are back */
	CPU->cpu_m.in_prom = 0;
	membar_stld();

	curthread->t_pcb = save;
	splx(s);
}
830Sstevel@tonic-gate
/*
 * Idle every other ready cpu by cross-trapping it into cpu_idle_self().
 * NOTE: on success this returns with cpu_idle_lock still held; the
 * matching resume_other_cpus() call releases it.  Panics (after
 * dropping the lock) if any cpu fails to park within the poll limit.
 */
void
idle_other_cpus(void)
{
	int i, cpuid, ntries;
	int failed = 0;

	if (ncpus == 1)
		return;

	mutex_enter(&cpu_idle_lock);

	cpuid = CPU->cpu_id;
	ASSERT(cpuid < NCPU);

	/* Target set: all ready cpus except ourselves */
	cpu_idle_set = cpu_ready_set;
	CPUSET_DEL(cpu_idle_set, cpuid);

	/* Lock intentionally stays held here; resume_other_cpus() drops it */
	if (CPUSET_ISNULL(cpu_idle_set))
		return;

	/* Cross-trap the set into the parking loop */
	xt_some(cpu_idle_set, (xcfunc_t *)idle_stop_xcall,
	    (uint64_t)cpu_idle_self, NULL);

	for (i = 0; i < NCPU; i++) {
		if (!CPU_IN_SET(cpu_idle_set, i))
			continue;

		/* Poll up to 0x10000 * 50us for the cpu to report in */
		ntries = 0x10000;
		while (!cpu[i]->cpu_m.in_prom && ntries) {
			DELAY(50);
			ntries--;
		}

		/*
		 * A cpu failing to idle is an error condition, since
		 * we can't be sure anymore of its state.
		 */
		if (!cpu[i]->cpu_m.in_prom) {
			cmn_err(CE_WARN, "cpuid 0x%x failed to idle", i);
			failed++;
		}
	}

	if (failed) {
		mutex_exit(&cpu_idle_lock);
		cmn_err(CE_PANIC, "idle_other_cpus: not all cpus idled");
	}
}
1320Sstevel@tonic-gate
/*
 * Release the cpus parked by idle_other_cpus().  Must be called with
 * cpu_idle_lock held (it was acquired in idle_other_cpus()); the lock
 * is dropped before returning.  Panics if any cpu fails to leave its
 * idle loop within the poll limit.
 */
void
resume_other_cpus(void)
{
	int i, ntries;
	int cpuid = CPU->cpu_id;
	boolean_t failed = B_FALSE;

	if (ncpus == 1)
		return;

	ASSERT(cpuid < NCPU);
	ASSERT(MUTEX_HELD(&cpu_idle_lock));

	/* First pass: clear every parked cpu's spin flag */
	for (i = 0; i < NCPU; i++) {
		if (!CPU_IN_SET(cpu_idle_set, i))
			continue;

		kern_idle[i] = 0;
		membar_stld();
	}

	/* Second pass: wait for each cpu to drop its in_prom handshake */
	for (i = 0; i < NCPU; i++) {
		if (!CPU_IN_SET(cpu_idle_set, i))
			continue;

		/* Poll up to 0x10000 * 50us per cpu */
		ntries = 0x10000;
		while (cpu[i]->cpu_m.in_prom && ntries) {
			DELAY(50);
			ntries--;
		}

		/*
		 * A cpu failing to resume is an error condition, since
		 * intrs may have been directed there.
		 */
		if (cpu[i]->cpu_m.in_prom) {
			cmn_err(CE_WARN, "cpuid 0x%x failed to resume", i);
			continue;
		}
		CPUSET_DEL(cpu_idle_set, i);
	}

	/* Any cpu left in the set is still parked */
	failed = !CPUSET_ISNULL(cpu_idle_set);

	mutex_exit(&cpu_idle_lock);

	/*
	 * Non-zero if a cpu failed to resume
	 */
	if (failed)
		cmn_err(CE_PANIC, "resume_other_cpus: not all cpus resumed");

}
1860Sstevel@tonic-gate
1870Sstevel@tonic-gate /*
1880Sstevel@tonic-gate * Stop all other cpu's before halting or rebooting. We pause the cpu's
1890Sstevel@tonic-gate * instead of sending a cross call.
1900Sstevel@tonic-gate */
1910Sstevel@tonic-gate void
stop_other_cpus(void)1920Sstevel@tonic-gate stop_other_cpus(void)
1930Sstevel@tonic-gate {
1940Sstevel@tonic-gate mutex_enter(&cpu_lock);
1950Sstevel@tonic-gate if (cpu_are_paused) {
1960Sstevel@tonic-gate mutex_exit(&cpu_lock);
1970Sstevel@tonic-gate return;
1980Sstevel@tonic-gate }
1990Sstevel@tonic-gate
2000Sstevel@tonic-gate if (ncpus > 1)
2010Sstevel@tonic-gate intr_redist_all_cpus_shutdown();
2020Sstevel@tonic-gate
2030Sstevel@tonic-gate pause_cpus(NULL);
2040Sstevel@tonic-gate cpu_are_paused = 1;
2050Sstevel@tonic-gate
2060Sstevel@tonic-gate mutex_exit(&cpu_lock);
2070Sstevel@tonic-gate }
2080Sstevel@tonic-gate
/* Upper bound, in microseconds, on how long to wait for a cpu to quiesce */
int cpu_quiesce_microsecond_sanity_limit = 60 * 1000000;

/*
 * Remove the target cpu from the ready set (so it can no longer take
 * part in x-calls or x-traps) and wait for its interrupt activity to
 * drain and its current thread to settle on the idle or startup
 * thread.  Panics with a specific diagnostic if the cpu is still busy
 * after cpu_quiesce_microsecond_sanity_limit microseconds.
 */
void
mp_cpu_quiesce(cpu_t *cp0)
{

	/* volatile: the target cpu updates these fields asynchronously */
	volatile cpu_t *cp = (volatile cpu_t *) cp0;
	int i, sanity_limit = cpu_quiesce_microsecond_sanity_limit;
	int cpuid = cp->cpu_id;
	int found_intr = 1;
	static fn_t f = "mp_cpu_quiesce";

	ASSERT(CPU->cpu_id != cpuid);	/* a cpu cannot quiesce itself */
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cp->cpu_flags & CPU_QUIESCED);


	/*
	 * Declare CPU as no longer being READY to process interrupts and
	 * wait for them to stop. A CPU that is not READY can no longer
	 * participate in x-calls or x-traps.
	 */
	cp->cpu_flags &= ~CPU_READY;
	CPUSET_DEL(cpu_ready_set, cpuid);
	membar_sync();

	/* Poll at 1us intervals until the cpu looks quiet */
	for (i = 0; i < sanity_limit; i++) {
		if (cp->cpu_intr_actv == 0 &&
		    (cp->cpu_thread == cp->cpu_idle_thread ||
		    cp->cpu_thread == cp->cpu_startup_thread)) {
			found_intr = 0;
			break;
		}
		DELAY(1);
	}

	if (found_intr) {

		/* Timed out: report precisely what kept the cpu busy */
		if (cp->cpu_intr_actv) {
			cmn_err(CE_PANIC, "%s: cpu_intr_actv != 0", f);
		} else if (cp->cpu_thread != cp->cpu_idle_thread &&
		    cp->cpu_thread != cp->cpu_startup_thread) {
			cmn_err(CE_PANIC, "%s: CPU %d is not quiesced",
			    f, cpuid);
		}

	}
}
2570Sstevel@tonic-gate
/*
 * Start CPU on user request.
 */
/* ARGSUSED */
int
mp_cpu_start(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	/*
	 * Platforms that use CPU signatures require the signature
	 * block update to indicate that this CPU is in the OS now.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, cp->cpu_id);

	/* NOTE(review): presumably re-steers CMP error reporting for this core */
	cmp_error_resteer(cp->cpu_id);

	return (0);			/* nothing special to do on this arch */
}
2760Sstevel@tonic-gate
/*
 * Stop CPU on user request.
 */
/* ARGSUSED */
int
mp_cpu_stop(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	/* NOTE(review): presumably re-steers CMP error reporting off this core */
	cmp_error_resteer(cp->cpu_id);

	/*
	 * Platforms that use CPU signatures require the signature
	 * block update to indicate that this CPU is offlined now.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_OFFLINE, SIGSUBST_NULL, cp->cpu_id);
	return (0);			/* nothing special to do on this arch */
}
2950Sstevel@tonic-gate
2960Sstevel@tonic-gate /*
2970Sstevel@tonic-gate * Power on CPU.
2980Sstevel@tonic-gate */
2990Sstevel@tonic-gate int
mp_cpu_poweron(struct cpu * cp)3000Sstevel@tonic-gate mp_cpu_poweron(struct cpu *cp)
3010Sstevel@tonic-gate {
3020Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
3030Sstevel@tonic-gate if (&plat_cpu_poweron)
3040Sstevel@tonic-gate return (plat_cpu_poweron(cp)); /* platform-dependent hook */
3050Sstevel@tonic-gate
3060Sstevel@tonic-gate return (ENOTSUP);
3070Sstevel@tonic-gate }
3080Sstevel@tonic-gate
3090Sstevel@tonic-gate /*
3100Sstevel@tonic-gate * Power off CPU.
3110Sstevel@tonic-gate */
3120Sstevel@tonic-gate int
mp_cpu_poweroff(struct cpu * cp)3130Sstevel@tonic-gate mp_cpu_poweroff(struct cpu *cp)
3140Sstevel@tonic-gate {
3150Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
3160Sstevel@tonic-gate if (&plat_cpu_poweroff)
3170Sstevel@tonic-gate return (plat_cpu_poweroff(cp)); /* platform-dependent hook */
3180Sstevel@tonic-gate
3190Sstevel@tonic-gate return (ENOTSUP);
3200Sstevel@tonic-gate }
3210Sstevel@tonic-gate
/*
 * Transition the indicated cpu into the faulted state; thin MP-layer
 * wrapper around the common cpu_faulted_enter().
 */
void
mp_cpu_faulted_enter(struct cpu *cp)
{
	cpu_faulted_enter(cp);
}
3270Sstevel@tonic-gate
/*
 * Transition the indicated cpu out of the faulted state; thin MP-layer
 * wrapper around the common cpu_faulted_exit().
 */
void
mp_cpu_faulted_exit(struct cpu *cp)
{
	cpu_faulted_exit(cp);
}
333