/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/systm.h>
#include <sys/membar.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <sys/platform_module.h>
#include <sys/cpuvar.h>
#include <sys/cpu_module.h>
#include <sys/cmp.h>

#include <sys/cpu_sgnblk_defs.h>

static cpuset_t cpu_idle_set;
static kmutex_t cpu_idle_lock;
typedef const char *fn_t;

/*
 * flags to determine if the PROM routines
 * should be used to idle/resume/stop cpus
 */
static int kern_idle[NCPU];		/* kernel's idle loop */
static int cpu_are_paused;
extern void debug_flush_windows();

/*
 * Initialize the idlestop mutex
 */
void
idlestop_init(void)
{
	mutex_init(&cpu_idle_lock, NULL, MUTEX_SPIN, (void *)ipltospl(PIL_15));
}

static void
cpu_idle_self(void)
{
	uint_t s;
	label_t save;

	s = spl8();
	debug_flush_windows();

	CPU->cpu_m.in_prom = 1;
	membar_stld();

	save = curthread->t_pcb;
	(void) setjmp(&curthread->t_pcb);

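	/*
	 * Spin until resume_other_cpus() clears this CPU's kern_idle[]
	 * entry; the initiating CPU polls cpu_m.in_prom (set above) to
	 * tell when this CPU has parked here.
	 */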
	kern_idle[CPU->cpu_id] = 1;
	while (kern_idle[CPU->cpu_id])
		/* SPIN */;

	CPU->cpu_m.in_prom = 0;
	membar_stld();

	curthread->t_pcb = save;
	splx(s);
}

/*
 * Idle all other ready CPUs by cross-calling them into cpu_idle_self().
 * cpu_idle_lock is acquired here and stays held until resume_other_cpus()
 * drops it.
 */
void
idle_other_cpus(void)
{
	int i, cpuid, ntries;
	int failed = 0;

	if (ncpus == 1)
		return;

	mutex_enter(&cpu_idle_lock);

	cpuid = CPU->cpu_id;
	ASSERT(cpuid < NCPU);

	cpu_idle_set = cpu_ready_set;
	CPUSET_DEL(cpu_idle_set, cpuid);

	if (CPUSET_ISNULL(cpu_idle_set))
		return;

	xt_some(cpu_idle_set, (xcfunc_t *)idle_stop_xcall,
	    (uint64_t)cpu_idle_self, NULL);

	for (i = 0; i < NCPU; i++) {
		if (!CPU_IN_SET(cpu_idle_set, i))
			continue;

		ntries = 0x10000;
		while (!cpu[i]->cpu_m.in_prom && ntries) {
			DELAY(50);
			ntries--;
		}

		/*
		 * A cpu failing to idle is an error condition, since
		 * we can't be sure anymore of its state.
		 */
		if (!cpu[i]->cpu_m.in_prom) {
			cmn_err(CE_WARN, "cpuid 0x%x failed to idle", i);
			failed++;
		}
	}

	if (failed) {
		mutex_exit(&cpu_idle_lock);
		cmn_err(CE_PANIC, "idle_other_cpus: not all cpus idled");
	}
}

/*
 * Release the CPUs idled by idle_other_cpus() and drop cpu_idle_lock.
 */
void
resume_other_cpus(void)
{
	int i, ntries;
	int cpuid = CPU->cpu_id;
	boolean_t failed = B_FALSE;

	if (ncpus == 1)
		return;

	ASSERT(cpuid < NCPU);
	ASSERT(MUTEX_HELD(&cpu_idle_lock));

	for (i = 0; i < NCPU; i++) {
		if (!CPU_IN_SET(cpu_idle_set, i))
			continue;

		kern_idle[i] = 0;
		membar_stld();
	}

	for (i = 0; i < NCPU; i++) {
		if (!CPU_IN_SET(cpu_idle_set, i))
			continue;

		ntries = 0x10000;
		while (cpu[i]->cpu_m.in_prom && ntries) {
			DELAY(50);
			ntries--;
		}

		/*
		 * A cpu failing to resume is an error condition, since
		 * intrs may have been directed there.
		 */
		if (cpu[i]->cpu_m.in_prom) {
			cmn_err(CE_WARN, "cpuid 0x%x failed to resume", i);
			continue;
		}
		CPUSET_DEL(cpu_idle_set, i);
	}

	failed = !CPUSET_ISNULL(cpu_idle_set);

	mutex_exit(&cpu_idle_lock);

	/*
	 * Non-zero if a cpu failed to resume
	 */
	if (failed)
		cmn_err(CE_PANIC, "resume_other_cpus: not all cpus resumed");
}

/*
 * Stop all other cpus before halting or rebooting.  We pause the cpus
 * instead of sending a cross call.
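 * The pause is sticky: once cpu_are_paused is set, later calls return
 * immediately.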
 */
void
stop_other_cpus(void)
{
	mutex_enter(&cpu_lock);
	if (cpu_are_paused) {
		mutex_exit(&cpu_lock);
		return;
	}

	if (ncpus > 1)
		intr_redist_all_cpus_shutdown();

	pause_cpus(NULL);
	cpu_are_paused = 1;

	mutex_exit(&cpu_lock);
}

int cpu_quiesce_microsecond_sanity_limit = 60 * 1000000;

/*
 * Remove a CPU from the cross-call/interrupt pool and wait (up to the
 * sanity limit above) for any interrupt activity on it to drain.
 */
void
mp_cpu_quiesce(cpu_t *cp0)
{
	volatile cpu_t *cp = (volatile cpu_t *)cp0;
	int i, sanity_limit = cpu_quiesce_microsecond_sanity_limit;
	int cpuid = cp->cpu_id;
	int found_intr = 1;
	static fn_t f = "mp_cpu_quiesce";

	ASSERT(CPU->cpu_id != cpuid);
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cp->cpu_flags & CPU_QUIESCED);

	/*
	 * Declare CPU as no longer being READY to process interrupts and
	 * wait for them to stop.  A CPU that is not READY can no longer
	 * participate in x-calls or x-traps.
	 */
	cp->cpu_flags &= ~CPU_READY;
	CPUSET_DEL(cpu_ready_set, cpuid);
	membar_sync();

	for (i = 0; i < sanity_limit; i++) {
		if (cp->cpu_intr_actv == 0 &&
		    cp->cpu_thread == cp->cpu_idle_thread) {
			found_intr = 0;
			break;
		}
		DELAY(1);
	}

	if (found_intr) {
		if (cp->cpu_intr_actv) {
			cmn_err(CE_PANIC, "%s: cpu_intr_actv != 0", f);
		} else if (cp->cpu_thread != cp->cpu_idle_thread) {
			cmn_err(CE_PANIC, "%s: cpu_thread != cpu_idle_thread",
			    f);
		}
	}
}

/*
 * Start CPU on user request.
 */
/* ARGSUSED */
int
mp_cpu_start(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	/*
	 * Platforms that use CPU signatures require the signature
	 * block update to indicate that this CPU is in the OS now.
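	 * The matching SIGST_OFFLINE update is done in mp_cpu_stop().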
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, cp->cpu_id);

	cmp_error_resteer(cp->cpu_id);

	return (0);			/* nothing special to do on this arch */
}

/*
 * Stop CPU on user request.
 */
/* ARGSUSED */
int
mp_cpu_stop(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	cmp_error_resteer(cp->cpu_id);

	/*
	 * Platforms that use CPU signatures require the signature
	 * block update to indicate that this CPU is offlined now.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_OFFLINE, SIGSUBST_NULL, cp->cpu_id);
	return (0);			/* nothing special to do on this arch */
}

/*
 * Power on CPU.
 */
int
mp_cpu_poweron(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	if (&plat_cpu_poweron)
		return (plat_cpu_poweron(cp));	/* platform-dependent hook */

	return (ENOTSUP);
}

/*
 * Power off CPU.
 */
int
mp_cpu_poweroff(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	if (&plat_cpu_poweroff)
		return (plat_cpu_poweroff(cp));	/* platform-dependent hook */

	return (ENOTSUP);
}

void
mp_cpu_faulted_enter(struct cpu *cp)
{
	cpu_faulted_enter(cp);
}

void
mp_cpu_faulted_exit(struct cpu *cp)
{
	cpu_faulted_exit(cp);
}