/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/cpuvar.h>
#include <sys/cpu_module.h>
#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/prom_plat.h>
#include <sys/hypervisor_api.h>
#include <sys/hsvc.h>

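/*
 * Cross-call timeout limits (in %tick units) and the x-trap handler,
 * defined elsewhere, that unregisters a CPU's interrupt and error
 * queues before the CPU is powered down.
 */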
extern uint64_t xc_tick_limit;
extern uint64_t xc_tick_jump_limit;

extern void cpu_intrq_unregister_powerdown(uint64_t doneflag_va);

/*
 * set_idle_cpu is called from idle() when a CPU becomes idle.
 */
/*ARGSUSED*/
void
set_idle_cpu(int cpun)
{
}

/*
 * unset_idle_cpu is called from idle() when a CPU is no longer idle.
 */
/*ARGSUSED*/
void
unset_idle_cpu(int cpun)
{
}

/*
 * Stop a CPU based on its cpuid, using the cpu_stop hypervisor call.
 * Since this requires that the hypervisor force a remote CPU to stop,
 * the assumption is made that this should take roughly the same amount
 * of time as executing a cross-call.  Consequently, the xcall
 * timeout is used to determine when to give up waiting for the CPU to
 * stop.
 *
 * Attempts to stop a CPU already in the stopped or error state will
 * silently succeed. Zero is returned on success and a non-negative
 * errno value is returned on failure.
 */
int
stopcpu_bycpuid(int cpuid)
{
	uint64_t	loop_cnt;
	uint64_t	state;
	uint64_t	rv;
	uint64_t	major = 0;
	uint64_t	minor = 0;
	uint64_t	cpu_stop_time_limit;
	extern uint64_t	xc_func_time_limit;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Check the state of the CPU up front to see if an
	 * attempt to stop it is even necessary.
	 */
	if (hv_cpu_state(cpuid, &state) != H_EOK)
		return (EINVAL);

	/* treat stopped and error state the same */
	if (state != CPU_STATE_RUNNING) {
		/* nothing to do */
		return (0);
	}

	/*
	 * The HV API to stop a CPU is only supported in
	 * version 1.1 and later of the core group. If an
	 * older version of the HV is in use, return not
	 * supported.
	 */
	if (hsvc_version(HSVC_GROUP_CORE, &major, &minor) != 0)
		return (EINVAL);

	ASSERT(major != 0);

	if ((major == 1) && (minor < 1))
		return (ENOTSUP);

	/* use the mondo timeout if it has been initialized */
	cpu_stop_time_limit = xc_func_time_limit;

	/*
	 * If called early in boot before the mondo time limit
	 * is set, use a reasonable timeout based on the clock
	 * frequency of the current CPU.
	 */
	if (cpu_stop_time_limit == 0)
		cpu_stop_time_limit = cpunodes[CPU->cpu_id].clock_freq;

	/* should only fail if called too early in boot */
	ASSERT(cpu_stop_time_limit > 0);

	loop_cnt = 0;

	/*
	 * Attempt to stop the CPU, retrying if it is busy.
	 */
	while (loop_cnt++ < cpu_stop_time_limit) {

		if ((rv = hv_cpu_stop(cpuid)) != H_EWOULDBLOCK)
			break;
	}

	if (loop_cnt == cpu_stop_time_limit)
		return (ETIMEDOUT);

	if (rv != H_EOK)
		return (EINVAL);

	/*
	 * Verify that the CPU has reached the stopped state.
	 */
	while (loop_cnt++ < cpu_stop_time_limit) {

		if (hv_cpu_state(cpuid, &state) != H_EOK)
			return (EINVAL);

		/* treat stopped and error state the same */
		if (state != CPU_STATE_RUNNING)
			break;
	}

	return ((loop_cnt == cpu_stop_time_limit) ? ETIMEDOUT : 0);
}

/*
 * X-trap to the target to unregister its interrupt and error queues
 * and put it in a safe place just before the CPU is stopped. After
 * unregistering its queues, the target CPU must not return from the
 * trap to priv or user context. Wait until the target confirms that
 * its queues have been unregistered.
 */
void
xt_cpu_unreg_powerdown(struct cpu *cpup)
{
	uint8_t volatile not_done;
	uint64_t starttick, endtick, tick, lasttick;
	processorid_t cpuid = cpup->cpu_id;

	kpreempt_disable();

	/*
	 * Sun4v uses a queue for receiving mondos. Successful
	 * transmission of a mondo only indicates that the mondo
	 * has been written into the queue.
	 *
	 * Set the not_done flag to 1 before sending the cross
	 * trap and wait until the other cpu resets it to 0.
	 */

	not_done = 1;

	xt_one_unchecked(cpuid, (xcfunc_t *)cpu_intrq_unregister_powerdown,
	    (uint64_t)&not_done, 0);

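	/* Bound the wait on the target using the cross-call timeout limits. */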
	starttick = lasttick = gettick();
	endtick = starttick + xc_tick_limit;

	while (not_done) {

		tick = gettick();

		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a break
		 * point. Adjust endtick accordingly to avoid panic.
		 */
		if (tick > (lasttick + xc_tick_jump_limit)) {
			endtick += (tick - lasttick);
		}

		lasttick = tick;
		if (tick > endtick) {
			cmn_err(CE_CONT, "Cross trap timeout at cpu id %x\n",
			    cpuid);
			cmn_err(CE_WARN, "xt_intrq_unreg_powerdown: timeout");
		}
	}

	kpreempt_enable();
}

int
plat_cpu_poweroff(struct cpu *cp)
{
	int		rv = 0;
	int		status;
	processorid_t	cpuid = cp->cpu_id;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Capture all CPUs (except for the detaching proc) to prevent
	 * crosscalls to the detaching proc until it has cleared its
	 * bit in cpu_ready_set.
	 *
	 * The CPUs remain paused and the prom_mutex is known to be free.
	 * This prevents the x-trap victim from blocking when doing prom
	 * IEEE-1275 calls at a high PIL level.
	 */
	promsafe_pause_cpus();

	/*
	 * Quiesce interrupts on the target CPU. We do this by marking
	 * the CPU 'not ready' (i.e. removing the CPU from cpu_ready_set)
	 * to prevent it from receiving cross calls and cross traps. This
	 * prevents the processor from receiving any new soft interrupts.
	 */
	mp_cpu_quiesce(cp);

	/*
	 * Send a cross trap to the cpu to unregister its interrupt
	 * and error queues.
	 */
	xt_cpu_unreg_powerdown(cp);

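	/*
	 * Mark the CPU offline, quiesced and powered off before asking
	 * the hypervisor to stop it.
	 */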
	cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;

	/* call into the Hypervisor to stop the CPU */
	if ((status = stopcpu_bycpuid(cpuid)) != 0) {
		rv = -1;
	}

	start_cpus();

	if (rv != 0) {
		cmn_err(CE_WARN, "failed to stop cpu %d (%d)", cpuid, status);
		/* mark the CPU faulted so that it cannot be onlined */
		cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_FAULTED;
	}

	return (rv);
}

int
plat_cpu_poweron(struct cpu *cp)
{
	extern void	restart_other_cpu(int);

	ASSERT(MUTEX_HELD(&cpu_lock));

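	/*
	 * Clear the poweroff flag and let the common startup code
	 * restart the CPU.
	 */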
	cp->cpu_flags &= ~CPU_POWEROFF;

	restart_other_cpu(cp->cpu_id);

	return (0);
}