/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Facilities for cross-processor subroutine calls using "mailbox" interrupts.
 *
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/x_call.h>
#include <sys/cpu.h>
#include <sys/psw.h>
#include <sys/sunddi.h>
#include <sys/debug.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/mutex_impl.h>
#include <sys/traptrace.h>


static struct	xc_mbox xc_mboxes[X_CALL_LEVELS];
static kmutex_t xc_mbox_lock[X_CALL_LEVELS];
static uint_t	xc_xlat_xcptoipl[X_CALL_LEVELS] = {
	XC_LO_PIL,
	XC_MED_PIL,
	XC_HI_PIL
};

static void xc_common(xc_func_t, xc_arg_t, xc_arg_t, xc_arg_t,
    int, cpuset_t, int);

static int	xc_initialized = 0;

void
xc_init()
{
	/*
	 * By making these mutexes type MUTEX_DRIVER, the ones below
	 * LOCK_LEVEL will be implemented as adaptive mutexes, and the
	 * ones above LOCK_LEVEL will be spin mutexes.
	 */
	mutex_init(&xc_mbox_lock[0], NULL, MUTEX_DRIVER,
	    (void *)ipltospl(XC_LO_PIL));
	mutex_init(&xc_mbox_lock[1], NULL, MUTEX_DRIVER,
	    (void *)ipltospl(XC_MED_PIL));
	mutex_init(&xc_mbox_lock[2], NULL, MUTEX_DRIVER,
	    (void *)ipltospl(XC_HI_PIL));

	xc_initialized = 1;
}

#if defined(TRAPTRACE)

/*
 * When xc_traptrace is on, put x-call records into the trap trace buffer.
 */
int xc_traptrace;
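
/*
 * Note that TRAPTRACE is a compile-time option; even in a kernel built with
 * it, records are collected only while xc_traptrace is non-zero.  As a
 * hypothetical example, the flag could be turned on from a writable kernel
 * debugger session with:
 *
 *	echo 'xc_traptrace/W 1' | mdb -kw
 */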

void
xc_make_trap_trace_entry(uint8_t marker, int pri, ulong_t arg)
{
	trap_trace_rec_t *ttr;
	struct _xc_entry *xce;

	if (xc_traptrace == 0)
		return;

	ttr = trap_trace_get_traceptr(TT_XCALL,
	    (ulong_t)caller(), (ulong_t)getfp());
	xce = &(ttr->ttr_info.xc_entry);

	xce->xce_marker = marker;
	xce->xce_pri = pri;
	xce->xce_arg = arg;

	if ((uint_t)pri < X_CALL_LEVELS) {
		struct machcpu *mcpu = &CPU->cpu_m;

		xce->xce_pend = mcpu->xc_pend[pri];
		xce->xce_ack = mcpu->xc_ack[pri];
		xce->xce_state = mcpu->xc_state[pri];
		xce->xce_retval = mcpu->xc_retval[pri];
		xce->xce_func = (uintptr_t)xc_mboxes[pri].func;
	}
}
#endif

#define	CAPTURE_CPU_ARG	~0UL

/*
 * X-call interrupt service routine.
 *
 * arg == X_CALL_MEDPRI	-  capture cpus.
 *
 * We're protected against changing CPUs by being a high-priority interrupt.
 */
/*ARGSUSED*/
uint_t
xc_serv(caddr_t arg1, caddr_t arg2)
{
	int op;
	int pri = (int)(uintptr_t)arg1;
	struct cpu *cpup = CPU;
	xc_arg_t arg2val;

	XC_TRACE(TT_XC_SVC_BEGIN, pri, (ulong_t)arg2);

	if (pri == X_CALL_MEDPRI) {

		arg2val = xc_mboxes[X_CALL_MEDPRI].arg2;

		if (arg2val != CAPTURE_CPU_ARG &&
		    !CPU_IN_SET((cpuset_t)arg2val, cpup->cpu_id))
			goto unclaimed;

		ASSERT(arg2val == CAPTURE_CPU_ARG);

		if (cpup->cpu_m.xc_pend[pri] == 0)
			goto unclaimed;

		cpup->cpu_m.xc_pend[X_CALL_MEDPRI] = 0;
		cpup->cpu_m.xc_ack[X_CALL_MEDPRI] = 1;

		for (;;) {
			if ((cpup->cpu_m.xc_state[X_CALL_MEDPRI] == XC_DONE) ||
			    (cpup->cpu_m.xc_pend[X_CALL_MEDPRI]))
				break;
			SMT_PAUSE();
		}
		XC_TRACE(TT_XC_SVC_END, pri, DDI_INTR_CLAIMED);
		return (DDI_INTR_CLAIMED);
	}

	if (cpup->cpu_m.xc_pend[pri] == 0)
		goto unclaimed;

	cpup->cpu_m.xc_pend[pri] = 0;
	op = cpup->cpu_m.xc_state[pri];

	/*
	 * Don't invoke a null function.
	 */
	if (xc_mboxes[pri].func != NULL)
		cpup->cpu_m.xc_retval[pri] = (*xc_mboxes[pri].func)
		    (xc_mboxes[pri].arg1, xc_mboxes[pri].arg2,
		    xc_mboxes[pri].arg3);
	else
		cpup->cpu_m.xc_retval[pri] = 0;

	/*
	 * Acknowledge that we have completed the x-call operation.
	 */
	cpup->cpu_m.xc_ack[pri] = 1;

	if (op != XC_CALL_OP) {
		/*
		 * op == XC_SYNC_OP:
		 * Wait for the initiator of the x-call to indicate
		 * that all CPUs involved can proceed.
		 */
		while (cpup->cpu_m.xc_wait[pri])
			SMT_PAUSE();

		while (cpup->cpu_m.xc_state[pri] != XC_DONE)
			SMT_PAUSE();

		/*
		 * Acknowledge that we have received the directive to continue.
		 */
		ASSERT(cpup->cpu_m.xc_ack[pri] == 0);
		cpup->cpu_m.xc_ack[pri] = 1;
	}

	XC_TRACE(TT_XC_SVC_END, pri, DDI_INTR_CLAIMED);
	return (DDI_INTR_CLAIMED);

unclaimed:
	XC_TRACE(TT_XC_SVC_END, pri, DDI_INTR_UNCLAIMED);
	return (DDI_INTR_UNCLAIMED);
}


/*
 * xc_do_call: common worker for xc_call() and xc_sync().
 */
static void
xc_do_call(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	int pri,
	cpuset_t set,
	xc_func_t func,
	int sync)
{
	/*
	 * If pri indicates a low priority level (below LOCK_LEVEL),
	 * we must disable preemption to avoid migrating to another CPU
	 * during the call.
	 */
	if (pri == X_CALL_LOPRI) {
		kpreempt_disable();
	} else {
		pri = X_CALL_HIPRI;
	}

	/* always grab highest mutex to avoid deadlock */
	mutex_enter(&xc_mbox_lock[X_CALL_HIPRI]);
	xc_common(func, arg1, arg2, arg3, pri, set, sync);
	mutex_exit(&xc_mbox_lock[X_CALL_HIPRI]);
	if (pri == X_CALL_LOPRI)
		kpreempt_enable();
}


/*
 * xc_call: call specified function on all processors
 * remotes may continue after service
 * we wait here until everybody has completed.
 */
void
xc_call(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	int pri,
	cpuset_t set,
	xc_func_t func)
{
	xc_do_call(arg1, arg2, arg3, pri, set, func, 0);
}
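
/*
 * A minimal usage sketch (hypothetical caller and handler, not part of this
 * file).  A handler matching xc_func_t runs on every CPU in the set, and
 * xc_call() returns only after all of them have serviced the call:
 *
 *	static int
 *	my_flush(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
 *	{
 *		... act on the local CPU ...
 *		return (0);
 *	}
 *
 *	cpuset_t set;
 *
 *	CPUSET_ALL(set);
 *	xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, set, my_flush);
 */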

/*
 * xc_sync: call specified function on all processors
 * after doing the work, each remote waits until we let
 * it continue; we send the continue only after everyone has
 * informed us that they are done.
 */
void
xc_sync(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	int pri,
	cpuset_t set,
	xc_func_t func)
{
	xc_do_call(arg1, arg2, arg3, pri, set, func, 1);
}
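
/*
 * xc_sync() is invoked just like xc_call() (see the hypothetical sketch
 * above); the difference is that no CPU in the set proceeds past its handler
 * until every CPU in the set has run it, so the call also acts as a barrier:
 *
 *	xc_sync((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, set, my_flush);
 */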


/*
 * The routines xc_capture_cpus and xc_release_cpus
 * can be used in place of xc_sync in order to implement a critical
 * code section where all CPUs in the system can be controlled.
 * xc_capture_cpus is used to start the critical code section, and
 * xc_release_cpus is used to end the critical code section.
 */
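
/*
 * A minimal sketch of that pattern (hypothetical caller; the cpuset macros
 * come from <sys/cpuvar.h>).  Between the two calls, every other ready CPU
 * in the set is spinning at XC_MED_PIL, so the caller can update state that
 * must not be observed half-way by any other CPU:
 *
 *	cpuset_t set;
 *
 *	CPUSET_ALL(set);
 *	xc_capture_cpus(set);
 *	... perform the critical update ...
 *	xc_release_cpus();
 */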

/*
 * Capture the specified CPUs in order to start an x-call session,
 * and/or to begin a critical section.
 */
void
xc_capture_cpus(cpuset_t set)
{
	int cix;
	int lcx;
	struct cpu *cpup;
	int	i;
	cpuset_t *cpus;
	cpuset_t c;

	CPU_STATS_ADDQ(CPU, sys, xcalls, 1);

	/*
	 * Prevent deadlocks where we take an interrupt and are waiting
	 * for a mutex owned by one of the CPUs that is captured for
	 * the x-call, while that CPU is waiting for some x-call signal
	 * to be set by us.
	 *
	 * This mutex also prevents preemption, since it raises SPL above
	 * LOCK_LEVEL (it is a spin-type driver mutex).
	 */
	/* always grab highest mutex to avoid deadlock */
	mutex_enter(&xc_mbox_lock[X_CALL_HIPRI]);
	lcx = CPU->cpu_id;	/* now we're safe */

	ASSERT(CPU->cpu_flags & CPU_READY);

	/*
	 * Wait for all cpus
	 */
	cpus = (cpuset_t *)&xc_mboxes[X_CALL_MEDPRI].arg2;
	if (CPU_IN_SET(*cpus, CPU->cpu_id))
		CPUSET_ATOMIC_DEL(*cpus, CPU->cpu_id);
	for (;;) {
		c = *(volatile cpuset_t *)cpus;
		CPUSET_AND(c, cpu_ready_set);
		if (CPUSET_ISNULL(c))
			break;
		SMT_PAUSE();
	}

	/*
	 * Store the set of CPUs involved in the x-call session, so that
	 * xc_release_cpus will know what CPUs to act upon.
	 */
	xc_mboxes[X_CALL_MEDPRI].set = set;
	xc_mboxes[X_CALL_MEDPRI].arg2 = CAPTURE_CPU_ARG;

	/*
	 * Now capture each CPU in the set and cause it to go into a
	 * holding pattern.
	 */
	i = 0;
	for (cix = 0; cix < NCPU; cix++) {
		if ((cpup = cpu[cix]) == NULL ||
		    (cpup->cpu_flags & CPU_READY) == 0) {
			/*
			 * In case CPU wasn't ready, but becomes ready later,
			 * take the CPU out of the set now.
			 */
			CPUSET_DEL(set, cix);
			continue;
		}
		if (cix != lcx && CPU_IN_SET(set, cix)) {
			cpup->cpu_m.xc_ack[X_CALL_MEDPRI] = 0;
			cpup->cpu_m.xc_state[X_CALL_MEDPRI] = XC_HOLD;
			cpup->cpu_m.xc_pend[X_CALL_MEDPRI] = 1;
			XC_TRACE(TT_XC_CAPTURE, X_CALL_MEDPRI, cix);
			send_dirint(cix, XC_MED_PIL);
		}
		i++;
		if (i >= ncpus)
			break;
	}

	/*
	 * Wait here until all remote calls acknowledge.
	 */
	i = 0;
	for (cix = 0; cix < NCPU; cix++) {
		if (lcx != cix && CPU_IN_SET(set, cix)) {
			cpup = cpu[cix];
			while (cpup->cpu_m.xc_ack[X_CALL_MEDPRI] == 0)
				SMT_PAUSE();
			cpup->cpu_m.xc_ack[X_CALL_MEDPRI] = 0;
		}
		i++;
		if (i >= ncpus)
			break;
	}

}

/*
 * Release the CPUs captured by xc_capture_cpus, thus terminating the
 * x-call session and exiting the critical section.
 */
void
xc_release_cpus(void)
{
	int cix;
	int lcx = (int)(CPU->cpu_id);
	cpuset_t set = xc_mboxes[X_CALL_MEDPRI].set;
	struct cpu *cpup;
	int	i;

	ASSERT(MUTEX_HELD(&xc_mbox_lock[X_CALL_HIPRI]));

	/*
	 * Allow each CPU to exit its holding pattern.
	 */
	i = 0;
	for (cix = 0; cix < NCPU; cix++) {
		if ((cpup = cpu[cix]) == NULL)
			continue;
		if ((cpup->cpu_flags & CPU_READY) &&
		    (cix != lcx) && CPU_IN_SET(set, cix)) {
			/*
			 * Clear xc_ack since we will be waiting for it
			 * to be set again after we set XC_DONE.
			 */
			XC_TRACE(TT_XC_RELEASE, X_CALL_MEDPRI, cix);
			cpup->cpu_m.xc_state[X_CALL_MEDPRI] = XC_DONE;
		}
		i++;
		if (i >= ncpus)
			break;
	}

	xc_mboxes[X_CALL_MEDPRI].arg2 = 0;
	mutex_exit(&xc_mbox_lock[X_CALL_HIPRI]);
}

/*
 * Common code to call a specified function on a set of processors.
 * sync specifies what kind of waiting is done.
 *	-1 - fire and forget: post the call to the remotes but don't wait
 *	     for them to finish and don't hold them afterwards (used by
 *	     xc_trycall() and kdi_xc_others())
 *	 0 - wait until every remote has completed the service, but let
 *	     each remote continue as soon as its own service is done
 *	     (used by xc_call())
 *	 1 - hold each remote after its service completes and release them
 *	     all only after every CPU in the set has finished (used by
 *	     xc_sync())
 */
static void
xc_common(
	xc_func_t func,
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	int pri,
	cpuset_t set,
	int sync)
{
	int cix;
	int lcx = (int)(CPU->cpu_id);
	struct cpu *cpup;

	ASSERT(panicstr == NULL);

	ASSERT(MUTEX_HELD(&xc_mbox_lock[X_CALL_HIPRI]));
	ASSERT(CPU->cpu_flags & CPU_READY);

	/*
	 * Set up the service definition mailbox.
	 */
	xc_mboxes[pri].func = func;
	xc_mboxes[pri].arg1 = arg1;
	xc_mboxes[pri].arg2 = arg2;
	xc_mboxes[pri].arg3 = arg3;

	/*
	 * Request service on all remote processors.
	 */
	for (cix = 0; cix < NCPU; cix++) {
		if ((cpup = cpu[cix]) == NULL ||
		    (cpup->cpu_flags & CPU_READY) == 0) {
			/*
			 * In case the non-local CPU is not ready but becomes
			 * ready later, take it out of the set now. The local
			 * CPU needs to remain in the set to complete the
			 * requested function.
			 */
			if (cix != lcx)
				CPUSET_DEL(set, cix);
		} else if (cix != lcx && CPU_IN_SET(set, cix)) {
			CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
			cpup->cpu_m.xc_ack[pri] = 0;
			cpup->cpu_m.xc_wait[pri] = sync;
			if (sync > 0)
				cpup->cpu_m.xc_state[pri] = XC_SYNC_OP;
			else
				cpup->cpu_m.xc_state[pri] = XC_CALL_OP;
			cpup->cpu_m.xc_pend[pri] = 1;
			XC_TRACE(TT_XC_START, pri, cix);
			send_dirint(cix, xc_xlat_xcptoipl[pri]);
		}
	}

	/*
	 * Run service locally.
	 */
	if (CPU_IN_SET(set, lcx) && func != NULL) {
		XC_TRACE(TT_XC_START, pri, CPU->cpu_id);
		CPU->cpu_m.xc_retval[pri] = (*func)(arg1, arg2, arg3);
	}

	if (sync == -1)
		return;

	/*
	 * Wait here until all remote calls acknowledge.
	 */
	for (cix = 0; cix < NCPU; cix++) {
		if (lcx != cix && CPU_IN_SET(set, cix)) {
			cpup = cpu[cix];
			while (cpup->cpu_m.xc_ack[pri] == 0)
				SMT_PAUSE();
			XC_TRACE(TT_XC_WAIT, pri, cix);
			cpup->cpu_m.xc_ack[pri] = 0;
		}
	}

	if (sync == 0)
		return;

	/*
	 * Release any waiting CPUs
	 */
	for (cix = 0; cix < NCPU; cix++) {
		if (lcx != cix && CPU_IN_SET(set, cix)) {
			cpup = cpu[cix];
			if (cpup != NULL && (cpup->cpu_flags & CPU_READY)) {
				cpup->cpu_m.xc_wait[pri] = 0;
				cpup->cpu_m.xc_state[pri] = XC_DONE;
			}
		}
	}

	/*
	 * Wait for all CPUs to acknowledge completion before we continue.
	 * Without this check it's possible (on a VM or hyper-threaded CPUs
	 * or in the presence of System Management Interrupts which can all
	 * cause delays) for the remote processor to still be waiting by
	 * the time xc_common() is next invoked with the sync flag set,
	 * resulting in a deadlock.
	 */
	for (cix = 0; cix < NCPU; cix++) {
		if (lcx != cix && CPU_IN_SET(set, cix)) {
			cpup = cpu[cix];
			if (cpup != NULL && (cpup->cpu_flags & CPU_READY)) {
				while (cpup->cpu_m.xc_ack[pri] == 0)
					SMT_PAUSE();
				XC_TRACE(TT_XC_ACK, pri, cix);
				cpup->cpu_m.xc_ack[pri] = 0;
			}
		}
	}
}

/*
 * xc_trycall: attempt to call specified function on all processors
 * remotes may wait for a long time
 * we continue immediately
 */
void
xc_trycall(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	cpuset_t set,
	xc_func_t func)
{
	int		save_kernel_preemption;
	extern int	IGNORE_KERNEL_PREEMPTION;

	/*
	 * If we can grab the mutex, we'll do the cross-call.  If not -- if
	 * someone else is already doing a cross-call -- we won't.
	 */

	save_kernel_preemption = IGNORE_KERNEL_PREEMPTION;
	IGNORE_KERNEL_PREEMPTION = 1;
	if (mutex_tryenter(&xc_mbox_lock[X_CALL_HIPRI])) {
		xc_common(func, arg1, arg2, arg3, X_CALL_HIPRI, set, -1);
		mutex_exit(&xc_mbox_lock[X_CALL_HIPRI]);
	}
	IGNORE_KERNEL_PREEMPTION = save_kernel_preemption;
}
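
/*
 * A best-effort usage sketch (hypothetical caller, reusing the names from
 * the xc_call() sketch above).  Because the call is posted with sync == -1
 * and the mailbox lock is only try-entered, the caller must tolerate the
 * possibility that some, or even all, CPUs never run the handler:
 *
 *	CPUSET_ALL(set);
 *	xc_trycall((xc_arg_t)arg, 0, 0, set, my_flush);
 */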

/*
 * Used by the debugger to cross-call the other CPUs, thus causing them to
 * enter the debugger.  We can't hold locks, so we spin on the cross-call
 * lock until we get it.  When we get it, we send the cross-call, and assume
 * that we successfully stopped the other CPUs.
 */
void
kdi_xc_others(int this_cpu, void (*func)(void))
{
	extern int	IGNORE_KERNEL_PREEMPTION;
	int save_kernel_preemption;
	mutex_impl_t *lp;
	cpuset_t set;
	int x;

	if (!xc_initialized)
		return;

	CPUSET_ALL_BUT(set, this_cpu);

	save_kernel_preemption = IGNORE_KERNEL_PREEMPTION;
	IGNORE_KERNEL_PREEMPTION = 1;

	lp = (mutex_impl_t *)&xc_mbox_lock[X_CALL_HIPRI];
	for (x = 0; x < 0x400000; x++) {
		if (lock_spin_try(&lp->m_spin.m_spinlock)) {
			xc_common((xc_func_t)func, 0, 0, 0, X_CALL_HIPRI,
			    set, -1);
			lp->m_spin.m_spinlock = 0; /* XXX */
			break;
		}
		(void) xc_serv((caddr_t)X_CALL_MEDPRI, NULL);
	}
	IGNORE_KERNEL_PREEMPTION = save_kernel_preemption;
}