/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Facilities for cross-processor subroutine calls using "mailbox" interrupts.
 *
 */
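
/*
 * Overview of the mechanism implemented below (a descriptive summary of
 * this file, not a formal interface contract):
 *
 * The initiating CPU fills in the service definition mailbox for the
 * chosen priority level (xc_mboxes[pri]: func, arg1, arg2, arg3), marks
 * each target CPU's xc_pend flag and sends it a directed interrupt at the
 * PIL that corresponds to that priority.  Each target handles the request
 * in xc_serv(), invokes the function and raises its xc_ack flag; for
 * synchronous calls the targets then spin until the initiator marks the
 * session XC_DONE.  Access to the mailboxes is serialized by grabbing
 * xc_mbox_lock[X_CALL_HIPRI].
 */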

#include <sys/types.h>

#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/x_call.h>
#include <sys/cpu.h>
#include <sys/psw.h>
#include <sys/sunddi.h>
#include <sys/mmu.h>
#include <sys/debug.h>
#include <sys/systm.h>
#include <sys/machsystm.h>
#include <sys/mutex_impl.h>

static struct	xc_mbox xc_mboxes[X_CALL_LEVELS];
static kmutex_t xc_mbox_lock[X_CALL_LEVELS];
static uint_t	xc_xlat_xcptoipl[X_CALL_LEVELS] = {
	XC_LO_PIL,
	XC_MED_PIL,
	XC_HI_PIL
};

static void xc_common(xc_func_t, xc_arg_t, xc_arg_t, xc_arg_t,
    int, cpuset_t, int);

static int	xc_initialized = 0;
extern ulong_t	cpu_ready_set;

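/*
 * Initialize the cross-call mailbox locks.  Until this has run,
 * kdi_xc_initialized() reports that cross calls are not yet safe to use.
 */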
void
xc_init()
{
	/*
	 * By making these mutexes type MUTEX_DRIVER, the ones below
	 * LOCK_LEVEL will be implemented as adaptive mutexes, and the
	 * ones above LOCK_LEVEL will be spin mutexes.
	 */
	mutex_init(&xc_mbox_lock[0], NULL, MUTEX_DRIVER,
	    (void *)ipltospl(XC_LO_PIL));
	mutex_init(&xc_mbox_lock[1], NULL, MUTEX_DRIVER,
	    (void *)ipltospl(XC_MED_PIL));
	mutex_init(&xc_mbox_lock[2], NULL, MUTEX_DRIVER,
	    (void *)ipltospl(XC_HI_PIL));

	xc_initialized = 1;
}

/*
 * Used by the debugger to determine whether or not cross calls have been
 * initialized and are safe to use.
 */
int
kdi_xc_initialized(void)
{
	return (xc_initialized);
}

#define	CAPTURE_CPU_ARG	0xffffffff

/*
 * X-call interrupt service routine.
 *
 * arg1 == X_CALL_MEDPRI	- capture CPUs.
 *
 * We're protected against changing CPUs by being a high-priority interrupt.
 */
/*ARGSUSED*/
uint_t
xc_serv(caddr_t arg1, caddr_t arg2)
{
	int	op;
	int	pri = (int)(uintptr_t)arg1;
	struct cpu *cpup = CPU;
	xc_arg_t *argp;
	xc_arg_t arg2val;
	uint_t	tlbflush;

	if (pri == X_CALL_MEDPRI) {

		argp = &xc_mboxes[X_CALL_MEDPRI].arg2;
		arg2val = *argp;
		if (arg2val != CAPTURE_CPU_ARG &&
		    !(arg2val & (1 << cpup->cpu_id)))
			return (DDI_INTR_UNCLAIMED);
		ASSERT(arg2val == CAPTURE_CPU_ARG);
		if (cpup->cpu_m.xc_pend[pri] == 0)
			return (DDI_INTR_UNCLAIMED);

		cpup->cpu_m.xc_pend[X_CALL_MEDPRI] = 0;
		cpup->cpu_m.xc_ack[X_CALL_MEDPRI] = 1;

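		/*
		 * Hold here until the initiator ends the capture session by
		 * setting XC_DONE (see xc_release_cpus()) or posts another
		 * pending request for this CPU.
		 */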
		for (;;) {
			if ((cpup->cpu_m.xc_state[X_CALL_MEDPRI] == XC_DONE) ||
			    (cpup->cpu_m.xc_pend[X_CALL_MEDPRI]))
				break;
			ht_pause();
		}
		return (DDI_INTR_CLAIMED);
	}
	if (cpup->cpu_m.xc_pend[pri] == 0)
		return (DDI_INTR_UNCLAIMED);

	cpup->cpu_m.xc_pend[pri] = 0;
	op = cpup->cpu_m.xc_state[pri];

	/*
	 * When invalidating TLB entries, wait until the initiator changes the
	 * memory PTE before doing any INVLPG. Otherwise, if the PTE in memory
	 * hasn't been changed, the processor's TLB Flush filter may ignore
	 * the INVLPG instruction.
	 */
	tlbflush = (cpup->cpu_m.xc_wait[pri] == 2);

	/*
	 * Don't invoke a null function.
	 */
	if (xc_mboxes[pri].func != NULL) {
		if (!tlbflush)
			cpup->cpu_m.xc_retval[pri] = (*xc_mboxes[pri].func)
			    (xc_mboxes[pri].arg1, xc_mboxes[pri].arg2,
			    xc_mboxes[pri].arg3);
	} else
		cpup->cpu_m.xc_retval[pri] = 0;

	/*
	 * Acknowledge that we have completed the x-call operation.
	 */
	cpup->cpu_m.xc_ack[pri] = 1;

	if (op == XC_CALL_OP)
		return (DDI_INTR_CLAIMED);

	/*
	 * for (op == XC_SYNC_OP)
	 * Wait for the initiator of the x-call to indicate
	 * that all CPUs involved can proceed.
	 */
	while (cpup->cpu_m.xc_wait[pri])
		ht_pause();

	while (cpup->cpu_m.xc_state[pri] != XC_DONE)
		ht_pause();

	/*
	 * Flush the TLB, if that's what is requested.
	 */
	if (xc_mboxes[pri].func != NULL && tlbflush) {
		cpup->cpu_m.xc_retval[pri] = (*xc_mboxes[pri].func)
		    (xc_mboxes[pri].arg1, xc_mboxes[pri].arg2,
		    xc_mboxes[pri].arg3);
	}

	/*
	 * Acknowledge that we have received the directive to continue.
	 */
	ASSERT(cpup->cpu_m.xc_ack[pri] == 0);
	cpup->cpu_m.xc_ack[pri] = 1;

	return (DDI_INTR_CLAIMED);
}


/*
 * xc_do_call: common code for xc_call(), xc_sync() and xc_wait_sync();
 * 'sync' selects the flavor of waiting done by xc_common().
 */
static void
xc_do_call(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	int pri,
	cpuset_t set,
	xc_func_t func,
	int sync)
{
	/*
	 * If pri indicates a low-priority cross call (below LOCK_LEVEL),
	 * we must disable preemption to avoid migrating to another CPU
	 * during the call.
	 */
	if (pri == X_CALL_LOPRI) {
		kpreempt_disable();
	} else {
		pri = X_CALL_HIPRI;
	}

	/* always grab highest mutex to avoid deadlock */
	mutex_enter(&xc_mbox_lock[X_CALL_HIPRI]);
	xc_common(func, arg1, arg2, arg3, pri, set, sync);
	mutex_exit(&xc_mbox_lock[X_CALL_HIPRI]);
	if (pri == X_CALL_LOPRI)
		kpreempt_enable();
}


/*
 * xc_call: call the specified function on all processors in 'set';
 * remotes may continue as soon as they have performed the service.
 * We wait here until everybody has completed.
 */
void
xc_call(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	int pri,
	cpuset_t set,
	xc_func_t func)
{
	xc_do_call(arg1, arg2, arg3, pri, set, func, 0);
}

/*
 * xc_sync: call the specified function on all processors in 'set';
 * after doing the work, each remote waits until we let it continue.
 * We send the continue once everyone has informed us that they are done.
 */
void
xc_sync(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	int pri,
	cpuset_t set,
	xc_func_t func)
{
	xc_do_call(arg1, arg2, arg3, pri, set, func, 1);
}

/*
 * xc_wait_sync: similar to xc_sync(), except that the initiating
 * CPU waits for all other CPUs to check in before running its
 * service locally.
 */
void
xc_wait_sync(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	int pri,
	cpuset_t set,
	xc_func_t func)
{
	xc_do_call(arg1, arg2, arg3, pri, set, func, 2);
}
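
/*
 * Example of a synchronous cross call (a hypothetical caller sketch;
 * xc_flush_func, addr and len are illustrative names, not defined in
 * this file):
 *
 *	cpuset_t set;
 *
 *	CPUSET_ALL_BUT(set, CPU->cpu_id);
 *	xc_sync((xc_arg_t)addr, (xc_arg_t)len, 0, X_CALL_HIPRI, set,
 *	    xc_flush_func);
 *
 * Each CPU in 'set' runs xc_flush_func(addr, len, 0), then spins until
 * this CPU has collected every acknowledgement and marked the session
 * XC_DONE.
 */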


/*
 * The routines xc_capture_cpus and xc_release_cpus
 * can be used in place of xc_sync in order to implement a critical
 * code section where all CPUs in the system can be controlled.
 * xc_capture_cpus is used to start the critical code section, and
 * xc_release_cpus is used to end the critical code section.
 */
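
/*
 * Example (a hypothetical caller sketch; the work done inside the
 * critical section is illustrative):
 *
 *	cpuset_t set;
 *
 *	CPUSET_ALL_BUT(set, CPU->cpu_id);
 *	xc_capture_cpus(set);
 *	... the captured CPUs spin at XC_MED_PIL inside xc_serv() ...
 *	xc_release_cpus();
 *
 * xc_capture_cpus() returns with xc_mbox_lock[X_CALL_HIPRI] still held;
 * xc_release_cpus() drops it.
 */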

/*
 * Capture the specified CPUs in order to start an x-call session,
 * and/or to begin a critical section.
 */
void
xc_capture_cpus(cpuset_t set)
{
	int cix;
	int lcx;
	struct cpu *cpup;
	int	i;
	cpuset_t *cpus;
	cpuset_t c;

	CPU_STATS_ADDQ(CPU, sys, xcalls, 1);

	/*
	 * Prevent deadlocks where we take an interrupt and are waiting
	 * for a mutex owned by one of the CPUs that is captured for
	 * the x-call, while that CPU is waiting for some x-call signal
	 * to be set by us.
	 *
	 * This mutex also prevents preemption, since it raises SPL above
	 * LOCK_LEVEL (it is a spin-type driver mutex).
	 */
	/* always grab highest mutex to avoid deadlock */
	mutex_enter(&xc_mbox_lock[X_CALL_HIPRI]);
	lcx = CPU->cpu_id;	/* now we're safe */

	ASSERT(CPU->cpu_flags & CPU_READY);

	/*
	 * Wait for all CPUs to clear out of the medium-priority mailbox
	 * before taking it over for this session.
	 */
	cpus = (cpuset_t *)&xc_mboxes[X_CALL_MEDPRI].arg2;
	if (CPU_IN_SET(*cpus, CPU->cpu_id))
		CPUSET_ATOMIC_DEL(*cpus, CPU->cpu_id);
	for (;;) {
		c = *(volatile cpuset_t *)cpus;
		CPUSET_AND(c, cpu_ready_set);
		if (CPUSET_ISNULL(c))
			break;
		ht_pause();
	}

	/*
	 * Store the set of CPUs involved in the x-call session, so that
	 * xc_release_cpus will know what CPUs to act upon.
	 */
	xc_mboxes[X_CALL_MEDPRI].set = set;
	xc_mboxes[X_CALL_MEDPRI].arg2 = CAPTURE_CPU_ARG;

	/*
	 * Now capture each CPU in the set and cause it to go into a
	 * holding pattern.
	 */
	i = 0;
	for (cix = 0; cix < NCPU; cix++) {
		if ((cpup = cpu[cix]) == NULL ||
		    (cpup->cpu_flags & CPU_READY) == 0) {
			/*
			 * In case CPU wasn't ready, but becomes ready later,
			 * take the CPU out of the set now.
			 */
			CPUSET_DEL(set, cix);
			continue;
		}
		if (cix != lcx && CPU_IN_SET(set, cix)) {
			cpup->cpu_m.xc_ack[X_CALL_MEDPRI] = 0;
			cpup->cpu_m.xc_state[X_CALL_MEDPRI] = XC_HOLD;
			cpup->cpu_m.xc_pend[X_CALL_MEDPRI] = 1;
			send_dirint(cix, XC_MED_PIL);
		}
		i++;
		if (i >= ncpus)
			break;
	}

	/*
	 * Wait here until all remote calls complete.
	 */
	i = 0;
	for (cix = 0; cix < NCPU; cix++) {
		if (lcx != cix && CPU_IN_SET(set, cix)) {
			cpup = cpu[cix];
			while (cpup->cpu_m.xc_ack[X_CALL_MEDPRI] == 0)
				ht_pause();
			cpup->cpu_m.xc_ack[X_CALL_MEDPRI] = 0;
		}
		i++;
		if (i >= ncpus)
			break;
	}

}

/*
 * Release the CPUs captured by xc_capture_cpus, thus terminating the
 * x-call session and exiting the critical section.
 */
void
xc_release_cpus(void)
{
	int cix;
	int lcx = (int)(CPU->cpu_id);
	cpuset_t set = xc_mboxes[X_CALL_MEDPRI].set;
	struct cpu *cpup;
	int	i;

	ASSERT(MUTEX_HELD(&xc_mbox_lock[X_CALL_HIPRI]));

	/*
	 * Allow each CPU to exit its holding pattern.
	 */
	i = 0;
	for (cix = 0; cix < NCPU; cix++) {
		if ((cpup = cpu[cix]) == NULL)
			continue;
		if ((cpup->cpu_flags & CPU_READY) &&
		    (cix != lcx) && CPU_IN_SET(set, cix)) {
			/*
			 * Clear xc_ack since we will be waiting for it
			 * to be set again after we set XC_DONE.
			 */
			cpup->cpu_m.xc_state[X_CALL_MEDPRI] = XC_DONE;
		}
		i++;
		if (i >= ncpus)
			break;
	}

	xc_mboxes[X_CALL_MEDPRI].arg2 = 0;
	mutex_exit(&xc_mbox_lock[X_CALL_HIPRI]);
}

/*
 * Common code to call a specified function on a set of processors.
 * sync specifies what kind of waiting is done.
 *	-1 - no waiting, don't release remotes
 *	0 - no waiting, release remotes immediately
 *	1 - run service locally w/o waiting for remotes
 *	2 - wait for remotes before running locally
 */
static void
xc_common(
	xc_func_t func,
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	int pri,
	cpuset_t set,
	int sync)
{
	int cix;
	int lcx = (int)(CPU->cpu_id);
	struct cpu *cpup;

	ASSERT(panicstr == NULL);

	ASSERT(MUTEX_HELD(&xc_mbox_lock[X_CALL_HIPRI]));
	ASSERT(CPU->cpu_flags & CPU_READY);

	/*
	 * Set up the service definition mailbox.
	 */
	xc_mboxes[pri].func = func;
	xc_mboxes[pri].arg1 = arg1;
	xc_mboxes[pri].arg2 = arg2;
	xc_mboxes[pri].arg3 = arg3;

	/*
	 * Request service on all remote processors.
	 */
	for (cix = 0; cix < NCPU; cix++) {
		if ((cpup = cpu[cix]) == NULL ||
		    (cpup->cpu_flags & CPU_READY) == 0) {
			/*
			 * In case CPU wasn't ready, but becomes ready later,
			 * take the CPU out of the set now.
			 */
			CPUSET_DEL(set, cix);
		} else if (cix != lcx && CPU_IN_SET(set, cix)) {
			CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
			cpup->cpu_m.xc_ack[pri] = 0;
			cpup->cpu_m.xc_wait[pri] = sync;
			if (sync > 0)
				cpup->cpu_m.xc_state[pri] = XC_SYNC_OP;
			else
				cpup->cpu_m.xc_state[pri] = XC_CALL_OP;
			cpup->cpu_m.xc_pend[pri] = 1;
			send_dirint(cix, xc_xlat_xcptoipl[pri]);
		}
	}

	/*
	 * Run service locally if not waiting for remotes.
	 */
	if (sync != 2 && CPU_IN_SET(set, lcx) && func != NULL)
		CPU->cpu_m.xc_retval[pri] = (*func)(arg1, arg2, arg3);

	if (sync == -1)
		return;

	/*
	 * Wait here until all remote calls complete.
	 */
	for (cix = 0; cix < NCPU; cix++) {
		if (lcx != cix && CPU_IN_SET(set, cix)) {
			cpup = cpu[cix];
			while (cpup->cpu_m.xc_ack[pri] == 0)
				ht_pause();
			cpup->cpu_m.xc_ack[pri] = 0;
		}
	}

	/*
	 * Run service locally if waiting for remotes.
	 */
	if (sync == 2 && CPU_IN_SET(set, lcx) && func != NULL)
		CPU->cpu_m.xc_retval[pri] = (*func)(arg1, arg2, arg3);

	if (sync == 0)
		return;

	/*
	 * Release any waiting CPUs
	 */
	for (cix = 0; cix < NCPU; cix++) {
		if (lcx != cix && CPU_IN_SET(set, cix)) {
			cpup = cpu[cix];
			if (cpup != NULL && (cpup->cpu_flags & CPU_READY)) {
				cpup->cpu_m.xc_wait[pri] = 0;
				cpup->cpu_m.xc_state[pri] = XC_DONE;
			}
		}
	}

	/*
	 * Wait for all CPUs to acknowledge completion before we continue.
	 * Without this check it's possible (on a VM or hyper-threaded CPUs
	 * or in the presence of System Management Interrupts, which can all
	 * cause delays) for the remote processor to still be waiting by
	 * the time xc_common() is next invoked with the sync flag set,
	 * resulting in a deadlock.
	 */
	for (cix = 0; cix < NCPU; cix++) {
		if (lcx != cix && CPU_IN_SET(set, cix)) {
			cpup = cpu[cix];
			if (cpup != NULL && (cpup->cpu_flags & CPU_READY)) {
				while (cpup->cpu_m.xc_ack[pri] == 0)
					ht_pause();
				cpup->cpu_m.xc_ack[pri] = 0;
			}
		}
	}
}

/*
 * xc_trycall: attempt to call the specified function on all processors;
 * remotes may wait for a long time, but we continue immediately.
 */
void
xc_trycall(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	cpuset_t set,
	xc_func_t func)
{
	int		save_kernel_preemption;
	extern int	IGNORE_KERNEL_PREEMPTION;

	/*
	 * If we can grab the mutex, we'll do the cross-call.  If not -- if
	 * someone else is already doing a cross-call -- we won't.
	 */

	save_kernel_preemption = IGNORE_KERNEL_PREEMPTION;
	IGNORE_KERNEL_PREEMPTION = 1;
	if (mutex_tryenter(&xc_mbox_lock[X_CALL_HIPRI])) {
		xc_common(func, arg1, arg2, arg3, X_CALL_HIPRI, set, -1);
		mutex_exit(&xc_mbox_lock[X_CALL_HIPRI]);
	}
	IGNORE_KERNEL_PREEMPTION = save_kernel_preemption;
}

/*
 * Used by the debugger to cross-call the other CPUs, thus causing them to
 * enter the debugger.  We can't hold locks, so we spin on the cross-call
 * lock until we get it.  When we get it, we send the cross-call, and assume
 * that we successfully stopped the other CPUs.
 */
void
kdi_xc_others(int this_cpu, void (*func)(void))
{
	extern int	IGNORE_KERNEL_PREEMPTION;
	int save_kernel_preemption;
	mutex_impl_t *lp;
	cpuset_t set;
	int x;

	CPUSET_ALL_BUT(set, this_cpu);

	save_kernel_preemption = IGNORE_KERNEL_PREEMPTION;
	IGNORE_KERNEL_PREEMPTION = 1;

	lp = (mutex_impl_t *)&xc_mbox_lock[X_CALL_HIPRI];
	for (x = 0; x < 0x400000; x++) {
		if (lock_spin_try(&lp->m_spin.m_spinlock)) {
			xc_common((xc_func_t)func, 0, 0, 0, X_CALL_HIPRI,
			    set, -1);
			lp->m_spin.m_spinlock = 0; /* XXX */
			break;
		}
		(void) xc_serv((caddr_t)X_CALL_MEDPRI, NULL);
	}
	IGNORE_KERNEL_PREEMPTION = save_kernel_preemption;
}