/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/cpuvar.h>
#include <sys/intreg.h>
#include <sys/x_call.h>
#include <sys/cmn_err.h>
#include <sys/membar.h>
#include <sys/disp.h>
#include <sys/debug.h>
#include <sys/privregs.h>
#include <sys/xc_impl.h>
#include <sys/ivintr.h>
#include <sys/dmv.h>
#include <sys/sysmacros.h>

#ifdef TRAPTRACE
uint_t x_dstat[NCPU][XC_LOOP_EXIT+1];
uint_t x_rstat[NCPU][4];
#endif /* TRAPTRACE */

static uint64_t xc_serv_inum;	/* software interrupt number for xc_serv() */
static uint64_t xc_loop_inum;	/* software interrupt number for xc_loop() */
kmutex_t xc_sys_mutex;		/* protect xcall session and xc_mbox */
int xc_spl_enter[NCPU];		/* protect sending x-call */
static int xc_holder = -1; /* cpu that initiates xc_attention; 0 is valid */

/*
 * Mailbox for handshaking and xcall requests; protected by xc_sys_mutex
 */
static struct xc_mbox {
	xcfunc_t *xc_func;
	uint64_t xc_arg1;
	uint64_t xc_arg2;
	cpuset_t xc_cpuset;
	volatile uint_t	xc_state;
} xc_mbox[NCPU];
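
/*
 * A note on xc_state (inferred from the code below): it holds the
 * mailbox handshake state.  The values used in this file are XC_IDLE,
 * XC_ENTER, XC_WAIT, XC_DOIT and XC_EXIT, presumably defined in
 * <sys/xc_impl.h>.
 */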

uint64_t xc_tick_limit;		/* send_mondo() tick limit value */
uint64_t xc_tick_limit_scale = 1;	/* scale used to increase the limit */
uint64_t xc_tick_jump_limit;	/* send_mondo() irregular tick jump limit */
uint64_t xc_sync_tick_limit;	/* timeout limit for xt_sync() calls */

/* timeout value for xcalls to be received by the target CPU */
uint64_t xc_mondo_time_limit;

/* timeout value for xcall functions to be executed on the target CPU */
uint64_t xc_func_time_limit;

uint64_t xc_scale = 1;	/* scale used to calculate timeout limits */
uint64_t xc_mondo_multiplier = 10;

uint_t sendmondo_in_recover;

/*
 * sending x-calls
 */
void	init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2);
void	send_one_mondo(int cpuid);
void	send_mondo_set(cpuset_t set);

/*
 * Adjust xc_attention timeout if a faster cpu is dynamically added.
 * Ignore the dynamic removal of a cpu that would lower these timeout
 * values.
 */
static int
xc_func_timeout_adj(cpu_setup_t what, int cpuid)
{
	uint64_t freq = cpunodes[cpuid].clock_freq;

	switch (what) {
	case CPU_ON:
	case CPU_INIT:
	case CPU_CONFIG:
	case CPU_CPUPART_IN:
		if (freq * xc_scale > xc_mondo_time_limit) {
			xc_mondo_time_limit = freq * xc_scale;
			xc_func_time_limit = xc_mondo_time_limit *
			    xc_mondo_multiplier;
		}
		break;
	case CPU_OFF:
	case CPU_UNCONFIG:
	case CPU_CPUPART_OUT:
	default:
		break;
	}

	return (0);
}
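
/*
 * Illustration (hypothetical numbers): assuming clock_freq is in Hz, a
 * 1.2 GHz cpu with xc_scale == 1 raises xc_mondo_time_limit to 1.2e9
 * polling iterations; with xc_mondo_multiplier == 10, the derived
 * xc_func_time_limit becomes 1.2e10 iterations.
 */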

/*
 * xc_init - initialize x-call related locks
 */
void
xc_init(void)
{
	int pix;
	uint64_t maxfreq = 0;

	mutex_init(&xc_sys_mutex, NULL, MUTEX_SPIN,
	    (void *)ipltospl(XCALL_PIL));

#ifdef TRAPTRACE
	/* Initialize for all possible CPUs. */
	for (pix = 0; pix < NCPU; pix++) {
		XC_STAT_INIT(pix);
	}
#endif /* TRAPTRACE */

	xc_serv_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_serv, 0,
	    SOFTINT_MT);
	xc_loop_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_loop, 0,
	    SOFTINT_MT);

	/*
	 * Initialize the calibrated tick limit for send_mondo.
	 * The value represents the maximum tick count to wait.
	 */
	xc_tick_limit =
	    ((uint64_t)sys_tick_freq * XC_SEND_MONDO_MSEC) / 1000;
	xc_tick_jump_limit = xc_tick_limit / 32;
	xc_tick_limit *= xc_tick_limit_scale;
	xc_sync_tick_limit = xc_tick_limit;

	/*
	 * Maximum number of loops to wait before timing out in xc_attention.
	 */
	for (pix = 0; pix < NCPU; pix++) {
		maxfreq = MAX(cpunodes[pix].clock_freq, maxfreq);
	}
	xc_mondo_time_limit = maxfreq * xc_scale;
	register_cpu_setup_func((cpu_setup_func_t *)xc_func_timeout_adj, NULL);

	/*
	 * Maximum number of loops to wait for an xcall function to be
	 * executed on the target CPU.
	 */
	xc_func_time_limit = xc_mondo_time_limit * xc_mondo_multiplier;
}
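
/*
 * Worked example for the tick limits above (hypothetical numbers): with
 * sys_tick_freq == 150 MHz and XC_SEND_MONDO_MSEC == 1000, xc_tick_limit
 * starts at 150e6 ticks (one second of tick time) and xc_tick_jump_limit
 * at about 4.7e6 ticks, before scaling by xc_tick_limit_scale.
 */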

/*
 * The following routines basically provide callers with two kinds of
 * inter-processor interrupt services:
 *	1. cross calls (x-calls) - requests are handled at target cpu's TL=0
 *	2. cross traps (c-traps) - requests are handled at target cpu's TL>0
 *
 * Although these routines protect the services from migrating to other cpus
 * "after" they are called, it is the caller's choice or responsibility to
 * prevent the cpu migration "before" calling them.
 *
 * X-call routines:
 *
 *	xc_one()  - send a request to one processor
 *	xc_some() - send a request to some processors
 *	xc_all()  - send a request to all processors
 *
 *	Their common parameters:
 *		func - a TL=0 handler address
 *		arg1 and arg2  - optional
 *
 *	The services provided by x-call routines allow callers
 *	to send a request to target cpus to execute a TL=0
 *	handler.
 *	The interface of the registers of the TL=0 handler:
 *		%o0: arg1
 *		%o1: arg2
 *
 * X-trap routines:
 *
 *	xt_one()  - send a request to one processor
 *	xt_some() - send a request to some processors
 *	xt_all()  - send a request to all processors
 *
 *	Their common parameters:
 *		func - a TL>0 handler address or an interrupt number
 *		arg1, arg2
 *		       optional when "func" is an address;
 *		       0        when "func" is an interrupt number
 *
 *	If the request of "func" is a kernel address, then
 *	the target cpu will execute the request of "func" with
 *	args at "TL>0" level.
 *	The interface of the registers of the TL>0 handler:
 *		%g1: arg1
 *		%g2: arg2
 *
 *	If the request of "func" is not a kernel address, then it has
 *	to be an interrupt number assigned through add_softintr().
 *	An interrupt number is an index into the interrupt vector table,
 *	each entry of which contains an interrupt handler address with its
 *	corresponding interrupt level and argument.
 *	The target cpu will arrange for the request to be serviced according
 *	to its pre-registered information.
 *	args are assumed to be zeros in this case.
 *
 * In addition, callers are allowed to capture and release cpus by
 * calling the routines xc_attention() and xc_dismissed().
 */
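
/*
 * Illustrative usage (handler names are hypothetical, not part of this
 * file): a caller with a TL=0 handler might issue
 *
 *	xc_one(cpu_id, (xcfunc_t *)my_tl0_handler, arg1, arg2);
 *
 * while a TL>0 cross trap to all ready cpus would look like
 *
 *	xt_all((xcfunc_t *)my_tl1_handler, arg1, arg2);
 *
 * where my_tl0_handler receives (arg1, arg2) in (%o0, %o1) and
 * my_tl1_handler receives them in (%g1, %g2), as described above.
 */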

/*
 * spl_xcall - set PIL to xcall level
 */
int
spl_xcall(void)
{
	return (splr(XCALL_PIL));
}

/*
 * xt_one - send a "x-trap" to a cpu
 */
void
xt_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	if (!CPU_IN_SET(cpu_ready_set, cix)) {
		return;
	}
	xt_one_unchecked(cix, func, arg1, arg2);
}

/*
 * xt_one_unchecked - send a "x-trap" to a cpu without checking for its
 * existence in cpu_ready_set
 */
void
xt_one_unchecked(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t tset;

	/*
	 * Make sure the function address will not be interpreted as a
	 * dmv interrupt
	 */
	ASSERT(!DMV_IS_DMV(func));

	/*
	 * It's illegal to send software inums through the cross-trap
	 * interface.
	 */
	ASSERT((uintptr_t)func >= KERNELBASE);

	CPUSET_ZERO(tset);

	XC_SPL_ENTER(lcx, opl);			/* lcx set by the macro */

	CPUSET_ADD(tset, cix);

	if (cix == lcx) {
		/*
		 * same cpu - use software fast trap
		 */
		send_self_xcall(CPU, arg1, arg2, func);
		XC_STAT_INC(x_dstat[lcx][XT_ONE_SELF]);
		XC_TRACE(XT_ONE_SELF, &tset, func, arg1, arg2);
	} else {
		/*
		 * other cpu - send a mondo to the target cpu
		 */
		XC_TRACE(XT_ONE_OTHER, &tset, func, arg1, arg2);
		init_mondo(func, arg1, arg2);
		send_one_mondo(cix);
		XC_STAT_INC(x_dstat[lcx][XT_ONE_OTHER]);
	}
	XC_SPL_EXIT(lcx, opl);
}

/*
 * xt_some - send a "x-trap" to some cpus
 */
void
xt_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	/*
	 * Make sure the function address will not be interpreted as a
	 * dmv interrupt
	 */
	ASSERT(!DMV_IS_DMV(func));

	/*
	 * It's illegal to send software inums through the cross-trap
	 * interface.
	 */
	ASSERT((uintptr_t)func >= KERNELBASE);

	CPUSET_ZERO(tset);

	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * send to nobody; just return
	 */
	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_SPL_EXIT(lcx, opl);
		return;
	}

	/*
	 * don't send mondo to self
	 */
	if (CPU_IN_SET(xc_cpuset, lcx)) {
		/*
		 * same cpu - use software fast trap
		 */
		send_self_xcall(CPU, arg1, arg2, func);
		XC_STAT_INC(x_dstat[lcx][XT_SOME_SELF]);
		XC_TRACE(XT_SOME_SELF, &tset, func, arg1, arg2);
		CPUSET_DEL(xc_cpuset, lcx);
		if (CPUSET_ISNULL(xc_cpuset)) {
			XC_SPL_EXIT(lcx, opl);
			return;
		}
	}
	XC_TRACE(XT_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
	init_mondo(func, arg1, arg2);
	send_mondo_set(xc_cpuset);
	XC_STAT_INC(x_dstat[lcx][XT_SOME_OTHER]);

	XC_SPL_EXIT(lcx, opl);
}

/*
 * xt_all - send a "x-trap" to all cpus
 */
void
xt_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	/*
	 * Make sure the function address will not be interpreted as a
	 * dmv interrupt
	 */
	ASSERT(!DMV_IS_DMV(func));

	/*
	 * It's illegal to send software inums through the cross-trap
	 * interface.
	 */
	ASSERT((uintptr_t)func >= KERNELBASE);

	CPUSET_ZERO(tset);

	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * same cpu - use software fast trap
	 */
	if (CPU_IN_SET(cpu_ready_set, lcx))
		send_self_xcall(CPU, arg1, arg2, func);

	XC_TRACE(XT_ALL_OTHER, &cpu_ready_set, func, arg1, arg2);

	/*
	 * don't send mondo to self
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_DEL(xc_cpuset, lcx);

	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_STAT_INC(x_dstat[lcx][XT_ALL_SELF]);
		XC_TRACE(XT_ALL_SELF, &tset, func, arg1, arg2);
		XC_SPL_EXIT(lcx, opl);
		return;
	}

	init_mondo(func, arg1, arg2);
	send_mondo_set(xc_cpuset);

	XC_STAT_INC(x_dstat[lcx][XT_ALL_OTHER]);
	XC_SPL_EXIT(lcx, opl);
}

/*
 * xc_one - send a "x-call" to a cpu
 */
void
xc_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	uint64_t loop_cnt = 0;
	cpuset_t tset;
	int first_time = 1;

	/*
	 * send to nobody; just return
	 */
	if (!CPU_IN_SET(cpu_ready_set, cix))
		return;

	ASSERT((uintptr_t)func > KERNELBASE);
	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);

	CPUSET_ZERO(tset);

	kpreempt_disable();

	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, cix);

	if (cix == lcx) {	/* same cpu - just do it */
		XC_TRACE(XC_ONE_SELF, &tset, func, arg1, arg2);
		(*func)(arg1, arg2);
		XC_STAT_INC(x_dstat[lcx][XC_ONE_SELF]);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
		ASSERT(MUTEX_HELD(&xc_sys_mutex));
		ASSERT(CPU_IN_SET(xc_mbox[lcx].xc_cpuset, lcx));
		ASSERT(CPU_IN_SET(xc_mbox[cix].xc_cpuset, cix));
		ASSERT(xc_mbox[cix].xc_state == XC_WAIT);
		XC_TRACE(XC_ONE_OTHER_H, &tset, func, arg1, arg2);

		/*
		 * The target processor's xc_loop should be waiting
		 * for work to do; just set up the xc_mbox.
		 */
		XC_SETUP(cix, func, arg1, arg2);
		membar_stld();

		while (xc_mbox[cix].xc_state != XC_WAIT) {
			if (loop_cnt++ > xc_func_time_limit) {
				if (sendmondo_in_recover) {
					drv_usecwait(1);
					loop_cnt = 0;
					continue;
				}
				cmn_err(CE_PANIC, "xc_one() timeout, "
				    "xc_state[%d] != XC_WAIT", cix);
			}
		}
		XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER_H]);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	/*
	 * Avoid deadlock if someone has sent us an xc_loop request while
	 * we are trying to grab xc_sys_mutex.
	 */
	XC_SPL_EXIT(lcx, opl);

	/*
	 * At this point, since we don't own xc_sys_mutex,
	 * our pil shouldn't run at or above the XCALL_PIL.
	 */
	ASSERT(getpil() < XCALL_PIL);

	/*
	 * Since we are not the xc_holder, it could be that no one holds
	 * xc_sys_mutex, or that we simply have not been told to enter
	 * xc_loop().  In either case, we need to grab the xc_sys_mutex
	 * before we write to the xc_mbox, and we shouldn't release it
	 * until the request is finished.
	 */

	mutex_enter(&xc_sys_mutex);
	xc_spl_enter[lcx] = 1;

	/*
	 * Since we own xc_sys_mutex now, we are safe to
	 * write to the xc_mbox.
	 */
	ASSERT(xc_mbox[cix].xc_state == XC_IDLE);
	XC_TRACE(XC_ONE_OTHER, &tset, func, arg1, arg2);
	XC_SETUP(cix, func, arg1, arg2);
	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
	send_one_mondo(cix);
	xc_spl_enter[lcx] = 0;

	/* xc_serv does membar_stld */
	while (xc_mbox[cix].xc_state != XC_IDLE) {
		if (loop_cnt++ > xc_func_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			if (first_time) {
				XT_SYNC_ONE(cix);
				first_time = 0;
				loop_cnt = 0;
				continue;
			}
			cmn_err(CE_PANIC, "xc_one() timeout, "
			    "xc_state[%d] != XC_IDLE", cix);
		}
	}
	XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER]);
	mutex_exit(&xc_sys_mutex);

	kpreempt_enable();
}

/*
 * xc_some - send a "x-call" to some cpus; sending to self is excluded
 */
void
xc_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	ASSERT((uintptr_t)func > KERNELBASE);
	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);

	CPUSET_ZERO(tset);

	kpreempt_disable();
	XC_SPL_ENTER(lcx, opl);			/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * send to nobody; just return
	 */
	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	if (CPU_IN_SET(xc_cpuset, lcx)) {
		/*
		 * same cpu - just do it
		 */
		(*func)(arg1, arg2);
		CPUSET_DEL(xc_cpuset, lcx);
		if (CPUSET_ISNULL(xc_cpuset)) {
			XC_STAT_INC(x_dstat[lcx][XC_SOME_SELF]);
			XC_TRACE(XC_SOME_SELF, &tset, func, arg1, arg2);
			XC_SPL_EXIT(lcx, opl);
			kpreempt_enable();
			return;
		}
	}

	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
		cpuset_t mset = xc_mbox[lcx].xc_cpuset;

		CPUSET_AND(mset, cpuset);
		ASSERT(MUTEX_HELD(&xc_sys_mutex));
		ASSERT(CPUSET_ISEQUAL(mset, cpuset));
		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
		XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER_H]);
		XC_TRACE(XC_SOME_OTHER_H, &xc_cpuset, func, arg1, arg2);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	/*
	 * Avoid deadlock if someone has sent us an xc_loop request while
	 * we are trying to grab xc_sys_mutex.
	 */
	XC_SPL_EXIT(lcx, opl);

	/*
	 * At this point, since we don't own xc_sys_mutex,
	 * our pil shouldn't run at or above the XCALL_PIL.
	 */
	ASSERT(getpil() < XCALL_PIL);

	/*
	 * grab xc_sys_mutex before writing to the xc_mbox
	 */
	mutex_enter(&xc_sys_mutex);
	xc_spl_enter[lcx] = 1;

	XC_TRACE(XC_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);

	xc_spl_enter[lcx] = 0;
	XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER]);
	mutex_exit(&xc_sys_mutex);
	kpreempt_enable();
}

/*
 * xc_all - send a "x-call" to all cpus
 */
void
xc_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	ASSERT((uintptr_t)func > KERNELBASE);
	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);

	CPUSET_ZERO(tset);

	kpreempt_disable();
	XC_SPL_ENTER(lcx, opl);			/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * same cpu - just do it
	 */
	(*func)(arg1, arg2);
	xc_cpuset = cpu_ready_set;
	CPUSET_DEL(xc_cpuset, lcx);

	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_STAT_INC(x_dstat[lcx][XC_ALL_SELF]);
		XC_TRACE(XC_ALL_SELF, &tset, func, arg1, arg2);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
		cpuset_t mset = xc_mbox[lcx].xc_cpuset;

		CPUSET_AND(mset, xc_cpuset);
		ASSERT(MUTEX_HELD(&xc_sys_mutex));
		ASSERT(CPUSET_ISEQUAL(mset, xc_cpuset));
		XC_TRACE(XC_ALL_OTHER_H, &xc_cpuset, func, arg1, arg2);
		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
		XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER_H]);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	/*
	 * Avoid deadlock if someone has sent us an xc_loop request while
	 * we are trying to grab xc_sys_mutex.
	 */
	XC_SPL_EXIT(lcx, opl);

	/*
	 * At this point, since we don't own xc_sys_mutex,
	 * our pil shouldn't run at or above the XCALL_PIL.
	 */
	ASSERT(getpil() < XCALL_PIL);

	/*
	 * grab xc_sys_mutex before writing to the xc_mbox
	 */
	mutex_enter(&xc_sys_mutex);
	xc_spl_enter[lcx] = 1;

	XC_TRACE(XC_ALL_OTHER, &xc_cpuset, func, arg1, arg2);
	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);

	xc_spl_enter[lcx] = 0;
	XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER]);
	mutex_exit(&xc_sys_mutex);
	kpreempt_enable();
}

/*
 * xc_attention - paired with xc_dismissed()
 *
 * xc_attention() grabs the xc_sys_mutex and xc_dismissed() releases it;
 * they are called when an initiator wants to capture some/all cpus for
 * a critical session.
 */
void
xc_attention(cpuset_t cpuset)
{
	int pix, lcx;
	cpuset_t xc_cpuset, tmpset;
	cpuset_t recv_cpuset;
	uint64_t loop_cnt = 0;
	int first_time = 1;

	CPUSET_ZERO(recv_cpuset);

	/*
	 * don't migrate the cpu until xc_dismissed() is finished
	 */
	ASSERT(getpil() < XCALL_PIL);
	mutex_enter(&xc_sys_mutex);
	lcx = (int)(CPU->cpu_id);
	ASSERT(x_dstat[lcx][XC_ATTENTION] ==
	    x_dstat[lcx][XC_DISMISSED]);
	ASSERT(xc_holder == -1);
	xc_mbox[lcx].xc_cpuset = cpuset;
	xc_holder = lcx; /* no membar; only current cpu needs the right lcx */

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * don't send mondo to self
	 */
	CPUSET_DEL(xc_cpuset, lcx);

	XC_STAT_INC(x_dstat[lcx][XC_ATTENTION]);
	XC_TRACE(XC_ATTENTION, &xc_cpuset, NULL, NULL, NULL);

	if (CPUSET_ISNULL(xc_cpuset))
		return;

	xc_spl_enter[lcx] = 1;
	/*
	 * inform the target processors to enter into xc_loop()
	 */
	init_mondo(setsoftint_tl1, xc_loop_inum, 0);
	SEND_MBOX_MONDO_XC_ENTER(xc_cpuset);
	xc_spl_enter[lcx] = 0;

	/*
	 * make sure target processors have entered into xc_loop()
	 */
	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
		tmpset = xc_cpuset;
		for (pix = 0; pix < NCPU; pix++) {
			if (CPU_IN_SET(tmpset, pix)) {
				/*
				 * membar_stld() is done in xc_loop
				 */
				if (xc_mbox[pix].xc_state == XC_WAIT) {
					CPUSET_ADD(recv_cpuset, pix);
				}
				CPUSET_DEL(tmpset, pix);
				if (CPUSET_ISNULL(tmpset)) {
					break;
				}
			}
		}
		if (loop_cnt++ > xc_mondo_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			if (first_time) {
				XT_SYNC_SOME(xc_cpuset);
				first_time = 0;
				loop_cnt = 0;
				continue;
			}
			cmn_err(CE_PANIC, "xc_attention() timeout");
		}
	}

	/*
	 * xc_sys_mutex remains held until xc_dismissed() is finished
	 */
}
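
/*
 * Illustrative capture/release pattern ("target_set" is a hypothetical
 * cpuset built by the caller):
 *
 *	xc_attention(target_set);	captured cpus now spin in xc_loop()
 *	...critical session: xc requests to the captured cpus are
 *	...serviced immediately through the mailbox handshake
 *	xc_dismissed(target_set);	release cpus, drop xc_sys_mutex
 */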

/*
 * xc_dismissed - paired with xc_attention()
 *
 * Called after the critical session is finished.
 */
void
xc_dismissed(cpuset_t cpuset)
{
	int pix;
	int lcx = (int)(CPU->cpu_id);
	cpuset_t xc_cpuset, tmpset;
	cpuset_t recv_cpuset;
	uint64_t loop_cnt = 0;

	ASSERT(lcx == xc_holder);
	ASSERT(CPUSET_ISEQUAL(xc_mbox[lcx].xc_cpuset, cpuset));
	ASSERT(getpil() >= XCALL_PIL);
	CPUSET_ZERO(xc_mbox[lcx].xc_cpuset);
	CPUSET_ZERO(recv_cpuset);
	membar_stld();

	XC_STAT_INC(x_dstat[lcx][XC_DISMISSED]);
	ASSERT(x_dstat[lcx][XC_DISMISSED] == x_dstat[lcx][XC_ATTENTION]);

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * exclude itself
	 */
	CPUSET_DEL(xc_cpuset, lcx);
	XC_TRACE(XC_DISMISSED, &xc_cpuset, NULL, NULL, NULL);
	if (CPUSET_ISNULL(xc_cpuset)) {
		xc_holder = -1;
		mutex_exit(&xc_sys_mutex);
		return;
	}

	/*
	 * inform other processors to get out of xc_loop()
	 */
	tmpset = xc_cpuset;
	for (pix = 0; pix < NCPU; pix++) {
		if (CPU_IN_SET(tmpset, pix)) {
			xc_mbox[pix].xc_state = XC_EXIT;
			membar_stld();
			CPUSET_DEL(tmpset, pix);
			if (CPUSET_ISNULL(tmpset)) {
				break;
			}
		}
	}

	/*
	 * make sure target processors have exited from xc_loop()
	 */
	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
		tmpset = xc_cpuset;
		for (pix = 0; pix < NCPU; pix++) {
			if (CPU_IN_SET(tmpset, pix)) {
				/*
				 * membar_stld() is done in xc_loop
				 */
				if (xc_mbox[pix].xc_state == XC_IDLE) {
					CPUSET_ADD(recv_cpuset, pix);
				}
				CPUSET_DEL(tmpset, pix);
				if (CPUSET_ISNULL(tmpset)) {
					break;
				}
			}
		}
		if (loop_cnt++ > xc_func_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			cmn_err(CE_PANIC, "xc_dismissed() timeout");
		}
	}
	xc_holder = -1;
	mutex_exit(&xc_sys_mutex);
}

/*
 * xc_serv - "x-call" handler at TL=0; serves only one x-call request.
 * Runs at XCALL_PIL level.
 */
uint_t
xc_serv(void)
{
	int lcx = (int)(CPU->cpu_id);
	struct xc_mbox *xmp;
	xcfunc_t *func;
	uint64_t arg1, arg2;
	cpuset_t tset;

	ASSERT(getpil() == XCALL_PIL);
	CPUSET_ZERO(tset);
	CPUSET_ADD(tset, lcx);
	flush_windows();
	xmp = &xc_mbox[lcx];
	ASSERT(lcx != xc_holder);
	ASSERT(xmp->xc_state == XC_DOIT);
	func = xmp->xc_func;
	arg1 = xmp->xc_arg1;	/* read args even if func is NULL, so the */
	arg2 = xmp->xc_arg2;	/* traces below never see uninitialized values */
	XC_TRACE(XC_SERV, &tset, func, arg1, arg2);
	if (func != NULL)
		(*func)(arg1, arg2);
	XC_STAT_INC(x_rstat[lcx][XC_SERV]);
	XC_TRACE(XC_SERV, &tset, func, arg1, arg2);
	xmp->xc_state = XC_IDLE;
	membar_stld();
	return (1);
}

/*
 * if set to 1, an xc_loop timeout will cause a panic;
 * otherwise a warning is printed
 */
uint_t xc_loop_panic = 0;

/*
 * xc_loop - "x-call" handler at TL=0; captures the cpu for a critical
 * session, or serves multiple x-call requests.  Runs at XCALL_PIL level.
 */
uint_t
xc_loop(void)
{
	int lcx = (int)(CPU->cpu_id);
	struct xc_mbox *xmp;
	xcfunc_t *func;
	uint64_t arg1, arg2;
	uint64_t loop_cnt = 0;
	cpuset_t tset;

	ASSERT(getpil() == XCALL_PIL);

	CPUSET_ZERO(tset);
	flush_windows();

	/*
	 * Someone must own the xc_sys_mutex;
	 * no further interrupt (at XCALL_PIL or below) can
	 * be taken by this processor until xc_loop exits.
	 *
	 * The owner of xc_sys_mutex (the xc_holder) can expect
	 * its xc/xt requests to be handled as follows:
	 * 	xc requests use xc_mbox's handshaking for their services
	 * 	xt requests at TL>0 will be handled immediately
	 * 	xt requests at TL=0:
	 *		if their handlers' pils are <= XCALL_PIL, then
	 *			they will be handled after xc_loop exits
	 *			(so, they probably should not be used)
	 *		else they will be handled immediately
	 *
	 * For those who are not informed to enter xc_loop, if they
	 * send xc/xt requests to this processor at this moment,
	 * the requests will be handled as follows:
	 *	xc requests will be handled after they grab xc_sys_mutex
	 *	xt requests at TL>0 will be handled immediately
	 * 	xt requests at TL=0:
	 *		if their handlers' pils are <= XCALL_PIL, then
	 *			they will be handled after xc_loop exits
	 *		else they will be handled immediately
	 */
	xmp = &xc_mbox[lcx];
	ASSERT(lcx != xc_holder);
	ASSERT(xmp->xc_state == XC_ENTER);
	xmp->xc_state = XC_WAIT;
	CPUSET_ADD(tset, lcx);
	membar_stld();
	XC_STAT_INC(x_rstat[lcx][XC_LOOP]);
	XC_TRACE(XC_LOOP_ENTER, &tset, NULL, NULL, NULL);
	while (xmp->xc_state != XC_EXIT) {
		if (xmp->xc_state == XC_DOIT) {
			func = xmp->xc_func;
			arg1 = xmp->xc_arg1;
			arg2 = xmp->xc_arg2;
			XC_TRACE(XC_LOOP_DOIT, &tset, func, arg1, arg2);
			if (func != NULL)
				(*func)(arg1, arg2);
			xmp->xc_state = XC_WAIT;
			membar_stld();
			/*
			 * reset the timeout counter
			 * since some work was done
			 */
			loop_cnt = 0;
		} else {
			/* patience is a virtue... */
			loop_cnt++;
		}

		if (loop_cnt > xc_func_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			cmn_err(xc_loop_panic ? CE_PANIC : CE_WARN,
			    "xc_loop() timeout");
			/*
			 * if the above displayed a warning,
			 * reset the timeout counter and be patient
			 */
			loop_cnt = 0;
		}
	}
	ASSERT(xmp->xc_state == XC_EXIT);
	ASSERT(xc_holder != -1);
	XC_TRACE(XC_LOOP_EXIT, &tset, NULL, NULL, NULL);
	xmp->xc_state = XC_IDLE;
	membar_stld();
	return (1);
}
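
/*
 * Mailbox state transitions, summarized from the code above (a reading
 * aid inferred from the asserts and assignments, not original text):
 *
 *	single request (xc_one/xc_some/xc_all, no session):
 *		XC_IDLE -> XC_DOIT -> XC_IDLE	(xc_serv on the target)
 *
 *	captured session (xc_attention .. xc_dismissed):
 *		XC_IDLE -> XC_ENTER -> XC_WAIT	(target enters xc_loop)
 *		XC_WAIT -> XC_DOIT -> XC_WAIT	(one request per iteration)
 *		XC_WAIT -> XC_EXIT -> XC_IDLE	(target leaves xc_loop)
 */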