/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/cpuvar.h>
#include <sys/intreg.h>
#include <sys/x_call.h>
#include <sys/cmn_err.h>
#include <sys/membar.h>
#include <sys/disp.h>
#include <sys/debug.h>
#include <sys/privregs.h>
#include <sys/xc_impl.h>
#include <sys/ivintr.h>
#include <sys/dmv.h>
#include <sys/sysmacros.h>

#ifdef DEBUG
uint_t x_dstat[NCPU][XC_LOOP_EXIT+1];
uint_t x_rstat[NCPU][4];
#endif /* DEBUG */

static int xc_serv_inum;	/* software interrupt number for xc_serv() */
static int xc_loop_inum;	/* software interrupt number for xc_loop() */
kmutex_t xc_sys_mutex;		/* protect xcall session and xc_mbox */
int xc_spl_enter[NCPU];		/* protect sending x-call */
static int xc_holder = -1; /* the cpu who initiates xc_attention, 0 is valid */

/*
 * Mail box for handshaking and xcall request; protected by xc_sys_mutex
 */
static struct xc_mbox {
	xcfunc_t *xc_func;
	uint64_t xc_arg1;
	uint64_t xc_arg2;
	cpuset_t xc_cpuset;
	volatile uint_t xc_state;
} xc_mbox[NCPU];
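/*
 * The xc_state field above implements a small handshake protocol between
 * the initiator and the target cpu (see xc_serv() and xc_loop() below).
 * Roughly:
 *
 *	XC_IDLE  - the mbox is free; the target is not captured
 *	XC_ENTER - xc_attention() has asked the target to enter xc_loop()
 *	XC_WAIT  - the target sits in xc_loop() waiting for work
 *	XC_DOIT  - a request (xc_func/xc_arg1/xc_arg2) is pending
 *	XC_EXIT  - xc_dismissed() has asked the target to leave xc_loop()
 *
 * A one-shot xc_one()/xc_some()/xc_all() request simply moves the target's
 * mbox XC_IDLE -> XC_DOIT, and xc_serv() moves it back to XC_IDLE.
 */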
uint64_t xc_tick_limit;		/* send_mondo() tick limit value */
uint64_t xc_tick_limit_scale = 1;	/* scale used to increase the limit */
uint64_t xc_tick_jump_limit;	/* send_mondo() irregular tick jump limit */

/* timeout value for xcalls to be received by the target CPU */
uint64_t xc_mondo_time_limit;

/* timeout value for xcall functions to be executed on the target CPU */
uint64_t xc_func_time_limit;

uint64_t xc_scale = 1;	/* scale used to calculate timeout limits */
uint64_t xc_mondo_multiplier = 10;

uint_t sendmondo_in_recover;

/*
 * sending x-calls
 */
void	init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2);
void	send_one_mondo(int cpuid);
void	send_mondo_set(cpuset_t set);

/*
 * Adjust xc_attention timeout if a faster cpu is dynamically added.
 * Ignore the dynamic removal of a cpu that would lower these timeout
 * values.
 */
static int
xc_func_timeout_adj(cpu_setup_t what, int cpuid)
{
	uint64_t freq = cpunodes[cpuid].clock_freq;

	switch (what) {
	case CPU_ON:
	case CPU_INIT:
	case CPU_CONFIG:
	case CPU_CPUPART_IN:
		if (freq * xc_scale > xc_mondo_time_limit) {
			xc_mondo_time_limit = freq * xc_scale;
			xc_func_time_limit = xc_mondo_time_limit *
			    xc_mondo_multiplier;
		}
		break;
	case CPU_OFF:
	case CPU_UNCONFIG:
	case CPU_CPUPART_OUT:
	default:
		break;
	}

	return (0);
}
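/*
 * Worked example (figures are illustrative only): with xc_scale == 1,
 * dynamically adding a cpu whose clock_freq exceeds that of the fastest
 * cpu seen so far raises xc_mondo_time_limit to the new cpu's frequency
 * times xc_scale, and xc_func_time_limit to ten times that
 * (xc_mondo_multiplier == 10).  Removing the fast cpu later deliberately
 * leaves the larger limits in place, per the comment above.
 */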
/*
 * xc_init - initialize x-call related locks
 */
void
xc_init(void)
{
	int pix;
	uint64_t maxfreq = 0;

	mutex_init(&xc_sys_mutex, NULL, MUTEX_SPIN,
	    (void *)ipltospl(XCALL_PIL));

#ifdef DEBUG
	/* Initialize for all possible CPUs. */
	for (pix = 0; pix < NCPU; pix++) {
		XC_STAT_INIT(pix);
	}
#endif /* DEBUG */

	xc_serv_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_serv, 0);
	xc_loop_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_loop, 0);

	/*
	 * Initialize the calibrated tick limit for send_mondo.
	 * The value represents the maximum tick count to wait.
	 */
	xc_tick_limit =
	    ((uint64_t)sys_tick_freq * XC_SEND_MONDO_MSEC) / 1000;
	xc_tick_jump_limit = xc_tick_limit / 32;
	xc_tick_limit *= xc_tick_limit_scale;

	/*
	 * Maximum number of loops to wait before timing out in xc_attention.
	 */
	for (pix = 0; pix < NCPU; pix++) {
		maxfreq = MAX(cpunodes[pix].clock_freq, maxfreq);
	}
	xc_mondo_time_limit = maxfreq * xc_scale;
	register_cpu_setup_func((cpu_setup_func_t *)xc_func_timeout_adj, NULL);

	/*
	 * Maximum number of loops to wait for a xcall function to be
	 * executed on the target CPU.
	 */
	xc_func_time_limit = xc_mondo_time_limit * xc_mondo_multiplier;
}
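/*
 * Worked example (illustrative figures; the actual XC_SEND_MONDO_MSEC
 * value lives in xc_impl.h): with a 150 MHz %tick, i.e. sys_tick_freq
 * == 150000000, each millisecond of XC_SEND_MONDO_MSEC contributes
 * 150000000 / 1000 = 150000 ticks to xc_tick_limit, and the irregular
 * jump limit is 1/32 of the total before scaling.
 */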
/*
 * The following routines basically provide callers with two kinds of
 * inter-processor interrupt services:
 *	1. cross calls (x-calls) - requests are handled at target cpu's TL=0
 *	2. cross traps (x-traps) - requests are handled at target cpu's TL>0
 *
 * Although these routines protect the services from migrating to other cpus
 * "after" they are called, it is the caller's choice or responsibility to
 * prevent the cpu migration "before" calling them.
 *
 * X-call routines:
 *
 *	xc_one()  - send a request to one processor
 *	xc_some() - send a request to some processors
 *	xc_all()  - send a request to all processors
 *
 *	Their common parameters:
 *		func - a TL=0 handler address
 *		arg1 and arg2 - optional
 *
 *	The services provided by x-call routines allow callers
 *	to send a request to target cpus to execute a TL=0
 *	handler.
 *	The interface of the registers of the TL=0 handler:
 *		%o0: arg1
 *		%o1: arg2
 *
 * X-trap routines:
 *
 *	xt_one()  - send a request to one processor
 *	xt_some() - send a request to some processors
 *	xt_all()  - send a request to all processors
 *
 *	Their common parameters:
 *		func - a TL>0 handler address or an interrupt number
 *		arg1, arg2
 *		       optional when "func" is an address;
 *		       0        when "func" is an interrupt number
 *
 *	If the request of "func" is a kernel address, then
 *	the target cpu will execute the request of "func" with
 *	args at "TL>0" level.
 *	The interface of the registers of the TL>0 handler:
 *		%g1: arg1
 *		%g2: arg2
 *
 *	If the request of "func" is not a kernel address, then it has
 *	to be an assigned interrupt number through add_softintr().
 *	An interrupt number is an index to the interrupt vector table,
 *	whose entry contains an interrupt handler address with its
 *	corresponding interrupt level and argument.
 *	The target cpu will arrange the request to be serviced according
 *	to its pre-registered information.
 *	args are assumed to be zeros in this case.
 *
 * In addition, callers are allowed to capture and release cpus by
 * calling the routines: xc_attention() and xc_dismissed().
 */
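/*
 * Typical usage sketch (hypothetical handler, for illustration only):
 *
 *	static void
 *	my_handler(uint64_t arg1, uint64_t arg2)
 *	{
 *		... runs on the target cpu at TL=0, XCALL_PIL ...
 *	}
 *
 *	kpreempt_disable();		-- keep the caller from migrating
 *	xc_one(tcpu, (xcfunc_t *)my_handler, arg1, arg2);
 *	kpreempt_enable();
 *
 * An xt_one() call looks the same, except that "func" must be a TL>0
 * trap handler (%g1/%g2 carry the args) or a software interrupt number
 * registered through add_softintr().
 */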
/*
 * xt_one - send an "x-trap" to a cpu
 */
void
xt_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	if (!CPU_IN_SET(cpu_ready_set, cix)) {
		return;
	}
	xt_one_unchecked(cix, func, arg1, arg2);
}

/*
 * xt_one_unchecked - send an "x-trap" to a cpu without checking for its
 * existence in cpu_ready_set
 */
void
xt_one_unchecked(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t tset;

	/*
	 * Make sure the function address will not be interpreted as a
	 * dmv interrupt
	 */
	ASSERT(!DMV_IS_DMV(func));

	/*
	 * It's illegal to send software inums through the cross-trap
	 * interface.
	 */
	ASSERT((uintptr_t)func >= KERNELBASE);

	CPUSET_ZERO(tset);

	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, cix);

	if (cix == lcx) {
		/*
		 * same cpu - use software fast trap
		 */
		send_self_xcall(CPU, arg1, arg2, func);
		XC_STAT_INC(x_dstat[lcx][XT_ONE_SELF]);
		XC_TRACE(XT_ONE_SELF, &tset, func, arg1, arg2);
	} else {	/* other cpu - send a mondo to the target cpu */
		XC_TRACE(XT_ONE_OTHER, &tset, func, arg1, arg2);
		init_mondo(func, arg1, arg2);
		send_one_mondo(cix);
		XC_STAT_INC(x_dstat[lcx][XT_ONE_OTHER]);
	}
	XC_SPL_EXIT(lcx, opl);
}
/*
 * xt_some - send an "x-trap" to some cpus
 */
void
xt_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	/*
	 * Make sure the function address will not be interpreted as a
	 * dmv interrupt
	 */
	ASSERT(!DMV_IS_DMV(func));

	/*
	 * It's illegal to send software inums through the cross-trap
	 * interface.
	 */
	ASSERT((uintptr_t)func >= KERNELBASE);

	CPUSET_ZERO(tset);

	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * send to nobody; just return
	 */
	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_SPL_EXIT(lcx, opl);
		return;
	}

	/*
	 * don't send mondo to self
	 */
	if (CPU_IN_SET(xc_cpuset, lcx)) {
		/*
		 * same cpu - use software fast trap
		 */
		send_self_xcall(CPU, arg1, arg2, func);
		XC_STAT_INC(x_dstat[lcx][XT_SOME_SELF]);
		XC_TRACE(XT_SOME_SELF, &tset, func, arg1, arg2);
		CPUSET_DEL(xc_cpuset, lcx);
		if (CPUSET_ISNULL(xc_cpuset)) {
			XC_SPL_EXIT(lcx, opl);
			return;
		}
	}
	XC_TRACE(XT_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
	init_mondo(func, arg1, arg2);
	send_mondo_set(xc_cpuset);
	XC_STAT_INC(x_dstat[lcx][XT_SOME_OTHER]);

	XC_SPL_EXIT(lcx, opl);
}
/*
 * xt_all - send an "x-trap" to all cpus
 */
void
xt_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	/*
	 * Make sure the function address will not be interpreted as a
	 * dmv interrupt
	 */
	ASSERT(!DMV_IS_DMV(func));

	/*
	 * It's illegal to send software inums through the cross-trap
	 * interface.
	 */
	ASSERT((uintptr_t)func >= KERNELBASE);

	CPUSET_ZERO(tset);

	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * same cpu - use software fast trap
	 */
	if (CPU_IN_SET(cpu_ready_set, lcx))
		send_self_xcall(CPU, arg1, arg2, func);

	XC_TRACE(XT_ALL_OTHER, &cpu_ready_set, func, arg1, arg2);

	/*
	 * don't send mondo to self
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_DEL(xc_cpuset, lcx);

	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_STAT_INC(x_dstat[lcx][XT_ALL_SELF]);
		XC_TRACE(XT_ALL_SELF, &tset, func, arg1, arg2);
		XC_SPL_EXIT(lcx, opl);
		return;
	}

	init_mondo(func, arg1, arg2);
	send_mondo_set(xc_cpuset);

	XC_STAT_INC(x_dstat[lcx][XT_ALL_OTHER]);
	XC_SPL_EXIT(lcx, opl);
}
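/*
 * Note that, unlike the xc_*() routines below, the xt_*() routines above
 * only wait for the mondo to be delivered (inside send_one_mondo() and
 * send_mondo_set()); they do not wait for the TL>0 handler to finish on
 * the target.  Callers that need completion must synchronize separately
 * (cf. the XT_SYNC_ONE/XT_SYNC_SOME usage further down in this file).
 */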
/*
 * xc_one - send an "x-call" to a cpu
 */
void
xc_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	uint64_t loop_cnt = 0;
	cpuset_t tset;
	int first_time = 1;

	/*
	 * send to nobody; just return
	 */
	if (!CPU_IN_SET(cpu_ready_set, cix))
		return;

	ASSERT((uintptr_t)func > KERNELBASE);
	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);

	CPUSET_ZERO(tset);

	kpreempt_disable();

	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, cix);

	if (cix == lcx) {	/* same cpu just do it */
		XC_TRACE(XC_ONE_SELF, &tset, func, arg1, arg2);
		(*func)(arg1, arg2);
		XC_STAT_INC(x_dstat[lcx][XC_ONE_SELF]);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
		ASSERT(MUTEX_HELD(&xc_sys_mutex));
		ASSERT(CPU_IN_SET(xc_mbox[lcx].xc_cpuset, lcx));
		ASSERT(CPU_IN_SET(xc_mbox[cix].xc_cpuset, cix));
		ASSERT(xc_mbox[cix].xc_state == XC_WAIT);
		XC_TRACE(XC_ONE_OTHER_H, &tset, func, arg1, arg2);

		/*
		 * target processor's xc_loop should be waiting
		 * for the work to do; just set up the xc_mbox
		 */
		XC_SETUP(cix, func, arg1, arg2);
		membar_stld();

		while (xc_mbox[cix].xc_state != XC_WAIT) {
			if (loop_cnt++ > xc_func_time_limit) {
				if (sendmondo_in_recover) {
					drv_usecwait(1);
					loop_cnt = 0;
					continue;
				}
				cmn_err(CE_PANIC, "xc_one() timeout, "
				    "xc_state[%d] != XC_WAIT", cix);
			}
		}
		XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER_H]);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	/*
	 * Avoid deadlock if someone has sent us an xc_loop request while
	 * we are trying to grab xc_sys_mutex.
	 */
	XC_SPL_EXIT(lcx, opl);

	/*
	 * At this point, since we don't own xc_sys_mutex,
	 * we shouldn't be running at or above XCALL_PIL.
	 */
	ASSERT(getpil() < XCALL_PIL);

	/*
	 * Since we are not the xc_holder, it could be that no one owns
	 * it, or that we have not been informed to enter xc_loop().
	 * In either case, we need to grab the xc_sys_mutex before we
	 * write to the xc_mbox, and we shouldn't release it until the
	 * request is finished.
	 */

	mutex_enter(&xc_sys_mutex);
	xc_spl_enter[lcx] = 1;

	/*
	 * Since we own xc_sys_mutex now, we are safe to
	 * write to the xc_mbox.
	 */
	ASSERT(xc_mbox[cix].xc_state == XC_IDLE);
	XC_TRACE(XC_ONE_OTHER, &tset, func, arg1, arg2);
	XC_SETUP(cix, func, arg1, arg2);
	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
	send_one_mondo(cix);

	/* xc_serv does membar_stld */
	while (xc_mbox[cix].xc_state != XC_IDLE) {
		if (loop_cnt++ > xc_func_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			if (first_time) {
				XT_SYNC_ONE(cix);
				first_time = 0;
				loop_cnt = 0;
				continue;
			}
			cmn_err(CE_PANIC, "xc_one() timeout, "
			    "xc_state[%d] != XC_IDLE", cix);
		}
	}
	xc_spl_enter[lcx] = 0;
	XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER]);
	mutex_exit(&xc_sys_mutex);

	kpreempt_enable();
}
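/*
 * To summarize, xc_one() takes one of three paths:
 *	1. cix == lcx:	call func directly on this cpu.
 *	2. we are the xc_holder:  the target already sits in xc_loop(),
 *	   so just deposit the request in its mbox (XC_WAIT -> XC_DOIT)
 *	   and spin until it returns to XC_WAIT.
 *	3. otherwise:  grab xc_sys_mutex, set up the mbox (XC_IDLE ->
 *	   XC_DOIT), send a soft interrupt to run xc_serv(), and spin
 *	   until the target's xc_serv() puts the mbox back to XC_IDLE.
 */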
/*
 * xc_some - send an "x-call" to some cpus; sending to self is excluded
 */
void
xc_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	ASSERT((uintptr_t)func > KERNELBASE);
	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);

	CPUSET_ZERO(tset);

	kpreempt_disable();
	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * send to nobody; just return
	 */
	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	if (CPU_IN_SET(xc_cpuset, lcx)) {
		/*
		 * same cpu just do it
		 */
		(*func)(arg1, arg2);
		CPUSET_DEL(xc_cpuset, lcx);
		if (CPUSET_ISNULL(xc_cpuset)) {
			XC_STAT_INC(x_dstat[lcx][XC_SOME_SELF]);
			XC_TRACE(XC_SOME_SELF, &tset, func, arg1, arg2);
			XC_SPL_EXIT(lcx, opl);
			kpreempt_enable();
			return;
		}
	}

	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
		cpuset_t mset = xc_mbox[lcx].xc_cpuset;

		CPUSET_AND(mset, cpuset);
		ASSERT(MUTEX_HELD(&xc_sys_mutex));
		ASSERT(CPUSET_ISEQUAL(mset, cpuset));
		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
		XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER_H]);
		XC_TRACE(XC_SOME_OTHER_H, &xc_cpuset, func, arg1, arg2);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	/*
	 * Avoid deadlock if someone has sent us an xc_loop request while
	 * we are trying to grab xc_sys_mutex.
	 */
	XC_SPL_EXIT(lcx, opl);

	/*
	 * At this point, since we don't own xc_sys_mutex,
	 * we shouldn't be running at or above XCALL_PIL.
	 */
	ASSERT(getpil() < XCALL_PIL);

	/*
	 * grab xc_sys_mutex before writing to the xc_mbox
	 */
	mutex_enter(&xc_sys_mutex);
	xc_spl_enter[lcx] = 1;

	XC_TRACE(XC_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);

	xc_spl_enter[lcx] = 0;
	XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER]);
	mutex_exit(&xc_sys_mutex);
	kpreempt_enable();
}
/*
 * xc_all - send an "x-call" to all cpus
 */
void
xc_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	ASSERT((uintptr_t)func > KERNELBASE);
	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);

	CPUSET_ZERO(tset);

	kpreempt_disable();
	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * same cpu just do it
	 */
	(*func)(arg1, arg2);
	xc_cpuset = cpu_ready_set;
	CPUSET_DEL(xc_cpuset, lcx);

	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_STAT_INC(x_dstat[lcx][XC_ALL_SELF]);
		XC_TRACE(XC_ALL_SELF, &tset, func, arg1, arg2);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
		cpuset_t mset = xc_mbox[lcx].xc_cpuset;

		CPUSET_AND(mset, xc_cpuset);
		ASSERT(MUTEX_HELD(&xc_sys_mutex));
		ASSERT(CPUSET_ISEQUAL(mset, xc_cpuset));
		XC_TRACE(XC_ALL_OTHER_H, &xc_cpuset, func, arg1, arg2);
		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
		XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER_H]);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	/*
	 * Avoid deadlock if someone has sent us an xc_loop request while
	 * we are trying to grab xc_sys_mutex.
	 */
	XC_SPL_EXIT(lcx, opl);

	/*
	 * At this point, since we don't own xc_sys_mutex,
	 * we shouldn't be running at or above XCALL_PIL.
	 */
	ASSERT(getpil() < XCALL_PIL);

	/*
	 * grab xc_sys_mutex before writing to the xc_mbox
	 */
	mutex_enter(&xc_sys_mutex);
	xc_spl_enter[lcx] = 1;

	XC_TRACE(XC_ALL_OTHER, &xc_cpuset, func, arg1, arg2);
	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);

	xc_spl_enter[lcx] = 0;
	XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER]);
	mutex_exit(&xc_sys_mutex);
	kpreempt_enable();
}
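/*
 * Usage sketch (illustrative; my_handler is hypothetical): to run a
 * handler on cpus 1 and 3 only, a caller would build the set first:
 *
 *	cpuset_t cset;
 *
 *	CPUSET_ZERO(cset);
 *	CPUSET_ADD(cset, 1);
 *	CPUSET_ADD(cset, 3);
 *	xc_some(cset, (xcfunc_t *)my_handler, arg1, arg2);
 *
 * Cpus in the set that are not in cpu_ready_set are silently skipped.
 */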
/*
 * xc_attention - paired with xc_dismissed()
 *
 * xc_attention() holds the xc_sys_mutex and xc_dismissed() releases it;
 * called when an initiator wants to capture some/all cpus for a critical
 * session.
 */
void
xc_attention(cpuset_t cpuset)
{
	int pix, lcx;
	cpuset_t xc_cpuset, tmpset;
	cpuset_t recv_cpuset;
	uint64_t loop_cnt = 0;
	int first_time = 1;

	CPUSET_ZERO(recv_cpuset);

	/*
	 * don't migrate the cpu until xc_dismissed() is finished
	 */
	ASSERT(getpil() < XCALL_PIL);
	mutex_enter(&xc_sys_mutex);
	lcx = (int)(CPU->cpu_id);
	ASSERT(x_dstat[lcx][XC_ATTENTION] ==
	    x_dstat[lcx][XC_DISMISSED]);
	ASSERT(xc_holder == -1);
	xc_mbox[lcx].xc_cpuset = cpuset;
	xc_holder = lcx; /* no membar; only current cpu needs the right lcx */

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * don't send mondo to self
	 */
	CPUSET_DEL(xc_cpuset, lcx);

	XC_STAT_INC(x_dstat[lcx][XC_ATTENTION]);
	XC_TRACE(XC_ATTENTION, &xc_cpuset, NULL, NULL, NULL);

	if (CPUSET_ISNULL(xc_cpuset))
		return;

	xc_spl_enter[lcx] = 1;
	/*
	 * inform the target processors to enter into xc_loop()
	 */
	init_mondo(setsoftint_tl1, xc_loop_inum, 0);
	SEND_MBOX_MONDO_XC_ENTER(xc_cpuset);
	xc_spl_enter[lcx] = 0;

	/*
	 * make sure target processors have entered into xc_loop()
	 */
	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
		tmpset = xc_cpuset;
		for (pix = 0; pix < NCPU; pix++) {
			if (CPU_IN_SET(tmpset, pix)) {
				/*
				 * membar_stld() is done in xc_loop
				 */
				if (xc_mbox[pix].xc_state == XC_WAIT) {
					CPUSET_ADD(recv_cpuset, pix);
				}
				CPUSET_DEL(tmpset, pix);
				if (CPUSET_ISNULL(tmpset)) {
					break;
				}
			}
		}
		if (loop_cnt++ > xc_mondo_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			if (first_time) {
				XT_SYNC_SOME(xc_cpuset);
				first_time = 0;
				loop_cnt = 0;
				continue;
			}
			cmn_err(CE_PANIC, "xc_attention() timeout");
		}
	}

	/*
	 * xc_sys_mutex remains held until xc_dismissed() is finished
	 */
}
/*
 * xc_dismissed - paired with xc_attention()
 *
 * Called after the critical session is finished.
 */
void
xc_dismissed(cpuset_t cpuset)
{
	int pix;
	int lcx = (int)(CPU->cpu_id);
	cpuset_t xc_cpuset, tmpset;
	cpuset_t recv_cpuset;
	uint64_t loop_cnt = 0;

	ASSERT(lcx == xc_holder);
	ASSERT(CPUSET_ISEQUAL(xc_mbox[lcx].xc_cpuset, cpuset));
	ASSERT(getpil() >= XCALL_PIL);
	CPUSET_ZERO(xc_mbox[lcx].xc_cpuset);
	CPUSET_ZERO(recv_cpuset);
	membar_stld();

	XC_STAT_INC(x_dstat[lcx][XC_DISMISSED]);
	ASSERT(x_dstat[lcx][XC_DISMISSED] == x_dstat[lcx][XC_ATTENTION]);

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * exclude itself
	 */
	CPUSET_DEL(xc_cpuset, lcx);
	XC_TRACE(XC_DISMISSED, &xc_cpuset, NULL, NULL, NULL);
	if (CPUSET_ISNULL(xc_cpuset)) {
		xc_holder = -1;
		mutex_exit(&xc_sys_mutex);
		return;
	}

	/*
	 * inform other processors to get out of xc_loop()
	 */
	tmpset = xc_cpuset;
	for (pix = 0; pix < NCPU; pix++) {
		if (CPU_IN_SET(tmpset, pix)) {
			xc_mbox[pix].xc_state = XC_EXIT;
			membar_stld();
			CPUSET_DEL(tmpset, pix);
			if (CPUSET_ISNULL(tmpset)) {
				break;
			}
		}
	}

	/*
	 * make sure target processors have exited from xc_loop()
	 */
	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
		tmpset = xc_cpuset;
		for (pix = 0; pix < NCPU; pix++) {
			if (CPU_IN_SET(tmpset, pix)) {
				/*
				 * membar_stld() is done in xc_loop
				 */
				if (xc_mbox[pix].xc_state == XC_IDLE) {
					CPUSET_ADD(recv_cpuset, pix);
				}
				CPUSET_DEL(tmpset, pix);
				if (CPUSET_ISNULL(tmpset)) {
					break;
				}
			}
		}
		if (loop_cnt++ > xc_func_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			cmn_err(CE_PANIC, "xc_dismissed() timeout");
		}
	}
	xc_holder = -1;
	mutex_exit(&xc_sys_mutex);
}
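/*
 * Capture/release sketch (illustrative): an initiator that must hold
 * other cpus still around a critical operation pairs the two calls:
 *
 *	xc_attention(cset);	-- captured cpus spin in xc_loop()
 *	... critical section; xc_one/xc_some requests to captured
 *	    cpus are served directly through their mboxes ...
 *	xc_dismissed(cset);	-- releases cpus, drops xc_sys_mutex
 *
 * Both calls must be made by the same cpu, which must not migrate in
 * between (xc_holder records its id and xc_dismissed() asserts it).
 */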
/*
 * xc_serv - "x-call" handler at TL=0; serves only one x-call request;
 * runs at XCALL_PIL level.
 */
uint_t
xc_serv(void)
{
	int lcx = (int)(CPU->cpu_id);
	struct xc_mbox *xmp;
	xcfunc_t *func;
	uint64_t arg1, arg2;
	cpuset_t tset;

	ASSERT(getpil() == XCALL_PIL);
	CPUSET_ZERO(tset);
	CPUSET_ADD(tset, lcx);
	flush_windows();
	xmp = &xc_mbox[lcx];
	ASSERT(lcx != xc_holder);
	ASSERT(xmp->xc_state == XC_DOIT);
	func = xmp->xc_func;
	XC_TRACE(XC_SERV, &tset, func, xmp->xc_arg1, xmp->xc_arg2);
	if (func != NULL) {
		arg1 = xmp->xc_arg1;
		arg2 = xmp->xc_arg2;
		(*func)(arg1, arg2);
	}
	XC_STAT_INC(x_rstat[lcx][XC_SERV]);
	XC_TRACE(XC_SERV, &tset, func, arg1, arg2);
	xmp->xc_state = XC_IDLE;
	membar_stld();
	return (1);
}

/*
 * If == 1, an xc_loop timeout will cause a panic;
 * otherwise print a warning.
 */
uint_t xc_loop_panic = 0;
/*
 * xc_loop - "x-call" handler at TL=0; captures the cpu for a critical
 * session, or serves multiple x-call requests; runs at XCALL_PIL level.
 */
uint_t
xc_loop(void)
{
	int lcx = (int)(CPU->cpu_id);
	struct xc_mbox *xmp;
	xcfunc_t *func;
	uint64_t arg1, arg2;
	uint64_t loop_cnt = 0;
	cpuset_t tset;

	ASSERT(getpil() == XCALL_PIL);

	CPUSET_ZERO(tset);
	flush_windows();

	/*
	 * Someone must have owned the xc_sys_mutex;
	 * no further interrupt (at XCALL_PIL or below) can
	 * be taken by this processor until xc_loop exits.
	 *
	 * The owner of xc_sys_mutex (or xc_holder) can expect
	 * its xc/xt requests are handled as follows:
	 *	xc requests use xc_mbox's handshaking for their services
	 *	xt requests at TL>0 will be handled immediately
	 *	xt requests at TL=0:
	 *		if their handlers' pils are <= XCALL_PIL, then
	 *		they will be handled after xc_loop exits
	 *		(so, they probably should not be used)
	 *		else they will be handled immediately
	 *
	 * For those who are not informed to enter xc_loop, if they
	 * send xc/xt requests to this processor at this moment,
	 * the requests will be handled as follows:
	 *	xc requests will be handled after they grab xc_sys_mutex
	 *	xt requests at TL>0 will be handled immediately
	 *	xt requests at TL=0:
	 *		if their handlers' pils are <= XCALL_PIL, then
	 *		they will be handled after xc_loop exits
	 *		else they will be handled immediately
	 */
	xmp = &xc_mbox[lcx];
	ASSERT(lcx != xc_holder);
	ASSERT(xmp->xc_state == XC_ENTER);
	xmp->xc_state = XC_WAIT;
	CPUSET_ADD(tset, lcx);
	membar_stld();
	XC_STAT_INC(x_rstat[lcx][XC_LOOP]);
	XC_TRACE(XC_LOOP_ENTER, &tset, NULL, NULL, NULL);
	while (xmp->xc_state != XC_EXIT) {
		if (xmp->xc_state == XC_DOIT) {
			func = xmp->xc_func;
			arg1 = xmp->xc_arg1;
			arg2 = xmp->xc_arg2;
			XC_TRACE(XC_LOOP_DOIT, &tset, func, arg1, arg2);
			if (func != NULL)
				(*func)(arg1, arg2);
			xmp->xc_state = XC_WAIT;
			membar_stld();
			/*
			 * reset the timeout counter
			 * since some work was done
			 */
			loop_cnt = 0;
		} else {
			/* patience is a virtue... */
			loop_cnt++;
		}

		if (loop_cnt > xc_func_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			cmn_err(xc_loop_panic ? CE_PANIC : CE_WARN,
			    "xc_loop() timeout");
			/*
			 * if the above displayed a warning,
			 * reset the timeout counter and be patient
			 */
			loop_cnt = 0;
		}
	}
	ASSERT(xmp->xc_state == XC_EXIT);
	ASSERT(xc_holder != -1);
	XC_TRACE(XC_LOOP_EXIT, &tset, NULL, NULL, NULL);
	xmp->xc_state = XC_IDLE;
	membar_stld();
	return (1);
}