/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/cpuvar.h>
#include <sys/intreg.h>
#include <sys/x_call.h>
#include <sys/cmn_err.h>
#include <sys/membar.h>
#include <sys/disp.h>
#include <sys/debug.h>
#include <sys/privregs.h>
#include <sys/xc_impl.h>
#include <sys/ivintr.h>
#include <sys/dmv.h>

#ifdef DEBUG
uint_t x_dstat[NCPU][XC_LOOP_EXIT+1];
uint_t x_rstat[NCPU][4];
#endif /* DEBUG */

static int xc_serv_inum;	/* software interrupt number for xc_serv() */
static int xc_loop_inum;	/* software interrupt number for xc_loop() */
kmutex_t xc_sys_mutex;		/* protect xcall session and xc_mbox */
int xc_spl_enter[NCPU];		/* protect sending x-call */
static int xc_holder = -1;	/* the cpu who initiates xc_attention, 0 is valid */

/*
 * Mail box for handshaking and xcall request; protected by xc_sys_mutex
 */
static struct xc_mbox {
	xcfunc_t *xc_func;
	uint64_t xc_arg1;
	uint64_t xc_arg2;
	cpuset_t xc_cpuset;
	volatile uint_t xc_state;
} xc_mbox[NCPU];
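
/*
 * Note on the handshake: as used below, xc_state cycles through the
 * values XC_IDLE, XC_ENTER, XC_WAIT, XC_DOIT and XC_EXIT (defined in
 * <sys/xc_impl.h>); a summary of the transitions appears above
 * xc_loop() at the bottom of this file.
 */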

uint64_t xc_tick_limit;		/* send_mondo() tick limit value */
uint64_t xc_tick_limit_scale = 1;	/* scale used to increase the limit */
uint64_t xc_tick_jump_limit;	/* send_mondo() irregular tick jump limit */

/* timeout value for xcalls to be received by the target CPU */
uint64_t xc_mondo_time_limit;

/* timeout value for xcall functions to be executed on the target CPU */
uint64_t xc_func_time_limit;

uint64_t xc_scale = 1;	/* scale used to calculate timeout limits */

uint_t sendmondo_in_recover;

/*
 * sending x-calls
 */
void	init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2);
void	send_one_mondo(int cpuid);
void	send_mondo_set(cpuset_t set);

/*
 * xc_init - initialize x-call related locks
 */
void
xc_init(void)
{
#ifdef DEBUG
	int pix;
#endif /* DEBUG */

	mutex_init(&xc_sys_mutex, NULL, MUTEX_SPIN,
	    (void *)ipltospl(XCALL_PIL));

#ifdef DEBUG
	/* Initialize for all possible CPUs. */
	for (pix = 0; pix < NCPU; pix++) {
		XC_STAT_INIT(pix);
	}
#endif /* DEBUG */

	xc_serv_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_serv, 0);
	xc_loop_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_loop, 0);

	/*
	 * Initialize the calibrated tick limit for send_mondo.
	 * The value represents the maximum tick count to wait.
	 */
	xc_tick_limit =
	    ((uint64_t)sys_tick_freq * XC_SEND_MONDO_MSEC) / 1000;
	xc_tick_jump_limit = xc_tick_limit / 32;
	xc_tick_limit *= xc_tick_limit_scale;

	/*
	 * Maximum number of loops to wait before timing out in xc_attention.
	 */
	xc_mondo_time_limit = cpunodes[CPU->cpu_id].clock_freq * xc_scale;

	/*
	 * Maximum number of loops to wait for an xcall function to be
	 * executed on the target CPU.  Default to 10 times the value
	 * of xc_mondo_time_limit.
	 */
	xc_func_time_limit = xc_mondo_time_limit * 10;
}
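
/*
 * For a rough sense of the numbers (hypothetical values, not taken from
 * this code): with sys_tick_freq at 1 GHz and XC_SEND_MONDO_MSEC at 1,
 * xc_tick_limit starts as (1000000000 * 1) / 1000 = 1000000 ticks and
 * xc_tick_jump_limit as 1000000 / 32 = 31250 ticks, before xc_tick_limit
 * is scaled by xc_tick_limit_scale.
 */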

/*
 * The following routines basically provide callers with two kinds of
 * inter-processor interrupt services:
 *	1. cross calls (x-calls) - requests are handled at target cpu's TL=0
 *	2. cross traps (c-traps) - requests are handled at target cpu's TL>0
 *
 * Although these routines protect the services from migrating to other cpus
 * "after" they are called, it is the caller's choice or responsibility to
 * prevent the cpu migration "before" calling them.
 *
 * X-call routines:
 *
 *	xc_one()  - send a request to one processor
 *	xc_some() - send a request to some processors
 *	xc_all()  - send a request to all processors
 *
 *	Their common parameters:
 *		func - a TL=0 handler address
 *		arg1 and arg2 - optional
 *
 *	The services provided by x-call routines allow callers
 *	to send a request to target cpus to execute a TL=0
 *	handler.
 *	The interface of the registers of the TL=0 handler:
 *		%o0: arg1
 *		%o1: arg2
 *
 * X-trap routines:
 *
 *	xt_one()  - send a request to one processor
 *	xt_some() - send a request to some processors
 *	xt_all()  - send a request to all processors
 *
 *	Their common parameters:
 *		func - a TL>0 handler address or an interrupt number
 *		arg1, arg2
 *			optional when "func" is an address;
 *			0 when "func" is an interrupt number
 *
 *	If the request of "func" is a kernel address, then
 *	the target cpu will execute the request of "func" with
 *	args at "TL>0" level.
 *	The interface of the registers of the TL>0 handler:
 *		%g1: arg1
 *		%g2: arg2
 *
 *	If the request of "func" is not a kernel address, then it has
 *	to be an assigned interrupt number through add_softintr().
 *	An interrupt number is an index to the interrupt vector table,
 *	whose entry contains an interrupt handler address with its
 *	corresponding interrupt level and argument.
 *	The target cpu will arrange the request to be serviced according
 *	to its pre-registered information.
 *	args are assumed to be zeros in this case.
 *
 * In addition, callers are allowed to capture and release cpus by
 * calling the routines: xc_attention() and xc_dismissed().
 */
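
/*
 * Illustrative call sequence (a sketch only; my_tl0_func and my_tl1_func
 * are hypothetical handlers, not defined in this file):
 *
 *	kpreempt_disable();			prevent migration first
 *	xc_one(3, my_tl0_func, arg1, arg2);	runs at TL=0 on cpu 3
 *	xt_all(my_tl1_func, arg1, arg2);	runs at TL>0 on all cpus
 *	kpreempt_enable();
 */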

/*
 * xt_one - send an "x-trap" to a cpu
 */
void
xt_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	if (!CPU_IN_SET(cpu_ready_set, cix)) {
		return;
	}
	xt_one_unchecked(cix, func, arg1, arg2);
}

/*
 * xt_one_unchecked - send an "x-trap" to a cpu without checking for its
 * existence in cpu_ready_set
 */
void
xt_one_unchecked(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t tset;

	/*
	 * Make sure the function address will not be interpreted as a
	 * dmv interrupt
	 */
	ASSERT(!DMV_IS_DMV(func));

	/*
	 * It's illegal to send software inums through the cross-trap
	 * interface.
	 */
	ASSERT((uintptr_t)func >= KERNELBASE);

	CPUSET_ZERO(tset);

	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, cix);

	if (cix == lcx) {
		/*
		 * same cpu - use software fast trap
		 */
		send_self_xcall(CPU, arg1, arg2, func);
		XC_STAT_INC(x_dstat[lcx][XT_ONE_SELF]);
		XC_TRACE(XT_ONE_SELF, &tset, func, arg1, arg2);
	} else {
		/*
		 * other cpu - send a mondo to the target cpu
		 */
		XC_TRACE(XT_ONE_OTHER, &tset, func, arg1, arg2);
		init_mondo(func, arg1, arg2);
		send_one_mondo(cix);
		XC_STAT_INC(x_dstat[lcx][XT_ONE_OTHER]);
	}
	XC_SPL_EXIT(lcx, opl);
}

/*
 * xt_some - send an "x-trap" to some cpus
 */
void
xt_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	/*
	 * Make sure the function address will not be interpreted as a
	 * dmv interrupt
	 */
	ASSERT(!DMV_IS_DMV(func));

	/*
	 * It's illegal to send software inums through the cross-trap
	 * interface.
	 */
	ASSERT((uintptr_t)func >= KERNELBASE);

	CPUSET_ZERO(tset);

	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * send to nobody; just return
	 */
	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_SPL_EXIT(lcx, opl);
		return;
	}

	/*
	 * don't send mondo to self
	 */
	if (CPU_IN_SET(xc_cpuset, lcx)) {
		/*
		 * same cpu - use software fast trap
		 */
		send_self_xcall(CPU, arg1, arg2, func);
		XC_STAT_INC(x_dstat[lcx][XT_SOME_SELF]);
		XC_TRACE(XT_SOME_SELF, &tset, func, arg1, arg2);
		CPUSET_DEL(xc_cpuset, lcx);
		if (CPUSET_ISNULL(xc_cpuset)) {
			XC_SPL_EXIT(lcx, opl);
			return;
		}
	}
	XC_TRACE(XT_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
	init_mondo(func, arg1, arg2);
	send_mondo_set(xc_cpuset);
	XC_STAT_INC(x_dstat[lcx][XT_SOME_OTHER]);

	XC_SPL_EXIT(lcx, opl);
}

/*
 * xt_all - send an "x-trap" to all cpus
 */
void
xt_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	/*
	 * Make sure the function address will not be interpreted as a
	 * dmv interrupt
	 */
	ASSERT(!DMV_IS_DMV(func));

	/*
	 * It's illegal to send software inums through the cross-trap
	 * interface.
	 */
	ASSERT((uintptr_t)func >= KERNELBASE);

	CPUSET_ZERO(tset);

	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * same cpu - use software fast trap
	 */
	if (CPU_IN_SET(cpu_ready_set, lcx))
		send_self_xcall(CPU, arg1, arg2, func);

	XC_TRACE(XT_ALL_OTHER, &cpu_ready_set, func, arg1, arg2);

	/*
	 * don't send mondo to self
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_DEL(xc_cpuset, lcx);

	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_STAT_INC(x_dstat[lcx][XT_ALL_SELF]);
		XC_TRACE(XT_ALL_SELF, &tset, func, arg1, arg2);
		XC_SPL_EXIT(lcx, opl);
		return;
	}

	init_mondo(func, arg1, arg2);
	send_mondo_set(xc_cpuset);

	XC_STAT_INC(x_dstat[lcx][XT_ALL_OTHER]);
	XC_SPL_EXIT(lcx, opl);
}

/*
 * xc_one - send an "x-call" to a cpu
 */
void
xc_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	uint64_t loop_cnt = 0;
	cpuset_t tset;
	int first_time = 1;

	/*
	 * send to nobody; just return
	 */
	if (!CPU_IN_SET(cpu_ready_set, cix))
		return;

	ASSERT((uintptr_t)func > KERNELBASE);
	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);

	CPUSET_ZERO(tset);

	kpreempt_disable();

	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, cix);

	if (cix == lcx) {	/* same cpu just do it */
		XC_TRACE(XC_ONE_SELF, &tset, func, arg1, arg2);
		(*func)(arg1, arg2);
		XC_STAT_INC(x_dstat[lcx][XC_ONE_SELF]);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	if (xc_holder == lcx) {	/* got the xc_sys_mutex already */
		ASSERT(MUTEX_HELD(&xc_sys_mutex));
		ASSERT(CPU_IN_SET(xc_mbox[lcx].xc_cpuset, lcx));
		ASSERT(CPU_IN_SET(xc_mbox[cix].xc_cpuset, cix));
		ASSERT(xc_mbox[cix].xc_state == XC_WAIT);
		XC_TRACE(XC_ONE_OTHER_H, &tset, func, arg1, arg2);

		/*
		 * target processor's xc_loop should be waiting
		 * for the work to do; just set up the xc_mbox
		 */
		XC_SETUP(cix, func, arg1, arg2);
		membar_stld();

		while (xc_mbox[cix].xc_state != XC_WAIT) {
			if (loop_cnt++ > xc_func_time_limit) {
				if (sendmondo_in_recover) {
					drv_usecwait(1);
					loop_cnt = 0;
					continue;
				}
				cmn_err(CE_PANIC, "xc_one() timeout, "
				    "xc_state[%d] != XC_WAIT", cix);
			}
		}
		XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER_H]);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	/*
	 * Avoid deadlock if someone has sent us an xc_loop request while
	 * we are trying to grab xc_sys_mutex.
	 */
	XC_SPL_EXIT(lcx, opl);

	/*
	 * At this point, since we don't own xc_sys_mutex,
	 * our pil shouldn't run at or above the XCALL_PIL.
	 */
	ASSERT(getpil() < XCALL_PIL);

	/*
	 * Since xc_holder is not owned by us, it could be that
	 * no one owns it, or we are not informed to enter into
	 * xc_loop().  In either case, we need to grab the
	 * xc_sys_mutex before we write to the xc_mbox, and
	 * we shouldn't release it until the request is finished.
	 */

	mutex_enter(&xc_sys_mutex);
	xc_spl_enter[lcx] = 1;

	/*
	 * Since we own xc_sys_mutex now, we are safe to
	 * write to the xc_mbox.
	 */
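	/*
	 * The handshake from here, as sketched from the initiator's view:
	 * XC_SETUP() below publishes func/args and marks the target's
	 * mailbox XC_DOIT, the mondo raises xc_serv() on the target
	 * through xc_serv_inum, and xc_serv() runs the function and
	 * stores XC_IDLE back, which terminates the wait loop below.
	 */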
	ASSERT(xc_mbox[cix].xc_state == XC_IDLE);
	XC_TRACE(XC_ONE_OTHER, &tset, func, arg1, arg2);
	XC_SETUP(cix, func, arg1, arg2);
	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
	send_one_mondo(cix);

	/* xc_serv does membar_stld */
	while (xc_mbox[cix].xc_state != XC_IDLE) {
		if (loop_cnt++ > xc_func_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			if (first_time) {
				XT_SYNC_ONE(cix);
				first_time = 0;
				loop_cnt = 0;
				continue;
			}
			cmn_err(CE_PANIC, "xc_one() timeout, "
			    "xc_state[%d] != XC_IDLE", cix);
		}
	}
	xc_spl_enter[lcx] = 0;
	XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER]);
	mutex_exit(&xc_sys_mutex);

	kpreempt_enable();
}

/*
 * xc_some - send an "x-call" to some cpus; sending to self is excluded
 */
void
xc_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	ASSERT((uintptr_t)func > KERNELBASE);
	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);

	CPUSET_ZERO(tset);

	kpreempt_disable();
	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * send to nobody; just return
	 */
	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	if (CPU_IN_SET(xc_cpuset, lcx)) {
		/*
		 * same cpu just do it
		 */
		(*func)(arg1, arg2);
		CPUSET_DEL(xc_cpuset, lcx);
		if (CPUSET_ISNULL(xc_cpuset)) {
			XC_STAT_INC(x_dstat[lcx][XC_SOME_SELF]);
			XC_TRACE(XC_SOME_SELF, &tset, func, arg1, arg2);
			XC_SPL_EXIT(lcx, opl);
			kpreempt_enable();
			return;
		}
	}

	if (xc_holder == lcx) {	/* got the xc_sys_mutex already */
		cpuset_t mset = xc_mbox[lcx].xc_cpuset;

		CPUSET_AND(mset, cpuset);
		ASSERT(MUTEX_HELD(&xc_sys_mutex));
		ASSERT(CPUSET_ISEQUAL(mset, cpuset));
		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
		XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER_H]);
		XC_TRACE(XC_SOME_OTHER_H, &xc_cpuset, func, arg1, arg2);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	/*
	 * Avoid deadlock if someone has sent us an xc_loop request while
	 * we are trying to grab xc_sys_mutex.
	 */
	XC_SPL_EXIT(lcx, opl);

	/*
	 * At this point, since we don't own xc_sys_mutex,
	 * our pil shouldn't run at or above the XCALL_PIL.
	 */
	ASSERT(getpil() < XCALL_PIL);

	/*
	 * grab xc_sys_mutex before writing to the xc_mbox
	 */
	mutex_enter(&xc_sys_mutex);
	xc_spl_enter[lcx] = 1;

	XC_TRACE(XC_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);

	xc_spl_enter[lcx] = 0;
	XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER]);
	mutex_exit(&xc_sys_mutex);
	kpreempt_enable();
}

/*
 * xc_all - send an "x-call" to all cpus
 */
void
xc_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	ASSERT((uintptr_t)func > KERNELBASE);
	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);

	CPUSET_ZERO(tset);

	kpreempt_disable();
	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * same cpu just do it
	 */
	(*func)(arg1, arg2);
	xc_cpuset = cpu_ready_set;
	CPUSET_DEL(xc_cpuset, lcx);

	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_STAT_INC(x_dstat[lcx][XC_ALL_SELF]);
		XC_TRACE(XC_ALL_SELF, &tset, func, arg1, arg2);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	if (xc_holder == lcx) {	/* got the xc_sys_mutex already */
		cpuset_t mset = xc_mbox[lcx].xc_cpuset;

		CPUSET_AND(mset, xc_cpuset);
		ASSERT(MUTEX_HELD(&xc_sys_mutex));
		ASSERT(CPUSET_ISEQUAL(mset, xc_cpuset));
		XC_TRACE(XC_ALL_OTHER_H, &xc_cpuset, func, arg1, arg2);
		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
		XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER_H]);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	/*
	 * Avoid deadlock if someone has sent us an xc_loop request while
	 * we are trying to grab xc_sys_mutex.
	 */
	XC_SPL_EXIT(lcx, opl);

	/*
	 * At this point, since we don't own xc_sys_mutex,
	 * our pil shouldn't run at or above the XCALL_PIL.
	 */
	ASSERT(getpil() < XCALL_PIL);

	/*
	 * grab xc_sys_mutex before writing to the xc_mbox
	 */
	mutex_enter(&xc_sys_mutex);
	xc_spl_enter[lcx] = 1;

	XC_TRACE(XC_ALL_OTHER, &xc_cpuset, func, arg1, arg2);
	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);

	xc_spl_enter[lcx] = 0;
	XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER]);
	mutex_exit(&xc_sys_mutex);
	kpreempt_enable();
}
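
/*
 * Capture/release sketch (illustrative only; real callers live
 * elsewhere in the kernel):
 *
 *	kpreempt_disable();
 *	xc_attention(cpuset);	captured cpus now spin in xc_loop()
 *	...critical session; xc_one()/xc_some() to captured cpus take
 *	   the cheap xc_mbox-only path since we are the xc_holder...
 *	xc_dismissed(cpuset);	releases the cpus and xc_sys_mutex
 *	kpreempt_enable();
 */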

/*
 * xc_attention - paired with xc_dismissed()
 *
 * xc_attention() holds the xc_sys_mutex and xc_dismissed() releases it;
 * called when an initiator wants to capture some/all cpus for a critical
 * session.
 */
void
xc_attention(cpuset_t cpuset)
{
	int pix, lcx;
	cpuset_t xc_cpuset, tmpset;
	cpuset_t recv_cpuset;
	uint64_t loop_cnt = 0;
	int first_time = 1;

	CPUSET_ZERO(recv_cpuset);

	/*
	 * don't migrate the cpu until xc_dismissed() is finished
	 */
	ASSERT(getpil() < XCALL_PIL);
	mutex_enter(&xc_sys_mutex);
	lcx = (int)(CPU->cpu_id);
	ASSERT(x_dstat[lcx][XC_ATTENTION] ==
	    x_dstat[lcx][XC_DISMISSED]);
	ASSERT(xc_holder == -1);
	xc_mbox[lcx].xc_cpuset = cpuset;
	xc_holder = lcx; /* no membar; only current cpu needs the right lcx */

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * don't send mondo to self
	 */
	CPUSET_DEL(xc_cpuset, lcx);

	XC_STAT_INC(x_dstat[lcx][XC_ATTENTION]);
	XC_TRACE(XC_ATTENTION, &xc_cpuset, NULL, NULL, NULL);

	if (CPUSET_ISNULL(xc_cpuset))
		return;

	xc_spl_enter[lcx] = 1;
	/*
	 * inform the target processors to enter into xc_loop()
	 */
	tmpset = xc_cpuset;
	init_mondo(setsoftint_tl1, xc_loop_inum, 0);
	for (pix = 0; pix < NCPU; pix++) {
		if (CPU_IN_SET(tmpset, pix)) {
			ASSERT(xc_mbox[pix].xc_state == XC_IDLE);
			xc_mbox[pix].xc_state = XC_ENTER;
			send_one_mondo(pix);
			CPUSET_DEL(tmpset, pix);
			if (CPUSET_ISNULL(tmpset)) {
				break;
			}
		}
	}
	xc_spl_enter[lcx] = 0;

	/*
	 * make sure target processors have entered into xc_loop()
	 */
	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
		tmpset = xc_cpuset;
		for (pix = 0; pix < NCPU; pix++) {
			if (CPU_IN_SET(tmpset, pix)) {
				/*
				 * membar_stld() is done in xc_loop
				 */
				if (xc_mbox[pix].xc_state == XC_WAIT) {
					CPUSET_ADD(recv_cpuset, pix);
				}
				CPUSET_DEL(tmpset, pix);
				if (CPUSET_ISNULL(tmpset)) {
					break;
				}
			}
		}
		if (loop_cnt++ > xc_mondo_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			if (first_time) {
				XT_SYNC_SOME(xc_cpuset);
				first_time = 0;
				loop_cnt = 0;
				continue;
			}
			cmn_err(CE_PANIC, "xc_attention() timeout");
		}
	}

	/*
	 * xc_sys_mutex remains held until xc_dismissed() is finished
	 */
}

/*
 * xc_dismissed - paired with xc_attention()
 *
 * Called after the critical session is finished.
 */
void
xc_dismissed(cpuset_t cpuset)
{
	int pix;
	int lcx = (int)(CPU->cpu_id);
	cpuset_t xc_cpuset, tmpset;
	cpuset_t recv_cpuset;
	uint64_t loop_cnt = 0;

	ASSERT(lcx == xc_holder);
	ASSERT(CPUSET_ISEQUAL(xc_mbox[lcx].xc_cpuset, cpuset));
	ASSERT(getpil() >= XCALL_PIL);
	CPUSET_ZERO(xc_mbox[lcx].xc_cpuset);
	CPUSET_ZERO(recv_cpuset);
	membar_stld();

	XC_STAT_INC(x_dstat[lcx][XC_DISMISSED]);
	ASSERT(x_dstat[lcx][XC_DISMISSED] == x_dstat[lcx][XC_ATTENTION]);

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * exclude itself
	 */
	CPUSET_DEL(xc_cpuset, lcx);
	XC_TRACE(XC_DISMISSED, &xc_cpuset, NULL, NULL, NULL);
	if (CPUSET_ISNULL(xc_cpuset)) {
		xc_holder = -1;
		mutex_exit(&xc_sys_mutex);
		return;
	}

	/*
	 * inform other processors to get out of xc_loop()
	 */
	tmpset = xc_cpuset;
	for (pix = 0; pix < NCPU; pix++) {
		if (CPU_IN_SET(tmpset, pix)) {
			xc_mbox[pix].xc_state = XC_EXIT;
			membar_stld();
			CPUSET_DEL(tmpset, pix);
			if (CPUSET_ISNULL(tmpset)) {
				break;
			}
		}
	}

	/*
	 * make sure target processors have exited from xc_loop()
	 */
	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
		tmpset = xc_cpuset;
		for (pix = 0; pix < NCPU; pix++) {
			if (CPU_IN_SET(tmpset, pix)) {
				/*
				 * membar_stld() is done in xc_loop
				 */
				if (xc_mbox[pix].xc_state == XC_IDLE) {
					CPUSET_ADD(recv_cpuset, pix);
				}
				CPUSET_DEL(tmpset, pix);
				if (CPUSET_ISNULL(tmpset)) {
					break;
				}
			}
		}
		if (loop_cnt++ > xc_func_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			cmn_err(CE_PANIC, "xc_dismissed() timeout");
		}
	}
	xc_holder = -1;
	mutex_exit(&xc_sys_mutex);
}

/*
 * xc_serv - "x-call" handler at TL=0; serves only one x-call request;
 * runs at XCALL_PIL level.
 */
uint_t
xc_serv(void)
{
	int lcx = (int)(CPU->cpu_id);
	struct xc_mbox *xmp;
	xcfunc_t *func;
	uint64_t arg1, arg2;
	cpuset_t tset;

	ASSERT(getpil() == XCALL_PIL);
	CPUSET_ZERO(tset);
	CPUSET_ADD(tset, lcx);
	flush_windows();
	xmp = &xc_mbox[lcx];
	ASSERT(lcx != xc_holder);
	ASSERT(xmp->xc_state == XC_DOIT);
	func = xmp->xc_func;
	/*
	 * read the args unconditionally so the trace after the call
	 * never sees uninitialized values when func is NULL
	 */
	arg1 = xmp->xc_arg1;
	arg2 = xmp->xc_arg2;
	XC_TRACE(XC_SERV, &tset, func, arg1, arg2);
	if (func != NULL)
		(*func)(arg1, arg2);
	XC_STAT_INC(x_rstat[lcx][XC_SERV]);
	XC_TRACE(XC_SERV, &tset, func, arg1, arg2);
	xmp->xc_state = XC_IDLE;
	membar_stld();
	return (1);
}

/*
 * if == 1, an xc_loop timeout will cause a panic
 * otherwise print a warning
 */
uint_t xc_loop_panic = 0;
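
/*
 * Handshake summary for a captured cpu (the initiator's stores on the
 * left, this cpu's responses on the right):
 *
 *	XC_IDLE -> XC_ENTER	(xc_attention: mondo sent, enter xc_loop)
 *	XC_ENTER -> XC_WAIT	(xc_loop: ready for requests)
 *	XC_WAIT -> XC_DOIT	(xc_one/xc_some/xc_all: run func)
 *	XC_DOIT -> XC_WAIT	(xc_loop: func done, ready again)
 *	XC_WAIT -> XC_EXIT	(xc_dismissed: leave xc_loop)
 *	XC_EXIT -> XC_IDLE	(xc_loop: exiting, mailbox free)
 */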

/*
 * xc_loop - "x-call" handler at TL=0; captures the cpu for a critical
 * session, or serves multiple x-call requests; runs at XCALL_PIL level.
 */
uint_t
xc_loop(void)
{
	int lcx = (int)(CPU->cpu_id);
	struct xc_mbox *xmp;
	xcfunc_t *func;
	uint64_t arg1, arg2;
	uint64_t loop_cnt = 0;
	cpuset_t tset;

	ASSERT(getpil() == XCALL_PIL);

	CPUSET_ZERO(tset);
	flush_windows();

	/*
	 * Someone must have owned the xc_sys_mutex;
	 * no further interrupt (at XCALL_PIL or below) can
	 * be taken by this processor until xc_loop exits.
	 *
	 * The owner of xc_sys_mutex (or xc_holder) can expect
	 * its xc/xt requests to be handled as follows:
	 *	xc requests use xc_mbox's handshaking for their services
	 *	xt requests at TL>0 will be handled immediately
	 *	xt requests at TL=0:
	 *		if their handlers' pils are <= XCALL_PIL, then
	 *			they will be handled after xc_loop exits
	 *			(so, they probably should not be used)
	 *		else they will be handled immediately
	 *
	 * For those who are not informed to enter xc_loop, if they
	 * send xc/xt requests to this processor at this moment,
	 * the requests will be handled as follows:
	 *	xc requests will be handled after they grab xc_sys_mutex
	 *	xt requests at TL>0 will be handled immediately
	 *	xt requests at TL=0:
	 *		if their handlers' pils are <= XCALL_PIL, then
	 *			they will be handled after xc_loop exits
	 *		else they will be handled immediately
	 */
	xmp = &xc_mbox[lcx];
	ASSERT(lcx != xc_holder);
	ASSERT(xmp->xc_state == XC_ENTER);
	xmp->xc_state = XC_WAIT;
	CPUSET_ADD(tset, lcx);
	membar_stld();
	XC_STAT_INC(x_rstat[lcx][XC_LOOP]);
	XC_TRACE(XC_LOOP_ENTER, &tset, NULL, NULL, NULL);
	while (xmp->xc_state != XC_EXIT) {
		if (xmp->xc_state == XC_DOIT) {
			func = xmp->xc_func;
			arg1 = xmp->xc_arg1;
			arg2 = xmp->xc_arg2;
			XC_TRACE(XC_LOOP_DOIT, &tset, func, arg1, arg2);
			if (func != NULL)
				(*func)(arg1, arg2);
			xmp->xc_state = XC_WAIT;
			membar_stld();
			/*
			 * reset the timeout counter
			 * since some work was done
			 */
			loop_cnt = 0;
		} else {
			/* patience is a virtue... */
			loop_cnt++;
		}

		if (loop_cnt > xc_func_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			cmn_err(xc_loop_panic ? CE_PANIC : CE_WARN,
			    "xc_loop() timeout");
			/*
			 * if the above displayed a warning,
			 * reset the timeout counter and be patient
			 */
			loop_cnt = 0;
		}
	}
	ASSERT(xmp->xc_state == XC_EXIT);
	ASSERT(xc_holder != -1);
	XC_TRACE(XC_LOOP_EXIT, &tset, NULL, NULL, NULL);
	xmp->xc_state = XC_IDLE;
	membar_stld();
	return (1);
}