/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/systm.h>
#include <sys/spl.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/kdi_impl.h>
#include <sys/cpuvar.h>
#include <sys/archsystm.h>

/*
 * Handle software interrupts through the 'softcall' mechanism.
 *
 * At present the softcall mechanism uses a global list headed by softhead.
 * Entries are added at the tail and removed from the head so as to preserve
 * the FIFO nature of entries in the softcall list. softcall() takes care of
 * adding entries at the softtail.
 *
 * softint() must take care of executing the entries in FIFO order. It could
 * be called simultaneously from multiple CPUs, but only one instance of
 * softint() should process the softcall list, with the exception of when a
 * CPU is stuck due to high interrupt load and can't execute the callbacks.
 * The state diagram is as follows :-
 *
 *	- Upper half, which is the same as the old state machine
 *	  (IDLE->PEND->DRAIN->IDLE)
 *
 *	- Lower half, which steals the entries from the softcall queue and
 *	  executes them in the context of the softint interrupt handler. The
 *	  interrupt handler is fired on a different CPU by sending a
 *	  cross-call.
 *
 * Starting state is IDLE.
 *
 *	                                          softint()
 *
 *
 *	                      (c)
 *	   ____________________________________________________
 *	  |                          ^                         ^
 *	  v          (a)             |            (b)          |
 *	IDLE--------------------->PEND--------------------->DRAIN
 *	  ^                          |                         |
 *	  |                          |                         |
 *	  |                          |                         |
 *	  |                          |                         |
 *	  |                          |                         |
 *	  |                          d                         d
 *	  |                          |                         |
 *	  |                          v                         v
 *	  |                        PEND                      DRAIN
 *	  |          (e)             &                         &
 *	  |<-----------------------STEAL                     STEAL
 *	  ^                                                    |
 *	  |                                                    |
 *	  |                         (e)                        v
 *	  |_________________________<__________________________|
 *
 *
 * Edges (a), (b) and (c) are the same as in the old state machine, and
 * these states are mutually exclusive.
 *
 * a - When an entry is enqueued on the softcall queue, the state moves
 *     from IDLE to PEND.
 *
 * b - When the interrupt handler has started processing the softcall queue.
 *
 * c - When the interrupt handler has finished processing the softcall
 *     queue, the state machine goes back to IDLE.
 *
 * d - softcall() generates another softlevel1 iff the interrupt handler
 *     hasn't run recently.
 *
 * e - Either PEND|STEAL or DRAIN|STEAL is set. We let the softlevel1
 *     handler exit because we have processed all the entries.
 *
 * When a CPU is pinned by higher-level interrupts for more than
 * softcall_delay clock ticks, SOFT_STEAL is OR'ed in so that the
 * softlevel1 handler on another CPU can drain the queue.
 *
 * These states are needed for the softcall mechanism since Solaris has
 * only one interface (i.e. siron()) as of now for:
 *
 * - raising a soft interrupt architecture-independently (i.e. not through
 *   setsoftint()), and
 * - processing the softcall queue.
 */
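
/*
 * Illustrative usage (a minimal sketch, not code from this file; the names
 * my_callback and my_arg are hypothetical): a subsystem that must defer
 * work from interrupt context queues a callback to run later at soft
 * interrupt priority. Identical (func, arg) pairs already on the queue
 * are coalesced by softcall().
 *
 *	static void
 *	my_callback(void *my_arg)
 *	{
 *		...deferred work, runs in softlevel1 context...
 *	}
 *
 *	softcall(my_callback, my_arg);
 */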

#define	NSOFTCALLS	200

/*
 * Defined states for softcall processing.
 */
#define	SOFT_IDLE	0x01	/* no processing is needed */
#define	SOFT_PEND	0x02	/* softcall list needs processing */
#define	SOFT_DRAIN	0x04	/* list is being processed */
#define	SOFT_STEAL	0x08	/* list is being stolen for draining */
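
/*
 * Note (derived from the state diagram above): SOFT_STEAL is only ever
 * OR'ed into SOFT_PEND or SOFT_DRAIN, so a stolen queue shows up as
 * softcall_state == (SOFT_PEND|SOFT_STEAL) or (SOFT_DRAIN|SOFT_STEAL).
 */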

typedef struct softcall {
	void	(*sc_func)(void *);	/* function to call */
	void	*sc_arg;		/* arg to pass to func */
	struct softcall *sc_next;	/* next in list */
} softcall_t;
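
/*
 * Descriptive note: softcall_init() threads the softcalls[] array onto the
 * softfree list via sc_next; entries migrate between softfree and the
 * active FIFO headed by softhead/softtail as they are queued and drained.
 */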

/*
 * softcall list and state variables.
 */
static softcall_t *softcalls;
static softcall_t *softhead, *softtail, *softfree;
static uint_t	softcall_state;
static clock_t	softcall_tick;
static clock_t	softcall_countstart, softcall_lastpoke;
static uint_t	softcall_pokecount;

/*
 * Max number of pokes per second before increasing softcall_delay.
 */
uint_t softcall_pokemax = 10;

/*
 * This ensures that softcall entries don't get stuck for long. It's
 * expressed in units of 10 milliseconds (1 unit = 10 ms). When hires_tick
 * is set or another clock frequency is used, softcall_init() ensures that
 * one unit still corresponds to 10 milliseconds.
 */
unsigned int softcall_delay = 1;
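
/*
 * Worked example (assuming the default hz = 100): softcall_init() scales
 * softcall_delay by hz/100, so 1 * (100/100) = 1 tick, and one tick at
 * hz = 100 lasts 10 ms. With hires_tick (hz = 1000) the same value scales
 * to 1 * (1000/100) = 10 ticks, which is again 10 ms.
 */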

/*
 * The last CPU which will drain the softcall queue.
 */
static int softcall_latest_cpuid = -1;

/*
 * CPUSET to hold the CPU which is currently processing the softcall
 * queue. More than one CPU may have its bit set, but that happens only
 * when those CPUs are stuck.
 */
static cpuset_t *softcall_cpuset = NULL;

/*
 * Protects the softcall lists and the control variable softcall_state.
 */
static kmutex_t	softcall_lock;

static void (*kdi_softcall_func)(void);
extern void siron_poke_cpu(cpuset_t);

extern void siron(void);
extern void kdi_siron(void);

void
softcall_init(void)
{
	softcall_t *sc;

	softcalls = kmem_zalloc(sizeof (softcall_t) * NSOFTCALLS, KM_SLEEP);
	softcall_cpuset = kmem_zalloc(sizeof (cpuset_t), KM_SLEEP);
	for (sc = softcalls; sc < &softcalls[NSOFTCALLS]; sc++) {
		sc->sc_next = softfree;
		softfree = sc;
	}
	mutex_init(&softcall_lock, NULL, MUTEX_SPIN,
	    (void *)ipltospl(SPL8));
	softcall_state = SOFT_IDLE;
	softcall_tick = ddi_get_lbolt();

	/*
	 * Scale softcall_delay, since it is expressed in units of
	 * 10 milliseconds (1 = 10 ms).
	 */
	softcall_delay = softcall_delay * (hz/100);
	CPUSET_ZERO(*softcall_cpuset);
}

/*
 * Gets called when the softcall queue is not moving forward. We choose
 * a CPU and poke it, excluding the ones which have already been poked.
 */
static int
softcall_choose_cpu()
{
	cpu_t *cplist = CPU;
	cpu_t *cp;
	int intr_load = INT_MAX;
	int cpuid = -1;
	cpuset_t poke;
	int s;

	ASSERT(getpil() >= DISP_LEVEL);
	ASSERT(ncpus > 1);
	ASSERT(MUTEX_HELD(&softcall_lock));

	CPUSET_ZERO(poke);

	/*
	 * The hint is to start from the current CPU.
	 */
	cp = cplist;
	do {
		/*
		 * Don't select this CPU if:
		 * - it is already in the cpuset
		 * - the CPU is not accepting interrupts
		 * - the CPU is being offlined
		 */
		if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) ||
		    (cp->cpu_flags & CPU_ENABLE) == 0 ||
		    (cp == cpu_inmotion))
			continue;
#if defined(__x86)
		/*
		 * Don't select this CPU if a hypervisor indicates it
		 * isn't currently scheduled onto a physical cpu. We are
		 * looking for a cpu that can respond quickly and the time
		 * to get the virtual cpu scheduled and switched to running
		 * state is likely to be relatively lengthy.
		 */
		if (vcpu_on_pcpu(cp->cpu_id) == VCPU_NOT_ON_PCPU)
			continue;
#endif	/* __x86 */

		/* if CPU is not busy */
		if (cp->cpu_intrload == 0) {
			cpuid = cp->cpu_id;
			break;
		}

		if (cp->cpu_intrload < intr_load) {
			cpuid = cp->cpu_id;
			intr_load = cp->cpu_intrload;
		} else if (cp->cpu_intrload == intr_load) {
			/*
			 * We want to poke CPUs having similar
			 * load because we don't know which CPU can
			 * acknowledge the level1 interrupt. The
			 * list of such CPUs should not be large.
			 */
			if (cpuid != -1) {
				/*
				 * Add the last CPU chosen because
				 * it also has the same interrupt load.
				 */
				CPUSET_ADD(poke, cpuid);
				cpuid = -1;
			}

			CPUSET_ADD(poke, cp->cpu_id);
		}
	} while ((cp = cp->cpu_next_onln) != cplist);

	/* if we found a single CPU best suited to poke */
	if (cpuid != -1) {
		CPUSET_ZERO(poke);
		CPUSET_ADD(poke, cpuid);
	}

	if (CPUSET_ISNULL(poke)) {
		mutex_exit(&softcall_lock);
		return (0);
	}

	/*
	 * We first set the bit in the cpuset and then poke. (CPUSET_XOR
	 * acts as a set union here, since CPUs already present in
	 * *softcall_cpuset were skipped when building poke.)
	 */
	CPUSET_XOR(*softcall_cpuset, poke);
	mutex_exit(&softcall_lock);

	/*
	 * If softcall() was called at low pil then we may
	 * get preempted before we raise PIL. It should be okay
	 * because we are just going to poke CPUs now; at most
	 * another thread may start choosing CPUs in this routine.
	 */
	s = splhigh();
	siron_poke_cpu(poke);
	splx(s);
	return (1);
}

/*
 * Call function func with argument arg
 * at some later time at software interrupt priority
 */
void
softcall(void (*func)(void *), void *arg)
{
	softcall_t *sc;
	clock_t w, now;

	/*
	 * protect against cross-calls
	 */
	mutex_enter(&softcall_lock);
	/* coalesce identical softcalls */
	for (sc = softhead; sc != 0; sc = sc->sc_next) {
		if (sc->sc_func == func && sc->sc_arg == arg) {
			goto intr;
		}
	}

	if ((sc = softfree) == 0)
		panic("too many softcalls");

	softfree = sc->sc_next;
	sc->sc_func = func;
	sc->sc_arg = arg;
	sc->sc_next = 0;

	if (softhead) {
		softtail->sc_next = sc;
		softtail = sc;
	} else
		softhead = softtail = sc;

intr:
	if (softcall_state & SOFT_IDLE) {
		softcall_state = SOFT_PEND;
		softcall_tick = ddi_get_lbolt();
		mutex_exit(&softcall_lock);
		siron();
	} else if (softcall_state & (SOFT_DRAIN|SOFT_PEND)) {
		now = ddi_get_lbolt();
		w = now - softcall_tick;
		if (w <= softcall_delay || ncpus == 1) {
			mutex_exit(&softcall_lock);
			return;
		}
		/*
		 * Did we poke less than a second ago?
		 */
		if (now - softcall_lastpoke < hz) {
			/*
			 * We did; increment the poke count and
			 * see if we are poking too often.
			 */
			if (softcall_pokecount++ == 0)
				softcall_countstart = now;
			if (softcall_pokecount > softcall_pokemax) {
				/*
				 * If poking too much, increase the delay.
				 */
				if (now - softcall_countstart <= hz)
					softcall_delay++;
				softcall_pokecount = 0;
			}
		} else {
			/*
			 * The poke rate has dropped off; reset the
			 * poke monitor.
			 */
			softcall_pokecount = 0;
		}
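
		/*
		 * Illustrative numbers (derived from the defaults above,
		 * softcall_pokemax = 10): if more than 10 pokes land
		 * within one second (hz ticks), softcall_delay grows by
		 * one unit, so a persistently stuck queue backs off
		 * instead of being poked at an ever-increasing rate.
		 */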
		softcall_lastpoke = now;
		if (!(softcall_state & SOFT_STEAL)) {
			softcall_state |= SOFT_STEAL;

			/*
			 * We want to give the handler some more time
			 * before fishing around again.
			 */
			softcall_tick = now;
		}

		/* softcall_lock will be released by this routine */
		(void) softcall_choose_cpu();
	}
}

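/*
 * Register a one-shot callback for the kernel debugger: kdi_softcall_func
 * is picked up and cleared by softint() after it finishes draining (see
 * the out: label below). If the softcall queue is empty, kdi_siron() is
 * used to raise the soft interrupt, since nothing else would trigger
 * softint().
 */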
void
kdi_softcall(void (*func)(void))
{
	kdi_softcall_func = func;

	if (softhead == NULL)
		kdi_siron();
}

/*
 * Called to process software interrupts: take one off the queue, call it,
 * repeat.
 *
 * Note that the queue may change during the call; softcall_lock and the
 * state variables softcall_state and softcall_latest_cpuid ensure that
 * - we don't have multiple CPUs pulling from the list (thus causing
 *   a violation of FIFO order), with an exception for when we are stuck;
 * - we don't miss a new entry having been added to the head;
 * - we don't miss a wakeup.
 */

void
softint(void)
{
	softcall_t *sc = NULL;
	void (*func)();
	caddr_t arg;
	int cpu_id = CPU->cpu_id;

	/*
	 * Don't process the softcall queue if the current CPU is quiesced
	 * or offlined. This can happen when a CPU is running the pause
	 * thread but softcall has already sent a cross-call.
	 */
	if (CPU->cpu_flags & (CPU_QUIESCED|CPU_OFFLINE)) {
		if (softcall_cpuset != NULL &&
		    CPU_IN_SET(*softcall_cpuset, cpu_id)) {
			CPUSET_DEL(*softcall_cpuset, cpu_id);
			goto out;
		}
	}

	mutex_enter(&softcall_lock);

	if (softcall_state & (SOFT_STEAL|SOFT_PEND)) {
		softcall_state = SOFT_DRAIN;
	} else {
		/*
		 * The check for softcall_cpuset being
		 * NULL is required because this routine
		 * may get called very early during boot.
		 */
		if (softcall_cpuset != NULL &&
		    CPU_IN_SET(*softcall_cpuset, cpu_id))
			CPUSET_DEL(*softcall_cpuset, cpu_id);
		mutex_exit(&softcall_lock);
		goto out;
	}

	/*
	 * Setting softcall_latest_cpuid to the current CPU ensures
	 * that there is only one active softlevel1 handler
	 * processing the softcall queue.
	 *
	 * Since softcall_lock is dropped before calling
	 * func (the callback), we need softcall_latest_cpuid
	 * to prevent two softlevel1 handlers working on the
	 * queue when the first softlevel1 handler gets
	 * stuck due to high interrupt load.
	 */
	softcall_latest_cpuid = cpu_id;

	/* add ourself to the cpuset */
	if (!CPU_IN_SET(*softcall_cpuset, cpu_id))
		CPUSET_ADD(*softcall_cpuset, cpu_id);

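	/*
	 * Drain loop: softcall_tick is refreshed on each iteration as a
	 * heartbeat so that softcall() can tell whether this handler has
	 * made progress recently before deciding to steal the queue for
	 * another CPU.
	 */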
	for (;;) {
		softcall_tick = ddi_get_lbolt();
		if ((sc = softhead) != NULL) {
			func = sc->sc_func;
			arg = sc->sc_arg;
			softhead = sc->sc_next;
			sc->sc_next = softfree;
			softfree = sc;
		}

		if (sc == NULL) {
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			softcall_state = SOFT_IDLE;
			ASSERT(softcall_latest_cpuid == cpu_id);
			softcall_latest_cpuid = -1;

			mutex_exit(&softcall_lock);
			break;
		}

		mutex_exit(&softcall_lock);
		func(arg);
		mutex_enter(&softcall_lock);

		/*
		 * We no longer need softcall processing from the current
		 * interrupt handler because either
		 *  (a) softcall is in the SOFT_IDLE state, or
		 *  (b) another CPU is already draining the softcall
		 *	queue and the current softlevel1 is no
		 *	longer required.
		 */
		if (softcall_latest_cpuid != cpu_id) {
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			mutex_exit(&softcall_lock);
			break;
		}
	}

out:
	if ((func = kdi_softcall_func) != NULL) {
		kdi_softcall_func = NULL;
		func();
	}
}