xref: /onnv-gate/usr/src/uts/common/os/softint.c (revision 5076:342323d1ccaa)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
54652Scwb  * Common Development and Distribution License (the "License").
64652Scwb  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
224652Scwb  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
270Sstevel@tonic-gate 
280Sstevel@tonic-gate #include <sys/types.h>
290Sstevel@tonic-gate #include <sys/param.h>
300Sstevel@tonic-gate #include <sys/t_lock.h>
310Sstevel@tonic-gate #include <sys/systm.h>
320Sstevel@tonic-gate #include <sys/spl.h>
330Sstevel@tonic-gate #include <sys/cmn_err.h>
340Sstevel@tonic-gate #include <sys/debug.h>
350Sstevel@tonic-gate #include <sys/kdi_impl.h>
36*5076Smishra #include <sys/cpuvar.h>
37*5076Smishra #include <sys/cpuvar.h>
38*5076Smishra #include <sys/archsystm.h>
390Sstevel@tonic-gate 
400Sstevel@tonic-gate /*
410Sstevel@tonic-gate  * Handle software interrupts through 'softcall' mechanism
42522Ssudheer  *
43522Ssudheer  * At present softcall mechanism uses a global list headed by softhead.
44522Ssudheer  * Entries are added to tail and removed from head so as to preserve FIFO
45522Ssudheer  * nature of entries in the softcall list. softcall() takes care of adding
46522Ssudheer  * entries to the softtail.
47522Ssudheer  *
48522Ssudheer  * softint must take care of executing the entries in the FIFO
49522Ssudheer  * order. It could be called simultaneously from multiple cpus, however only
50*5076Smishra  * one instance of softint should process the softcall list with the exception
51*5076Smishra  * when CPU is stuck due to high interrupt load and can't execute callbacks.
52*5076Smishra  * State diagram is as follows :-
53*5076Smishra  *
54*5076Smishra  *	- Upper half which is same as old state machine
55*5076Smishra  *	  (IDLE->PEND->DRAIN->IDLE)
56*5076Smishra  *
57*5076Smishra  *	- Lower half which steals the entries from softcall queue and execute
58*5076Smishra  *        in the context of softint interrupt handler. The interrupt handler
59*5076Smishra  *        is fired on a different CPU by sending a cross-call.
60*5076Smishra  *
61*5076Smishra  * Starting state is IDLE.
62*5076Smishra  *
63*5076Smishra  * 				softint()
64*5076Smishra  *
65522Ssudheer  *
66*5076Smishra  *				(c)
67*5076Smishra  * 	____________________________________________________
68*5076Smishra  * 	|                          ^                         ^
69*5076Smishra  * 	v            (a)           |           (b)           |
70*5076Smishra  * 	IDLE--------------------->PEND--------------------->DRAIN
71*5076Smishra  *	^                         |                         |
72*5076Smishra  * 	|                         |                         |
73*5076Smishra  * 	|                         |                         |
74*5076Smishra  * 	|                         |                         |
75*5076Smishra  * 	|                         |                         |
76*5076Smishra  * 	|                         d                         d
77*5076Smishra  * 	|                         |                         |
78*5076Smishra  * 	|                         v                         v
79*5076Smishra  * 	|                         PEND                      DRAIN
80*5076Smishra  * 	|            (e)           &                          &
81*5076Smishra  * 	|<-----------------------STEAL                      STEAL
82*5076Smishra  * 	^                                                    |
83*5076Smishra  * 	|                                                    |
84*5076Smishra  * 	|                         (e)                        v
85*5076Smishra  * 	|_________________________<__________________________|
86*5076Smishra  *
87*5076Smishra  *
88*5076Smishra  *
89*5076Smishra  * Edge (a)->(b)->(c) are same as old state machine and these
90*5076Smishra  * are mutually exclusive state.
91*5076Smishra  *
92*5076Smishra  * a - When an entry is being enqueued to softcall queue then the state
93*5076Smishra  *     moves from IDLE to PEND.
94*5076Smishra  *
95*5076Smishra  * b - When interrupt handler has started processing softcall queue.
96*5076Smishra  *
 * c - When the interrupt handler has finished processing the softcall
 *     queue, the state machine goes back to IDLE.
99*5076Smishra  *
100*5076Smishra  * d - softcall() generates another softlevel1 iff interrupt handler
101*5076Smishra  *     hasn't run recently.
102*5076Smishra  *
103*5076Smishra  * e - Either PEND|STEAL or DRAIN|STEAL is set. We let softlevel1
104*5076Smishra  *     handler exit because we have processed all the entries.
105*5076Smishra  *
106*5076Smishra  * When CPU is being pinned by higher level interrupts for more than
107*5076Smishra  * softcall_delay clock ticks, SOFT_STEAL is OR'ed so that softlevel1
108*5076Smishra  * handler on the other CPU can drain the queue.
109*5076Smishra  *
 * These states are needed for the softcall mechanism since Solaris has
 * only one interface (i.e. siron) as of now for:
112*5076Smishra  *
113*5076Smishra  * - raising a soft interrupt architecture independently (ie not through
114522Ssudheer  *   setsoftint(..) )
115522Ssudheer  * - to process the softcall queue.
1160Sstevel@tonic-gate  */
1170Sstevel@tonic-gate 
1180Sstevel@tonic-gate #define	NSOFTCALLS	200
119*5076Smishra 
120522Ssudheer /*
121522Ssudheer  * Defined states for softcall processing.
122522Ssudheer  */
123522Ssudheer #define	SOFT_IDLE		0x01	/* no processing is needed */
124522Ssudheer #define	SOFT_PEND		0x02	/* softcall list needs processing */
125*5076Smishra #define	SOFT_DRAIN		0x04	/* list is being processed */
126*5076Smishra #define	SOFT_STEAL		0x08	/* list is being stolen for draining */
1270Sstevel@tonic-gate 
1280Sstevel@tonic-gate typedef struct softcall {
1290Sstevel@tonic-gate 	void (*sc_func)(void *);	/* function to call */
1300Sstevel@tonic-gate 	void *sc_arg;			/* arg to pass to func */
1310Sstevel@tonic-gate 	struct softcall *sc_next;	/* next in list */
1320Sstevel@tonic-gate } softcall_t;
1330Sstevel@tonic-gate 
134*5076Smishra /*
135*5076Smishra  * softcall list and state variables.
136*5076Smishra  */
137*5076Smishra static softcall_t *softcalls;
138*5076Smishra static softcall_t *softhead, *softtail, *softfree;
139522Ssudheer static uint_t	softcall_state;
140*5076Smishra static clock_t softcall_tick;
141*5076Smishra 
142*5076Smishra /*
143*5076Smishra  * This ensures that softcall entries don't get stuck for long. It's expressed
144*5076Smishra  * in 10 milliseconds as 1 unit. When hires_tick is set or other clock frequency
145*5076Smishra  * is used, softcall_init() ensures that it's still expressed as 1 =  10 milli
146*5076Smishra  * seconds.
147*5076Smishra  */
148*5076Smishra static int softcall_delay = 1;
149*5076Smishra 
150*5076Smishra /*
151*5076Smishra  * The last CPU which will drain softcall queue.
152*5076Smishra  */
153*5076Smishra static int softcall_latest_cpuid = -1;
154*5076Smishra 
155*5076Smishra /*
156*5076Smishra  * CPUSET to hold the CPU which is processing softcall queue
157*5076Smishra  * currently. There can be more than one CPU having bit set
158*5076Smishra  * but it will happen only when they are stuck.
159*5076Smishra  */
160*5076Smishra static cpuset_t *softcall_cpuset = NULL;
1610Sstevel@tonic-gate 
162522Ssudheer /*
163522Ssudheer  * protects softcall lists and control variable softcall_state.
164522Ssudheer  */
165522Ssudheer static kmutex_t	softcall_lock;
1660Sstevel@tonic-gate 
1670Sstevel@tonic-gate static void (*kdi_softcall_func)(void);
168*5076Smishra extern void siron_poke_cpu(cpuset_t);
1690Sstevel@tonic-gate 
1700Sstevel@tonic-gate extern void siron(void);
1710Sstevel@tonic-gate 
1720Sstevel@tonic-gate void
1730Sstevel@tonic-gate softcall_init(void)
1740Sstevel@tonic-gate {
1750Sstevel@tonic-gate 	softcall_t *sc;
1760Sstevel@tonic-gate 
177*5076Smishra 	softcalls = kmem_zalloc(sizeof (softcall_t) * NSOFTCALLS, KM_SLEEP);
178*5076Smishra 	softcall_cpuset = kmem_zalloc(sizeof (cpuset_t), KM_SLEEP);
1790Sstevel@tonic-gate 	for (sc = softcalls; sc < &softcalls[NSOFTCALLS]; sc++) {
1800Sstevel@tonic-gate 		sc->sc_next = softfree;
1810Sstevel@tonic-gate 		softfree = sc;
1820Sstevel@tonic-gate 	}
183*5076Smishra 	mutex_init(&softcall_lock, NULL, MUTEX_SPIN,
184*5076Smishra 	    (void *)ipltospl(SPL8));
185*5076Smishra 	softcall_state = SOFT_IDLE;
186*5076Smishra 	softcall_tick = lbolt;
187*5076Smishra 
188*5076Smishra 	if (softcall_delay < 0)
189*5076Smishra 		softcall_delay = 1;
190*5076Smishra 
191*5076Smishra 	/*
192*5076Smishra 	 * Since softcall_delay is expressed as 1 = 10 milliseconds.
193*5076Smishra 	 */
194*5076Smishra 	softcall_delay = softcall_delay * (hz/100);
195*5076Smishra 	CPUSET_ZERO(*softcall_cpuset);
196*5076Smishra }
197*5076Smishra 
/*
 * Gets called when softcall queue is not moving forward. We choose
 * a CPU to poke, skipping the ones which have already been poked
 * (i.e. those whose bit is already set in softcall_cpuset).
 *
 * Entered with softcall_lock held; this routine releases the lock
 * on every path before returning.  Returns 1 if at least one CPU
 * was poked via siron_poke_cpu(), 0 if no eligible CPU was found.
 */
static int
softcall_choose_cpu()
{
	cpu_t *cplist = CPU;		/* search hint: start at current CPU */
	cpu_t *cp;
	int intr_load = INT_MAX;	/* lowest interrupt load seen so far */
	int cpuid = -1;			/* best single candidate, -1 if none */
	cpuset_t poke;			/* set of CPUs to cross-call */
	int s;

	ASSERT(getpil() >= DISP_LEVEL);
	ASSERT(ncpus > 1);
	ASSERT(MUTEX_HELD(&softcall_lock));

	CPUSET_ZERO(poke);

	/*
	 * The hint is to start from current CPU.
	 */
	cp = cplist;
	do {
		/*
		 * Skip CPUs that were already poked (still present in
		 * softcall_cpuset) and CPUs not enabled for interrupts.
		 */
		if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) ||
		    (cp->cpu_flags & CPU_ENABLE) == 0)
			continue;

		/* if CPU is not busy */
		if (cp->cpu_intrload == 0) {
			cpuid = cp->cpu_id;
			break;
		}

		if (cp->cpu_intrload < intr_load) {
			cpuid = cp->cpu_id;
			intr_load = cp->cpu_intrload;
		} else if (cp->cpu_intrload == intr_load) {
			/*
			 * We want to poke CPUs having similar
			 * load because we don't know which CPU
			 * can acknowledge the level1 interrupt. The
			 * list of such CPUs should not be large.
			 */
			if (cpuid != -1) {
				/*
				 * Put the last CPU chosen because
				 * it also has same interrupt load.
				 */
				CPUSET_ADD(poke, cpuid);
				cpuid = -1;
			}

			CPUSET_ADD(poke, cp->cpu_id);
		}
	} while ((cp = cp->cpu_next_onln) != cplist);

	/* if we found a CPU which suits best to poke */
	if (cpuid != -1) {
		CPUSET_ZERO(poke);
		CPUSET_ADD(poke, cpuid);
	}

	if (CPUSET_ISNULL(poke)) {
		/* no eligible CPU; still must release the caller's lock */
		mutex_exit(&softcall_lock);
		return (0);
	}

	/*
	 * We first set the bit in cpuset and then poke.
	 */
	CPUSET_XOR(*softcall_cpuset, poke);
	mutex_exit(&softcall_lock);

	/*
	 * If softcall() was called at low pil then we may
	 * get preempted before we raise PIL. It should be okay
	 * because we are just going to poke CPUs now or at most
	 * another thread may start choosing CPUs in this routine.
	 */
	s = splhigh();
	siron_poke_cpu(poke);
	splx(s);
	return (1);
}
2840Sstevel@tonic-gate 
/*
 * Call function func with argument arg
 * at some later time at software interrupt priority.
 *
 * Identical (func, arg) pairs already queued are coalesced, but an
 * enqueue attempt still kicks the state machine so a stuck queue
 * can be stolen by another CPU.
 */
void
softcall(void (*func)(void *), void *arg)
{
	softcall_t *sc;
	clock_t w;	/* ticks since softint() last made progress */

	/*
	 * protect against cross-calls
	 */
	mutex_enter(&softcall_lock);
	/* coalesce identical softcalls */
	for (sc = softhead; sc != 0; sc = sc->sc_next) {
		if (sc->sc_func == func && sc->sc_arg == arg) {
			/* already queued; may still need to kick processing */
			goto intr;
		}
	}

	if ((sc = softfree) == 0)
		panic("too many softcalls");

	softfree = sc->sc_next;
	sc->sc_func = func;
	sc->sc_arg = arg;
	sc->sc_next = 0;

	/* append at the tail to preserve FIFO order */
	if (softhead) {
		softtail->sc_next = sc;
		softtail = sc;
	} else
		softhead = softtail = sc;

intr:
	if (softcall_state & SOFT_IDLE) {
		/* edge (a): first pending entry, raise softlevel1 */
		softcall_state = SOFT_PEND;
		softcall_tick = lbolt;
		mutex_exit(&softcall_lock);
		siron();
	} else if (softcall_state & (SOFT_DRAIN|SOFT_PEND)) {
		/*
		 * A handler is (or should be) draining.  Only intervene
		 * if it has not made progress for more than
		 * softcall_delay ticks and another CPU exists to help.
		 */
		w = lbolt - softcall_tick;
		if (w <= softcall_delay || ncpus == 1) {
			mutex_exit(&softcall_lock);
			return;
		}
		/* edge (d): queue appears stuck; arrange for stealing */
		if (!(softcall_state & SOFT_STEAL)) {
			softcall_state |= SOFT_STEAL;

			/*
			 * We want to give some more chance before
			 * fishing around again.
			 */
			softcall_tick = lbolt;
		}

		/* softcall_lock will be released by this routine */
		(void) softcall_choose_cpu();
	}
}
3470Sstevel@tonic-gate 
3480Sstevel@tonic-gate void
3490Sstevel@tonic-gate kdi_softcall(void (*func)(void))
3500Sstevel@tonic-gate {
3510Sstevel@tonic-gate 	kdi_softcall_func = func;
3520Sstevel@tonic-gate 
3530Sstevel@tonic-gate 	if (softhead == NULL)
354*5076Smishra 		siron();
3550Sstevel@tonic-gate }
3560Sstevel@tonic-gate 
/*
 * Called to process software interrupts: take one entry off the
 * queue, call it, repeat.
 *
 * Note the queue may change during the call; softcall_lock and the
 * state variables softcall_state and softcall_latest_cpuid ensure
 * that -
 * - we don't have multiple cpus pulling from the list (thus causing
 *   a violation of FIFO order with an exception when we are stuck).
 * - we don't miss a new entry having been added to the head.
 * - we don't miss a wakeup.
 */

void
softint(void)
{
	softcall_t *sc = NULL;
	void (*func)();
	caddr_t arg;
	int cpu_id = CPU->cpu_id;

	mutex_enter(&softcall_lock);

	if (softcall_state & (SOFT_STEAL|SOFT_PEND)) {
		/* edge (b): claim the queue for draining on this CPU */
		softcall_state = SOFT_DRAIN;
	} else  {
		/*
		 * The check for softcall_cpuset being
		 * NULL is required because it may get
		 * called very early during boot.
		 */
		if (softcall_cpuset != NULL &&
		    CPU_IN_SET(*softcall_cpuset, cpu_id))
			CPUSET_DEL(*softcall_cpuset, cpu_id);
		mutex_exit(&softcall_lock);
		goto out;
	}

	/*
	 * Setting softcall_latest_cpuid to current CPU ensures
	 * that there is only one active softlevel1 handler to
	 * process softcall queues.
	 *
	 * Since softcall_lock lock is dropped before calling
	 * func (callback), we need softcall_latest_cpuid
	 * to prevent two softlevel1 handlers working on the
	 * queue when the first softlevel1 handler gets
	 * stuck due to high interrupt load.
	 */
	softcall_latest_cpuid = cpu_id;

	/* add ourself to the cpuset */
	if (!CPU_IN_SET(*softcall_cpuset, cpu_id))
		CPUSET_ADD(*softcall_cpuset, cpu_id);

	for (;;) {
		/* record progress so softcall() won't try to steal */
		softcall_tick = lbolt;
		if ((sc = softhead) != NULL) {
			/* unlink the head entry and recycle it */
			func = sc->sc_func;
			arg = sc->sc_arg;
			softhead = sc->sc_next;
			sc->sc_next = softfree;
			softfree = sc;
		}

		if (sc == NULL) {
			/* edge (c): queue fully drained, back to IDLE */
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			softcall_state = SOFT_IDLE;
			ASSERT(softcall_latest_cpuid == cpu_id);
			softcall_latest_cpuid = -1;

			mutex_exit(&softcall_lock);
			break;
		}

		/* drop the spin lock while running the callback */
		mutex_exit(&softcall_lock);
		func(arg);
		mutex_enter(&softcall_lock);

		/*
		 * No longer need softcall processing from current
		 * interrupt handler because either
		 *  (a) softcall is in SOFT_IDLE state or
		 *  (b) There is a CPU already draining softcall
		 *	queue and the current softlevel1 is no
		 *	longer required.
		 */
		if (softcall_latest_cpuid != cpu_id) {
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			mutex_exit(&softcall_lock);
			break;
		}
	}

out:
	/* run any callback the kernel debugger registered */
	if ((func = kdi_softcall_func) != NULL) {
		kdi_softcall_func = NULL;
		func();
	}
}
460