xref: /onnv-gate/usr/src/uts/common/os/callout.c (revision 0:68f95e015346)
1*0Sstevel@tonic-gate /*
2*0Sstevel@tonic-gate  * CDDL HEADER START
3*0Sstevel@tonic-gate  *
4*0Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*0Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*0Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*0Sstevel@tonic-gate  * with the License.
8*0Sstevel@tonic-gate  *
9*0Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*0Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*0Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*0Sstevel@tonic-gate  * and limitations under the License.
13*0Sstevel@tonic-gate  *
14*0Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*0Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*0Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*0Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*0Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*0Sstevel@tonic-gate  *
20*0Sstevel@tonic-gate  * CDDL HEADER END
21*0Sstevel@tonic-gate  */
22*0Sstevel@tonic-gate /*
23*0Sstevel@tonic-gate  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24*0Sstevel@tonic-gate  * Use is subject to license terms.
25*0Sstevel@tonic-gate  */
26*0Sstevel@tonic-gate 
27*0Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*0Sstevel@tonic-gate 
29*0Sstevel@tonic-gate #include <sys/callo.h>
30*0Sstevel@tonic-gate #include <sys/param.h>
31*0Sstevel@tonic-gate #include <sys/types.h>
32*0Sstevel@tonic-gate #include <sys/systm.h>
33*0Sstevel@tonic-gate #include <sys/cpuvar.h>
34*0Sstevel@tonic-gate #include <sys/thread.h>
35*0Sstevel@tonic-gate #include <sys/kmem.h>
36*0Sstevel@tonic-gate #include <sys/cmn_err.h>
37*0Sstevel@tonic-gate #include <sys/callb.h>
38*0Sstevel@tonic-gate #include <sys/debug.h>
39*0Sstevel@tonic-gate #include <sys/vtrace.h>
40*0Sstevel@tonic-gate #include <sys/sysmacros.h>
41*0Sstevel@tonic-gate #include <sys/sdt.h>
42*0Sstevel@tonic-gate 
43*0Sstevel@tonic-gate /*
44*0Sstevel@tonic-gate  * Callout tables.  See timeout(9F) for details.
45*0Sstevel@tonic-gate  */
46*0Sstevel@tonic-gate static int cpr_stop_callout;
47*0Sstevel@tonic-gate static int callout_fanout;
48*0Sstevel@tonic-gate static int ncallout;
49*0Sstevel@tonic-gate static callout_table_t *callout_table[CALLOUT_TABLES];
50*0Sstevel@tonic-gate 
/*
 * Link callout cp onto the head of the doubly-linked hash chain rooted
 * at cthead.  cnext/cprev name which pair of link fields to use (the ID
 * hash links or the lbolt hash links).  Expands to a bare brace block,
 * NOT do/while (0), because CALLOUT_HASH_UPDATE pastes two of these
 * back-to-back with no intervening semicolons.
 */
#define	CALLOUT_HASH_INSERT(cthead, cp, cnext, cprev)	\
{							\
	callout_t **listpp = &cthead;			\
	callout_t *firstp = *listpp;			\
	cp->cprev = NULL;				\
	cp->cnext = firstp;				\
	if (firstp != NULL)				\
		firstp->cprev = cp;			\
	*listpp = cp;					\
}
61*0Sstevel@tonic-gate 
/*
 * Unlink callout cp from the doubly-linked hash chain rooted at cthead.
 * cnext/cprev name which pair of link fields to use.  Like
 * CALLOUT_HASH_INSERT, this must remain a bare brace block so that
 * CALLOUT_HASH_UPDATE can expand two of them with no semicolons.
 */
#define	CALLOUT_HASH_DELETE(cthead, cp, cnext, cprev)	\
{							\
	callout_t *succp = cp->cnext;			\
	callout_t *predp = cp->cprev;			\
	if (succp != NULL)				\
		succp->cprev = predp;			\
	if (predp == NULL)				\
		cthead = succp;				\
	else						\
		predp->cnext = succp;			\
}
73*0Sstevel@tonic-gate 
/*
 * Insert or delete (INSDEL = INSERT or DELETE) callout cp in both the
 * ID hash and the lbolt hash of table ct.  The caller must hold
 * ct->ct_lock and pass the callout's current id and runtime so the
 * correct hash buckets are addressed.
 *
 * Fix: the original expansion was a bare statement sequence, so using
 * this macro as the body of an unbraced if/else would splice only the
 * first ASSERT under the conditional and leak the rest into the
 * enclosing scope.  Wrapping in do/while (0) makes it a single
 * statement; every existing call site already ends with a semicolon,
 * so this is compatible.
 */
#define	CALLOUT_HASH_UPDATE(INSDEL, ct, cp, id, runtime)		\
	do {								\
		ASSERT(MUTEX_HELD(&ct->ct_lock));			\
		ASSERT(cp->c_xid == id && cp->c_runtime == runtime);	\
		CALLOUT_HASH_##INSDEL(ct->ct_idhash[CALLOUT_IDHASH(id)], \
		    cp, c_idnext, c_idprev)				\
		CALLOUT_HASH_##INSDEL(					\
		    ct->ct_lbhash[CALLOUT_LBHASH(runtime)],		\
		    cp, c_lbnext, c_lbprev)				\
	} while (0)
81*0Sstevel@tonic-gate 
82*0Sstevel@tonic-gate /*
83*0Sstevel@tonic-gate  * Allocate a callout structure.  We try quite hard because we
84*0Sstevel@tonic-gate  * can't sleep, and if we can't do the allocation, we're toast.
85*0Sstevel@tonic-gate  * Failing all, we try a KM_PANIC allocation.
86*0Sstevel@tonic-gate  */
87*0Sstevel@tonic-gate static callout_t *
88*0Sstevel@tonic-gate callout_alloc(callout_table_t *ct)
89*0Sstevel@tonic-gate {
90*0Sstevel@tonic-gate 	size_t size = 0;
91*0Sstevel@tonic-gate 	callout_t *cp = NULL;
92*0Sstevel@tonic-gate 
93*0Sstevel@tonic-gate 	mutex_exit(&ct->ct_lock);
94*0Sstevel@tonic-gate 	cp = kmem_alloc_tryhard(sizeof (callout_t), &size,
95*0Sstevel@tonic-gate 	    KM_NOSLEEP | KM_PANIC);
96*0Sstevel@tonic-gate 	bzero(cp, sizeof (callout_t));
97*0Sstevel@tonic-gate 	ncallout++;
98*0Sstevel@tonic-gate 	mutex_enter(&ct->ct_lock);
99*0Sstevel@tonic-gate 	return (cp);
100*0Sstevel@tonic-gate }
101*0Sstevel@tonic-gate 
102*0Sstevel@tonic-gate /*
103*0Sstevel@tonic-gate  * Arrange that func(arg) be called after delta clock ticks.
104*0Sstevel@tonic-gate  */
105*0Sstevel@tonic-gate static timeout_id_t
106*0Sstevel@tonic-gate timeout_common(void (*func)(void *), void *arg, clock_t delta,
107*0Sstevel@tonic-gate     callout_table_t *ct)
108*0Sstevel@tonic-gate {
109*0Sstevel@tonic-gate 	callout_t *cp;
110*0Sstevel@tonic-gate 	callout_id_t id;
111*0Sstevel@tonic-gate 	clock_t runtime;
112*0Sstevel@tonic-gate 
113*0Sstevel@tonic-gate 	mutex_enter(&ct->ct_lock);
114*0Sstevel@tonic-gate 
115*0Sstevel@tonic-gate 	if ((cp = ct->ct_freelist) == NULL)
116*0Sstevel@tonic-gate 		cp = callout_alloc(ct);
117*0Sstevel@tonic-gate 	else
118*0Sstevel@tonic-gate 		ct->ct_freelist = cp->c_idnext;
119*0Sstevel@tonic-gate 
120*0Sstevel@tonic-gate 	cp->c_func = func;
121*0Sstevel@tonic-gate 	cp->c_arg = arg;
122*0Sstevel@tonic-gate 
123*0Sstevel@tonic-gate 	/*
124*0Sstevel@tonic-gate 	 * Make sure the callout runs at least 1 tick in the future.
125*0Sstevel@tonic-gate 	 */
126*0Sstevel@tonic-gate 	if (delta <= 0)
127*0Sstevel@tonic-gate 		delta = 1;
128*0Sstevel@tonic-gate 	cp->c_runtime = runtime = lbolt + delta;
129*0Sstevel@tonic-gate 
130*0Sstevel@tonic-gate 	/*
131*0Sstevel@tonic-gate 	 * Assign an ID to this callout
132*0Sstevel@tonic-gate 	 */
133*0Sstevel@tonic-gate 	if (delta > CALLOUT_LONGTERM_TICKS)
134*0Sstevel@tonic-gate 		ct->ct_long_id = id = (ct->ct_long_id - CALLOUT_COUNTER_LOW) |
135*0Sstevel@tonic-gate 		    CALLOUT_COUNTER_HIGH;
136*0Sstevel@tonic-gate 	else
137*0Sstevel@tonic-gate 		ct->ct_short_id = id = (ct->ct_short_id - CALLOUT_COUNTER_LOW) |
138*0Sstevel@tonic-gate 		    CALLOUT_COUNTER_HIGH;
139*0Sstevel@tonic-gate 
140*0Sstevel@tonic-gate 	cp->c_xid = id;
141*0Sstevel@tonic-gate 
142*0Sstevel@tonic-gate 	CALLOUT_HASH_UPDATE(INSERT, ct, cp, id, runtime);
143*0Sstevel@tonic-gate 
144*0Sstevel@tonic-gate 	mutex_exit(&ct->ct_lock);
145*0Sstevel@tonic-gate 
146*0Sstevel@tonic-gate 	TRACE_4(TR_FAC_CALLOUT, TR_TIMEOUT,
147*0Sstevel@tonic-gate 		"timeout:%K(%p) in %ld ticks, cp %p",
148*0Sstevel@tonic-gate 		func, arg, delta, cp);
149*0Sstevel@tonic-gate 
150*0Sstevel@tonic-gate 	return ((timeout_id_t)id);
151*0Sstevel@tonic-gate }
152*0Sstevel@tonic-gate 
153*0Sstevel@tonic-gate timeout_id_t
154*0Sstevel@tonic-gate timeout(void (*func)(void *), void *arg, clock_t delta)
155*0Sstevel@tonic-gate {
156*0Sstevel@tonic-gate 	return (timeout_common(func, arg, delta,
157*0Sstevel@tonic-gate 	    callout_table[CALLOUT_TABLE(CALLOUT_NORMAL, CPU->cpu_seqid)]));
158*0Sstevel@tonic-gate 
159*0Sstevel@tonic-gate }
160*0Sstevel@tonic-gate 
161*0Sstevel@tonic-gate timeout_id_t
162*0Sstevel@tonic-gate realtime_timeout(void (*func)(void *), void *arg, clock_t delta)
163*0Sstevel@tonic-gate {
164*0Sstevel@tonic-gate 	return (timeout_common(func, arg, delta,
165*0Sstevel@tonic-gate 	    callout_table[CALLOUT_TABLE(CALLOUT_REALTIME, CPU->cpu_seqid)]));
166*0Sstevel@tonic-gate }
167*0Sstevel@tonic-gate 
168*0Sstevel@tonic-gate clock_t
169*0Sstevel@tonic-gate untimeout(timeout_id_t id_arg)
170*0Sstevel@tonic-gate {
171*0Sstevel@tonic-gate 	callout_id_t id = (callout_id_t)id_arg;
172*0Sstevel@tonic-gate 	callout_table_t *ct;
173*0Sstevel@tonic-gate 	callout_t *cp;
174*0Sstevel@tonic-gate 	callout_id_t xid;
175*0Sstevel@tonic-gate 
176*0Sstevel@tonic-gate 	ct = callout_table[id & CALLOUT_TABLE_MASK];
177*0Sstevel@tonic-gate 
178*0Sstevel@tonic-gate 	mutex_enter(&ct->ct_lock);
179*0Sstevel@tonic-gate 
180*0Sstevel@tonic-gate 	for (cp = ct->ct_idhash[CALLOUT_IDHASH(id)]; cp; cp = cp->c_idnext) {
181*0Sstevel@tonic-gate 
182*0Sstevel@tonic-gate 		if ((xid = cp->c_xid) == id) {
183*0Sstevel@tonic-gate 			clock_t runtime = cp->c_runtime;
184*0Sstevel@tonic-gate 			clock_t time_left = runtime - lbolt;
185*0Sstevel@tonic-gate 
186*0Sstevel@tonic-gate 			CALLOUT_HASH_UPDATE(DELETE, ct, cp, id, runtime);
187*0Sstevel@tonic-gate 			cp->c_idnext = ct->ct_freelist;
188*0Sstevel@tonic-gate 			ct->ct_freelist = cp;
189*0Sstevel@tonic-gate 			mutex_exit(&ct->ct_lock);
190*0Sstevel@tonic-gate 			TRACE_2(TR_FAC_CALLOUT, TR_UNTIMEOUT,
191*0Sstevel@tonic-gate 			    "untimeout:ID %lx ticks_left %ld", id, time_left);
192*0Sstevel@tonic-gate 			return (time_left < 0 ? 0 : time_left);
193*0Sstevel@tonic-gate 		}
194*0Sstevel@tonic-gate 
195*0Sstevel@tonic-gate 		if (xid != (id | CALLOUT_EXECUTING))
196*0Sstevel@tonic-gate 			continue;
197*0Sstevel@tonic-gate 
198*0Sstevel@tonic-gate 		/*
199*0Sstevel@tonic-gate 		 * The callout we want to delete is currently executing.
200*0Sstevel@tonic-gate 		 * The DDI states that we must wait until the callout
201*0Sstevel@tonic-gate 		 * completes before returning, so we block on c_done until
202*0Sstevel@tonic-gate 		 * the callout ID changes (to zero if it's on the freelist,
203*0Sstevel@tonic-gate 		 * or to a new callout ID if it's in use).  This implicitly
204*0Sstevel@tonic-gate 		 * assumes that callout structures are persistent (they are).
205*0Sstevel@tonic-gate 		 */
206*0Sstevel@tonic-gate 		if (cp->c_executor == curthread) {
207*0Sstevel@tonic-gate 			/*
208*0Sstevel@tonic-gate 			 * The timeout handler called untimeout() on itself.
209*0Sstevel@tonic-gate 			 * Stupid, but legal.  We can't wait for the timeout
210*0Sstevel@tonic-gate 			 * to complete without deadlocking, so we just return.
211*0Sstevel@tonic-gate 			 */
212*0Sstevel@tonic-gate 			mutex_exit(&ct->ct_lock);
213*0Sstevel@tonic-gate 			TRACE_1(TR_FAC_CALLOUT, TR_UNTIMEOUT_SELF,
214*0Sstevel@tonic-gate 			    "untimeout_self:ID %x", id);
215*0Sstevel@tonic-gate 			return (-1);
216*0Sstevel@tonic-gate 		}
217*0Sstevel@tonic-gate 		while (cp->c_xid == xid)
218*0Sstevel@tonic-gate 			cv_wait(&cp->c_done, &ct->ct_lock);
219*0Sstevel@tonic-gate 		mutex_exit(&ct->ct_lock);
220*0Sstevel@tonic-gate 		TRACE_1(TR_FAC_CALLOUT, TR_UNTIMEOUT_EXECUTING,
221*0Sstevel@tonic-gate 		    "untimeout_executing:ID %lx", id);
222*0Sstevel@tonic-gate 		return (-1);
223*0Sstevel@tonic-gate 	}
224*0Sstevel@tonic-gate 
225*0Sstevel@tonic-gate 	mutex_exit(&ct->ct_lock);
226*0Sstevel@tonic-gate 	TRACE_1(TR_FAC_CALLOUT, TR_UNTIMEOUT_BOGUS_ID,
227*0Sstevel@tonic-gate 	    "untimeout_bogus_id:ID %lx", id);
228*0Sstevel@tonic-gate 
229*0Sstevel@tonic-gate 	/*
230*0Sstevel@tonic-gate 	 * We didn't find the specified callout ID.  This means either
231*0Sstevel@tonic-gate 	 * (1) the callout already fired, or (2) the caller passed us
232*0Sstevel@tonic-gate 	 * a bogus value.  Perform a sanity check to detect case (2).
233*0Sstevel@tonic-gate 	 */
234*0Sstevel@tonic-gate 	if (id != 0 && (id & (CALLOUT_COUNTER_HIGH | CALLOUT_EXECUTING)) !=
235*0Sstevel@tonic-gate 	    CALLOUT_COUNTER_HIGH)
236*0Sstevel@tonic-gate 		panic("untimeout: impossible timeout id %lx", id);
237*0Sstevel@tonic-gate 
238*0Sstevel@tonic-gate 	return (-1);
239*0Sstevel@tonic-gate }
240*0Sstevel@tonic-gate 
241*0Sstevel@tonic-gate /*
242*0Sstevel@tonic-gate  * Do the actual work of executing callouts.  This routine is called either
243*0Sstevel@tonic-gate  * by a taskq_thread (normal case), or by softcall (realtime case).
244*0Sstevel@tonic-gate  */
245*0Sstevel@tonic-gate static void
246*0Sstevel@tonic-gate callout_execute(callout_table_t *ct)
247*0Sstevel@tonic-gate {
248*0Sstevel@tonic-gate 	callout_t *cp;
249*0Sstevel@tonic-gate 	callout_id_t xid;
250*0Sstevel@tonic-gate 	clock_t runtime;
251*0Sstevel@tonic-gate 
252*0Sstevel@tonic-gate 	mutex_enter(&ct->ct_lock);
253*0Sstevel@tonic-gate 
254*0Sstevel@tonic-gate 	while (((runtime = ct->ct_runtime) - ct->ct_curtime) <= 0) {
255*0Sstevel@tonic-gate 		for (cp = ct->ct_lbhash[CALLOUT_LBHASH(runtime)];
256*0Sstevel@tonic-gate 		    cp != NULL; cp = cp->c_lbnext) {
257*0Sstevel@tonic-gate 			xid = cp->c_xid;
258*0Sstevel@tonic-gate 			if (cp->c_runtime != runtime ||
259*0Sstevel@tonic-gate 			    (xid & CALLOUT_EXECUTING))
260*0Sstevel@tonic-gate 				continue;
261*0Sstevel@tonic-gate 			cp->c_executor = curthread;
262*0Sstevel@tonic-gate 			cp->c_xid = xid |= CALLOUT_EXECUTING;
263*0Sstevel@tonic-gate 			mutex_exit(&ct->ct_lock);
264*0Sstevel@tonic-gate 			DTRACE_PROBE1(callout__start, callout_t *, cp);
265*0Sstevel@tonic-gate 			(*cp->c_func)(cp->c_arg);
266*0Sstevel@tonic-gate 			DTRACE_PROBE1(callout__end, callout_t *, cp);
267*0Sstevel@tonic-gate 			mutex_enter(&ct->ct_lock);
268*0Sstevel@tonic-gate 
269*0Sstevel@tonic-gate 			/*
270*0Sstevel@tonic-gate 			 * Delete callout from hash tables, return to freelist,
271*0Sstevel@tonic-gate 			 * and tell anyone who cares that we're done.
272*0Sstevel@tonic-gate 			 * Even though we dropped and reacquired ct->ct_lock,
273*0Sstevel@tonic-gate 			 * it's OK to pick up where we left off because only
274*0Sstevel@tonic-gate 			 * newly-created timeouts can precede cp on ct_lbhash,
275*0Sstevel@tonic-gate 			 * and those timeouts cannot be due on this tick.
276*0Sstevel@tonic-gate 			 */
277*0Sstevel@tonic-gate 			CALLOUT_HASH_UPDATE(DELETE, ct, cp, xid, runtime);
278*0Sstevel@tonic-gate 			cp->c_idnext = ct->ct_freelist;
279*0Sstevel@tonic-gate 			ct->ct_freelist = cp;
280*0Sstevel@tonic-gate 			cp->c_xid = 0;	/* Indicate completion for c_done */
281*0Sstevel@tonic-gate 			cv_broadcast(&cp->c_done);
282*0Sstevel@tonic-gate 		}
283*0Sstevel@tonic-gate 		/*
284*0Sstevel@tonic-gate 		 * We have completed all callouts that were scheduled to
285*0Sstevel@tonic-gate 		 * run at "runtime".  If the global run time still matches
286*0Sstevel@tonic-gate 		 * our local copy, then we advance the global run time;
287*0Sstevel@tonic-gate 		 * otherwise, another callout thread must have already done so.
288*0Sstevel@tonic-gate 		 */
289*0Sstevel@tonic-gate 		if (ct->ct_runtime == runtime)
290*0Sstevel@tonic-gate 			ct->ct_runtime = runtime + 1;
291*0Sstevel@tonic-gate 	}
292*0Sstevel@tonic-gate 	mutex_exit(&ct->ct_lock);
293*0Sstevel@tonic-gate }
294*0Sstevel@tonic-gate 
295*0Sstevel@tonic-gate /*
296*0Sstevel@tonic-gate  * Schedule any callouts that are due on or before this tick.
297*0Sstevel@tonic-gate  */
298*0Sstevel@tonic-gate static void
299*0Sstevel@tonic-gate callout_schedule_1(callout_table_t *ct)
300*0Sstevel@tonic-gate {
301*0Sstevel@tonic-gate 	callout_t *cp;
302*0Sstevel@tonic-gate 	clock_t curtime, runtime;
303*0Sstevel@tonic-gate 
304*0Sstevel@tonic-gate 	mutex_enter(&ct->ct_lock);
305*0Sstevel@tonic-gate 	ct->ct_curtime = curtime = lbolt;
306*0Sstevel@tonic-gate 	while (((runtime = ct->ct_runtime) - curtime) <= 0) {
307*0Sstevel@tonic-gate 		for (cp = ct->ct_lbhash[CALLOUT_LBHASH(runtime)];
308*0Sstevel@tonic-gate 		    cp != NULL; cp = cp->c_lbnext) {
309*0Sstevel@tonic-gate 			if (cp->c_runtime != runtime ||
310*0Sstevel@tonic-gate 			    (cp->c_xid & CALLOUT_EXECUTING))
311*0Sstevel@tonic-gate 				continue;
312*0Sstevel@tonic-gate 			mutex_exit(&ct->ct_lock);
313*0Sstevel@tonic-gate 			if (ct->ct_taskq == NULL)
314*0Sstevel@tonic-gate 				softcall((void (*)(void *))callout_execute, ct);
315*0Sstevel@tonic-gate 			else
316*0Sstevel@tonic-gate 				(void) taskq_dispatch(ct->ct_taskq,
317*0Sstevel@tonic-gate 				    (task_func_t *)callout_execute, ct,
318*0Sstevel@tonic-gate 				    KM_NOSLEEP);
319*0Sstevel@tonic-gate 			return;
320*0Sstevel@tonic-gate 		}
321*0Sstevel@tonic-gate 		ct->ct_runtime++;
322*0Sstevel@tonic-gate 	}
323*0Sstevel@tonic-gate 	mutex_exit(&ct->ct_lock);
324*0Sstevel@tonic-gate }
325*0Sstevel@tonic-gate 
326*0Sstevel@tonic-gate /*
327*0Sstevel@tonic-gate  * Schedule callouts for all callout tables.  Called by clock() on each tick.
328*0Sstevel@tonic-gate  */
329*0Sstevel@tonic-gate void
330*0Sstevel@tonic-gate callout_schedule(void)
331*0Sstevel@tonic-gate {
332*0Sstevel@tonic-gate 	int f, t;
333*0Sstevel@tonic-gate 
334*0Sstevel@tonic-gate 	if (cpr_stop_callout)
335*0Sstevel@tonic-gate 		return;
336*0Sstevel@tonic-gate 
337*0Sstevel@tonic-gate 	for (t = 0; t < CALLOUT_NTYPES; t++)
338*0Sstevel@tonic-gate 		for (f = 0; f < callout_fanout; f++)
339*0Sstevel@tonic-gate 			callout_schedule_1(callout_table[CALLOUT_TABLE(t, f)]);
340*0Sstevel@tonic-gate }
341*0Sstevel@tonic-gate 
342*0Sstevel@tonic-gate /*
343*0Sstevel@tonic-gate  * Callback handler used by CPR to stop and resume callouts.
344*0Sstevel@tonic-gate  */
345*0Sstevel@tonic-gate /*ARGSUSED*/
346*0Sstevel@tonic-gate static boolean_t
347*0Sstevel@tonic-gate callout_cpr_callb(void *arg, int code)
348*0Sstevel@tonic-gate {
349*0Sstevel@tonic-gate 	cpr_stop_callout = (code == CB_CODE_CPR_CHKPT);
350*0Sstevel@tonic-gate 	return (B_TRUE);
351*0Sstevel@tonic-gate }
352*0Sstevel@tonic-gate 
353*0Sstevel@tonic-gate /*
354*0Sstevel@tonic-gate  * Initialize all callout tables.  Called at boot time just before clkstart().
355*0Sstevel@tonic-gate  */
356*0Sstevel@tonic-gate void
357*0Sstevel@tonic-gate callout_init(void)
358*0Sstevel@tonic-gate {
359*0Sstevel@tonic-gate 	int f, t;
360*0Sstevel@tonic-gate 	int table_id;
361*0Sstevel@tonic-gate 	callout_table_t *ct;
362*0Sstevel@tonic-gate 
363*0Sstevel@tonic-gate 	callout_fanout = MIN(CALLOUT_FANOUT, max_ncpus);
364*0Sstevel@tonic-gate 
365*0Sstevel@tonic-gate 	for (t = 0; t < CALLOUT_NTYPES; t++) {
366*0Sstevel@tonic-gate 		for (f = 0; f < CALLOUT_FANOUT; f++) {
367*0Sstevel@tonic-gate 			table_id = CALLOUT_TABLE(t, f);
368*0Sstevel@tonic-gate 			if (f >= callout_fanout) {
369*0Sstevel@tonic-gate 				callout_table[table_id] =
370*0Sstevel@tonic-gate 				    callout_table[table_id - callout_fanout];
371*0Sstevel@tonic-gate 				continue;
372*0Sstevel@tonic-gate 			}
373*0Sstevel@tonic-gate 			ct = kmem_zalloc(sizeof (callout_table_t), KM_SLEEP);
374*0Sstevel@tonic-gate 			callout_table[table_id] = ct;
375*0Sstevel@tonic-gate 			ct->ct_short_id = (callout_id_t)table_id |
376*0Sstevel@tonic-gate 			    CALLOUT_COUNTER_HIGH;
377*0Sstevel@tonic-gate 			ct->ct_long_id = ct->ct_short_id | CALLOUT_LONGTERM;
378*0Sstevel@tonic-gate 			ct->ct_curtime = ct->ct_runtime = lbolt;
379*0Sstevel@tonic-gate 			if (t == CALLOUT_NORMAL) {
380*0Sstevel@tonic-gate 				/*
381*0Sstevel@tonic-gate 				 * Each callout thread consumes exactly one
382*0Sstevel@tonic-gate 				 * task structure while active.  Therefore,
383*0Sstevel@tonic-gate 				 * prepopulating with 2 * CALLOUT_THREADS tasks
384*0Sstevel@tonic-gate 				 * ensures that there's at least one task per
385*0Sstevel@tonic-gate 				 * thread that's either scheduled or on the
386*0Sstevel@tonic-gate 				 * freelist.  In turn, this guarantees that
387*0Sstevel@tonic-gate 				 * taskq_dispatch() will always either succeed
388*0Sstevel@tonic-gate 				 * (because there's a free task structure) or
389*0Sstevel@tonic-gate 				 * be unnecessary (because "callout_excute(ct)"
390*0Sstevel@tonic-gate 				 * has already scheduled).
391*0Sstevel@tonic-gate 				 */
392*0Sstevel@tonic-gate 				ct->ct_taskq =
393*0Sstevel@tonic-gate 				    taskq_create_instance("callout_taskq", f,
394*0Sstevel@tonic-gate 				    CALLOUT_THREADS, maxclsyspri,
395*0Sstevel@tonic-gate 				    2 * CALLOUT_THREADS, 2 * CALLOUT_THREADS,
396*0Sstevel@tonic-gate 				    TASKQ_PREPOPULATE | TASKQ_CPR_SAFE);
397*0Sstevel@tonic-gate 			}
398*0Sstevel@tonic-gate 		}
399*0Sstevel@tonic-gate 	}
400*0Sstevel@tonic-gate 	(void) callb_add(callout_cpr_callb, 0, CB_CL_CPR_CALLOUT, "callout");
401*0Sstevel@tonic-gate }
402