/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/callo.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/sysmacros.h>
#include <sys/sdt.h>

/*
 * Callout tables.  See timeout(9F) for details.
 */
static int cpr_stop_callout;
static int callout_fanout;
static int ncallout;
static callout_table_t *callout_table[CALLOUT_TABLES];

#define	CALLOUT_HASH_INSERT(cthead, cp, cnext, cprev)	\
{							\
	callout_t **headpp = &cthead;			\
	callout_t *headp = *headpp;			\
	cp->cnext = headp;				\
	cp->cprev = NULL;				\
	if (headp != NULL)				\
		headp->cprev = cp;			\
	*headpp = cp;					\
}

#define	CALLOUT_HASH_DELETE(cthead, cp, cnext, cprev)	\
{							\
	callout_t *nextp = cp->cnext;			\
	callout_t *prevp = cp->cprev;			\
	if (nextp != NULL)				\
		nextp->cprev = prevp;			\
	if (prevp != NULL)				\
		prevp->cnext = nextp;			\
	else						\
		cthead = nextp;				\
}

#define	CALLOUT_HASH_UPDATE(INSDEL, ct, cp, id, runtime)		\
	ASSERT(MUTEX_HELD(&ct->ct_lock));				\
	ASSERT(cp->c_xid == id && cp->c_runtime == runtime);		\
	CALLOUT_HASH_##INSDEL(ct->ct_idhash[CALLOUT_IDHASH(id)],	\
	cp, c_idnext, c_idprev)						\
	CALLOUT_HASH_##INSDEL(ct->ct_lbhash[CALLOUT_LBHASH(runtime)],	\
	cp, c_lbnext, c_lbprev)

/*
 * Allocate a callout structure.  We try quite hard because we
 * can't sleep, and if we can't do the allocation, we're toast.
 * Failing all else, we try a KM_PANIC allocation.
 */
static callout_t *
callout_alloc(callout_table_t *ct)
{
	size_t size = 0;
	callout_t *cp = NULL;

	mutex_exit(&ct->ct_lock);
	cp = kmem_alloc_tryhard(sizeof (callout_t), &size,
	    KM_NOSLEEP | KM_PANIC);
	bzero(cp, sizeof (callout_t));
	ncallout++;
	mutex_enter(&ct->ct_lock);
	return (cp);
}
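/*
 * Illustrative sketch (comment only, not compiled): each callout is
 * threaded onto two doubly-linked chains at once -- an ID hash chain,
 * so untimeout() can find it by ID, and a lbolt hash chain, so the
 * expiration scan can find everything due at a given tick.  After
 *
 *	CALLOUT_HASH_UPDATE(INSERT, ct, cp, id, runtime);
 *
 * cp is reachable both from ct->ct_idhash[CALLOUT_IDHASH(id)] via
 * c_idnext/c_idprev and from ct->ct_lbhash[CALLOUT_LBHASH(runtime)]
 * via c_lbnext/c_lbprev; the matching DELETE unlinks it from both.
 */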
/*
 * Arrange that func(arg) be called after delta clock ticks.
 */
static timeout_id_t
timeout_common(void (*func)(void *), void *arg, clock_t delta,
    callout_table_t *ct)
{
	callout_t *cp;
	callout_id_t id;
	clock_t runtime;

	mutex_enter(&ct->ct_lock);

	if ((cp = ct->ct_freelist) == NULL)
		cp = callout_alloc(ct);
	else
		ct->ct_freelist = cp->c_idnext;

	cp->c_func = func;
	cp->c_arg = arg;

	/*
	 * Make sure the callout runs at least 1 tick in the future.
	 */
	if (delta <= 0)
		delta = 1;
	cp->c_runtime = runtime = lbolt + delta;

	/*
	 * Assign an ID to this callout.
	 */
	if (delta > CALLOUT_LONGTERM_TICKS)
		ct->ct_long_id = id = (ct->ct_long_id - CALLOUT_COUNTER_LOW) |
		    CALLOUT_COUNTER_HIGH;
	else
		ct->ct_short_id = id = (ct->ct_short_id - CALLOUT_COUNTER_LOW) |
		    CALLOUT_COUNTER_HIGH;

	cp->c_xid = id;

	CALLOUT_HASH_UPDATE(INSERT, ct, cp, id, runtime);

	mutex_exit(&ct->ct_lock);

	TRACE_4(TR_FAC_CALLOUT, TR_TIMEOUT,
	    "timeout:%K(%p) in %ld ticks, cp %p",
	    func, arg, delta, cp);

	return ((timeout_id_t)id);
}

timeout_id_t
timeout(void (*func)(void *), void *arg, clock_t delta)
{
	return (timeout_common(func, arg, delta,
	    callout_table[CALLOUT_TABLE(CALLOUT_NORMAL, CPU->cpu_seqid)]));
}

timeout_id_t
realtime_timeout(void (*func)(void *), void *arg, clock_t delta)
{
	return (timeout_common(func, arg, delta,
	    callout_table[CALLOUT_TABLE(CALLOUT_REALTIME, CPU->cpu_seqid)]));
}
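/*
 * Usage sketch (illustrative only; my_state_t, my_poll and
 * MY_POLL_TICKS are hypothetical, not part of this file).  A driver
 * arms a periodic poll by rescheduling itself from its own handler,
 * saving the returned ID so the callout can later be cancelled:
 *
 *	static void
 *	my_poll(void *arg)
 *	{
 *		my_state_t *sp = arg;
 *
 *		... do the periodic work ...
 *		sp->my_tid = timeout(my_poll, sp, MY_POLL_TICKS);
 *	}
 *
 * realtime_timeout() is armed the same way, but its handler is run
 * via softcall rather than from a taskq thread (see callout_schedule_1
 * and callout_init below).
 */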
clock_t
untimeout(timeout_id_t id_arg)
{
	callout_id_t id = (callout_id_t)id_arg;
	callout_table_t *ct;
	callout_t *cp;
	callout_id_t xid;

	ct = callout_table[id & CALLOUT_TABLE_MASK];

	mutex_enter(&ct->ct_lock);

	for (cp = ct->ct_idhash[CALLOUT_IDHASH(id)]; cp; cp = cp->c_idnext) {

		if ((xid = cp->c_xid) == id) {
			clock_t runtime = cp->c_runtime;
			clock_t time_left = runtime - lbolt;

			CALLOUT_HASH_UPDATE(DELETE, ct, cp, id, runtime);
			cp->c_idnext = ct->ct_freelist;
			ct->ct_freelist = cp;
			mutex_exit(&ct->ct_lock);
			TRACE_2(TR_FAC_CALLOUT, TR_UNTIMEOUT,
			    "untimeout:ID %lx ticks_left %ld", id, time_left);
			return (time_left < 0 ? 0 : time_left);
		}

		if (xid != (id | CALLOUT_EXECUTING))
			continue;

		/*
		 * The callout we want to delete is currently executing.
		 * The DDI states that we must wait until the callout
		 * completes before returning, so we block on c_done until
		 * the callout ID changes (to zero if it's on the freelist,
		 * or to a new callout ID if it's in use).  This implicitly
		 * assumes that callout structures are persistent (they are).
		 */
		if (cp->c_executor == curthread) {
			/*
			 * The timeout handler called untimeout() on itself.
			 * Stupid, but legal.  We can't wait for the timeout
			 * to complete without deadlocking, so we just return.
			 */
			mutex_exit(&ct->ct_lock);
			TRACE_1(TR_FAC_CALLOUT, TR_UNTIMEOUT_SELF,
			    "untimeout_self:ID %x", id);
			return (-1);
		}
		while (cp->c_xid == xid)
			cv_wait(&cp->c_done, &ct->ct_lock);
		mutex_exit(&ct->ct_lock);
		TRACE_1(TR_FAC_CALLOUT, TR_UNTIMEOUT_EXECUTING,
		    "untimeout_executing:ID %lx", id);
		return (-1);
	}

	mutex_exit(&ct->ct_lock);
	TRACE_1(TR_FAC_CALLOUT, TR_UNTIMEOUT_BOGUS_ID,
	    "untimeout_bogus_id:ID %lx", id);

	/*
	 * We didn't find the specified callout ID.  This means either
	 * (1) the callout already fired, or (2) the caller passed us
	 * a bogus value.  Perform a sanity check to detect case (2).
	 */
	if (id != 0 && (id & (CALLOUT_COUNTER_HIGH | CALLOUT_EXECUTING)) !=
	    CALLOUT_COUNTER_HIGH)
		panic("untimeout: impossible timeout id %lx", id);

	return (-1);
}
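/*
 * Cancellation sketch (illustrative; sp->my_tid is the hypothetical
 * ID saved by the timeout() sketch above):
 *
 *	timeout_id_t tid = sp->my_tid;
 *
 *	sp->my_tid = 0;
 *	if (tid != 0)
 *		(void) untimeout(tid);
 *
 * A non-negative return is the number of ticks the callout had left;
 * -1 means it could not be cancelled, either because it had already
 * fired or because it was executing, in which case untimeout() has
 * already waited for the handler to complete (unless the caller *is*
 * the handler).
 */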
/*
 * Do the actual work of executing callouts.  This routine is called either
 * by a taskq_thread (normal case), or by softcall (realtime case).
 */
static void
callout_execute(callout_table_t *ct)
{
	callout_t *cp;
	callout_id_t xid;
	clock_t runtime;

	mutex_enter(&ct->ct_lock);

	while (((runtime = ct->ct_runtime) - ct->ct_curtime) <= 0) {
		for (cp = ct->ct_lbhash[CALLOUT_LBHASH(runtime)];
		    cp != NULL; cp = cp->c_lbnext) {
			xid = cp->c_xid;
			if (cp->c_runtime != runtime ||
			    (xid & CALLOUT_EXECUTING))
				continue;
			cp->c_executor = curthread;
			cp->c_xid = xid |= CALLOUT_EXECUTING;
			mutex_exit(&ct->ct_lock);
			DTRACE_PROBE1(callout__start, callout_t *, cp);
			(*cp->c_func)(cp->c_arg);
			DTRACE_PROBE1(callout__end, callout_t *, cp);
			mutex_enter(&ct->ct_lock);

			/*
			 * Delete callout from hash tables, return to
			 * freelist, and tell anyone who cares that we're
			 * done.  Even though we dropped and reacquired
			 * ct->ct_lock, it's OK to pick up where we left off
			 * because only newly-created timeouts can precede cp
			 * on ct_lbhash, and those timeouts cannot be due on
			 * this tick.
			 */
			CALLOUT_HASH_UPDATE(DELETE, ct, cp, xid, runtime);
			cp->c_idnext = ct->ct_freelist;
			ct->ct_freelist = cp;
			cp->c_xid = 0;	/* Indicate completion for c_done */
			cv_broadcast(&cp->c_done);
		}
		/*
		 * We have completed all callouts that were scheduled to
		 * run at "runtime".  If the global run time still matches
		 * our local copy, then we advance the global run time;
		 * otherwise, another callout thread must have already done so.
		 */
		if (ct->ct_runtime == runtime)
			ct->ct_runtime = runtime + 1;
	}
	mutex_exit(&ct->ct_lock);
}
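/*
 * Note on the handshake above: setting CALLOUT_EXECUTING in c_xid
 * before dropping ct_lock is what lets untimeout() tell a pending
 * callout (unlink it from the hashes) apart from a running one (block
 * on c_done until c_xid changes).  Clearing c_xid to zero before the
 * cv_broadcast() is therefore required for correctness, not cosmetic.
 */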
/*
 * Schedule any callouts that are due on or before this tick.
 */
static void
callout_schedule_1(callout_table_t *ct)
{
	callout_t *cp;
	clock_t curtime, runtime;

	mutex_enter(&ct->ct_lock);
	ct->ct_curtime = curtime = lbolt;
	while (((runtime = ct->ct_runtime) - curtime) <= 0) {
		for (cp = ct->ct_lbhash[CALLOUT_LBHASH(runtime)];
		    cp != NULL; cp = cp->c_lbnext) {
			if (cp->c_runtime != runtime ||
			    (cp->c_xid & CALLOUT_EXECUTING))
				continue;
			mutex_exit(&ct->ct_lock);
			if (ct->ct_taskq == NULL)
				softcall((void (*)(void *))callout_execute,
				    ct);
			else
				(void) taskq_dispatch(ct->ct_taskq,
				    (task_func_t *)callout_execute, ct,
				    KM_NOSLEEP);
			return;
		}
		ct->ct_runtime++;
	}
	mutex_exit(&ct->ct_lock);
}

/*
 * Schedule callouts for all callout tables.  Called by clock() on each tick.
 */
void
callout_schedule(void)
{
	int f, t;

	if (cpr_stop_callout)
		return;

	for (t = 0; t < CALLOUT_NTYPES; t++)
		for (f = 0; f < callout_fanout; f++)
			callout_schedule_1(callout_table[CALLOUT_TABLE(t, f)]);
}

/*
 * Callback handler used by CPR to stop and resume callouts.
 */
/*ARGSUSED*/
static boolean_t
callout_cpr_callb(void *arg, int code)
{
	cpr_stop_callout = (code == CB_CODE_CPR_CHKPT);
	return (B_TRUE);
}
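/*
 * Fanout note: timeout() picks a table with
 * CALLOUT_TABLE(CALLOUT_NORMAL, CPU->cpu_seqid), spreading timeouts
 * across up to callout_fanout distinct tables per type so that CPUs
 * are not all contending on a single ct_lock.  callout_init() below
 * makes the remaining table slots alias the real ones, so any
 * cpu_seqid maps to an initialized table.
 */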
/*
 * Initialize all callout tables.  Called at boot time just before clkstart().
 */
void
callout_init(void)
{
	int f, t;
	int table_id;
	callout_table_t *ct;

	callout_fanout = MIN(CALLOUT_FANOUT, max_ncpus);

	for (t = 0; t < CALLOUT_NTYPES; t++) {
		for (f = 0; f < CALLOUT_FANOUT; f++) {
			table_id = CALLOUT_TABLE(t, f);
			if (f >= callout_fanout) {
				callout_table[table_id] =
				    callout_table[table_id - callout_fanout];
				continue;
			}
			ct = kmem_zalloc(sizeof (callout_table_t), KM_SLEEP);
			callout_table[table_id] = ct;
			ct->ct_short_id = (callout_id_t)table_id |
			    CALLOUT_COUNTER_HIGH;
			ct->ct_long_id = ct->ct_short_id | CALLOUT_LONGTERM;
			ct->ct_curtime = ct->ct_runtime = lbolt;
			if (t == CALLOUT_NORMAL) {
				/*
				 * Each callout thread consumes exactly one
				 * task structure while active.  Therefore,
				 * prepopulating with 2 * CALLOUT_THREADS tasks
				 * ensures that there's at least one task per
				 * thread that's either scheduled or on the
				 * freelist.  In turn, this guarantees that
				 * taskq_dispatch() will always either succeed
				 * (because there's a free task structure) or
				 * be unnecessary (because "callout_execute(ct)"
				 * has already been scheduled).
				 */
				ct->ct_taskq =
				    taskq_create_instance("callout_taskq", f,
				    CALLOUT_THREADS, maxclsyspri,
				    2 * CALLOUT_THREADS, 2 * CALLOUT_THREADS,
				    TASKQ_PREPOPULATE | TASKQ_CPR_SAFE);
			}
		}
	}
	(void) callb_add(callout_cpr_callb, 0, CB_CL_CPR_CALLOUT, "callout");
}