1*0Sstevel@tonic-gate /* 2*0Sstevel@tonic-gate * CDDL HEADER START 3*0Sstevel@tonic-gate * 4*0Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5*0Sstevel@tonic-gate * Common Development and Distribution License, Version 1.0 only 6*0Sstevel@tonic-gate * (the "License"). You may not use this file except in compliance 7*0Sstevel@tonic-gate * with the License. 8*0Sstevel@tonic-gate * 9*0Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10*0Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 11*0Sstevel@tonic-gate * See the License for the specific language governing permissions 12*0Sstevel@tonic-gate * and limitations under the License. 13*0Sstevel@tonic-gate * 14*0Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 15*0Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16*0Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 17*0Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 18*0Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 19*0Sstevel@tonic-gate * 20*0Sstevel@tonic-gate * CDDL HEADER END 21*0Sstevel@tonic-gate */ 22*0Sstevel@tonic-gate /* 23*0Sstevel@tonic-gate * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24*0Sstevel@tonic-gate * Use is subject to license terms. 25*0Sstevel@tonic-gate */ 26*0Sstevel@tonic-gate 27*0Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 28*0Sstevel@tonic-gate 29*0Sstevel@tonic-gate /* 30*0Sstevel@tonic-gate * Squeues - TCP/IP serialization mechanism. 31*0Sstevel@tonic-gate * 32*0Sstevel@tonic-gate * This is a general purpose high-performance serialization mechanism. 
It is 33*0Sstevel@tonic-gate * similar to a taskq with a single worker thread, the difference is that it 34*0Sstevel@tonic-gate * does not imply a context switch - the thread placing a request may actually 35*0Sstevel@tonic-gate * process it. It is also biased for processing requests in interrupt context. 36*0Sstevel@tonic-gate * 37*0Sstevel@tonic-gate * Each squeue has a worker thread which may optionally be bound to a CPU. 38*0Sstevel@tonic-gate * 39*0Sstevel@tonic-gate * Only one thread may process requests from a given squeue at any time. This is 40*0Sstevel@tonic-gate * called "entering" squeue. 41*0Sstevel@tonic-gate * 42*0Sstevel@tonic-gate * Each dispatched request is processed either by 43*0Sstevel@tonic-gate * 44*0Sstevel@tonic-gate * a) Dispatching thread or 45*0Sstevel@tonic-gate * b) Some other thread that is currently processing squeue at the time of 46*0Sstevel@tonic-gate * request or 47*0Sstevel@tonic-gate * c) worker thread. 48*0Sstevel@tonic-gate * 49*0Sstevel@tonic-gate * INTERFACES: 50*0Sstevel@tonic-gate * 51*0Sstevel@tonic-gate * squeue_t *squeue_create(name, bind, wait, pri) 52*0Sstevel@tonic-gate * 53*0Sstevel@tonic-gate * name: symbolic name for squeue. 54*0Sstevel@tonic-gate * wait: time to wait before waiking the worker thread after queueing 55*0Sstevel@tonic-gate * request. 56*0Sstevel@tonic-gate * bind: preferred CPU binding for the worker thread. 57*0Sstevel@tonic-gate * pri: thread priority for the worker thread. 58*0Sstevel@tonic-gate * 59*0Sstevel@tonic-gate * This function never fails and may sleep. It returns a transparent pointer 60*0Sstevel@tonic-gate * to the squeue_t structure that is passed to all other squeue operations. 61*0Sstevel@tonic-gate * 62*0Sstevel@tonic-gate * void squeue_bind(sqp, bind) 63*0Sstevel@tonic-gate * 64*0Sstevel@tonic-gate * Bind squeue worker thread to a CPU specified by the 'bind' argument. 
The 65*0Sstevel@tonic-gate * 'bind' value of -1 binds to the preferred thread specified for 66*0Sstevel@tonic-gate * squeue_create. 67*0Sstevel@tonic-gate * 68*0Sstevel@tonic-gate * NOTE: Any value of 'bind' other then -1 is not supported currently, but the 69*0Sstevel@tonic-gate * API is present - in the future it may be useful to specify different 70*0Sstevel@tonic-gate * binding. 71*0Sstevel@tonic-gate * 72*0Sstevel@tonic-gate * void squeue_unbind(sqp) 73*0Sstevel@tonic-gate * 74*0Sstevel@tonic-gate * Unbind the worker thread from its preferred CPU. 75*0Sstevel@tonic-gate * 76*0Sstevel@tonic-gate * void squeue_enter(*sqp, *mp, proc, arg, tag) 77*0Sstevel@tonic-gate * 78*0Sstevel@tonic-gate * Post a single request for processing. Each request consists of mblock 'mp', 79*0Sstevel@tonic-gate * function 'proc' to execute and an argument 'arg' to pass to this 80*0Sstevel@tonic-gate * function. The function is called as (*proc)(arg, mp, sqp); The tag is an 81*0Sstevel@tonic-gate * arbitrary number from 0 to 255 which will be stored in mp to track exact 82*0Sstevel@tonic-gate * caller of squeue_enter. The combination of function name and the tag should 83*0Sstevel@tonic-gate * provide enough information to identify the caller. 84*0Sstevel@tonic-gate * 85*0Sstevel@tonic-gate * If no one is processing the squeue, squeue_enter() will call the function 86*0Sstevel@tonic-gate * immediately. Otherwise it will add the request to the queue for later 87*0Sstevel@tonic-gate * processing. Once the function is executed, the thread may continue 88*0Sstevel@tonic-gate * executing all other requests pending on the queue. 89*0Sstevel@tonic-gate * 90*0Sstevel@tonic-gate * NOTE: The tagging information is only used when SQUEUE_DEBUG is set to 1. 91*0Sstevel@tonic-gate * NOTE: The argument can be conn_t only. 
Ideally we'd like to have generic 92*0Sstevel@tonic-gate * argument, but we want to drop connection reference count here - this 93*0Sstevel@tonic-gate * improves tail-call optimizations. 94*0Sstevel@tonic-gate * XXX: The arg should have type conn_t. 95*0Sstevel@tonic-gate * 96*0Sstevel@tonic-gate * void squeue_enter_nodrain(*sqp, *mp, proc, arg, tag) 97*0Sstevel@tonic-gate * 98*0Sstevel@tonic-gate * Same as squeue_enter(), but the entering thread will only try to execute a 99*0Sstevel@tonic-gate * single request. It will not continue executing any pending requests. 100*0Sstevel@tonic-gate * 101*0Sstevel@tonic-gate * void squeue_fill(*sqp, *mp, proc, arg, tag) 102*0Sstevel@tonic-gate * 103*0Sstevel@tonic-gate * Just place the request on the queue without trying to execute it. Arrange 104*0Sstevel@tonic-gate * for the worker thread to process the request. 105*0Sstevel@tonic-gate * 106*0Sstevel@tonic-gate * void squeue_profile_enable(sqp) 107*0Sstevel@tonic-gate * void squeue_profile_disable(sqp) 108*0Sstevel@tonic-gate * 109*0Sstevel@tonic-gate * Enable or disable profiling for specified 'sqp'. Profiling is only 110*0Sstevel@tonic-gate * available when SQUEUE_PROFILE is set. 111*0Sstevel@tonic-gate * 112*0Sstevel@tonic-gate * void squeue_profile_reset(sqp) 113*0Sstevel@tonic-gate * 114*0Sstevel@tonic-gate * Reset all profiling information to zero. Profiling is only 115*0Sstevel@tonic-gate * available when SQUEUE_PROFILE is set. 116*0Sstevel@tonic-gate * 117*0Sstevel@tonic-gate * void squeue_profile_start() 118*0Sstevel@tonic-gate * void squeue_profile_stop() 119*0Sstevel@tonic-gate * 120*0Sstevel@tonic-gate * Globally enable or disabled profiling for all squeues. 121*0Sstevel@tonic-gate * 122*0Sstevel@tonic-gate * uintptr_t *squeue_getprivate(sqp, p) 123*0Sstevel@tonic-gate * 124*0Sstevel@tonic-gate * Each squeue keeps small amount of private data space available for various 125*0Sstevel@tonic-gate * consumers. Current consumers include TCP and NCA. 
Other consumers need to 126*0Sstevel@tonic-gate * add their private tag to the sqprivate_t enum. The private information is 127*0Sstevel@tonic-gate * limited to an uintptr_t value. The squeue has no knowledge of its content 128*0Sstevel@tonic-gate * and does not manage it in any way. 129*0Sstevel@tonic-gate * 130*0Sstevel@tonic-gate * The typical use may be a breakdown of data structures per CPU (since 131*0Sstevel@tonic-gate * squeues are usually per CPU). See NCA for examples of use. 132*0Sstevel@tonic-gate * Currently 'p' may have one legal value SQPRIVATE_TCP. 133*0Sstevel@tonic-gate * 134*0Sstevel@tonic-gate * processorid_t squeue_binding(sqp) 135*0Sstevel@tonic-gate * 136*0Sstevel@tonic-gate * Returns the CPU binding for a given squeue. 137*0Sstevel@tonic-gate * 138*0Sstevel@tonic-gate * TUNABALES: 139*0Sstevel@tonic-gate * 140*0Sstevel@tonic-gate * squeue_intrdrain_ms: Maximum time in ms interrupts spend draining any 141*0Sstevel@tonic-gate * squeue. Note that this is approximation - squeues have no control on the 142*0Sstevel@tonic-gate * time it takes to process each request. This limit is only checked 143*0Sstevel@tonic-gate * between processing individual messages. 144*0Sstevel@tonic-gate * Default: 20 ms. 145*0Sstevel@tonic-gate * 146*0Sstevel@tonic-gate * squeue_writerdrain_ms: Maximum time in ms non-interrupts spend draining any 147*0Sstevel@tonic-gate * squeue. Note that this is approximation - squeues have no control on the 148*0Sstevel@tonic-gate * time it takes to process each request. This limit is only checked 149*0Sstevel@tonic-gate * between processing individual messages. 150*0Sstevel@tonic-gate * Default: 10 ms. 151*0Sstevel@tonic-gate * 152*0Sstevel@tonic-gate * squeue_workerdrain_ms: Maximum time in ms worker thread spends draining any 153*0Sstevel@tonic-gate * squeue. Note that this is approximation - squeues have no control on the 154*0Sstevel@tonic-gate * time it takes to process each request. 
This limit is only checked 155*0Sstevel@tonic-gate * between processing individual messages. 156*0Sstevel@tonic-gate * Default: 10 ms. 157*0Sstevel@tonic-gate * 158*0Sstevel@tonic-gate * squeue_workerwait_ms: When worker thread is interrupted because workerdrain 159*0Sstevel@tonic-gate * expired, how much time to wait before waking worker thread again. 160*0Sstevel@tonic-gate * Default: 10 ms. 161*0Sstevel@tonic-gate * 162*0Sstevel@tonic-gate * DEFINES: 163*0Sstevel@tonic-gate * 164*0Sstevel@tonic-gate * SQUEUE_DEBUG: If defined as 1, special code is compiled in which records 165*0Sstevel@tonic-gate * additional information aiding debugging is recorded in squeue. 166*0Sstevel@tonic-gate * 167*0Sstevel@tonic-gate * SQUEUE_PROFILE: If defined as 1, special code is compiled in which collects 168*0Sstevel@tonic-gate * various squeue statistics and exports them as kstats. 169*0Sstevel@tonic-gate * 170*0Sstevel@tonic-gate * Ideally we would like both SQUEUE_DEBUG and SQUEUE_PROFILE to be always set, 171*0Sstevel@tonic-gate * but it affects performance, so they are enabled on DEBUG kernels and disabled 172*0Sstevel@tonic-gate * on non-DEBUG by default. 173*0Sstevel@tonic-gate */ 174*0Sstevel@tonic-gate 175*0Sstevel@tonic-gate #include <sys/types.h> 176*0Sstevel@tonic-gate #include <sys/cmn_err.h> 177*0Sstevel@tonic-gate #include <sys/debug.h> 178*0Sstevel@tonic-gate #include <sys/kmem.h> 179*0Sstevel@tonic-gate #include <sys/cpuvar.h> 180*0Sstevel@tonic-gate #include <sys/condvar_impl.h> 181*0Sstevel@tonic-gate #include <sys/systm.h> 182*0Sstevel@tonic-gate #include <sys/callb.h> 183*0Sstevel@tonic-gate #include <sys/sdt.h> 184*0Sstevel@tonic-gate #include <sys/ddi.h> 185*0Sstevel@tonic-gate 186*0Sstevel@tonic-gate #include <inet/ipclassifier.h> 187*0Sstevel@tonic-gate 188*0Sstevel@tonic-gate /* 189*0Sstevel@tonic-gate * State flags. 190*0Sstevel@tonic-gate * Note: The MDB IP module depends on the values of these flags. 
191*0Sstevel@tonic-gate */ 192*0Sstevel@tonic-gate #define SQS_PROC 0x0001 /* being processed */ 193*0Sstevel@tonic-gate #define SQS_WORKER 0x0002 /* worker thread */ 194*0Sstevel@tonic-gate #define SQS_ENTER 0x0004 /* enter thread */ 195*0Sstevel@tonic-gate #define SQS_FAST 0x0008 /* enter-fast thread */ 196*0Sstevel@tonic-gate #define SQS_USER 0x0010 /* A non interrupt user */ 197*0Sstevel@tonic-gate #define SQS_BOUND 0x0020 /* Worker thread is bound */ 198*0Sstevel@tonic-gate #define SQS_PROFILE 0x0040 /* Enable profiling */ 199*0Sstevel@tonic-gate #define SQS_REENTER 0x0080 /* Re entered thread */ 200*0Sstevel@tonic-gate #define SQS_TMO_PROG 0x0100 /* Timeout is being set */ 201*0Sstevel@tonic-gate 202*0Sstevel@tonic-gate #ifdef DEBUG 203*0Sstevel@tonic-gate #define SQUEUE_DEBUG 1 204*0Sstevel@tonic-gate #define SQUEUE_PROFILE 1 205*0Sstevel@tonic-gate #else 206*0Sstevel@tonic-gate #define SQUEUE_DEBUG 0 207*0Sstevel@tonic-gate #define SQUEUE_PROFILE 0 208*0Sstevel@tonic-gate #endif 209*0Sstevel@tonic-gate 210*0Sstevel@tonic-gate #include <sys/squeue_impl.h> 211*0Sstevel@tonic-gate 212*0Sstevel@tonic-gate static void squeue_fire(void *); 213*0Sstevel@tonic-gate static void squeue_drain(squeue_t *, uint_t, clock_t); 214*0Sstevel@tonic-gate static void squeue_worker(squeue_t *sqp); 215*0Sstevel@tonic-gate 216*0Sstevel@tonic-gate #if SQUEUE_PROFILE 217*0Sstevel@tonic-gate static kmutex_t squeue_kstat_lock; 218*0Sstevel@tonic-gate static int squeue_kstat_update(kstat_t *, int); 219*0Sstevel@tonic-gate #endif 220*0Sstevel@tonic-gate 221*0Sstevel@tonic-gate kmem_cache_t *squeue_cache; 222*0Sstevel@tonic-gate 223*0Sstevel@tonic-gate int squeue_intrdrain_ms = 20; 224*0Sstevel@tonic-gate int squeue_writerdrain_ms = 10; 225*0Sstevel@tonic-gate int squeue_workerdrain_ms = 10; 226*0Sstevel@tonic-gate int squeue_workerwait_ms = 10; 227*0Sstevel@tonic-gate 228*0Sstevel@tonic-gate /* The values above converted to ticks */ 229*0Sstevel@tonic-gate static int 
squeue_intrdrain_tick = 0; 230*0Sstevel@tonic-gate static int squeue_writerdrain_tick = 0; 231*0Sstevel@tonic-gate static int squeue_workerdrain_tick = 0; 232*0Sstevel@tonic-gate static int squeue_workerwait_tick = 0; 233*0Sstevel@tonic-gate 234*0Sstevel@tonic-gate /* 235*0Sstevel@tonic-gate * The minimum packet queued when worker thread doing the drain triggers 236*0Sstevel@tonic-gate * polling (if squeue allows it). The choice of 3 is arbitrary. You 237*0Sstevel@tonic-gate * definitely don't want it to be 1 since that will trigger polling 238*0Sstevel@tonic-gate * on very low loads as well (ssh seems to do be one such example 239*0Sstevel@tonic-gate * where packet flow was very low yet somehow 1 packet ended up getting 240*0Sstevel@tonic-gate * queued and worker thread fires every 10ms and blanking also gets 241*0Sstevel@tonic-gate * triggered. 242*0Sstevel@tonic-gate */ 243*0Sstevel@tonic-gate int squeue_worker_poll_min = 3; 244*0Sstevel@tonic-gate 245*0Sstevel@tonic-gate #if SQUEUE_PROFILE 246*0Sstevel@tonic-gate /* 247*0Sstevel@tonic-gate * Set to B_TRUE to enable profiling. 
248*0Sstevel@tonic-gate */ 249*0Sstevel@tonic-gate static int squeue_profile = B_FALSE; 250*0Sstevel@tonic-gate #define SQ_PROFILING(sqp) (squeue_profile && ((sqp)->sq_state & SQS_PROFILE)) 251*0Sstevel@tonic-gate 252*0Sstevel@tonic-gate #define SQSTAT(sqp, x) ((sqp)->sq_stats.x++) 253*0Sstevel@tonic-gate #define SQDELTA(sqp, x, d) ((sqp)->sq_stats.x += (d)) 254*0Sstevel@tonic-gate 255*0Sstevel@tonic-gate struct squeue_kstat { 256*0Sstevel@tonic-gate kstat_named_t sq_count; 257*0Sstevel@tonic-gate kstat_named_t sq_max_qlen; 258*0Sstevel@tonic-gate kstat_named_t sq_npackets_worker; 259*0Sstevel@tonic-gate kstat_named_t sq_npackets_intr; 260*0Sstevel@tonic-gate kstat_named_t sq_npackets_other; 261*0Sstevel@tonic-gate kstat_named_t sq_nqueued_intr; 262*0Sstevel@tonic-gate kstat_named_t sq_nqueued_other; 263*0Sstevel@tonic-gate kstat_named_t sq_ndrains_worker; 264*0Sstevel@tonic-gate kstat_named_t sq_ndrains_intr; 265*0Sstevel@tonic-gate kstat_named_t sq_ndrains_other; 266*0Sstevel@tonic-gate kstat_named_t sq_time_worker; 267*0Sstevel@tonic-gate kstat_named_t sq_time_intr; 268*0Sstevel@tonic-gate kstat_named_t sq_time_other; 269*0Sstevel@tonic-gate } squeue_kstat = { 270*0Sstevel@tonic-gate { "count", KSTAT_DATA_UINT64 }, 271*0Sstevel@tonic-gate { "max_qlen", KSTAT_DATA_UINT64 }, 272*0Sstevel@tonic-gate { "packets_worker", KSTAT_DATA_UINT64 }, 273*0Sstevel@tonic-gate { "packets_intr", KSTAT_DATA_UINT64 }, 274*0Sstevel@tonic-gate { "packets_other", KSTAT_DATA_UINT64 }, 275*0Sstevel@tonic-gate { "queued_intr", KSTAT_DATA_UINT64 }, 276*0Sstevel@tonic-gate { "queued_other", KSTAT_DATA_UINT64 }, 277*0Sstevel@tonic-gate { "ndrains_worker", KSTAT_DATA_UINT64 }, 278*0Sstevel@tonic-gate { "ndrains_intr", KSTAT_DATA_UINT64 }, 279*0Sstevel@tonic-gate { "ndrains_other", KSTAT_DATA_UINT64 }, 280*0Sstevel@tonic-gate { "time_worker", KSTAT_DATA_UINT64 }, 281*0Sstevel@tonic-gate { "time_intr", KSTAT_DATA_UINT64 }, 282*0Sstevel@tonic-gate { "time_other", KSTAT_DATA_UINT64 }, 
283*0Sstevel@tonic-gate }; 284*0Sstevel@tonic-gate #endif 285*0Sstevel@tonic-gate 286*0Sstevel@tonic-gate #define SQUEUE_WORKER_WAKEUP(sqp) { \ 287*0Sstevel@tonic-gate timeout_id_t tid = (sqp)->sq_tid; \ 288*0Sstevel@tonic-gate \ 289*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&(sqp)->sq_lock)); \ 290*0Sstevel@tonic-gate /* \ 291*0Sstevel@tonic-gate * Queue isn't being processed, so take \ 292*0Sstevel@tonic-gate * any post enqueue actions needed before leaving. \ 293*0Sstevel@tonic-gate */ \ 294*0Sstevel@tonic-gate if (tid != 0) { \ 295*0Sstevel@tonic-gate /* \ 296*0Sstevel@tonic-gate * Waiting for an enter() to process mblk(s). \ 297*0Sstevel@tonic-gate */ \ 298*0Sstevel@tonic-gate clock_t waited = lbolt - (sqp)->sq_awaken; \ 299*0Sstevel@tonic-gate \ 300*0Sstevel@tonic-gate if (TICK_TO_MSEC(waited) >= (sqp)->sq_wait) { \ 301*0Sstevel@tonic-gate /* \ 302*0Sstevel@tonic-gate * Times up and have a worker thread \ 303*0Sstevel@tonic-gate * waiting for work, so schedule it. \ 304*0Sstevel@tonic-gate */ \ 305*0Sstevel@tonic-gate (sqp)->sq_tid = 0; \ 306*0Sstevel@tonic-gate (sqp)->sq_awaken = lbolt; \ 307*0Sstevel@tonic-gate cv_signal(&(sqp)->sq_async); \ 308*0Sstevel@tonic-gate mutex_exit(&(sqp)->sq_lock); \ 309*0Sstevel@tonic-gate (void) untimeout(tid); \ 310*0Sstevel@tonic-gate return; \ 311*0Sstevel@tonic-gate } \ 312*0Sstevel@tonic-gate mutex_exit(&(sqp)->sq_lock); \ 313*0Sstevel@tonic-gate return; \ 314*0Sstevel@tonic-gate } else if ((sqp)->sq_state & SQS_TMO_PROG) { \ 315*0Sstevel@tonic-gate mutex_exit(&(sqp)->sq_lock); \ 316*0Sstevel@tonic-gate return; \ 317*0Sstevel@tonic-gate } else if ((sqp)->sq_wait != 0) { \ 318*0Sstevel@tonic-gate clock_t wait = (sqp)->sq_wait; \ 319*0Sstevel@tonic-gate /* \ 320*0Sstevel@tonic-gate * Wait up to sqp->sq_wait ms for an \ 321*0Sstevel@tonic-gate * enter() to process this queue. 
We \ 322*0Sstevel@tonic-gate * don't want to contend on timeout locks \ 323*0Sstevel@tonic-gate * with sq_lock held for performance reasons, \ 324*0Sstevel@tonic-gate * so drop the sq_lock before calling timeout \ 325*0Sstevel@tonic-gate * but we need to check if timeout is required \ 326*0Sstevel@tonic-gate * after re acquiring the sq_lock. Once \ 327*0Sstevel@tonic-gate * the sq_lock is dropped, someone else could \ 328*0Sstevel@tonic-gate * have processed the packet or the timeout could \ 329*0Sstevel@tonic-gate * have already fired. \ 330*0Sstevel@tonic-gate */ \ 331*0Sstevel@tonic-gate (sqp)->sq_state |= SQS_TMO_PROG; \ 332*0Sstevel@tonic-gate mutex_exit(&(sqp)->sq_lock); \ 333*0Sstevel@tonic-gate tid = timeout(squeue_fire, (sqp), wait); \ 334*0Sstevel@tonic-gate mutex_enter(&(sqp)->sq_lock); \ 335*0Sstevel@tonic-gate /* Check again if we still need the timeout */ \ 336*0Sstevel@tonic-gate if ((((sqp)->sq_state & (SQS_PROC|SQS_TMO_PROG)) == \ 337*0Sstevel@tonic-gate SQS_TMO_PROG) && ((sqp)->sq_tid == 0) && \ 338*0Sstevel@tonic-gate ((sqp)->sq_first != NULL)) { \ 339*0Sstevel@tonic-gate (sqp)->sq_state &= ~SQS_TMO_PROG; \ 340*0Sstevel@tonic-gate (sqp)->sq_awaken = lbolt; \ 341*0Sstevel@tonic-gate (sqp)->sq_tid = tid; \ 342*0Sstevel@tonic-gate mutex_exit(&(sqp)->sq_lock); \ 343*0Sstevel@tonic-gate return; \ 344*0Sstevel@tonic-gate } else { \ 345*0Sstevel@tonic-gate if ((sqp)->sq_state & SQS_TMO_PROG) { \ 346*0Sstevel@tonic-gate (sqp)->sq_state &= ~SQS_TMO_PROG; \ 347*0Sstevel@tonic-gate mutex_exit(&(sqp)->sq_lock); \ 348*0Sstevel@tonic-gate (void) untimeout(tid); \ 349*0Sstevel@tonic-gate } else { \ 350*0Sstevel@tonic-gate /* \ 351*0Sstevel@tonic-gate * The timer fired before we could \ 352*0Sstevel@tonic-gate * reacquire the sq_lock. squeue_fire \ 353*0Sstevel@tonic-gate * removes the SQS_TMO_PROG flag \ 354*0Sstevel@tonic-gate * and we don't need to do anything \ 355*0Sstevel@tonic-gate * else. 
\ 356*0Sstevel@tonic-gate */ \ 357*0Sstevel@tonic-gate mutex_exit(&(sqp)->sq_lock); \ 358*0Sstevel@tonic-gate } \ 359*0Sstevel@tonic-gate } \ 360*0Sstevel@tonic-gate } else { \ 361*0Sstevel@tonic-gate /* \ 362*0Sstevel@tonic-gate * Schedule the worker thread. \ 363*0Sstevel@tonic-gate */ \ 364*0Sstevel@tonic-gate (sqp)->sq_awaken = lbolt; \ 365*0Sstevel@tonic-gate cv_signal(&(sqp)->sq_async); \ 366*0Sstevel@tonic-gate mutex_exit(&(sqp)->sq_lock); \ 367*0Sstevel@tonic-gate } \ 368*0Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(&(sqp)->sq_lock)); \ 369*0Sstevel@tonic-gate } 370*0Sstevel@tonic-gate 371*0Sstevel@tonic-gate #define ENQUEUE_MP(sqp, mp, proc, arg) { \ 372*0Sstevel@tonic-gate /* \ 373*0Sstevel@tonic-gate * Enque our mblk. \ 374*0Sstevel@tonic-gate */ \ 375*0Sstevel@tonic-gate (mp)->b_queue = NULL; \ 376*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&(sqp)->sq_lock)); \ 377*0Sstevel@tonic-gate ASSERT((mp)->b_prev == NULL && (mp)->b_next == NULL); \ 378*0Sstevel@tonic-gate (mp)->b_queue = (queue_t *)(proc); \ 379*0Sstevel@tonic-gate (mp)->b_prev = (mblk_t *)(arg); \ 380*0Sstevel@tonic-gate \ 381*0Sstevel@tonic-gate if ((sqp)->sq_last != NULL) \ 382*0Sstevel@tonic-gate (sqp)->sq_last->b_next = (mp); \ 383*0Sstevel@tonic-gate else \ 384*0Sstevel@tonic-gate (sqp)->sq_first = (mp); \ 385*0Sstevel@tonic-gate (sqp)->sq_last = (mp); \ 386*0Sstevel@tonic-gate (sqp)->sq_count++; \ 387*0Sstevel@tonic-gate ASSERT((sqp)->sq_count > 0); \ 388*0Sstevel@tonic-gate DTRACE_PROBE2(squeue__enqueue, squeue_t *, sqp, \ 389*0Sstevel@tonic-gate mblk_t *, mp); \ 390*0Sstevel@tonic-gate } 391*0Sstevel@tonic-gate 392*0Sstevel@tonic-gate 393*0Sstevel@tonic-gate #define ENQUEUE_CHAIN(sqp, mp, tail, cnt) { \ 394*0Sstevel@tonic-gate /* \ 395*0Sstevel@tonic-gate * Enqueue our mblk chain. 
\ 396*0Sstevel@tonic-gate */ \ 397*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&(sqp)->sq_lock)); \ 398*0Sstevel@tonic-gate \ 399*0Sstevel@tonic-gate if ((sqp)->sq_last != NULL) \ 400*0Sstevel@tonic-gate (sqp)->sq_last->b_next = (mp); \ 401*0Sstevel@tonic-gate else \ 402*0Sstevel@tonic-gate (sqp)->sq_first = (mp); \ 403*0Sstevel@tonic-gate (sqp)->sq_last = (tail); \ 404*0Sstevel@tonic-gate (sqp)->sq_count += (cnt); \ 405*0Sstevel@tonic-gate ASSERT((sqp)->sq_count > 0); \ 406*0Sstevel@tonic-gate DTRACE_PROBE4(squeue__enqueuechain, squeue_t *, sqp, \ 407*0Sstevel@tonic-gate mblk_t *, mp, mblk_t *, tail, int, cnt); \ 408*0Sstevel@tonic-gate \ 409*0Sstevel@tonic-gate } 410*0Sstevel@tonic-gate 411*0Sstevel@tonic-gate #define SQS_POLLING_ON(sqp, rx_ring) { \ 412*0Sstevel@tonic-gate ASSERT(rx_ring != NULL); \ 413*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&(sqp)->sq_lock)); \ 414*0Sstevel@tonic-gate rx_ring->rr_blank(rx_ring->rr_handle, \ 415*0Sstevel@tonic-gate MIN((sqp->sq_avg_drain_time * sqp->sq_count), \ 416*0Sstevel@tonic-gate rx_ring->rr_max_blank_time), \ 417*0Sstevel@tonic-gate rx_ring->rr_max_pkt_cnt); \ 418*0Sstevel@tonic-gate rx_ring->rr_poll_state |= ILL_POLLING; \ 419*0Sstevel@tonic-gate rx_ring->rr_poll_time = lbolt; \ 420*0Sstevel@tonic-gate } 421*0Sstevel@tonic-gate 422*0Sstevel@tonic-gate 423*0Sstevel@tonic-gate #define SQS_POLLING_OFF(sqp, rx_ring) { \ 424*0Sstevel@tonic-gate ASSERT(rx_ring != NULL); \ 425*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&(sqp)->sq_lock)); \ 426*0Sstevel@tonic-gate rx_ring->rr_blank(rx_ring->rr_handle, \ 427*0Sstevel@tonic-gate rx_ring->rr_min_blank_time, \ 428*0Sstevel@tonic-gate rx_ring->rr_min_pkt_cnt); \ 429*0Sstevel@tonic-gate } 430*0Sstevel@tonic-gate 431*0Sstevel@tonic-gate void 432*0Sstevel@tonic-gate squeue_init(void) 433*0Sstevel@tonic-gate { 434*0Sstevel@tonic-gate squeue_cache = kmem_cache_create("squeue_cache", 435*0Sstevel@tonic-gate sizeof (squeue_t), 64, NULL, NULL, NULL, NULL, NULL, 0); 436*0Sstevel@tonic-gate 
437*0Sstevel@tonic-gate squeue_intrdrain_tick = MSEC_TO_TICK_ROUNDUP(squeue_intrdrain_ms); 438*0Sstevel@tonic-gate squeue_writerdrain_tick = MSEC_TO_TICK_ROUNDUP(squeue_writerdrain_ms); 439*0Sstevel@tonic-gate squeue_workerdrain_tick = MSEC_TO_TICK_ROUNDUP(squeue_workerdrain_ms); 440*0Sstevel@tonic-gate squeue_workerwait_tick = MSEC_TO_TICK_ROUNDUP(squeue_workerwait_ms); 441*0Sstevel@tonic-gate } 442*0Sstevel@tonic-gate 443*0Sstevel@tonic-gate /* ARGSUSED */ 444*0Sstevel@tonic-gate squeue_t * 445*0Sstevel@tonic-gate squeue_create(char *name, processorid_t bind, clock_t wait, pri_t pri) 446*0Sstevel@tonic-gate { 447*0Sstevel@tonic-gate squeue_t *sqp = kmem_cache_alloc(squeue_cache, KM_SLEEP); 448*0Sstevel@tonic-gate 449*0Sstevel@tonic-gate bzero(sqp, sizeof (squeue_t)); 450*0Sstevel@tonic-gate (void) strncpy(sqp->sq_name, name, SQ_NAMELEN + 1); 451*0Sstevel@tonic-gate sqp->sq_name[SQ_NAMELEN] = '\0'; 452*0Sstevel@tonic-gate 453*0Sstevel@tonic-gate sqp->sq_bind = bind; 454*0Sstevel@tonic-gate sqp->sq_wait = MSEC_TO_TICK(wait); 455*0Sstevel@tonic-gate sqp->sq_avg_drain_time = 456*0Sstevel@tonic-gate drv_hztousec(squeue_intrdrain_tick)/squeue_intrdrain_tick; 457*0Sstevel@tonic-gate 458*0Sstevel@tonic-gate #if SQUEUE_PROFILE 459*0Sstevel@tonic-gate if ((sqp->sq_kstat = kstat_create("ip", bind, name, 460*0Sstevel@tonic-gate "net", KSTAT_TYPE_NAMED, 461*0Sstevel@tonic-gate sizeof (squeue_kstat) / sizeof (kstat_named_t), 462*0Sstevel@tonic-gate KSTAT_FLAG_VIRTUAL)) != NULL) { 463*0Sstevel@tonic-gate sqp->sq_kstat->ks_lock = &squeue_kstat_lock; 464*0Sstevel@tonic-gate sqp->sq_kstat->ks_data = &squeue_kstat; 465*0Sstevel@tonic-gate sqp->sq_kstat->ks_update = squeue_kstat_update; 466*0Sstevel@tonic-gate sqp->sq_kstat->ks_private = sqp; 467*0Sstevel@tonic-gate kstat_install(sqp->sq_kstat); 468*0Sstevel@tonic-gate } 469*0Sstevel@tonic-gate #endif 470*0Sstevel@tonic-gate 471*0Sstevel@tonic-gate sqp->sq_worker = thread_create(NULL, 0, squeue_worker, 472*0Sstevel@tonic-gate sqp, 
0, &p0, TS_RUN, pri); 473*0Sstevel@tonic-gate 474*0Sstevel@tonic-gate return (sqp); 475*0Sstevel@tonic-gate } 476*0Sstevel@tonic-gate 477*0Sstevel@tonic-gate /* ARGSUSED */ 478*0Sstevel@tonic-gate void 479*0Sstevel@tonic-gate squeue_bind(squeue_t *sqp, processorid_t bind) 480*0Sstevel@tonic-gate { 481*0Sstevel@tonic-gate ASSERT(bind == -1); 482*0Sstevel@tonic-gate 483*0Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 484*0Sstevel@tonic-gate if (sqp->sq_state & SQS_BOUND) { 485*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 486*0Sstevel@tonic-gate return; 487*0Sstevel@tonic-gate } 488*0Sstevel@tonic-gate 489*0Sstevel@tonic-gate sqp->sq_state |= SQS_BOUND; 490*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 491*0Sstevel@tonic-gate 492*0Sstevel@tonic-gate thread_affinity_set(sqp->sq_worker, sqp->sq_bind); 493*0Sstevel@tonic-gate } 494*0Sstevel@tonic-gate 495*0Sstevel@tonic-gate void 496*0Sstevel@tonic-gate squeue_unbind(squeue_t *sqp) 497*0Sstevel@tonic-gate { 498*0Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 499*0Sstevel@tonic-gate if (!(sqp->sq_state & SQS_BOUND)) { 500*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 501*0Sstevel@tonic-gate return; 502*0Sstevel@tonic-gate } 503*0Sstevel@tonic-gate 504*0Sstevel@tonic-gate sqp->sq_state &= ~SQS_BOUND; 505*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 506*0Sstevel@tonic-gate 507*0Sstevel@tonic-gate thread_affinity_clear(sqp->sq_worker); 508*0Sstevel@tonic-gate } 509*0Sstevel@tonic-gate 510*0Sstevel@tonic-gate /* 511*0Sstevel@tonic-gate * squeue_enter() - enter squeue sqp with mblk mp (which can be 512*0Sstevel@tonic-gate * a chain), while tail points to the end and cnt in number of 513*0Sstevel@tonic-gate * mblks in the chain. 514*0Sstevel@tonic-gate * 515*0Sstevel@tonic-gate * For a chain of single packet (i.e. mp == tail), go through the 516*0Sstevel@tonic-gate * fast path if no one is processing the squeue and nothing is queued. 
517*0Sstevel@tonic-gate * 518*0Sstevel@tonic-gate * The proc and arg for each mblk is already stored in the mblk in 519*0Sstevel@tonic-gate * appropriate places. 520*0Sstevel@tonic-gate */ 521*0Sstevel@tonic-gate void 522*0Sstevel@tonic-gate squeue_enter_chain(squeue_t *sqp, mblk_t *mp, mblk_t *tail, 523*0Sstevel@tonic-gate uint32_t cnt, uint8_t tag) 524*0Sstevel@tonic-gate { 525*0Sstevel@tonic-gate int interrupt = servicing_interrupt(); 526*0Sstevel@tonic-gate void *arg; 527*0Sstevel@tonic-gate sqproc_t proc; 528*0Sstevel@tonic-gate #if SQUEUE_PROFILE 529*0Sstevel@tonic-gate hrtime_t start, delta; 530*0Sstevel@tonic-gate #endif 531*0Sstevel@tonic-gate 532*0Sstevel@tonic-gate ASSERT(sqp != NULL); 533*0Sstevel@tonic-gate ASSERT(mp != NULL); 534*0Sstevel@tonic-gate ASSERT(tail != NULL); 535*0Sstevel@tonic-gate ASSERT(cnt > 0); 536*0Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock)); 537*0Sstevel@tonic-gate 538*0Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 539*0Sstevel@tonic-gate if (!(sqp->sq_state & SQS_PROC)) { 540*0Sstevel@tonic-gate /* 541*0Sstevel@tonic-gate * See if anything is already queued. If we are the 542*0Sstevel@tonic-gate * first packet, do inline processing else queue the 543*0Sstevel@tonic-gate * packet and do the drain. 544*0Sstevel@tonic-gate */ 545*0Sstevel@tonic-gate sqp->sq_run = curthread; 546*0Sstevel@tonic-gate if (sqp->sq_first == NULL && cnt == 1) { 547*0Sstevel@tonic-gate /* 548*0Sstevel@tonic-gate * Fast-path, ok to process and nothing queued. 549*0Sstevel@tonic-gate */ 550*0Sstevel@tonic-gate sqp->sq_state |= (SQS_PROC|SQS_FAST); 551*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 552*0Sstevel@tonic-gate 553*0Sstevel@tonic-gate /* 554*0Sstevel@tonic-gate * We are the chain of 1 packet so 555*0Sstevel@tonic-gate * go through this fast path. 
556*0Sstevel@tonic-gate */ 557*0Sstevel@tonic-gate arg = mp->b_prev; 558*0Sstevel@tonic-gate mp->b_prev = NULL; 559*0Sstevel@tonic-gate proc = (sqproc_t)mp->b_queue; 560*0Sstevel@tonic-gate mp->b_queue = NULL; 561*0Sstevel@tonic-gate 562*0Sstevel@tonic-gate ASSERT(proc != NULL); 563*0Sstevel@tonic-gate ASSERT(arg != NULL); 564*0Sstevel@tonic-gate ASSERT(mp->b_next == NULL); 565*0Sstevel@tonic-gate 566*0Sstevel@tonic-gate #if SQUEUE_DEBUG 567*0Sstevel@tonic-gate sqp->sq_isintr = interrupt; 568*0Sstevel@tonic-gate sqp->sq_curmp = mp; 569*0Sstevel@tonic-gate sqp->sq_curproc = proc; 570*0Sstevel@tonic-gate sqp->sq_connp = arg; 571*0Sstevel@tonic-gate mp->b_tag = sqp->sq_tag = tag; 572*0Sstevel@tonic-gate #endif 573*0Sstevel@tonic-gate #if SQUEUE_PROFILE 574*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 575*0Sstevel@tonic-gate if (interrupt) 576*0Sstevel@tonic-gate SQSTAT(sqp, sq_npackets_intr); 577*0Sstevel@tonic-gate else 578*0Sstevel@tonic-gate SQSTAT(sqp, sq_npackets_other); 579*0Sstevel@tonic-gate start = gethrtime(); 580*0Sstevel@tonic-gate } 581*0Sstevel@tonic-gate #endif 582*0Sstevel@tonic-gate ((conn_t *)arg)->conn_on_sqp = B_TRUE; 583*0Sstevel@tonic-gate DTRACE_PROBE3(squeue__proc__start, squeue_t *, 584*0Sstevel@tonic-gate sqp, mblk_t *, mp, conn_t *, arg); 585*0Sstevel@tonic-gate (*proc)(arg, mp, sqp); 586*0Sstevel@tonic-gate DTRACE_PROBE2(squeue__proc__end, squeue_t *, 587*0Sstevel@tonic-gate sqp, conn_t *, arg); 588*0Sstevel@tonic-gate ((conn_t *)arg)->conn_on_sqp = B_FALSE; 589*0Sstevel@tonic-gate 590*0Sstevel@tonic-gate #if SQUEUE_PROFILE 591*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 592*0Sstevel@tonic-gate delta = gethrtime() - start; 593*0Sstevel@tonic-gate if (interrupt) 594*0Sstevel@tonic-gate SQDELTA(sqp, sq_time_intr, delta); 595*0Sstevel@tonic-gate else 596*0Sstevel@tonic-gate SQDELTA(sqp, sq_time_other, delta); 597*0Sstevel@tonic-gate } 598*0Sstevel@tonic-gate #endif 599*0Sstevel@tonic-gate #if SQUEUE_DEBUG 600*0Sstevel@tonic-gate sqp->sq_curmp 
= NULL; 601*0Sstevel@tonic-gate sqp->sq_curproc = NULL; 602*0Sstevel@tonic-gate sqp->sq_connp = NULL; 603*0Sstevel@tonic-gate sqp->sq_isintr = 0; 604*0Sstevel@tonic-gate #endif 605*0Sstevel@tonic-gate 606*0Sstevel@tonic-gate CONN_DEC_REF((conn_t *)arg); 607*0Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock)); 608*0Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 609*0Sstevel@tonic-gate sqp->sq_state &= ~(SQS_PROC|SQS_FAST); 610*0Sstevel@tonic-gate if (sqp->sq_first == NULL) { 611*0Sstevel@tonic-gate /* 612*0Sstevel@tonic-gate * We processed inline our packet and 613*0Sstevel@tonic-gate * nothing new has arrived. We are done. 614*0Sstevel@tonic-gate */ 615*0Sstevel@tonic-gate sqp->sq_run = NULL; 616*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 617*0Sstevel@tonic-gate return; 618*0Sstevel@tonic-gate } else if (sqp->sq_bind != CPU->cpu_id) { 619*0Sstevel@tonic-gate /* 620*0Sstevel@tonic-gate * If the current thread is not running 621*0Sstevel@tonic-gate * on the CPU to which this squeue is bound, 622*0Sstevel@tonic-gate * then don't allow it to drain. 
623*0Sstevel@tonic-gate */ 624*0Sstevel@tonic-gate sqp->sq_run = NULL; 625*0Sstevel@tonic-gate SQUEUE_WORKER_WAKEUP(sqp); 626*0Sstevel@tonic-gate return; 627*0Sstevel@tonic-gate } 628*0Sstevel@tonic-gate } else { 629*0Sstevel@tonic-gate ENQUEUE_CHAIN(sqp, mp, tail, cnt); 630*0Sstevel@tonic-gate #if SQUEUE_DEBUG 631*0Sstevel@tonic-gate mp->b_tag = tag; 632*0Sstevel@tonic-gate #endif 633*0Sstevel@tonic-gate #if SQUEUE_PROFILE 634*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 635*0Sstevel@tonic-gate if (servicing_interrupt()) 636*0Sstevel@tonic-gate SQSTAT(sqp, sq_nqueued_intr); 637*0Sstevel@tonic-gate else 638*0Sstevel@tonic-gate SQSTAT(sqp, sq_nqueued_other); 639*0Sstevel@tonic-gate if (sqp->sq_stats.sq_max_qlen < sqp->sq_count) 640*0Sstevel@tonic-gate sqp->sq_stats.sq_max_qlen = 641*0Sstevel@tonic-gate sqp->sq_count; 642*0Sstevel@tonic-gate } 643*0Sstevel@tonic-gate #endif 644*0Sstevel@tonic-gate } 645*0Sstevel@tonic-gate 646*0Sstevel@tonic-gate /* 647*0Sstevel@tonic-gate * We are here because either we couldn't do inline 648*0Sstevel@tonic-gate * processing (because something was already queued), 649*0Sstevel@tonic-gate * or we had a chanin of more than one packet, 650*0Sstevel@tonic-gate * or something else arrived after we were done with 651*0Sstevel@tonic-gate * inline processing. 
652*0Sstevel@tonic-gate */ 653*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&sqp->sq_lock)); 654*0Sstevel@tonic-gate ASSERT(sqp->sq_first != NULL); 655*0Sstevel@tonic-gate 656*0Sstevel@tonic-gate #if SQUEUE_PROFILE 657*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 658*0Sstevel@tonic-gate start = gethrtime(); 659*0Sstevel@tonic-gate } 660*0Sstevel@tonic-gate #endif 661*0Sstevel@tonic-gate #if SQUEUE_DEBUG 662*0Sstevel@tonic-gate sqp->sq_isintr = interrupt; 663*0Sstevel@tonic-gate #endif 664*0Sstevel@tonic-gate 665*0Sstevel@tonic-gate if (interrupt) { 666*0Sstevel@tonic-gate squeue_drain(sqp, SQS_ENTER, lbolt + 667*0Sstevel@tonic-gate squeue_intrdrain_tick); 668*0Sstevel@tonic-gate } else { 669*0Sstevel@tonic-gate squeue_drain(sqp, SQS_USER, lbolt + 670*0Sstevel@tonic-gate squeue_writerdrain_tick); 671*0Sstevel@tonic-gate } 672*0Sstevel@tonic-gate 673*0Sstevel@tonic-gate #if SQUEUE_PROFILE 674*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 675*0Sstevel@tonic-gate delta = gethrtime() - start; 676*0Sstevel@tonic-gate if (interrupt) 677*0Sstevel@tonic-gate SQDELTA(sqp, sq_time_intr, delta); 678*0Sstevel@tonic-gate else 679*0Sstevel@tonic-gate SQDELTA(sqp, sq_time_other, delta); 680*0Sstevel@tonic-gate } 681*0Sstevel@tonic-gate #endif 682*0Sstevel@tonic-gate #if SQUEUE_DEBUG 683*0Sstevel@tonic-gate sqp->sq_isintr = 0; 684*0Sstevel@tonic-gate #endif 685*0Sstevel@tonic-gate 686*0Sstevel@tonic-gate /* 687*0Sstevel@tonic-gate * If we didn't do a complete drain, the worker 688*0Sstevel@tonic-gate * thread was already signalled by squeue_drain. 689*0Sstevel@tonic-gate */ 690*0Sstevel@tonic-gate sqp->sq_run = NULL; 691*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 692*0Sstevel@tonic-gate return; 693*0Sstevel@tonic-gate } else { 694*0Sstevel@tonic-gate ASSERT(sqp->sq_run != NULL); 695*0Sstevel@tonic-gate /* 696*0Sstevel@tonic-gate * Queue is already being processed. Just enqueue 697*0Sstevel@tonic-gate * the packet and go away. 
698*0Sstevel@tonic-gate */ 699*0Sstevel@tonic-gate #if SQUEUE_DEBUG 700*0Sstevel@tonic-gate mp->b_tag = tag; 701*0Sstevel@tonic-gate #endif 702*0Sstevel@tonic-gate #if SQUEUE_PROFILE 703*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 704*0Sstevel@tonic-gate if (servicing_interrupt()) 705*0Sstevel@tonic-gate SQSTAT(sqp, sq_nqueued_intr); 706*0Sstevel@tonic-gate else 707*0Sstevel@tonic-gate SQSTAT(sqp, sq_nqueued_other); 708*0Sstevel@tonic-gate if (sqp->sq_stats.sq_max_qlen < sqp->sq_count) 709*0Sstevel@tonic-gate sqp->sq_stats.sq_max_qlen = sqp->sq_count; 710*0Sstevel@tonic-gate } 711*0Sstevel@tonic-gate #endif 712*0Sstevel@tonic-gate 713*0Sstevel@tonic-gate ENQUEUE_CHAIN(sqp, mp, tail, cnt); 714*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 715*0Sstevel@tonic-gate return; 716*0Sstevel@tonic-gate } 717*0Sstevel@tonic-gate } 718*0Sstevel@tonic-gate 719*0Sstevel@tonic-gate /* 720*0Sstevel@tonic-gate * squeue_enter() - enter squeue *sqp with mblk *mp with argument of *arg. 721*0Sstevel@tonic-gate */ 722*0Sstevel@tonic-gate void 723*0Sstevel@tonic-gate squeue_enter(squeue_t *sqp, mblk_t *mp, sqproc_t proc, void *arg, 724*0Sstevel@tonic-gate uint8_t tag) 725*0Sstevel@tonic-gate { 726*0Sstevel@tonic-gate int interrupt = servicing_interrupt(); 727*0Sstevel@tonic-gate #if SQUEUE_PROFILE 728*0Sstevel@tonic-gate hrtime_t start, delta; 729*0Sstevel@tonic-gate #endif 730*0Sstevel@tonic-gate #if SQUEUE_DEBUG 731*0Sstevel@tonic-gate conn_t *connp = (conn_t *)arg; 732*0Sstevel@tonic-gate ASSERT(connp->conn_tcp->tcp_connp == connp); 733*0Sstevel@tonic-gate #endif 734*0Sstevel@tonic-gate 735*0Sstevel@tonic-gate ASSERT(proc != NULL); 736*0Sstevel@tonic-gate ASSERT(sqp != NULL); 737*0Sstevel@tonic-gate ASSERT(mp != NULL); 738*0Sstevel@tonic-gate ASSERT(mp->b_next == NULL); 739*0Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock)); 740*0Sstevel@tonic-gate 741*0Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 742*0Sstevel@tonic-gate if (!(sqp->sq_state & SQS_PROC)) { 
743*0Sstevel@tonic-gate /* 744*0Sstevel@tonic-gate * See if anything is already queued. If we are the 745*0Sstevel@tonic-gate * first packet, do inline processing else queue the 746*0Sstevel@tonic-gate * packet and do the drain. 747*0Sstevel@tonic-gate */ 748*0Sstevel@tonic-gate sqp->sq_run = curthread; 749*0Sstevel@tonic-gate if (sqp->sq_first == NULL) { 750*0Sstevel@tonic-gate /* 751*0Sstevel@tonic-gate * Fast-path, ok to process and nothing queued. 752*0Sstevel@tonic-gate */ 753*0Sstevel@tonic-gate sqp->sq_state |= (SQS_PROC|SQS_FAST); 754*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 755*0Sstevel@tonic-gate 756*0Sstevel@tonic-gate #if SQUEUE_DEBUG 757*0Sstevel@tonic-gate sqp->sq_isintr = interrupt; 758*0Sstevel@tonic-gate sqp->sq_curmp = mp; 759*0Sstevel@tonic-gate sqp->sq_curproc = proc; 760*0Sstevel@tonic-gate sqp->sq_connp = connp; 761*0Sstevel@tonic-gate mp->b_tag = sqp->sq_tag = tag; 762*0Sstevel@tonic-gate #endif 763*0Sstevel@tonic-gate #if SQUEUE_PROFILE 764*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 765*0Sstevel@tonic-gate if (interrupt) 766*0Sstevel@tonic-gate SQSTAT(sqp, sq_npackets_intr); 767*0Sstevel@tonic-gate else 768*0Sstevel@tonic-gate SQSTAT(sqp, sq_npackets_other); 769*0Sstevel@tonic-gate start = gethrtime(); 770*0Sstevel@tonic-gate } 771*0Sstevel@tonic-gate #endif 772*0Sstevel@tonic-gate ((conn_t *)arg)->conn_on_sqp = B_TRUE; 773*0Sstevel@tonic-gate DTRACE_PROBE3(squeue__proc__start, squeue_t *, 774*0Sstevel@tonic-gate sqp, mblk_t *, mp, conn_t *, arg); 775*0Sstevel@tonic-gate (*proc)(arg, mp, sqp); 776*0Sstevel@tonic-gate DTRACE_PROBE2(squeue__proc__end, squeue_t *, 777*0Sstevel@tonic-gate sqp, conn_t *, arg); 778*0Sstevel@tonic-gate ((conn_t *)arg)->conn_on_sqp = B_FALSE; 779*0Sstevel@tonic-gate 780*0Sstevel@tonic-gate #if SQUEUE_PROFILE 781*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 782*0Sstevel@tonic-gate delta = gethrtime() - start; 783*0Sstevel@tonic-gate if (interrupt) 784*0Sstevel@tonic-gate SQDELTA(sqp, sq_time_intr, delta); 
785*0Sstevel@tonic-gate else 786*0Sstevel@tonic-gate SQDELTA(sqp, sq_time_other, delta); 787*0Sstevel@tonic-gate } 788*0Sstevel@tonic-gate #endif 789*0Sstevel@tonic-gate #if SQUEUE_DEBUG 790*0Sstevel@tonic-gate sqp->sq_curmp = NULL; 791*0Sstevel@tonic-gate sqp->sq_curproc = NULL; 792*0Sstevel@tonic-gate sqp->sq_connp = NULL; 793*0Sstevel@tonic-gate sqp->sq_isintr = 0; 794*0Sstevel@tonic-gate #endif 795*0Sstevel@tonic-gate 796*0Sstevel@tonic-gate CONN_DEC_REF((conn_t *)arg); 797*0Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock)); 798*0Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 799*0Sstevel@tonic-gate sqp->sq_state &= ~(SQS_PROC|SQS_FAST); 800*0Sstevel@tonic-gate if (sqp->sq_first == NULL) { 801*0Sstevel@tonic-gate /* 802*0Sstevel@tonic-gate * We processed inline our packet and 803*0Sstevel@tonic-gate * nothing new has arrived. We are done. 804*0Sstevel@tonic-gate */ 805*0Sstevel@tonic-gate sqp->sq_run = NULL; 806*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 807*0Sstevel@tonic-gate return; 808*0Sstevel@tonic-gate } else if (sqp->sq_bind != CPU->cpu_id) { 809*0Sstevel@tonic-gate /* 810*0Sstevel@tonic-gate * If the current thread is not running 811*0Sstevel@tonic-gate * on the CPU to which this squeue is bound, 812*0Sstevel@tonic-gate * then don't allow it to drain. 
813*0Sstevel@tonic-gate */ 814*0Sstevel@tonic-gate sqp->sq_run = NULL; 815*0Sstevel@tonic-gate SQUEUE_WORKER_WAKEUP(sqp); 816*0Sstevel@tonic-gate return; 817*0Sstevel@tonic-gate } 818*0Sstevel@tonic-gate } else { 819*0Sstevel@tonic-gate ENQUEUE_MP(sqp, mp, proc, arg); 820*0Sstevel@tonic-gate #if SQUEUE_DEBUG 821*0Sstevel@tonic-gate mp->b_tag = tag; 822*0Sstevel@tonic-gate #endif 823*0Sstevel@tonic-gate #if SQUEUE_PROFILE 824*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 825*0Sstevel@tonic-gate if (servicing_interrupt()) 826*0Sstevel@tonic-gate SQSTAT(sqp, sq_nqueued_intr); 827*0Sstevel@tonic-gate else 828*0Sstevel@tonic-gate SQSTAT(sqp, sq_nqueued_other); 829*0Sstevel@tonic-gate if (sqp->sq_stats.sq_max_qlen < sqp->sq_count) 830*0Sstevel@tonic-gate sqp->sq_stats.sq_max_qlen = 831*0Sstevel@tonic-gate sqp->sq_count; 832*0Sstevel@tonic-gate } 833*0Sstevel@tonic-gate #endif 834*0Sstevel@tonic-gate } 835*0Sstevel@tonic-gate 836*0Sstevel@tonic-gate /* 837*0Sstevel@tonic-gate * We are here because either we couldn't do inline 838*0Sstevel@tonic-gate * processing (because something was already queued) 839*0Sstevel@tonic-gate * or something else arrived after we were done with 840*0Sstevel@tonic-gate * inline processing. 
841*0Sstevel@tonic-gate */ 842*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&sqp->sq_lock)); 843*0Sstevel@tonic-gate ASSERT(sqp->sq_first != NULL); 844*0Sstevel@tonic-gate 845*0Sstevel@tonic-gate #if SQUEUE_PROFILE 846*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 847*0Sstevel@tonic-gate start = gethrtime(); 848*0Sstevel@tonic-gate } 849*0Sstevel@tonic-gate #endif 850*0Sstevel@tonic-gate #if SQUEUE_DEBUG 851*0Sstevel@tonic-gate sqp->sq_isintr = interrupt; 852*0Sstevel@tonic-gate #endif 853*0Sstevel@tonic-gate 854*0Sstevel@tonic-gate if (interrupt) { 855*0Sstevel@tonic-gate squeue_drain(sqp, SQS_ENTER, lbolt + 856*0Sstevel@tonic-gate squeue_intrdrain_tick); 857*0Sstevel@tonic-gate } else { 858*0Sstevel@tonic-gate squeue_drain(sqp, SQS_USER, lbolt + 859*0Sstevel@tonic-gate squeue_writerdrain_tick); 860*0Sstevel@tonic-gate } 861*0Sstevel@tonic-gate 862*0Sstevel@tonic-gate #if SQUEUE_PROFILE 863*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 864*0Sstevel@tonic-gate delta = gethrtime() - start; 865*0Sstevel@tonic-gate if (interrupt) 866*0Sstevel@tonic-gate SQDELTA(sqp, sq_time_intr, delta); 867*0Sstevel@tonic-gate else 868*0Sstevel@tonic-gate SQDELTA(sqp, sq_time_other, delta); 869*0Sstevel@tonic-gate } 870*0Sstevel@tonic-gate #endif 871*0Sstevel@tonic-gate #if SQUEUE_DEBUG 872*0Sstevel@tonic-gate sqp->sq_isintr = 0; 873*0Sstevel@tonic-gate #endif 874*0Sstevel@tonic-gate 875*0Sstevel@tonic-gate /* 876*0Sstevel@tonic-gate * If we didn't do a complete drain, the worker 877*0Sstevel@tonic-gate * thread was already signalled by squeue_drain. 878*0Sstevel@tonic-gate */ 879*0Sstevel@tonic-gate sqp->sq_run = NULL; 880*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 881*0Sstevel@tonic-gate return; 882*0Sstevel@tonic-gate } else { 883*0Sstevel@tonic-gate ASSERT(sqp->sq_run != NULL); 884*0Sstevel@tonic-gate /* 885*0Sstevel@tonic-gate * We let a thread processing a squeue reenter only 886*0Sstevel@tonic-gate * once. 
This helps the case of incoming connection 887*0Sstevel@tonic-gate * where a SYN-ACK-ACK that triggers the conn_ind 888*0Sstevel@tonic-gate * doesn't have to queue the packet if listener and 889*0Sstevel@tonic-gate * eager are on the same squeue. Also helps the 890*0Sstevel@tonic-gate * loopback connection where the two ends are bound 891*0Sstevel@tonic-gate * to the same squeue (which is typical on single 892*0Sstevel@tonic-gate * CPU machines). 893*0Sstevel@tonic-gate * We let the thread reenter only once for the fear 894*0Sstevel@tonic-gate * of stack getting blown with multiple traversal. 895*0Sstevel@tonic-gate */ 896*0Sstevel@tonic-gate if (!(sqp->sq_state & SQS_REENTER) && 897*0Sstevel@tonic-gate (sqp->sq_run == curthread) && 898*0Sstevel@tonic-gate (((conn_t *)arg)->conn_on_sqp == B_FALSE)) { 899*0Sstevel@tonic-gate sqp->sq_state |= SQS_REENTER; 900*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 901*0Sstevel@tonic-gate 902*0Sstevel@tonic-gate ((conn_t *)arg)->conn_on_sqp = B_TRUE; 903*0Sstevel@tonic-gate DTRACE_PROBE3(squeue__proc__start, squeue_t *, 904*0Sstevel@tonic-gate sqp, mblk_t *, mp, conn_t *, arg); 905*0Sstevel@tonic-gate (*proc)(arg, mp, sqp); 906*0Sstevel@tonic-gate DTRACE_PROBE2(squeue__proc__end, squeue_t *, 907*0Sstevel@tonic-gate sqp, conn_t *, arg); 908*0Sstevel@tonic-gate ((conn_t *)arg)->conn_on_sqp = B_FALSE; 909*0Sstevel@tonic-gate CONN_DEC_REF((conn_t *)arg); 910*0Sstevel@tonic-gate 911*0Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 912*0Sstevel@tonic-gate sqp->sq_state &= ~SQS_REENTER; 913*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 914*0Sstevel@tonic-gate return; 915*0Sstevel@tonic-gate } 916*0Sstevel@tonic-gate /* 917*0Sstevel@tonic-gate * Queue is already being processed. Just enqueue 918*0Sstevel@tonic-gate * the packet and go away. 
919*0Sstevel@tonic-gate */ 920*0Sstevel@tonic-gate #if SQUEUE_DEBUG 921*0Sstevel@tonic-gate mp->b_tag = tag; 922*0Sstevel@tonic-gate #endif 923*0Sstevel@tonic-gate #if SQUEUE_PROFILE 924*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 925*0Sstevel@tonic-gate if (servicing_interrupt()) 926*0Sstevel@tonic-gate SQSTAT(sqp, sq_nqueued_intr); 927*0Sstevel@tonic-gate else 928*0Sstevel@tonic-gate SQSTAT(sqp, sq_nqueued_other); 929*0Sstevel@tonic-gate if (sqp->sq_stats.sq_max_qlen < sqp->sq_count) 930*0Sstevel@tonic-gate sqp->sq_stats.sq_max_qlen = sqp->sq_count; 931*0Sstevel@tonic-gate } 932*0Sstevel@tonic-gate #endif 933*0Sstevel@tonic-gate 934*0Sstevel@tonic-gate ENQUEUE_MP(sqp, mp, proc, arg); 935*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 936*0Sstevel@tonic-gate return; 937*0Sstevel@tonic-gate } 938*0Sstevel@tonic-gate } 939*0Sstevel@tonic-gate 940*0Sstevel@tonic-gate void 941*0Sstevel@tonic-gate squeue_enter_nodrain(squeue_t *sqp, mblk_t *mp, sqproc_t proc, void *arg, 942*0Sstevel@tonic-gate uint8_t tag) 943*0Sstevel@tonic-gate { 944*0Sstevel@tonic-gate int interrupt = servicing_interrupt(); 945*0Sstevel@tonic-gate boolean_t being_processed; 946*0Sstevel@tonic-gate #if SQUEUE_DEBUG 947*0Sstevel@tonic-gate conn_t *connp = (conn_t *)arg; 948*0Sstevel@tonic-gate #endif 949*0Sstevel@tonic-gate #if SQUEUE_PROFILE 950*0Sstevel@tonic-gate hrtime_t start, delta; 951*0Sstevel@tonic-gate #endif 952*0Sstevel@tonic-gate 953*0Sstevel@tonic-gate ASSERT(proc != NULL); 954*0Sstevel@tonic-gate ASSERT(sqp != NULL); 955*0Sstevel@tonic-gate ASSERT(mp != NULL); 956*0Sstevel@tonic-gate ASSERT(mp->b_next == NULL); 957*0Sstevel@tonic-gate ASSERT(connp->conn_tcp->tcp_connp == connp); 958*0Sstevel@tonic-gate 959*0Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock)); 960*0Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 961*0Sstevel@tonic-gate 962*0Sstevel@tonic-gate being_processed = (sqp->sq_state & SQS_PROC); 963*0Sstevel@tonic-gate if (!being_processed && (sqp->sq_first == NULL)) { 
964*0Sstevel@tonic-gate /* 965*0Sstevel@tonic-gate * Fast-path, ok to process and nothing queued. 966*0Sstevel@tonic-gate */ 967*0Sstevel@tonic-gate sqp->sq_state |= (SQS_PROC|SQS_FAST); 968*0Sstevel@tonic-gate sqp->sq_run = curthread; 969*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 970*0Sstevel@tonic-gate 971*0Sstevel@tonic-gate #if SQUEUE_DEBUG 972*0Sstevel@tonic-gate sqp->sq_isintr = interrupt; 973*0Sstevel@tonic-gate sqp->sq_curmp = mp; 974*0Sstevel@tonic-gate sqp->sq_curproc = proc; 975*0Sstevel@tonic-gate sqp->sq_connp = connp; 976*0Sstevel@tonic-gate mp->b_tag = sqp->sq_tag = tag; 977*0Sstevel@tonic-gate #endif 978*0Sstevel@tonic-gate 979*0Sstevel@tonic-gate #if SQUEUE_PROFILE 980*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 981*0Sstevel@tonic-gate if (interrupt) 982*0Sstevel@tonic-gate SQSTAT(sqp, sq_npackets_intr); 983*0Sstevel@tonic-gate else 984*0Sstevel@tonic-gate SQSTAT(sqp, sq_npackets_other); 985*0Sstevel@tonic-gate start = gethrtime(); 986*0Sstevel@tonic-gate } 987*0Sstevel@tonic-gate #endif 988*0Sstevel@tonic-gate 989*0Sstevel@tonic-gate ((conn_t *)arg)->conn_on_sqp = B_TRUE; 990*0Sstevel@tonic-gate DTRACE_PROBE3(squeue__proc__start, squeue_t *, 991*0Sstevel@tonic-gate sqp, mblk_t *, mp, conn_t *, arg); 992*0Sstevel@tonic-gate (*proc)(arg, mp, sqp); 993*0Sstevel@tonic-gate DTRACE_PROBE2(squeue__proc__end, squeue_t *, 994*0Sstevel@tonic-gate sqp, conn_t *, arg); 995*0Sstevel@tonic-gate ((conn_t *)arg)->conn_on_sqp = B_FALSE; 996*0Sstevel@tonic-gate 997*0Sstevel@tonic-gate #if SQUEUE_DEBUG 998*0Sstevel@tonic-gate sqp->sq_curmp = NULL; 999*0Sstevel@tonic-gate sqp->sq_curproc = NULL; 1000*0Sstevel@tonic-gate sqp->sq_connp = NULL; 1001*0Sstevel@tonic-gate sqp->sq_isintr = 0; 1002*0Sstevel@tonic-gate #endif 1003*0Sstevel@tonic-gate #if SQUEUE_PROFILE 1004*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 1005*0Sstevel@tonic-gate delta = gethrtime() - start; 1006*0Sstevel@tonic-gate if (interrupt) 1007*0Sstevel@tonic-gate SQDELTA(sqp, sq_time_intr, delta); 
1008*0Sstevel@tonic-gate else 1009*0Sstevel@tonic-gate SQDELTA(sqp, sq_time_other, delta); 1010*0Sstevel@tonic-gate } 1011*0Sstevel@tonic-gate #endif 1012*0Sstevel@tonic-gate 1013*0Sstevel@tonic-gate CONN_DEC_REF((conn_t *)arg); 1014*0Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 1015*0Sstevel@tonic-gate sqp->sq_state &= ~(SQS_PROC|SQS_FAST); 1016*0Sstevel@tonic-gate sqp->sq_run = NULL; 1017*0Sstevel@tonic-gate if (sqp->sq_first == NULL) { 1018*0Sstevel@tonic-gate /* 1019*0Sstevel@tonic-gate * We processed inline our packet and 1020*0Sstevel@tonic-gate * nothing new has arrived. We are done. 1021*0Sstevel@tonic-gate */ 1022*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 1023*0Sstevel@tonic-gate } else { 1024*0Sstevel@tonic-gate SQUEUE_WORKER_WAKEUP(sqp); 1025*0Sstevel@tonic-gate } 1026*0Sstevel@tonic-gate return; 1027*0Sstevel@tonic-gate } else { 1028*0Sstevel@tonic-gate /* 1029*0Sstevel@tonic-gate * We let a thread processing a squeue reenter only 1030*0Sstevel@tonic-gate * once. This helps the case of incoming connection 1031*0Sstevel@tonic-gate * where a SYN-ACK-ACK that triggers the conn_ind 1032*0Sstevel@tonic-gate * doesn't have to queue the packet if listener and 1033*0Sstevel@tonic-gate * eager are on the same squeue. Also helps the 1034*0Sstevel@tonic-gate * loopback connection where the two ends are bound 1035*0Sstevel@tonic-gate * to the same squeue (which is typical on single 1036*0Sstevel@tonic-gate * CPU machines). 1037*0Sstevel@tonic-gate * We let the thread reenter only once for the fear 1038*0Sstevel@tonic-gate * of stack getting blown with multiple traversal. 
1039*0Sstevel@tonic-gate */ 1040*0Sstevel@tonic-gate if (being_processed && !(sqp->sq_state & SQS_REENTER) && 1041*0Sstevel@tonic-gate (sqp->sq_run == curthread) && 1042*0Sstevel@tonic-gate (((conn_t *)arg)->conn_on_sqp == B_FALSE)) { 1043*0Sstevel@tonic-gate sqp->sq_state |= SQS_REENTER; 1044*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 1045*0Sstevel@tonic-gate 1046*0Sstevel@tonic-gate ((conn_t *)arg)->conn_on_sqp = B_TRUE; 1047*0Sstevel@tonic-gate DTRACE_PROBE3(squeue__proc__start, squeue_t *, 1048*0Sstevel@tonic-gate sqp, mblk_t *, mp, conn_t *, arg); 1049*0Sstevel@tonic-gate (*proc)(arg, mp, sqp); 1050*0Sstevel@tonic-gate DTRACE_PROBE2(squeue__proc__end, squeue_t *, 1051*0Sstevel@tonic-gate sqp, conn_t *, arg); 1052*0Sstevel@tonic-gate ((conn_t *)arg)->conn_on_sqp = B_FALSE; 1053*0Sstevel@tonic-gate CONN_DEC_REF((conn_t *)arg); 1054*0Sstevel@tonic-gate 1055*0Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 1056*0Sstevel@tonic-gate sqp->sq_state &= ~SQS_REENTER; 1057*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 1058*0Sstevel@tonic-gate return; 1059*0Sstevel@tonic-gate } 1060*0Sstevel@tonic-gate 1061*0Sstevel@tonic-gate #if SQUEUE_DEBUG 1062*0Sstevel@tonic-gate mp->b_tag = tag; 1063*0Sstevel@tonic-gate #endif 1064*0Sstevel@tonic-gate #if SQUEUE_PROFILE 1065*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 1066*0Sstevel@tonic-gate if (servicing_interrupt()) 1067*0Sstevel@tonic-gate SQSTAT(sqp, sq_nqueued_intr); 1068*0Sstevel@tonic-gate else 1069*0Sstevel@tonic-gate SQSTAT(sqp, sq_nqueued_other); 1070*0Sstevel@tonic-gate if (sqp->sq_stats.sq_max_qlen < sqp->sq_count) 1071*0Sstevel@tonic-gate sqp->sq_stats.sq_max_qlen = sqp->sq_count; 1072*0Sstevel@tonic-gate } 1073*0Sstevel@tonic-gate #endif 1074*0Sstevel@tonic-gate ENQUEUE_MP(sqp, mp, proc, arg); 1075*0Sstevel@tonic-gate if (being_processed) { 1076*0Sstevel@tonic-gate /* 1077*0Sstevel@tonic-gate * Queue is already being processed. 1078*0Sstevel@tonic-gate * No need to do anything. 
1079*0Sstevel@tonic-gate */ 1080*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 1081*0Sstevel@tonic-gate return; 1082*0Sstevel@tonic-gate } 1083*0Sstevel@tonic-gate SQUEUE_WORKER_WAKEUP(sqp); 1084*0Sstevel@tonic-gate } 1085*0Sstevel@tonic-gate } 1086*0Sstevel@tonic-gate 1087*0Sstevel@tonic-gate /* 1088*0Sstevel@tonic-gate * squeue_fill() - fill squeue *sqp with mblk *mp with argument of *arg 1089*0Sstevel@tonic-gate * without processing the squeue. 1090*0Sstevel@tonic-gate */ 1091*0Sstevel@tonic-gate /* ARGSUSED */ 1092*0Sstevel@tonic-gate void 1093*0Sstevel@tonic-gate squeue_fill(squeue_t *sqp, mblk_t *mp, sqproc_t proc, void * arg, 1094*0Sstevel@tonic-gate uint8_t tag) 1095*0Sstevel@tonic-gate { 1096*0Sstevel@tonic-gate #if SQUEUE_DEBUG 1097*0Sstevel@tonic-gate conn_t *connp = (conn_t *)arg; 1098*0Sstevel@tonic-gate #endif 1099*0Sstevel@tonic-gate ASSERT(proc != NULL); 1100*0Sstevel@tonic-gate ASSERT(sqp != NULL); 1101*0Sstevel@tonic-gate ASSERT(mp != NULL); 1102*0Sstevel@tonic-gate ASSERT(mp->b_next == NULL); 1103*0Sstevel@tonic-gate ASSERT(connp->conn_tcp->tcp_connp == connp); 1104*0Sstevel@tonic-gate 1105*0Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock)); 1106*0Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 1107*0Sstevel@tonic-gate ENQUEUE_MP(sqp, mp, proc, arg); 1108*0Sstevel@tonic-gate #if SQUEUE_DEBUG 1109*0Sstevel@tonic-gate mp->b_tag = tag; 1110*0Sstevel@tonic-gate #endif 1111*0Sstevel@tonic-gate #if SQUEUE_PROFILE 1112*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 1113*0Sstevel@tonic-gate if (servicing_interrupt()) 1114*0Sstevel@tonic-gate SQSTAT(sqp, sq_nqueued_intr); 1115*0Sstevel@tonic-gate else 1116*0Sstevel@tonic-gate SQSTAT(sqp, sq_nqueued_other); 1117*0Sstevel@tonic-gate if (sqp->sq_stats.sq_max_qlen < sqp->sq_count) 1118*0Sstevel@tonic-gate sqp->sq_stats.sq_max_qlen = sqp->sq_count; 1119*0Sstevel@tonic-gate } 1120*0Sstevel@tonic-gate #endif 1121*0Sstevel@tonic-gate 1122*0Sstevel@tonic-gate /* 1123*0Sstevel@tonic-gate * If queue is already 
being processed. No need to do anything. 1124*0Sstevel@tonic-gate */ 1125*0Sstevel@tonic-gate if (sqp->sq_state & SQS_PROC) { 1126*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 1127*0Sstevel@tonic-gate return; 1128*0Sstevel@tonic-gate } 1129*0Sstevel@tonic-gate 1130*0Sstevel@tonic-gate SQUEUE_WORKER_WAKEUP(sqp); 1131*0Sstevel@tonic-gate } 1132*0Sstevel@tonic-gate 1133*0Sstevel@tonic-gate 1134*0Sstevel@tonic-gate /* 1135*0Sstevel@tonic-gate * PRIVATE FUNCTIONS 1136*0Sstevel@tonic-gate */ 1137*0Sstevel@tonic-gate 1138*0Sstevel@tonic-gate static void 1139*0Sstevel@tonic-gate squeue_fire(void *arg) 1140*0Sstevel@tonic-gate { 1141*0Sstevel@tonic-gate squeue_t *sqp = arg; 1142*0Sstevel@tonic-gate uint_t state; 1143*0Sstevel@tonic-gate 1144*0Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 1145*0Sstevel@tonic-gate 1146*0Sstevel@tonic-gate state = sqp->sq_state; 1147*0Sstevel@tonic-gate if (sqp->sq_tid == 0 && !(state & SQS_TMO_PROG)) { 1148*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 1149*0Sstevel@tonic-gate return; 1150*0Sstevel@tonic-gate } 1151*0Sstevel@tonic-gate 1152*0Sstevel@tonic-gate sqp->sq_tid = 0; 1153*0Sstevel@tonic-gate /* 1154*0Sstevel@tonic-gate * The timeout fired before we got a chance to set it. 1155*0Sstevel@tonic-gate * Process it anyway but remove the SQS_TMO_PROG so that 1156*0Sstevel@tonic-gate * the guy trying to set the timeout knows that it has 1157*0Sstevel@tonic-gate * already been processed. 
1158*0Sstevel@tonic-gate */ 1159*0Sstevel@tonic-gate if (state & SQS_TMO_PROG) 1160*0Sstevel@tonic-gate sqp->sq_state &= ~SQS_TMO_PROG; 1161*0Sstevel@tonic-gate 1162*0Sstevel@tonic-gate if (!(state & SQS_PROC)) { 1163*0Sstevel@tonic-gate sqp->sq_awaken = lbolt; 1164*0Sstevel@tonic-gate cv_signal(&sqp->sq_async); 1165*0Sstevel@tonic-gate } 1166*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 1167*0Sstevel@tonic-gate } 1168*0Sstevel@tonic-gate 1169*0Sstevel@tonic-gate static void 1170*0Sstevel@tonic-gate squeue_drain(squeue_t *sqp, uint_t proc_type, clock_t expire) 1171*0Sstevel@tonic-gate { 1172*0Sstevel@tonic-gate mblk_t *mp; 1173*0Sstevel@tonic-gate mblk_t *head; 1174*0Sstevel@tonic-gate sqproc_t proc; 1175*0Sstevel@tonic-gate conn_t *connp; 1176*0Sstevel@tonic-gate clock_t start = lbolt; 1177*0Sstevel@tonic-gate clock_t drain_time; 1178*0Sstevel@tonic-gate timeout_id_t tid; 1179*0Sstevel@tonic-gate uint_t cnt; 1180*0Sstevel@tonic-gate uint_t total_cnt = 0; 1181*0Sstevel@tonic-gate ill_rx_ring_t *sq_rx_ring = sqp->sq_rx_ring; 1182*0Sstevel@tonic-gate int interrupt = servicing_interrupt(); 1183*0Sstevel@tonic-gate boolean_t poll_on = B_FALSE; 1184*0Sstevel@tonic-gate 1185*0Sstevel@tonic-gate ASSERT(mutex_owned(&sqp->sq_lock)); 1186*0Sstevel@tonic-gate ASSERT(!(sqp->sq_state & SQS_PROC)); 1187*0Sstevel@tonic-gate 1188*0Sstevel@tonic-gate #if SQUEUE_PROFILE 1189*0Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 1190*0Sstevel@tonic-gate if (interrupt) 1191*0Sstevel@tonic-gate SQSTAT(sqp, sq_ndrains_intr); 1192*0Sstevel@tonic-gate else if (!(proc_type & SQS_WORKER)) 1193*0Sstevel@tonic-gate SQSTAT(sqp, sq_ndrains_other); 1194*0Sstevel@tonic-gate else 1195*0Sstevel@tonic-gate SQSTAT(sqp, sq_ndrains_worker); 1196*0Sstevel@tonic-gate } 1197*0Sstevel@tonic-gate #endif 1198*0Sstevel@tonic-gate 1199*0Sstevel@tonic-gate if ((tid = sqp->sq_tid) != 0) 1200*0Sstevel@tonic-gate sqp->sq_tid = 0; 1201*0Sstevel@tonic-gate 1202*0Sstevel@tonic-gate sqp->sq_state |= SQS_PROC | proc_type; 
1203*0Sstevel@tonic-gate head = sqp->sq_first; 1204*0Sstevel@tonic-gate sqp->sq_first = NULL; 1205*0Sstevel@tonic-gate sqp->sq_last = NULL; 1206*0Sstevel@tonic-gate cnt = sqp->sq_count; 1207*0Sstevel@tonic-gate 1208*0Sstevel@tonic-gate /* 1209*0Sstevel@tonic-gate * We have backlog built up. Switch to polling mode if the 1210*0Sstevel@tonic-gate * device underneath allows it. Need to do it only for 1211*0Sstevel@tonic-gate * drain by non-interrupt thread so interrupts don't 1212*0Sstevel@tonic-gate * come and disrupt us in between. If its a interrupt thread, 1213*0Sstevel@tonic-gate * no need because most devices will not issue another 1214*0Sstevel@tonic-gate * interrupt till this one returns. 1215*0Sstevel@tonic-gate */ 1216*0Sstevel@tonic-gate if ((sqp->sq_state & SQS_POLL_CAPAB) && !(proc_type & SQS_ENTER) && 1217*0Sstevel@tonic-gate (sqp->sq_count > squeue_worker_poll_min)) { 1218*0Sstevel@tonic-gate ASSERT(sq_rx_ring != NULL); 1219*0Sstevel@tonic-gate SQS_POLLING_ON(sqp, sq_rx_ring); 1220*0Sstevel@tonic-gate poll_on = B_TRUE; 1221*0Sstevel@tonic-gate } 1222*0Sstevel@tonic-gate 1223*0Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 1224*0Sstevel@tonic-gate 1225*0Sstevel@tonic-gate if (tid != 0) 1226*0Sstevel@tonic-gate (void) untimeout(tid); 1227*0Sstevel@tonic-gate again: 1228*0Sstevel@tonic-gate while ((mp = head) != NULL) { 1229*0Sstevel@tonic-gate head = mp->b_next; 1230*0Sstevel@tonic-gate mp->b_next = NULL; 1231*0Sstevel@tonic-gate 1232*0Sstevel@tonic-gate proc = (sqproc_t)mp->b_queue; 1233*0Sstevel@tonic-gate mp->b_queue = NULL; 1234*0Sstevel@tonic-gate connp = (conn_t *)mp->b_prev; 1235*0Sstevel@tonic-gate mp->b_prev = NULL; 1236*0Sstevel@tonic-gate #if SQUEUE_DEBUG 1237*0Sstevel@tonic-gate sqp->sq_curmp = mp; 1238*0Sstevel@tonic-gate sqp->sq_curproc = proc; 1239*0Sstevel@tonic-gate sqp->sq_connp = connp; 1240*0Sstevel@tonic-gate sqp->sq_tag = mp->b_tag; 1241*0Sstevel@tonic-gate #endif 1242*0Sstevel@tonic-gate 1243*0Sstevel@tonic-gate #if SQUEUE_PROFILE 
		/* Attribute this packet to the kind of thread draining it. */
		if (SQ_PROFILING(sqp)) {
			if (interrupt)
				SQSTAT(sqp, sq_npackets_intr);
			else if (!(proc_type & SQS_WORKER))
				SQSTAT(sqp, sq_npackets_other);
			else
				SQSTAT(sqp, sq_npackets_worker);
		}
#endif

		/*
		 * Mark the conn as being processed on an squeue for the
		 * duration of the handler, then drop the reference that
		 * was taken when the request was queued.
		 */
		connp->conn_on_sqp = B_TRUE;
		DTRACE_PROBE3(squeue__proc__start, squeue_t *,
		    sqp, mblk_t *, mp, conn_t *, connp);
		(*proc)(connp, mp, sqp);
		DTRACE_PROBE2(squeue__proc__end, squeue_t *,
		    sqp, conn_t *, connp);
		connp->conn_on_sqp = B_FALSE;
		CONN_DEC_REF(connp);
	}

#if SQUEUE_DEBUG
	/* Nothing is being processed any more; clear the debug snapshot. */
	sqp->sq_curmp = NULL;
	sqp->sq_curproc = NULL;
	sqp->sq_connp = NULL;
#endif

	/*
	 * Retake sq_lock to account for the packets just processed and to
	 * decide whether to keep draining.
	 */
	mutex_enter(&sqp->sq_lock);
	sqp->sq_count -= cnt;
	total_cnt += cnt;

	if (sqp->sq_first != NULL) {
		if (!expire || (lbolt < expire)) {
			/* More arrived and time not expired */
			head = sqp->sq_first;
			sqp->sq_first = NULL;
			sqp->sq_last = NULL;
			cnt = sqp->sq_count;
			mutex_exit(&sqp->sq_lock);
			goto again;
		}

		/*
		 * If we are not worker thread and we
		 * reached our time limit to do drain,
		 * signal the worker thread to pick
		 * up the work.
		 * If we were the worker thread, then
		 * we take a break to allow an interrupt
		 * or writer to pick up the load.
		 */
		if (proc_type != SQS_WORKER) {
			sqp->sq_awaken = lbolt;
			cv_signal(&sqp->sq_async);
		}
	}

	/*
	 * Try to see if we can get a time estimate to process a packet.
	 * Do it only in interrupt context since less chance of context
	 * switch or pinning etc. to get a better estimate.
	 * The estimate is an exponential moving average: 80% old, 20% new.
	 */
	if (interrupt && ((drain_time = (lbolt - start)) > 0))
		sqp->sq_avg_drain_time = ((80 * sqp->sq_avg_drain_time) +
		    (20 * (drv_hztousec(drain_time)/total_cnt)))/100;

	/* Release ownership of the squeue. */
	sqp->sq_state &= ~(SQS_PROC | proc_type);

	/*
	 * If polling was turned on, turn it off and reduce the default
	 * interrupt blank interval as well to bring new packets in faster
	 * (reduces the latency when there is no backlog).
	 */
	if (poll_on && (sqp->sq_state & SQS_POLL_CAPAB)) {
		ASSERT(sq_rx_ring != NULL);
		SQS_POLLING_OFF(sqp, sq_rx_ring);
	}
}

/*
 * squeue_worker: dedicated per-squeue worker thread.  Sleeps until there
 * are queued requests and no other thread owns the squeue (SQS_PROC),
 * then drains for at most squeue_workerdrain_tick ticks before yielding
 * so interrupt/writer threads can take over.
 */
static void
squeue_worker(squeue_t *sqp)
{
	kmutex_t *lock = &sqp->sq_lock;
	kcondvar_t *async = &sqp->sq_async;
	callb_cpr_t cprinfo;
#if SQUEUE_PROFILE
	hrtime_t start;
#endif

	/*
	 * Register with CPR so the thread parks safely across a suspend.
	 * NOTE(review): the "nca" tag looks inherited from NCA — confirm
	 * whether it should read "squeue".
	 */
	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, "nca");
	mutex_enter(lock);

	for (;;) {
		/*
		 * Wait until there is work and the squeue is not already
		 * being processed by some other thread.
		 */
		while (sqp->sq_first == NULL || (sqp->sq_state & SQS_PROC)) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
still_wait:
			cv_wait(async, lock);
			/* Re-wait while another thread still owns the squeue. */
			if (sqp->sq_state & SQS_PROC) {
				goto still_wait;
			}
			CALLB_CPR_SAFE_END(&cprinfo, lock);
		}

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			start = gethrtime();
		}
#endif

		ASSERT(squeue_workerdrain_tick != 0);
		/* squeue_drain() drops and reacquires sq_lock internally. */
		sqp->sq_run = curthread;
		squeue_drain(sqp, SQS_WORKER, lbolt + squeue_workerdrain_tick);
		sqp->sq_run = NULL;

		if (sqp->sq_first != NULL) {
			/*
			 * Doing too much processing by worker thread
			 * in presence of interrupts can be suboptimal.
			 * Instead, once a drain is done by worker thread
			 * for squeue_workerdrain_tick (the reason we are
			 * here), we force wait for squeue_workerwait_tick
			 * before doing more processing even if sq_wait is
			 * set to 0.
			 *
			 * This can be counterproductive for performance
			 * if worker thread is the only means to process
			 * the packets (interrupts or writers are not
			 * allowed inside the squeue).
			 */
			if (sqp->sq_tid == 0 &&
			    !(sqp->sq_state & SQS_TMO_PROG)) {
				timeout_id_t tid;

				/*
				 * SQS_TMO_PROG marks a timeout in progress
				 * so no one else arms a second one while
				 * sq_lock is dropped around timeout().
				 */
				sqp->sq_state |= SQS_TMO_PROG;
				mutex_exit(&sqp->sq_lock);
				tid = timeout(squeue_fire, sqp,
				    squeue_workerwait_tick);
				mutex_enter(&sqp->sq_lock);
				/*
				 * Check again if we still need
				 * the timeout
				 */
				if (((sqp->sq_state & (SQS_TMO_PROG|SQS_PROC))
				    == SQS_TMO_PROG) && (sqp->sq_tid == 0) &&
				    (sqp->sq_first != NULL)) {
					sqp->sq_state &= ~SQS_TMO_PROG;
					sqp->sq_awaken = lbolt;
					sqp->sq_tid = tid;
				} else if (sqp->sq_state & SQS_TMO_PROG) {
					/* timeout not needed */
					sqp->sq_state &= ~SQS_TMO_PROG;
					mutex_exit(&(sqp)->sq_lock);
					(void) untimeout(tid);
					mutex_enter(&sqp->sq_lock);
				}
			}
			/* Take the forced break before draining again. */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(async, lock);
			CALLB_CPR_SAFE_END(&cprinfo, lock);
		}

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			SQDELTA(sqp, sq_time_worker, gethrtime() - start);
		}
#endif
	}
}

#if SQUEUE_PROFILE
/*
 * kstat update callback: snapshot this squeue's counters into the global
 * squeue_kstat structure.  Read-only; writes are rejected with EACCES.
 */
static int
squeue_kstat_update(kstat_t *ksp, int rw)
{
	struct squeue_kstat *sqsp = &squeue_kstat;
	squeue_t *sqp = ksp->ks_private;

	if (rw == KSTAT_WRITE)
		return (EACCES);

#if SQUEUE_DEBUG
	sqsp->sq_count.value.ui64 = sqp->sq_count;
	sqsp->sq_max_qlen.value.ui64 = sqp->sq_stats.sq_max_qlen;
#endif
	sqsp->sq_npackets_worker.value.ui64 = sqp->sq_stats.sq_npackets_worker;
	sqsp->sq_npackets_intr.value.ui64 = sqp->sq_stats.sq_npackets_intr;
	sqsp->sq_npackets_other.value.ui64 = sqp->sq_stats.sq_npackets_other;
	sqsp->sq_nqueued_intr.value.ui64 = sqp->sq_stats.sq_nqueued_intr;
	sqsp->sq_nqueued_other.value.ui64 = sqp->sq_stats.sq_nqueued_other;
	sqsp->sq_ndrains_worker.value.ui64 = sqp->sq_stats.sq_ndrains_worker;
	sqsp->sq_ndrains_intr.value.ui64 = sqp->sq_stats.sq_ndrains_intr;
	sqsp->sq_ndrains_other.value.ui64 = sqp->sq_stats.sq_ndrains_other;
	sqsp->sq_time_worker.value.ui64 = sqp->sq_stats.sq_time_worker;
	sqsp->sq_time_intr.value.ui64 = sqp->sq_stats.sq_time_intr;
	sqsp->sq_time_other.value.ui64 = sqp->sq_stats.sq_time_other;
	return (0);
}
#endif

/*
 * Turn on per-squeue profiling (SQS_PROFILE) under sq_lock.
 */
void
squeue_profile_enable(squeue_t *sqp)
{
	mutex_enter(&sqp->sq_lock);
	sqp->sq_state |= SQS_PROFILE;
	mutex_exit(&sqp->sq_lock);
}

/*
 * Turn off per-squeue profiling under sq_lock.
 */
void
squeue_profile_disable(squeue_t *sqp)
{
	mutex_enter(&sqp->sq_lock);
	sqp->sq_state &= ~SQS_PROFILE;
	mutex_exit(&sqp->sq_lock);
}

/*
 * Zero this squeue's accumulated statistics.  No-op unless compiled with
 * SQUEUE_PROFILE.
 * NOTE(review): unlike enable/disable this does not take sq_lock while
 * clearing sq_stats — presumably racing with counter updates is considered
 * harmless here; confirm before relying on exact counts across a reset.
 */
void
squeue_profile_reset(squeue_t *sqp)
{
#if SQUEUE_PROFILE
	bzero(&sqp->sq_stats, sizeof (sqstat_t));
#endif
}

/*
 * Globally enable squeue profiling (all squeues consult squeue_profile).
 */
void
squeue_profile_start(void)
{
#if SQUEUE_PROFILE
	squeue_profile = B_TRUE;
#endif
}

/*
 * Globally disable squeue profiling.
 */
void
squeue_profile_stop(void)
{
#if SQUEUE_PROFILE
	squeue_profile = B_FALSE;
#endif
}
/*
 * Return the address of the consumer-private slot 'p' in this squeue's
 * sq_private[] array.  'p' must be below SQPRIVATE_MAX (DEBUG-asserted);
 * the caller owns the contents of the slot.
 */
uintptr_t *
squeue_getprivate(squeue_t *sqp, sqprivate_t p)
{
	ASSERT(p < SQPRIVATE_MAX);

	return (&sqp->sq_private[p]);
}

/*
 * Return the CPU id the squeue's worker thread is bound to (sq_bind).
 */
processorid_t
squeue_binding(squeue_t *sqp)
{
	return (sqp->sq_bind);
}