10Sstevel@tonic-gate /* 20Sstevel@tonic-gate * CDDL HEADER START 30Sstevel@tonic-gate * 40Sstevel@tonic-gate * The contents of this file are subject to the terms of the 50Sstevel@tonic-gate * Common Development and Distribution License, Version 1.0 only 60Sstevel@tonic-gate * (the "License"). You may not use this file except in compliance 70Sstevel@tonic-gate * with the License. 80Sstevel@tonic-gate * 90Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 100Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 110Sstevel@tonic-gate * See the License for the specific language governing permissions 120Sstevel@tonic-gate * and limitations under the License. 130Sstevel@tonic-gate * 140Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 150Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 160Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 170Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 180Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 190Sstevel@tonic-gate * 200Sstevel@tonic-gate * CDDL HEADER END 210Sstevel@tonic-gate */ 220Sstevel@tonic-gate /* 230Sstevel@tonic-gate * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 240Sstevel@tonic-gate * Use is subject to license terms. 250Sstevel@tonic-gate */ 260Sstevel@tonic-gate 270Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 280Sstevel@tonic-gate 290Sstevel@tonic-gate /* 300Sstevel@tonic-gate * Squeues - TCP/IP serialization mechanism. 310Sstevel@tonic-gate * 320Sstevel@tonic-gate * This is a general purpose high-performance serialization mechanism. 
It is 330Sstevel@tonic-gate * similar to a taskq with a single worker thread, the difference is that it 340Sstevel@tonic-gate * does not imply a context switch - the thread placing a request may actually 350Sstevel@tonic-gate * process it. It is also biased for processing requests in interrupt context. 360Sstevel@tonic-gate * 370Sstevel@tonic-gate * Each squeue has a worker thread which may optionally be bound to a CPU. 380Sstevel@tonic-gate * 390Sstevel@tonic-gate * Only one thread may process requests from a given squeue at any time. This is 400Sstevel@tonic-gate * called "entering" squeue. 410Sstevel@tonic-gate * 420Sstevel@tonic-gate * Each dispatched request is processed either by 430Sstevel@tonic-gate * 440Sstevel@tonic-gate * a) Dispatching thread or 450Sstevel@tonic-gate * b) Some other thread that is currently processing squeue at the time of 460Sstevel@tonic-gate * request or 470Sstevel@tonic-gate * c) worker thread. 480Sstevel@tonic-gate * 490Sstevel@tonic-gate * INTERFACES: 500Sstevel@tonic-gate * 510Sstevel@tonic-gate * squeue_t *squeue_create(name, bind, wait, pri) 520Sstevel@tonic-gate * 530Sstevel@tonic-gate * name: symbolic name for squeue. 540Sstevel@tonic-gate * wait: time to wait before waiking the worker thread after queueing 550Sstevel@tonic-gate * request. 560Sstevel@tonic-gate * bind: preferred CPU binding for the worker thread. 570Sstevel@tonic-gate * pri: thread priority for the worker thread. 580Sstevel@tonic-gate * 590Sstevel@tonic-gate * This function never fails and may sleep. It returns a transparent pointer 600Sstevel@tonic-gate * to the squeue_t structure that is passed to all other squeue operations. 610Sstevel@tonic-gate * 620Sstevel@tonic-gate * void squeue_bind(sqp, bind) 630Sstevel@tonic-gate * 640Sstevel@tonic-gate * Bind squeue worker thread to a CPU specified by the 'bind' argument. The 650Sstevel@tonic-gate * 'bind' value of -1 binds to the preferred thread specified for 660Sstevel@tonic-gate * squeue_create. 
670Sstevel@tonic-gate  * 680Sstevel@tonic-gate  * NOTE: Any value of 'bind' other than -1 is not supported currently, but the 690Sstevel@tonic-gate  * API is present - in the future it may be useful to specify different 700Sstevel@tonic-gate  * binding. 710Sstevel@tonic-gate  * 720Sstevel@tonic-gate  * void squeue_unbind(sqp) 730Sstevel@tonic-gate  * 740Sstevel@tonic-gate  * Unbind the worker thread from its preferred CPU. 750Sstevel@tonic-gate  * 760Sstevel@tonic-gate  * void squeue_enter(*sqp, *mp, proc, arg, tag) 770Sstevel@tonic-gate  * 780Sstevel@tonic-gate  * Post a single request for processing. Each request consists of mblock 'mp', 790Sstevel@tonic-gate  * function 'proc' to execute and an argument 'arg' to pass to this 800Sstevel@tonic-gate  * function. The function is called as (*proc)(arg, mp, sqp); The tag is an 810Sstevel@tonic-gate  * arbitrary number from 0 to 255 which will be stored in mp to track exact 820Sstevel@tonic-gate  * caller of squeue_enter. The combination of function name and the tag should 830Sstevel@tonic-gate  * provide enough information to identify the caller. 840Sstevel@tonic-gate  * 850Sstevel@tonic-gate  * If no one is processing the squeue, squeue_enter() will call the function 860Sstevel@tonic-gate  * immediately. Otherwise it will add the request to the queue for later 870Sstevel@tonic-gate  * processing. Once the function is executed, the thread may continue 880Sstevel@tonic-gate  * executing all other requests pending on the queue. 890Sstevel@tonic-gate  * 900Sstevel@tonic-gate  * NOTE: The tagging information is only used when SQUEUE_DEBUG is set to 1. 910Sstevel@tonic-gate  * NOTE: The argument can be conn_t only. Ideally we'd like to have generic 920Sstevel@tonic-gate  * argument, but we want to drop connection reference count here - this 930Sstevel@tonic-gate  * improves tail-call optimizations. 940Sstevel@tonic-gate  * XXX: The arg should have type conn_t. 
950Sstevel@tonic-gate * 960Sstevel@tonic-gate * void squeue_enter_nodrain(*sqp, *mp, proc, arg, tag) 970Sstevel@tonic-gate * 980Sstevel@tonic-gate * Same as squeue_enter(), but the entering thread will only try to execute a 990Sstevel@tonic-gate * single request. It will not continue executing any pending requests. 1000Sstevel@tonic-gate * 1010Sstevel@tonic-gate * void squeue_fill(*sqp, *mp, proc, arg, tag) 1020Sstevel@tonic-gate * 1030Sstevel@tonic-gate * Just place the request on the queue without trying to execute it. Arrange 1040Sstevel@tonic-gate * for the worker thread to process the request. 1050Sstevel@tonic-gate * 1060Sstevel@tonic-gate * void squeue_profile_enable(sqp) 1070Sstevel@tonic-gate * void squeue_profile_disable(sqp) 1080Sstevel@tonic-gate * 1090Sstevel@tonic-gate * Enable or disable profiling for specified 'sqp'. Profiling is only 1100Sstevel@tonic-gate * available when SQUEUE_PROFILE is set. 1110Sstevel@tonic-gate * 1120Sstevel@tonic-gate * void squeue_profile_reset(sqp) 1130Sstevel@tonic-gate * 1140Sstevel@tonic-gate * Reset all profiling information to zero. Profiling is only 1150Sstevel@tonic-gate * available when SQUEUE_PROFILE is set. 1160Sstevel@tonic-gate * 1170Sstevel@tonic-gate * void squeue_profile_start() 1180Sstevel@tonic-gate * void squeue_profile_stop() 1190Sstevel@tonic-gate * 1200Sstevel@tonic-gate * Globally enable or disabled profiling for all squeues. 1210Sstevel@tonic-gate * 1220Sstevel@tonic-gate * uintptr_t *squeue_getprivate(sqp, p) 1230Sstevel@tonic-gate * 1240Sstevel@tonic-gate * Each squeue keeps small amount of private data space available for various 1250Sstevel@tonic-gate * consumers. Current consumers include TCP and NCA. Other consumers need to 1260Sstevel@tonic-gate * add their private tag to the sqprivate_t enum. The private information is 1270Sstevel@tonic-gate * limited to an uintptr_t value. The squeue has no knowledge of its content 1280Sstevel@tonic-gate * and does not manage it in any way. 
1290Sstevel@tonic-gate  * 1300Sstevel@tonic-gate  * The typical use may be a breakdown of data structures per CPU (since 1310Sstevel@tonic-gate  * squeues are usually per CPU). See NCA for examples of use. 1320Sstevel@tonic-gate  * Currently 'p' may have one legal value SQPRIVATE_TCP. 1330Sstevel@tonic-gate  * 1340Sstevel@tonic-gate  * processorid_t squeue_binding(sqp) 1350Sstevel@tonic-gate  * 1360Sstevel@tonic-gate  * Returns the CPU binding for a given squeue. 1370Sstevel@tonic-gate  * 1380Sstevel@tonic-gate  * TUNABLES: 1390Sstevel@tonic-gate  * 1400Sstevel@tonic-gate  * squeue_intrdrain_ms: Maximum time in ms interrupts spend draining any 1410Sstevel@tonic-gate  * squeue. Note that this is an approximation - squeues have no control on the 1420Sstevel@tonic-gate  * time it takes to process each request. This limit is only checked 1430Sstevel@tonic-gate  * between processing individual messages. 1440Sstevel@tonic-gate  * Default: 20 ms. 1450Sstevel@tonic-gate  * 1460Sstevel@tonic-gate  * squeue_writerdrain_ms: Maximum time in ms non-interrupts spend draining any 1470Sstevel@tonic-gate  * squeue. Note that this is an approximation - squeues have no control on the 1480Sstevel@tonic-gate  * time it takes to process each request. This limit is only checked 1490Sstevel@tonic-gate  * between processing individual messages. 1500Sstevel@tonic-gate  * Default: 10 ms. 1510Sstevel@tonic-gate  * 1520Sstevel@tonic-gate  * squeue_workerdrain_ms: Maximum time in ms worker thread spends draining any 1530Sstevel@tonic-gate  * squeue. Note that this is an approximation - squeues have no control on the 1540Sstevel@tonic-gate  * time it takes to process each request. This limit is only checked 1550Sstevel@tonic-gate  * between processing individual messages. 1560Sstevel@tonic-gate  * Default: 10 ms. 1570Sstevel@tonic-gate  * 1580Sstevel@tonic-gate  * squeue_workerwait_ms: When worker thread is interrupted because workerdrain 1590Sstevel@tonic-gate  * expired, how much time to wait before waking worker thread again. 
1600Sstevel@tonic-gate * Default: 10 ms. 1610Sstevel@tonic-gate * 1620Sstevel@tonic-gate * DEFINES: 1630Sstevel@tonic-gate * 1640Sstevel@tonic-gate * SQUEUE_DEBUG: If defined as 1, special code is compiled in which records 1650Sstevel@tonic-gate * additional information aiding debugging is recorded in squeue. 1660Sstevel@tonic-gate * 1670Sstevel@tonic-gate * SQUEUE_PROFILE: If defined as 1, special code is compiled in which collects 1680Sstevel@tonic-gate * various squeue statistics and exports them as kstats. 1690Sstevel@tonic-gate * 1700Sstevel@tonic-gate * Ideally we would like both SQUEUE_DEBUG and SQUEUE_PROFILE to be always set, 1710Sstevel@tonic-gate * but it affects performance, so they are enabled on DEBUG kernels and disabled 1720Sstevel@tonic-gate * on non-DEBUG by default. 1730Sstevel@tonic-gate */ 1740Sstevel@tonic-gate 1750Sstevel@tonic-gate #include <sys/types.h> 1760Sstevel@tonic-gate #include <sys/cmn_err.h> 1770Sstevel@tonic-gate #include <sys/debug.h> 1780Sstevel@tonic-gate #include <sys/kmem.h> 1790Sstevel@tonic-gate #include <sys/cpuvar.h> 1800Sstevel@tonic-gate #include <sys/condvar_impl.h> 1810Sstevel@tonic-gate #include <sys/systm.h> 1820Sstevel@tonic-gate #include <sys/callb.h> 1830Sstevel@tonic-gate #include <sys/sdt.h> 1840Sstevel@tonic-gate #include <sys/ddi.h> 1850Sstevel@tonic-gate 1860Sstevel@tonic-gate #include <inet/ipclassifier.h> 1870Sstevel@tonic-gate 1880Sstevel@tonic-gate /* 1890Sstevel@tonic-gate * State flags. 1900Sstevel@tonic-gate * Note: The MDB IP module depends on the values of these flags. 
 */
/* State flags for sq_state (see note above: MDB depends on these values). */
#define	SQS_PROC	0x0001	/* being processed */
#define	SQS_WORKER	0x0002	/* worker thread */
#define	SQS_ENTER	0x0004	/* enter thread */
#define	SQS_FAST	0x0008	/* enter-fast thread */
#define	SQS_USER	0x0010	/* A non interrupt user */
#define	SQS_BOUND	0x0020	/* Worker thread is bound */
#define	SQS_PROFILE	0x0040	/* Enable profiling */
#define	SQS_REENTER	0x0080	/* Re entered thread */
#define	SQS_TMO_PROG	0x0100	/* Timeout is being set */

/*
 * Debugging and profiling support is compiled in on DEBUG kernels only;
 * both affect performance (see the DEFINES section of the block comment).
 */
#ifdef DEBUG
#define	SQUEUE_DEBUG 1
#define	SQUEUE_PROFILE 1
#else
#define	SQUEUE_DEBUG 0
#define	SQUEUE_PROFILE 0
#endif

#include <sys/squeue_impl.h>

static void squeue_fire(void *);
static void squeue_drain(squeue_t *, uint_t, hrtime_t);
static void squeue_worker(squeue_t *sqp);

#if SQUEUE_PROFILE
static kmutex_t squeue_kstat_lock;
static int  squeue_kstat_update(kstat_t *, int);
#endif

kmem_cache_t *squeue_cache;

/* Conversion factor from milliseconds to nanoseconds. */
#define	SQUEUE_MSEC_TO_NSEC 1000000

/* Tunables; see the TUNABLES section of the block comment above. */
int squeue_intrdrain_ms = 20;
int squeue_writerdrain_ms = 10;
int squeue_workerdrain_ms = 10;
int squeue_workerwait_ms = 10;

/* The values above converted to ticks or nano seconds in squeue_init(). */
static int squeue_intrdrain_ns = 0;
static int squeue_writerdrain_ns = 0;
static int squeue_workerdrain_ns = 0;
static int squeue_workerwait_tick = 0;

/*
 * The minimum packet queued when worker thread doing the drain triggers
 * polling (if squeue allows it). The choice of 3 is arbitrary. You
 * definitely don't want it to be 1 since that will trigger polling
 * on very low loads as well (ssh seems to be one such example
 * where packet flow was very low yet somehow 1 packet ended up getting
 * queued and worker thread fires every 10ms and blanking also gets
 * triggered).
 */
int squeue_worker_poll_min = 3;

#if SQUEUE_PROFILE
/*
 * Set to B_TRUE to enable profiling.
 */
static int squeue_profile = B_FALSE;
#define	SQ_PROFILING(sqp) (squeue_profile && ((sqp)->sq_state & SQS_PROFILE))

/* Bump / add-delta to a per-squeue statistic (profiling builds only). */
#define	SQSTAT(sqp, x) ((sqp)->sq_stats.x++)
#define	SQDELTA(sqp, x, d) ((sqp)->sq_stats.x += (d))

/*
 * Template for the per-squeue kstat; installed virtual in squeue_create()
 * and filled in by squeue_kstat_update().
 */
struct squeue_kstat {
	kstat_named_t	sq_count;
	kstat_named_t	sq_max_qlen;
	kstat_named_t	sq_npackets_worker;
	kstat_named_t	sq_npackets_intr;
	kstat_named_t	sq_npackets_other;
	kstat_named_t	sq_nqueued_intr;
	kstat_named_t	sq_nqueued_other;
	kstat_named_t	sq_ndrains_worker;
	kstat_named_t	sq_ndrains_intr;
	kstat_named_t	sq_ndrains_other;
	kstat_named_t	sq_time_worker;
	kstat_named_t	sq_time_intr;
	kstat_named_t	sq_time_other;
} squeue_kstat = {
	{ "count",		KSTAT_DATA_UINT64 },
	{ "max_qlen",		KSTAT_DATA_UINT64 },
	{ "packets_worker",	KSTAT_DATA_UINT64 },
	{ "packets_intr",	KSTAT_DATA_UINT64 },
	{ "packets_other",	KSTAT_DATA_UINT64 },
	{ "queued_intr",	KSTAT_DATA_UINT64 },
	{ "queued_other",	KSTAT_DATA_UINT64 },
	{ "ndrains_worker",	KSTAT_DATA_UINT64 },
	{ "ndrains_intr",	KSTAT_DATA_UINT64 },
	{ "ndrains_other",	KSTAT_DATA_UINT64 },
	{ "time_worker",	KSTAT_DATA_UINT64 },
	{ "time_intr",		KSTAT_DATA_UINT64 },
	{ "time_other",		KSTAT_DATA_UINT64 },
};
#endif

/*
 * Arrange for the worker thread (or a pending timeout) to pick up work
 * just enqueued on an unprocessed squeue. Entered with sq_lock held;
 * always exits with sq_lock dropped and returns from the caller.
 * The lock is deliberately dropped around timeout()/untimeout() calls
 * to avoid contending on callout locks with sq_lock held, which is why
 * the SQS_TMO_PROG flag and the re-check after re-acquiring sq_lock
 * are needed (squeue_fire may run in the window).
 */
#define	SQUEUE_WORKER_WAKEUP(sqp) {					\
	timeout_id_t tid = (sqp)->sq_tid; 				\
									\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));				\
	/*								\
	 * Queue isn't being processed, so take				\
	 * any post enqueue actions needed before leaving.		\
	 */								\
	if (tid != 0) {							\
		/*							\
		 * Waiting for an enter() to process mblk(s).		\
		 */							\
		clock_t	waited = lbolt - (sqp)->sq_awaken;		\
									\
		if (TICK_TO_MSEC(waited) >= (sqp)->sq_wait) {		\
			/*						\
			 * Times up and have a worker thread		\
			 * waiting for work, so schedule it.		\
			 */						\
			(sqp)->sq_tid = 0;				\
			(sqp)->sq_awaken = lbolt;			\
			cv_signal(&(sqp)->sq_async);			\
			mutex_exit(&(sqp)->sq_lock);			\
			(void) untimeout(tid);				\
			return;						\
		}							\
		mutex_exit(&(sqp)->sq_lock);				\
		return;							\
	} else if ((sqp)->sq_state & SQS_TMO_PROG) {			\
		/* Another thread is already arming the timeout. */	\
		mutex_exit(&(sqp)->sq_lock);				\
		return;							\
	} else if ((sqp)->sq_wait != 0) {				\
		clock_t	wait = (sqp)->sq_wait;				\
		/*							\
		 * Wait up to sqp->sq_wait ms for an			\
		 * enter() to process this queue. We			\
		 * don't want to contend on timeout locks		\
		 * with sq_lock held for performance reasons,		\
		 * so drop the sq_lock before calling timeout		\
		 * but we need to check if timeout is required		\
		 * after re acquiring the sq_lock. Once			\
		 * the sq_lock is dropped, someone else could		\
		 * have processed the packet or the timeout could	\
		 * have already fired.					\
		 */							\
		(sqp)->sq_state |= SQS_TMO_PROG;			\
		mutex_exit(&(sqp)->sq_lock);				\
		tid = timeout(squeue_fire, (sqp), wait);		\
		mutex_enter(&(sqp)->sq_lock);				\
		/* Check again if we still need the timeout */		\
		if ((((sqp)->sq_state & (SQS_PROC|SQS_TMO_PROG)) ==	\
			SQS_TMO_PROG) && ((sqp)->sq_tid == 0) &&	\
			((sqp)->sq_first != NULL)) {			\
				(sqp)->sq_state &= ~SQS_TMO_PROG;	\
				(sqp)->sq_awaken = lbolt;		\
				(sqp)->sq_tid = tid;			\
				mutex_exit(&(sqp)->sq_lock);		\
				return;					\
		} else {						\
			if ((sqp)->sq_state & SQS_TMO_PROG) {		\
				(sqp)->sq_state &= ~SQS_TMO_PROG;	\
				mutex_exit(&(sqp)->sq_lock);		\
				(void) untimeout(tid);			\
			} else {					\
				/*					\
				 * The timer fired before we could	\
				 * reacquire the sq_lock. squeue_fire	\
				 * removes the SQS_TMO_PROG flag	\
				 * and we don't need to do anything	\
				 * else.				\
				 */					\
				mutex_exit(&(sqp)->sq_lock);		\
			}						\
		}							\
	} else {							\
		/*							\
		 * Schedule the worker thread.				\
		 */							\
		(sqp)->sq_awaken = lbolt;				\
		cv_signal(&(sqp)->sq_async);				\
		mutex_exit(&(sqp)->sq_lock);				\
	}								\
	ASSERT(MUTEX_NOT_HELD(&(sqp)->sq_lock));			\
}

/*
 * Append a single mblk to the squeue's request list; the processing
 * function and argument ride along in b_queue/b_prev. sq_lock must be
 * held by the caller.
 */
#define	ENQUEUE_MP(sqp, mp, proc, arg) {			\
	/*							\
	 * Enque our mblk.					\
	 */							\
	(mp)->b_queue = NULL;					\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
	ASSERT((mp)->b_prev == NULL && (mp)->b_next == NULL); 	\
	(mp)->b_queue = (queue_t *)(proc);			\
	(mp)->b_prev = (mblk_t *)(arg);				\
								\
	if ((sqp)->sq_last != NULL)				\
		(sqp)->sq_last->b_next = (mp);			\
	else							\
		(sqp)->sq_first = (mp);				\
	(sqp)->sq_last = (mp);					\
	(sqp)->sq_count++;					\
	ASSERT((sqp)->sq_count > 0);				\
	DTRACE_PROBE2(squeue__enqueue, squeue_t *, sqp,		\
	    mblk_t *, mp);					\
}


#define	ENQUEUE_CHAIN(sqp, mp, tail, cnt) {			\
	/*							\
	 * Enqueue our mblk chain.
								\
	 */							\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
								\
	if ((sqp)->sq_last != NULL)				\
		(sqp)->sq_last->b_next = (mp);			\
	else							\
		(sqp)->sq_first = (mp);				\
	(sqp)->sq_last = (tail);				\
	(sqp)->sq_count += (cnt);				\
	ASSERT((sqp)->sq_count > 0);				\
	DTRACE_PROBE4(squeue__enqueuechain, squeue_t *, sqp,	\
		mblk_t *, mp, mblk_t *, tail, int, cnt);	\
								\
}

/*
 * Switch the receive ring into polling mode: blank interrupts for a
 * period scaled by the current backlog (capped at rr_max_blank_time).
 * Caller holds sq_lock.
 */
#define	SQS_POLLING_ON(sqp, rx_ring) {				\
	ASSERT(rx_ring != NULL);				\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
	rx_ring->rr_blank(rx_ring->rr_handle,			\
	    MIN((sqp->sq_avg_drain_time * sqp->sq_count),	\
		rx_ring->rr_max_blank_time),			\
		rx_ring->rr_max_pkt_cnt);			\
	rx_ring->rr_poll_state |= ILL_POLLING;			\
	rx_ring->rr_poll_time = lbolt;				\
}


/*
 * Restore the receive ring's minimum interrupt-blanking parameters.
 * Caller holds sq_lock.
 */
#define	SQS_POLLING_OFF(sqp, rx_ring) {				\
	ASSERT(rx_ring != NULL);				\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
	rx_ring->rr_blank(rx_ring->rr_handle,			\
	    rx_ring->rr_min_blank_time,				\
	    rx_ring->rr_min_pkt_cnt);				\
}

/*
 * One-time module initialization: create the squeue kmem cache and
 * convert the millisecond tunables to nanoseconds / ticks.
 */
void
squeue_init(void)
{
	squeue_cache = kmem_cache_create("squeue_cache",
	    sizeof (squeue_t), 64, NULL, NULL, NULL, NULL, NULL, 0);

	squeue_intrdrain_ns = squeue_intrdrain_ms * SQUEUE_MSEC_TO_NSEC;
	squeue_writerdrain_ns = squeue_writerdrain_ms * SQUEUE_MSEC_TO_NSEC;
	squeue_workerdrain_ns = squeue_workerdrain_ms * SQUEUE_MSEC_TO_NSEC;
	squeue_workerwait_tick = MSEC_TO_TICK_ROUNDUP(squeue_workerwait_ms);
}

/*
 * Create a new squeue; never fails, may sleep (KM_SLEEP allocation).
 * 'bind' is the preferred CPU binding for the worker thread (applied
 * later via squeue_bind()), 'wait' the enqueue-to-wakeup delay in ms,
 * 'pri' the worker thread priority.
 */
/* ARGSUSED */
squeue_t *
squeue_create(char *name, processorid_t bind, clock_t wait, pri_t pri)
{
	squeue_t *sqp = kmem_cache_alloc(squeue_cache, KM_SLEEP);

	bzero(sqp, sizeof (squeue_t));
	/*
	 * NOTE(review): strncpy with SQ_NAMELEN + 1 assumes sq_name is at
	 * least SQ_NAMELEN + 1 bytes; the explicit NUL below guarantees
	 * termination either way.
	 */
	(void) strncpy(sqp->sq_name, name, SQ_NAMELEN + 1);
	sqp->sq_name[SQ_NAMELEN] = '\0';

	sqp->sq_bind = bind;
	sqp->sq_wait = MSEC_TO_TICK(wait);
	/* Initial estimate: microseconds per tick of intr drain time. */
	sqp->sq_avg_drain_time =
	    drv_hztousec(NSEC_TO_TICK_ROUNDUP(squeue_intrdrain_ns)) /
	    NSEC_TO_TICK_ROUNDUP(squeue_intrdrain_ns);

#if SQUEUE_PROFILE
	if ((sqp->sq_kstat = kstat_create("ip", bind, name,
		"net", KSTAT_TYPE_NAMED,
		sizeof (squeue_kstat) / sizeof (kstat_named_t),
		KSTAT_FLAG_VIRTUAL)) != NULL) {
		sqp->sq_kstat->ks_lock = &squeue_kstat_lock;
		sqp->sq_kstat->ks_data = &squeue_kstat;
		sqp->sq_kstat->ks_update = squeue_kstat_update;
		sqp->sq_kstat->ks_private = sqp;
		kstat_install(sqp->sq_kstat);
	}
#endif

	sqp->sq_worker = thread_create(NULL, 0, squeue_worker,
	    sqp, 0, &p0, TS_RUN, pri);

	return (sqp);
}

/*
 * Bind the worker thread to its preferred CPU (sq_bind, chosen at
 * squeue_create time). Only bind == -1 is currently supported.
 */
/* ARGSUSED */
void
squeue_bind(squeue_t *sqp, processorid_t bind)
{
	ASSERT(bind == -1);

	mutex_enter(&sqp->sq_lock);
	if (sqp->sq_state & SQS_BOUND) {
		mutex_exit(&sqp->sq_lock);
		return;
	}

	sqp->sq_state |= SQS_BOUND;
	mutex_exit(&sqp->sq_lock);

	thread_affinity_set(sqp->sq_worker, sqp->sq_bind);
}

/*
 * Undo squeue_bind(): clear the worker thread's CPU affinity.
 * No-op if the squeue is not currently bound.
 */
void
squeue_unbind(squeue_t *sqp)
{
	mutex_enter(&sqp->sq_lock);
	if (!(sqp->sq_state & SQS_BOUND)) {
		mutex_exit(&sqp->sq_lock);
		return;
	}

	sqp->sq_state &= ~SQS_BOUND;
	mutex_exit(&sqp->sq_lock);

	thread_affinity_clear(sqp->sq_worker);
}

/*
 * squeue_enter() - enter squeue sqp with mblk mp (which can be
 * a chain), while tail points to the end and cnt in number of
 * mblks in the chain.
 *
 * For a chain of single packet (i.e. mp == tail), go through the
 * fast path if no one is processing the squeue and nothing is queued.
 *
 * The proc and arg for each mblk is already stored in the mblk in
 * appropriate places.
5230Sstevel@tonic-gate */ 5240Sstevel@tonic-gate void 5250Sstevel@tonic-gate squeue_enter_chain(squeue_t *sqp, mblk_t *mp, mblk_t *tail, 5260Sstevel@tonic-gate uint32_t cnt, uint8_t tag) 5270Sstevel@tonic-gate { 5280Sstevel@tonic-gate int interrupt = servicing_interrupt(); 5290Sstevel@tonic-gate void *arg; 5300Sstevel@tonic-gate sqproc_t proc; 531*981Sbw hrtime_t now; 5320Sstevel@tonic-gate #if SQUEUE_PROFILE 5330Sstevel@tonic-gate hrtime_t start, delta; 5340Sstevel@tonic-gate #endif 5350Sstevel@tonic-gate 5360Sstevel@tonic-gate ASSERT(sqp != NULL); 5370Sstevel@tonic-gate ASSERT(mp != NULL); 5380Sstevel@tonic-gate ASSERT(tail != NULL); 5390Sstevel@tonic-gate ASSERT(cnt > 0); 5400Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock)); 5410Sstevel@tonic-gate 5420Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 5430Sstevel@tonic-gate if (!(sqp->sq_state & SQS_PROC)) { 5440Sstevel@tonic-gate /* 5450Sstevel@tonic-gate * See if anything is already queued. If we are the 5460Sstevel@tonic-gate * first packet, do inline processing else queue the 5470Sstevel@tonic-gate * packet and do the drain. 5480Sstevel@tonic-gate */ 5490Sstevel@tonic-gate sqp->sq_run = curthread; 5500Sstevel@tonic-gate if (sqp->sq_first == NULL && cnt == 1) { 5510Sstevel@tonic-gate /* 5520Sstevel@tonic-gate * Fast-path, ok to process and nothing queued. 5530Sstevel@tonic-gate */ 5540Sstevel@tonic-gate sqp->sq_state |= (SQS_PROC|SQS_FAST); 5550Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 5560Sstevel@tonic-gate 5570Sstevel@tonic-gate /* 5580Sstevel@tonic-gate * We are the chain of 1 packet so 5590Sstevel@tonic-gate * go through this fast path. 
5600Sstevel@tonic-gate */ 5610Sstevel@tonic-gate arg = mp->b_prev; 5620Sstevel@tonic-gate mp->b_prev = NULL; 5630Sstevel@tonic-gate proc = (sqproc_t)mp->b_queue; 5640Sstevel@tonic-gate mp->b_queue = NULL; 5650Sstevel@tonic-gate 5660Sstevel@tonic-gate ASSERT(proc != NULL); 5670Sstevel@tonic-gate ASSERT(arg != NULL); 5680Sstevel@tonic-gate ASSERT(mp->b_next == NULL); 5690Sstevel@tonic-gate 5700Sstevel@tonic-gate #if SQUEUE_DEBUG 5710Sstevel@tonic-gate sqp->sq_isintr = interrupt; 5720Sstevel@tonic-gate sqp->sq_curmp = mp; 5730Sstevel@tonic-gate sqp->sq_curproc = proc; 5740Sstevel@tonic-gate sqp->sq_connp = arg; 5750Sstevel@tonic-gate mp->b_tag = sqp->sq_tag = tag; 5760Sstevel@tonic-gate #endif 5770Sstevel@tonic-gate #if SQUEUE_PROFILE 5780Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 5790Sstevel@tonic-gate if (interrupt) 5800Sstevel@tonic-gate SQSTAT(sqp, sq_npackets_intr); 5810Sstevel@tonic-gate else 5820Sstevel@tonic-gate SQSTAT(sqp, sq_npackets_other); 5830Sstevel@tonic-gate start = gethrtime(); 5840Sstevel@tonic-gate } 5850Sstevel@tonic-gate #endif 5860Sstevel@tonic-gate ((conn_t *)arg)->conn_on_sqp = B_TRUE; 5870Sstevel@tonic-gate DTRACE_PROBE3(squeue__proc__start, squeue_t *, 5880Sstevel@tonic-gate sqp, mblk_t *, mp, conn_t *, arg); 5890Sstevel@tonic-gate (*proc)(arg, mp, sqp); 5900Sstevel@tonic-gate DTRACE_PROBE2(squeue__proc__end, squeue_t *, 5910Sstevel@tonic-gate sqp, conn_t *, arg); 5920Sstevel@tonic-gate ((conn_t *)arg)->conn_on_sqp = B_FALSE; 5930Sstevel@tonic-gate 5940Sstevel@tonic-gate #if SQUEUE_PROFILE 5950Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 5960Sstevel@tonic-gate delta = gethrtime() - start; 5970Sstevel@tonic-gate if (interrupt) 5980Sstevel@tonic-gate SQDELTA(sqp, sq_time_intr, delta); 5990Sstevel@tonic-gate else 6000Sstevel@tonic-gate SQDELTA(sqp, sq_time_other, delta); 6010Sstevel@tonic-gate } 6020Sstevel@tonic-gate #endif 6030Sstevel@tonic-gate #if SQUEUE_DEBUG 6040Sstevel@tonic-gate sqp->sq_curmp = NULL; 6050Sstevel@tonic-gate 
sqp->sq_curproc = NULL; 6060Sstevel@tonic-gate sqp->sq_connp = NULL; 6070Sstevel@tonic-gate sqp->sq_isintr = 0; 6080Sstevel@tonic-gate #endif 6090Sstevel@tonic-gate 6100Sstevel@tonic-gate CONN_DEC_REF((conn_t *)arg); 6110Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock)); 6120Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 6130Sstevel@tonic-gate sqp->sq_state &= ~(SQS_PROC|SQS_FAST); 6140Sstevel@tonic-gate if (sqp->sq_first == NULL) { 6150Sstevel@tonic-gate /* 6160Sstevel@tonic-gate * We processed inline our packet and 6170Sstevel@tonic-gate * nothing new has arrived. We are done. 6180Sstevel@tonic-gate */ 6190Sstevel@tonic-gate sqp->sq_run = NULL; 6200Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 6210Sstevel@tonic-gate return; 6220Sstevel@tonic-gate } else if (sqp->sq_bind != CPU->cpu_id) { 6230Sstevel@tonic-gate /* 6240Sstevel@tonic-gate * If the current thread is not running 6250Sstevel@tonic-gate * on the CPU to which this squeue is bound, 6260Sstevel@tonic-gate * then don't allow it to drain. 
6270Sstevel@tonic-gate */ 6280Sstevel@tonic-gate sqp->sq_run = NULL; 6290Sstevel@tonic-gate SQUEUE_WORKER_WAKEUP(sqp); 6300Sstevel@tonic-gate return; 6310Sstevel@tonic-gate } 6320Sstevel@tonic-gate } else { 6330Sstevel@tonic-gate ENQUEUE_CHAIN(sqp, mp, tail, cnt); 6340Sstevel@tonic-gate #if SQUEUE_DEBUG 6350Sstevel@tonic-gate mp->b_tag = tag; 6360Sstevel@tonic-gate #endif 6370Sstevel@tonic-gate #if SQUEUE_PROFILE 6380Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 6390Sstevel@tonic-gate if (servicing_interrupt()) 6400Sstevel@tonic-gate SQSTAT(sqp, sq_nqueued_intr); 6410Sstevel@tonic-gate else 6420Sstevel@tonic-gate SQSTAT(sqp, sq_nqueued_other); 6430Sstevel@tonic-gate if (sqp->sq_stats.sq_max_qlen < sqp->sq_count) 6440Sstevel@tonic-gate sqp->sq_stats.sq_max_qlen = 6450Sstevel@tonic-gate sqp->sq_count; 6460Sstevel@tonic-gate } 6470Sstevel@tonic-gate #endif 6480Sstevel@tonic-gate } 6490Sstevel@tonic-gate 6500Sstevel@tonic-gate /* 6510Sstevel@tonic-gate * We are here because either we couldn't do inline 6520Sstevel@tonic-gate * processing (because something was already queued), 6530Sstevel@tonic-gate * or we had a chanin of more than one packet, 6540Sstevel@tonic-gate * or something else arrived after we were done with 6550Sstevel@tonic-gate * inline processing. 
6560Sstevel@tonic-gate */ 6570Sstevel@tonic-gate ASSERT(MUTEX_HELD(&sqp->sq_lock)); 6580Sstevel@tonic-gate ASSERT(sqp->sq_first != NULL); 6590Sstevel@tonic-gate 6600Sstevel@tonic-gate #if SQUEUE_PROFILE 6610Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 6620Sstevel@tonic-gate start = gethrtime(); 6630Sstevel@tonic-gate } 6640Sstevel@tonic-gate #endif 6650Sstevel@tonic-gate #if SQUEUE_DEBUG 6660Sstevel@tonic-gate sqp->sq_isintr = interrupt; 6670Sstevel@tonic-gate #endif 6680Sstevel@tonic-gate 669*981Sbw now = gethrtime(); 6700Sstevel@tonic-gate if (interrupt) { 671*981Sbw squeue_drain(sqp, SQS_ENTER, now + 672*981Sbw squeue_intrdrain_ns); 6730Sstevel@tonic-gate } else { 674*981Sbw squeue_drain(sqp, SQS_USER, now + 675*981Sbw squeue_writerdrain_ns); 6760Sstevel@tonic-gate } 6770Sstevel@tonic-gate 6780Sstevel@tonic-gate #if SQUEUE_PROFILE 6790Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 6800Sstevel@tonic-gate delta = gethrtime() - start; 6810Sstevel@tonic-gate if (interrupt) 6820Sstevel@tonic-gate SQDELTA(sqp, sq_time_intr, delta); 6830Sstevel@tonic-gate else 6840Sstevel@tonic-gate SQDELTA(sqp, sq_time_other, delta); 6850Sstevel@tonic-gate } 6860Sstevel@tonic-gate #endif 6870Sstevel@tonic-gate #if SQUEUE_DEBUG 6880Sstevel@tonic-gate sqp->sq_isintr = 0; 6890Sstevel@tonic-gate #endif 6900Sstevel@tonic-gate 6910Sstevel@tonic-gate /* 6920Sstevel@tonic-gate * If we didn't do a complete drain, the worker 6930Sstevel@tonic-gate * thread was already signalled by squeue_drain. 6940Sstevel@tonic-gate */ 6950Sstevel@tonic-gate sqp->sq_run = NULL; 6960Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 6970Sstevel@tonic-gate return; 6980Sstevel@tonic-gate } else { 6990Sstevel@tonic-gate ASSERT(sqp->sq_run != NULL); 7000Sstevel@tonic-gate /* 7010Sstevel@tonic-gate * Queue is already being processed. Just enqueue 7020Sstevel@tonic-gate * the packet and go away. 
		 */
#if SQUEUE_DEBUG
		mp->b_tag = tag;
#endif
#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			if (servicing_interrupt())
				SQSTAT(sqp, sq_nqueued_intr);
			else
				SQSTAT(sqp, sq_nqueued_other);
			if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
				sqp->sq_stats.sq_max_qlen = sqp->sq_count;
		}
#endif

		ENQUEUE_CHAIN(sqp, mp, tail, cnt);
		mutex_exit(&sqp->sq_lock);
		return;
	}
}

/*
 * squeue_enter() - enter squeue *sqp with mblk *mp with argument of *arg.
 *
 * If no thread currently owns the squeue (SQS_PROC clear) and nothing is
 * queued, the packet is processed inline by this thread via (*proc)();
 * otherwise it is enqueued.  After inline processing, any backlog that
 * accumulated meanwhile is drained (time bounded) before returning.
 * A thread that already owns the squeue is allowed to reenter exactly
 * once (SQS_REENTER) to limit stack growth from recursive traversal.
 * Consumes the conn reference held for this request (CONN_DEC_REF) on
 * the paths that process the packet here.
 */
void
squeue_enter(squeue_t *sqp, mblk_t *mp, sqproc_t proc, void *arg,
    uint8_t tag)
{
	int	interrupt = servicing_interrupt();
	hrtime_t now;
#if SQUEUE_PROFILE
	hrtime_t start, delta;
#endif
#if SQUEUE_DEBUG
	conn_t	*connp = (conn_t *)arg;
	ASSERT(!IPCL_IS_TCP(connp) || connp->conn_tcp->tcp_connp == connp);
	ASSERT(!IPCL_IS_UDP(connp) || connp->conn_udp->udp_connp == connp);
#endif

	ASSERT(proc != NULL);
	ASSERT(sqp != NULL);
	ASSERT(mp != NULL);
	ASSERT(mp->b_next == NULL);
	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));

	mutex_enter(&sqp->sq_lock);
	if (!(sqp->sq_state & SQS_PROC)) {
		/*
		 * See if anything is already queued. If we are the
		 * first packet, do inline processing else queue the
		 * packet and do the drain.
		 */
		sqp->sq_run = curthread;
		if (sqp->sq_first == NULL) {
			/*
			 * Fast-path, ok to process and nothing queued.
			 * Drop sq_lock while the handler runs; SQS_PROC
			 * keeps other threads out.
			 */
			sqp->sq_state |= (SQS_PROC|SQS_FAST);
			mutex_exit(&sqp->sq_lock);

#if SQUEUE_DEBUG
			sqp->sq_isintr = interrupt;
			sqp->sq_curmp = mp;
			sqp->sq_curproc = proc;
			sqp->sq_connp = connp;
			mp->b_tag = sqp->sq_tag = tag;
#endif
#if SQUEUE_PROFILE
			if (SQ_PROFILING(sqp)) {
				if (interrupt)
					SQSTAT(sqp, sq_npackets_intr);
				else
					SQSTAT(sqp, sq_npackets_other);
				start = gethrtime();
			}
#endif
			/*
			 * conn_on_sqp brackets the handler call so the
			 * reentry check below can tell whether this conn
			 * is already being processed.
			 */
			((conn_t *)arg)->conn_on_sqp = B_TRUE;
			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
			    sqp, mblk_t *, mp, conn_t *, arg);
			(*proc)(arg, mp, sqp);
			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
			    sqp, conn_t *, arg);
			((conn_t *)arg)->conn_on_sqp = B_FALSE;

#if SQUEUE_PROFILE
			if (SQ_PROFILING(sqp)) {
				delta = gethrtime() - start;
				if (interrupt)
					SQDELTA(sqp, sq_time_intr, delta);
				else
					SQDELTA(sqp, sq_time_other, delta);
			}
#endif
#if SQUEUE_DEBUG
			sqp->sq_curmp = NULL;
			sqp->sq_curproc = NULL;
			sqp->sq_connp = NULL;
			sqp->sq_isintr = 0;
#endif

			/* Release the conn reference held for this request. */
			CONN_DEC_REF((conn_t *)arg);
			ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
			mutex_enter(&sqp->sq_lock);
			sqp->sq_state &= ~(SQS_PROC|SQS_FAST);
			if (sqp->sq_first == NULL) {
				/*
				 * We processed inline our packet and
				 * nothing new has arrived. We are done.
				 */
				sqp->sq_run = NULL;
				mutex_exit(&sqp->sq_lock);
				return;
			} else if (sqp->sq_bind != CPU->cpu_id) {
				/*
				 * If the current thread is not running
				 * on the CPU to which this squeue is bound,
				 * then don't allow it to drain.
				 * NOTE(review): no mutex_exit on this path;
				 * SQUEUE_WORKER_WAKEUP presumably drops
				 * sq_lock itself - confirm in squeue.h.
				 */
				sqp->sq_run = NULL;
				SQUEUE_WORKER_WAKEUP(sqp);
				return;
			}
		} else {
			/* Something already queued - append and drain. */
			ENQUEUE_MP(sqp, mp, proc, arg);
#if SQUEUE_DEBUG
			mp->b_tag = tag;
#endif
#if SQUEUE_PROFILE
			if (SQ_PROFILING(sqp)) {
				if (servicing_interrupt())
					SQSTAT(sqp, sq_nqueued_intr);
				else
					SQSTAT(sqp, sq_nqueued_other);
				if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
					sqp->sq_stats.sq_max_qlen =
					    sqp->sq_count;
			}
#endif
		}

		/*
		 * We are here because either we couldn't do inline
		 * processing (because something was already queued)
		 * or something else arrived after we were done with
		 * inline processing.
		 */
		ASSERT(MUTEX_HELD(&sqp->sq_lock));
		ASSERT(sqp->sq_first != NULL);

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			start = gethrtime();
		}
#endif
#if SQUEUE_DEBUG
		sqp->sq_isintr = interrupt;
#endif

		/*
		 * Drain with an absolute deadline; interrupt context gets
		 * the (separately tuned) intrdrain budget.
		 */
		now = gethrtime();
		if (interrupt) {
			squeue_drain(sqp, SQS_ENTER, now +
			    squeue_intrdrain_ns);
		} else {
			squeue_drain(sqp, SQS_USER, now +
			    squeue_writerdrain_ns);
		}

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			delta = gethrtime() - start;
			if (interrupt)
				SQDELTA(sqp, sq_time_intr, delta);
			else
				SQDELTA(sqp, sq_time_other, delta);
		}
#endif
#if SQUEUE_DEBUG
		sqp->sq_isintr = 0;
#endif

		/*
		 * If we didn't do a complete drain, the worker
		 * thread was already signalled by squeue_drain.
		 */
		sqp->sq_run = NULL;
		mutex_exit(&sqp->sq_lock);
		return;
	} else {
		ASSERT(sqp->sq_run != NULL);
		/*
		 * We let a thread processing a squeue reenter only
		 * once. This helps the case of incoming connection
		 * where a SYN-ACK-ACK that triggers the conn_ind
		 * doesn't have to queue the packet if listener and
		 * eager are on the same squeue. Also helps the
		 * loopback connection where the two ends are bound
		 * to the same squeue (which is typical on single
		 * CPU machines).
		 * We let the thread reenter only once for the fear
		 * of stack getting blown with multiple traversal.
		 */
		if (!(sqp->sq_state & SQS_REENTER) &&
		    (sqp->sq_run == curthread) &&
		    (((conn_t *)arg)->conn_on_sqp == B_FALSE)) {
			sqp->sq_state |= SQS_REENTER;
			mutex_exit(&sqp->sq_lock);

			((conn_t *)arg)->conn_on_sqp = B_TRUE;
			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
			    sqp, mblk_t *, mp, conn_t *, arg);
			(*proc)(arg, mp, sqp);
			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
			    sqp, conn_t *, arg);
			((conn_t *)arg)->conn_on_sqp = B_FALSE;
			CONN_DEC_REF((conn_t *)arg);

			mutex_enter(&sqp->sq_lock);
			sqp->sq_state &= ~SQS_REENTER;
			mutex_exit(&sqp->sq_lock);
			return;
		}
		/*
		 * Queue is already being processed. Just enqueue
		 * the packet and go away.
		 */
#if SQUEUE_DEBUG
		mp->b_tag = tag;
#endif
#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			if (servicing_interrupt())
				SQSTAT(sqp, sq_nqueued_intr);
			else
				SQSTAT(sqp, sq_nqueued_other);
			if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
				sqp->sq_stats.sq_max_qlen = sqp->sq_count;
		}
#endif

		ENQUEUE_MP(sqp, mp, proc, arg);
		mutex_exit(&sqp->sq_lock);
		return;
	}
}

/*
 * squeue_enter_nodrain() - like squeue_enter(), but never drains the
 * backlog: the packet is either processed inline (fast path / single
 * reentry) or enqueued and the worker thread is woken to do the work.
 */
void
squeue_enter_nodrain(squeue_t *sqp, mblk_t *mp, sqproc_t proc, void *arg,
    uint8_t tag)
{
	int		interrupt = servicing_interrupt();
	boolean_t	being_processed;
#if SQUEUE_DEBUG
	conn_t		*connp = (conn_t *)arg;
#endif
#if SQUEUE_PROFILE
	hrtime_t	start, delta;
#endif

	ASSERT(proc != NULL);
	ASSERT(sqp != NULL);
	ASSERT(mp != NULL);
	ASSERT(mp->b_next == NULL);
	ASSERT(!IPCL_IS_TCP(connp) || connp->conn_tcp->tcp_connp == connp);
	ASSERT(!IPCL_IS_UDP(connp) || connp->conn_udp->udp_connp == connp);
	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));

	mutex_enter(&sqp->sq_lock);

	/* Snapshot ownership under the lock; reused after enqueue below. */
	being_processed = (sqp->sq_state & SQS_PROC);
	if
	    (!being_processed && (sqp->sq_first == NULL)) {
		/*
		 * Fast-path, ok to process and nothing queued.
		 */
		sqp->sq_state |= (SQS_PROC|SQS_FAST);
		sqp->sq_run = curthread;
		mutex_exit(&sqp->sq_lock);

#if SQUEUE_DEBUG
		sqp->sq_isintr = interrupt;
		sqp->sq_curmp = mp;
		sqp->sq_curproc = proc;
		sqp->sq_connp = connp;
		mp->b_tag = sqp->sq_tag = tag;
#endif

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			if (interrupt)
				SQSTAT(sqp, sq_npackets_intr);
			else
				SQSTAT(sqp, sq_npackets_other);
			start = gethrtime();
		}
#endif

		/* Handler runs without sq_lock; SQS_PROC excludes others. */
		((conn_t *)arg)->conn_on_sqp = B_TRUE;
		DTRACE_PROBE3(squeue__proc__start, squeue_t *,
		    sqp, mblk_t *, mp, conn_t *, arg);
		(*proc)(arg, mp, sqp);
		DTRACE_PROBE2(squeue__proc__end, squeue_t *,
		    sqp, conn_t *, arg);
		((conn_t *)arg)->conn_on_sqp = B_FALSE;

#if SQUEUE_DEBUG
		sqp->sq_curmp = NULL;
		sqp->sq_curproc = NULL;
		sqp->sq_connp = NULL;
		sqp->sq_isintr = 0;
#endif
#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			delta = gethrtime() - start;
			if (interrupt)
				SQDELTA(sqp, sq_time_intr, delta);
			else
				SQDELTA(sqp, sq_time_other, delta);
		}
#endif

		/* Release the conn reference held for this request. */
		CONN_DEC_REF((conn_t *)arg);
		mutex_enter(&sqp->sq_lock);
		sqp->sq_state &= ~(SQS_PROC|SQS_FAST);
		sqp->sq_run = NULL;
		if (sqp->sq_first == NULL) {
			/*
			 * We processed inline our packet and
			 * nothing new has arrived. We are done.
			 */
			mutex_exit(&sqp->sq_lock);
		} else {
			/*
			 * Backlog accumulated while we ran; hand it to the
			 * worker instead of draining here (nodrain contract).
			 */
			SQUEUE_WORKER_WAKEUP(sqp);
		}
		return;
	} else {
		/*
		 * We let a thread processing a squeue reenter only
		 * once. This helps the case of incoming connection
		 * where a SYN-ACK-ACK that triggers the conn_ind
		 * doesn't have to queue the packet if listener and
		 * eager are on the same squeue. Also helps the
		 * loopback connection where the two ends are bound
		 * to the same squeue (which is typical on single
		 * CPU machines).
		 * We let the thread reenter only once for the fear
		 * of stack getting blown with multiple traversal.
		 */
		if (being_processed && !(sqp->sq_state & SQS_REENTER) &&
		    (sqp->sq_run == curthread) &&
		    (((conn_t *)arg)->conn_on_sqp == B_FALSE)) {
			sqp->sq_state |= SQS_REENTER;
			mutex_exit(&sqp->sq_lock);

			((conn_t *)arg)->conn_on_sqp = B_TRUE;
			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
			    sqp, mblk_t *, mp, conn_t *, arg);
			(*proc)(arg, mp, sqp);
			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
			    sqp, conn_t *, arg);
			((conn_t *)arg)->conn_on_sqp = B_FALSE;
			CONN_DEC_REF((conn_t *)arg);

			mutex_enter(&sqp->sq_lock);
			sqp->sq_state &= ~SQS_REENTER;
			mutex_exit(&sqp->sq_lock);
			return;
		}

#if SQUEUE_DEBUG
		mp->b_tag = tag;
#endif
#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			if (servicing_interrupt())
				SQSTAT(sqp, sq_nqueued_intr);
			else
				SQSTAT(sqp, sq_nqueued_other);
			if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
				sqp->sq_stats.sq_max_qlen = sqp->sq_count;
		}
#endif
		ENQUEUE_MP(sqp, mp, proc, arg);
		if (being_processed) {
			/*
			 * Queue is already being processed.
			 * No need to do anything.
			 */
			mutex_exit(&sqp->sq_lock);
			return;
		}
		SQUEUE_WORKER_WAKEUP(sqp);
	}
}

/*
 * squeue_fill() - fill squeue *sqp with mblk *mp with argument of *arg
 * without processing the squeue.
 */
/* ARGSUSED */
void
squeue_fill(squeue_t *sqp, mblk_t *mp, sqproc_t proc, void * arg,
    uint8_t tag)
{
#if SQUEUE_DEBUG
	conn_t *connp = (conn_t *)arg;
#endif
	ASSERT(proc != NULL);
	ASSERT(sqp != NULL);
	ASSERT(mp != NULL);
	ASSERT(mp->b_next == NULL);
	ASSERT(!IPCL_IS_TCP(connp) || connp->conn_tcp->tcp_connp == connp);
	ASSERT(!IPCL_IS_UDP(connp) || connp->conn_udp->udp_connp == connp);

	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
	mutex_enter(&sqp->sq_lock);
	ENQUEUE_MP(sqp, mp, proc, arg);
#if SQUEUE_DEBUG
	mp->b_tag = tag;
#endif
#if SQUEUE_PROFILE
	if (SQ_PROFILING(sqp)) {
		if (servicing_interrupt())
			SQSTAT(sqp, sq_nqueued_intr);
		else
			SQSTAT(sqp, sq_nqueued_other);
		if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
			sqp->sq_stats.sq_max_qlen = sqp->sq_count;
	}
#endif

	/*
	 * If queue is already being processed. No need to do anything.
	 */
	if (sqp->sq_state & SQS_PROC) {
		mutex_exit(&sqp->sq_lock);
		return;
	}

	/*
	 * NOTE(review): no mutex_exit on this path; SQUEUE_WORKER_WAKEUP
	 * presumably drops sq_lock itself - confirm in squeue.h.
	 */
	SQUEUE_WORKER_WAKEUP(sqp);
}


/*
 * PRIVATE FUNCTIONS
 */

/*
 * squeue_fire() - timeout(9F) callback armed by the worker thread.
 * Clears the pending timeout id and, if nothing else owns the squeue,
 * wakes the worker to pick up the queued work.
 */
static void
squeue_fire(void *arg)
{
	squeue_t	*sqp = arg;
	uint_t	state;

	mutex_enter(&sqp->sq_lock);

	state = sqp->sq_state;
	/* Neither a pending timeout nor one being armed: stale callback. */
	if (sqp->sq_tid == 0 && !(state & SQS_TMO_PROG)) {
		mutex_exit(&sqp->sq_lock);
		return;
	}

	sqp->sq_tid = 0;
	/*
	 * The timeout fired before we got a chance to set it.
	 * Process it anyway but remove the SQS_TMO_PROG so that
	 * the guy trying to set the timeout knows that it has
	 * already been processed.
	 */
	if (state & SQS_TMO_PROG)
		sqp->sq_state &= ~SQS_TMO_PROG;

	if (!(state & SQS_PROC)) {
		sqp->sq_awaken = lbolt;
		cv_signal(&sqp->sq_async);
	}
	mutex_exit(&sqp->sq_lock);
}

/*
 * squeue_drain() - process queued requests until the queue empties or the
 * absolute gethrtime() deadline 'expire' passes (0 means no time limit).
 * proc_type (SQS_ENTER / SQS_USER / SQS_WORKER) identifies the caller and
 * is OR-ed into sq_state for the duration of the drain.  Called and left
 * with sq_lock held; the lock is dropped while packet handlers run.
 */
static void
squeue_drain(squeue_t *sqp, uint_t proc_type, hrtime_t expire)
{
	mblk_t	*mp;
	mblk_t	*head;
	sqproc_t	proc;
	conn_t	*connp;
	clock_t	start = lbolt;
	clock_t	drain_time;
	timeout_id_t	tid;
	uint_t	cnt;
	uint_t	total_cnt = 0;
	ill_rx_ring_t	*sq_rx_ring = sqp->sq_rx_ring;
	int	interrupt = servicing_interrupt();
	boolean_t	poll_on = B_FALSE;
	hrtime_t	now;

	ASSERT(mutex_owned(&sqp->sq_lock));
	ASSERT(!(sqp->sq_state & SQS_PROC));

#if SQUEUE_PROFILE
	if (SQ_PROFILING(sqp)) {
		if (interrupt)
			SQSTAT(sqp, sq_ndrains_intr);
		else if (!(proc_type & SQS_WORKER))
			SQSTAT(sqp, sq_ndrains_other);
		else
			SQSTAT(sqp, sq_ndrains_worker);
	}
#endif

	/* Cancel any pending worker-wakeup timeout; we are draining now. */
	if ((tid = sqp->sq_tid) != 0)
		sqp->sq_tid = 0;

	sqp->sq_state |= SQS_PROC | proc_type;
	head =
	    sqp->sq_first;
	sqp->sq_first = NULL;
	sqp->sq_last = NULL;
	cnt = sqp->sq_count;

	/*
	 * We have backlog built up. Switch to polling mode if the
	 * device underneath allows it. Need to do it only for
	 * drain by non-interrupt thread so interrupts don't
	 * come and disrupt us in between. If its a interrupt thread,
	 * no need because most devices will not issue another
	 * interrupt till this one returns.
	 */
	if ((sqp->sq_state & SQS_POLL_CAPAB) && !(proc_type & SQS_ENTER) &&
	    (sqp->sq_count > squeue_worker_poll_min)) {
		ASSERT(sq_rx_ring != NULL);
		SQS_POLLING_ON(sqp, sq_rx_ring);
		poll_on = B_TRUE;
	}

	mutex_exit(&sqp->sq_lock);

	if (tid != 0)
		(void) untimeout(tid);
again:
	/* Process the snapshotted chain without holding sq_lock. */
	while ((mp = head) != NULL) {
		head = mp->b_next;
		mp->b_next = NULL;

		/* Handler and conn were stashed in the mblk at enqueue. */
		proc = (sqproc_t)mp->b_queue;
		mp->b_queue = NULL;
		connp = (conn_t *)mp->b_prev;
		mp->b_prev = NULL;
#if SQUEUE_DEBUG
		sqp->sq_curmp = mp;
		sqp->sq_curproc = proc;
		sqp->sq_connp = connp;
		sqp->sq_tag = mp->b_tag;
#endif

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			if (interrupt)
				SQSTAT(sqp, sq_npackets_intr);
			else if (!(proc_type & SQS_WORKER))
				SQSTAT(sqp, sq_npackets_other);
			else
				SQSTAT(sqp, sq_npackets_worker);
		}
#endif

		connp->conn_on_sqp = B_TRUE;
		DTRACE_PROBE3(squeue__proc__start, squeue_t *,
		    sqp, mblk_t *, mp, conn_t *, connp);
		(*proc)(connp, mp, sqp);
		DTRACE_PROBE2(squeue__proc__end, squeue_t *,
		    sqp, conn_t *, connp);
		connp->conn_on_sqp = B_FALSE;
		CONN_DEC_REF(connp);
	}


#if SQUEUE_DEBUG
	sqp->sq_curmp = NULL;
	sqp->sq_curproc = NULL;
	sqp->sq_connp = NULL;
#endif

	mutex_enter(&sqp->sq_lock);
	sqp->sq_count -= cnt;
	total_cnt += cnt;

	if (sqp->sq_first != NULL) {

		now = gethrtime();
		if (!expire || (now < expire)) {
			/* More arrived and time not expired */
			head = sqp->sq_first;
			sqp->sq_first = NULL;
			sqp->sq_last = NULL;
			cnt = sqp->sq_count;
			mutex_exit(&sqp->sq_lock);
			goto again;
		}

		/*
		 * If we are not worker thread and we
		 * reached our time limit to do drain,
		 * signal the worker thread to pick
		 * up the work.
		 * If we were the worker thread, then
		 * we take a break to allow an interrupt
		 * or writer to pick up the load.
		 */
		if (proc_type != SQS_WORKER) {
			sqp->sq_awaken = lbolt;
			cv_signal(&sqp->sq_async);
		}
	}

	/*
	 * Try to see if we can get a time estimate to process a packet.
	 * Do it only in interrupt context since less chance of context
	 * switch or pinning etc. to get a better estimate.
	 * (Exponential moving average: 80% history, 20% this drain.)
	 */
	if (interrupt && ((drain_time = (lbolt - start)) > 0))
		sqp->sq_avg_drain_time = ((80 * sqp->sq_avg_drain_time) +
		    (20 * (drv_hztousec(drain_time)/total_cnt)))/100;

	sqp->sq_state &= ~(SQS_PROC | proc_type);

	/*
	 * If polling was turned on, turn it off and reduce the default
	 * interrupt blank interval as well to bring new packets in faster
	 * (reduces the latency when there is no backlog).
	 */
	if (poll_on && (sqp->sq_state & SQS_POLL_CAPAB)) {
		ASSERT(sq_rx_ring != NULL);
		SQS_POLLING_OFF(sqp, sq_rx_ring);
	}
}

/*
 * squeue_worker() - body of the dedicated per-squeue worker thread.
 * Sleeps on sq_async until work arrives and no other thread owns the
 * squeue, then drains for up to squeue_workerdrain_ns.  If backlog
 * remains after a drain, it arms a squeue_fire() timeout and waits
 * rather than monopolizing the squeue.
 */
static void
squeue_worker(squeue_t *sqp)
{
	kmutex_t *lock = &sqp->sq_lock;
	kcondvar_t *async = &sqp->sq_async;
	callb_cpr_t cprinfo;
	hrtime_t now;
#if SQUEUE_PROFILE
	hrtime_t start;
#endif

	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, "nca");
	mutex_enter(lock);

	for (;;) {
		while (sqp->sq_first == NULL || (sqp->sq_state & SQS_PROC)) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
still_wait:
			cv_wait(async, lock);
			/* Spurious/raced wakeup: someone else owns it. */
			if (sqp->sq_state & SQS_PROC) {
				goto still_wait;
			}
			CALLB_CPR_SAFE_END(&cprinfo, lock);
		}

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			start = gethrtime();
		}
#endif

		ASSERT(squeue_workerdrain_ns != 0);
		now = gethrtime();
		sqp->sq_run = curthread;
		squeue_drain(sqp, SQS_WORKER, now + squeue_workerdrain_ns);
		sqp->sq_run = NULL;

		if (sqp->sq_first != NULL) {
			/*
			 * Doing too much processing by worker
thread 13760Sstevel@tonic-gate * in presense of interrupts can be sub optimal. 13770Sstevel@tonic-gate * Instead, once a drain is done by worker thread 1378*981Sbw * for squeue_writerdrain_ns (the reason we are 13790Sstevel@tonic-gate * here), we force wait for squeue_workerwait_tick 13800Sstevel@tonic-gate * before doing more processing even if sq_wait is 13810Sstevel@tonic-gate * set to 0. 13820Sstevel@tonic-gate * 13830Sstevel@tonic-gate * This can be counterproductive for performance 13840Sstevel@tonic-gate * if worker thread is the only means to process 13850Sstevel@tonic-gate * the packets (interrupts or writers are not 13860Sstevel@tonic-gate * allowed inside the squeue). 13870Sstevel@tonic-gate */ 13880Sstevel@tonic-gate if (sqp->sq_tid == 0 && 13890Sstevel@tonic-gate !(sqp->sq_state & SQS_TMO_PROG)) { 13900Sstevel@tonic-gate timeout_id_t tid; 13910Sstevel@tonic-gate 13920Sstevel@tonic-gate sqp->sq_state |= SQS_TMO_PROG; 13930Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 13940Sstevel@tonic-gate tid = timeout(squeue_fire, sqp, 13950Sstevel@tonic-gate squeue_workerwait_tick); 13960Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 13970Sstevel@tonic-gate /* 13980Sstevel@tonic-gate * Check again if we still need 13990Sstevel@tonic-gate * the timeout 14000Sstevel@tonic-gate */ 14010Sstevel@tonic-gate if (((sqp->sq_state & (SQS_TMO_PROG|SQS_PROC)) 14020Sstevel@tonic-gate == SQS_TMO_PROG) && (sqp->sq_tid == 0) && 14030Sstevel@tonic-gate (sqp->sq_first != NULL)) { 14040Sstevel@tonic-gate sqp->sq_state &= ~SQS_TMO_PROG; 14050Sstevel@tonic-gate sqp->sq_awaken = lbolt; 14060Sstevel@tonic-gate sqp->sq_tid = tid; 14070Sstevel@tonic-gate } else if (sqp->sq_state & SQS_TMO_PROG) { 14080Sstevel@tonic-gate /* timeout not needed */ 14090Sstevel@tonic-gate sqp->sq_state &= ~SQS_TMO_PROG; 14100Sstevel@tonic-gate mutex_exit(&(sqp)->sq_lock); 14110Sstevel@tonic-gate (void) untimeout(tid); 14120Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 14130Sstevel@tonic-gate } 
14140Sstevel@tonic-gate } 14150Sstevel@tonic-gate CALLB_CPR_SAFE_BEGIN(&cprinfo); 14160Sstevel@tonic-gate cv_wait(async, lock); 14170Sstevel@tonic-gate CALLB_CPR_SAFE_END(&cprinfo, lock); 14180Sstevel@tonic-gate } 14190Sstevel@tonic-gate 14200Sstevel@tonic-gate 14210Sstevel@tonic-gate #if SQUEUE_PROFILE 14220Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 14230Sstevel@tonic-gate SQDELTA(sqp, sq_time_worker, gethrtime() - start); 14240Sstevel@tonic-gate } 14250Sstevel@tonic-gate #endif 14260Sstevel@tonic-gate } 14270Sstevel@tonic-gate } 14280Sstevel@tonic-gate 14290Sstevel@tonic-gate #if SQUEUE_PROFILE 14300Sstevel@tonic-gate static int 14310Sstevel@tonic-gate squeue_kstat_update(kstat_t *ksp, int rw) 14320Sstevel@tonic-gate { 14330Sstevel@tonic-gate struct squeue_kstat *sqsp = &squeue_kstat; 14340Sstevel@tonic-gate squeue_t *sqp = ksp->ks_private; 14350Sstevel@tonic-gate 14360Sstevel@tonic-gate if (rw == KSTAT_WRITE) 14370Sstevel@tonic-gate return (EACCES); 14380Sstevel@tonic-gate 14390Sstevel@tonic-gate #if SQUEUE_DEBUG 14400Sstevel@tonic-gate sqsp->sq_count.value.ui64 = sqp->sq_count; 14410Sstevel@tonic-gate sqsp->sq_max_qlen.value.ui64 = sqp->sq_stats.sq_max_qlen; 14420Sstevel@tonic-gate #endif 14430Sstevel@tonic-gate sqsp->sq_npackets_worker.value.ui64 = sqp->sq_stats.sq_npackets_worker; 14440Sstevel@tonic-gate sqsp->sq_npackets_intr.value.ui64 = sqp->sq_stats.sq_npackets_intr; 14450Sstevel@tonic-gate sqsp->sq_npackets_other.value.ui64 = sqp->sq_stats.sq_npackets_other; 14460Sstevel@tonic-gate sqsp->sq_nqueued_intr.value.ui64 = sqp->sq_stats.sq_nqueued_intr; 14470Sstevel@tonic-gate sqsp->sq_nqueued_other.value.ui64 = sqp->sq_stats.sq_nqueued_other; 14480Sstevel@tonic-gate sqsp->sq_ndrains_worker.value.ui64 = sqp->sq_stats.sq_ndrains_worker; 14490Sstevel@tonic-gate sqsp->sq_ndrains_intr.value.ui64 = sqp->sq_stats.sq_ndrains_intr; 14500Sstevel@tonic-gate sqsp->sq_ndrains_other.value.ui64 = sqp->sq_stats.sq_ndrains_other; 14510Sstevel@tonic-gate 
sqsp->sq_time_worker.value.ui64 = sqp->sq_stats.sq_time_worker; 14520Sstevel@tonic-gate sqsp->sq_time_intr.value.ui64 = sqp->sq_stats.sq_time_intr; 14530Sstevel@tonic-gate sqsp->sq_time_other.value.ui64 = sqp->sq_stats.sq_time_other; 14540Sstevel@tonic-gate return (0); 14550Sstevel@tonic-gate } 14560Sstevel@tonic-gate #endif 14570Sstevel@tonic-gate 14580Sstevel@tonic-gate void 14590Sstevel@tonic-gate squeue_profile_enable(squeue_t *sqp) 14600Sstevel@tonic-gate { 14610Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 14620Sstevel@tonic-gate sqp->sq_state |= SQS_PROFILE; 14630Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 14640Sstevel@tonic-gate } 14650Sstevel@tonic-gate 14660Sstevel@tonic-gate void 14670Sstevel@tonic-gate squeue_profile_disable(squeue_t *sqp) 14680Sstevel@tonic-gate { 14690Sstevel@tonic-gate mutex_enter(&sqp->sq_lock); 14700Sstevel@tonic-gate sqp->sq_state &= ~SQS_PROFILE; 14710Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 14720Sstevel@tonic-gate } 14730Sstevel@tonic-gate 14740Sstevel@tonic-gate void 14750Sstevel@tonic-gate squeue_profile_reset(squeue_t *sqp) 14760Sstevel@tonic-gate { 14770Sstevel@tonic-gate #if SQUEUE_PROFILE 14780Sstevel@tonic-gate bzero(&sqp->sq_stats, sizeof (sqstat_t)); 14790Sstevel@tonic-gate #endif 14800Sstevel@tonic-gate } 14810Sstevel@tonic-gate 14820Sstevel@tonic-gate void 14830Sstevel@tonic-gate squeue_profile_start(void) 14840Sstevel@tonic-gate { 14850Sstevel@tonic-gate #if SQUEUE_PROFILE 14860Sstevel@tonic-gate squeue_profile = B_TRUE; 14870Sstevel@tonic-gate #endif 14880Sstevel@tonic-gate } 14890Sstevel@tonic-gate 14900Sstevel@tonic-gate void 14910Sstevel@tonic-gate squeue_profile_stop(void) 14920Sstevel@tonic-gate { 14930Sstevel@tonic-gate #if SQUEUE_PROFILE 14940Sstevel@tonic-gate squeue_profile = B_FALSE; 14950Sstevel@tonic-gate #endif 14960Sstevel@tonic-gate } 14970Sstevel@tonic-gate 14980Sstevel@tonic-gate uintptr_t * 14990Sstevel@tonic-gate squeue_getprivate(squeue_t *sqp, sqprivate_t p) 15000Sstevel@tonic-gate { 
15010Sstevel@tonic-gate ASSERT(p < SQPRIVATE_MAX); 15020Sstevel@tonic-gate 15030Sstevel@tonic-gate return (&sqp->sq_private[p]); 15040Sstevel@tonic-gate } 15050Sstevel@tonic-gate 15060Sstevel@tonic-gate processorid_t 15070Sstevel@tonic-gate squeue_binding(squeue_t *sqp) 15080Sstevel@tonic-gate { 15090Sstevel@tonic-gate return (sqp->sq_bind); 15100Sstevel@tonic-gate } 1511