10Sstevel@tonic-gate /* 20Sstevel@tonic-gate * CDDL HEADER START 30Sstevel@tonic-gate * 40Sstevel@tonic-gate * The contents of this file are subject to the terms of the 52958Sdr146992 * Common Development and Distribution License (the "License"). 62958Sdr146992 * You may not use this file except in compliance with the License. 70Sstevel@tonic-gate * 80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 100Sstevel@tonic-gate * See the License for the specific language governing permissions 110Sstevel@tonic-gate * and limitations under the License. 120Sstevel@tonic-gate * 130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 180Sstevel@tonic-gate * 190Sstevel@tonic-gate * CDDL HEADER END 200Sstevel@tonic-gate */ 210Sstevel@tonic-gate /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 220Sstevel@tonic-gate /* All Rights Reserved */ 230Sstevel@tonic-gate 240Sstevel@tonic-gate /* 25*8752SPeter.Memishian@Sun.COM * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 260Sstevel@tonic-gate * Use is subject to license terms. 
270Sstevel@tonic-gate */ 280Sstevel@tonic-gate 290Sstevel@tonic-gate #include <sys/types.h> 300Sstevel@tonic-gate #include <sys/param.h> 310Sstevel@tonic-gate #include <sys/thread.h> 320Sstevel@tonic-gate #include <sys/sysmacros.h> 330Sstevel@tonic-gate #include <sys/stropts.h> 340Sstevel@tonic-gate #include <sys/stream.h> 350Sstevel@tonic-gate #include <sys/strsubr.h> 360Sstevel@tonic-gate #include <sys/strsun.h> 370Sstevel@tonic-gate #include <sys/conf.h> 380Sstevel@tonic-gate #include <sys/debug.h> 390Sstevel@tonic-gate #include <sys/cmn_err.h> 400Sstevel@tonic-gate #include <sys/kmem.h> 410Sstevel@tonic-gate #include <sys/atomic.h> 420Sstevel@tonic-gate #include <sys/errno.h> 430Sstevel@tonic-gate #include <sys/vtrace.h> 440Sstevel@tonic-gate #include <sys/ftrace.h> 450Sstevel@tonic-gate #include <sys/ontrap.h> 460Sstevel@tonic-gate #include <sys/multidata.h> 470Sstevel@tonic-gate #include <sys/multidata_impl.h> 480Sstevel@tonic-gate #include <sys/sdt.h> 491110Smeem #include <sys/strft.h> 500Sstevel@tonic-gate 510Sstevel@tonic-gate #ifdef DEBUG 520Sstevel@tonic-gate #include <sys/kmem_impl.h> 530Sstevel@tonic-gate #endif 540Sstevel@tonic-gate 550Sstevel@tonic-gate /* 560Sstevel@tonic-gate * This file contains all the STREAMS utility routines that may 570Sstevel@tonic-gate * be used by modules and drivers. 580Sstevel@tonic-gate */ 590Sstevel@tonic-gate 600Sstevel@tonic-gate /* 610Sstevel@tonic-gate * STREAMS message allocator: principles of operation 620Sstevel@tonic-gate * 630Sstevel@tonic-gate * The streams message allocator consists of all the routines that 640Sstevel@tonic-gate * allocate, dup and free streams messages: allocb(), [d]esballoc[a], 650Sstevel@tonic-gate * dupb(), freeb() and freemsg(). What follows is a high-level view 660Sstevel@tonic-gate * of how the allocator works. 670Sstevel@tonic-gate * 680Sstevel@tonic-gate * Every streams message consists of one or more mblks, a dblk, and data. 
690Sstevel@tonic-gate * All mblks for all types of messages come from a common mblk_cache. 700Sstevel@tonic-gate * The dblk and data come in several flavors, depending on how the 710Sstevel@tonic-gate * message is allocated: 720Sstevel@tonic-gate * 730Sstevel@tonic-gate * (1) mblks up to DBLK_MAX_CACHE size are allocated from a collection of 740Sstevel@tonic-gate * fixed-size dblk/data caches. For message sizes that are multiples of 750Sstevel@tonic-gate * PAGESIZE, dblks are allocated separately from the buffer. 760Sstevel@tonic-gate * The associated buffer is allocated by the constructor using kmem_alloc(). 770Sstevel@tonic-gate * For all other message sizes, dblk and its associated data is allocated 780Sstevel@tonic-gate * as a single contiguous chunk of memory. 790Sstevel@tonic-gate * Objects in these caches consist of a dblk plus its associated data. 800Sstevel@tonic-gate * allocb() determines the nearest-size cache by table lookup: 810Sstevel@tonic-gate * the dblk_cache[] array provides the mapping from size to dblk cache. 820Sstevel@tonic-gate * 830Sstevel@tonic-gate * (2) Large messages (size > DBLK_MAX_CACHE) are constructed by 840Sstevel@tonic-gate * kmem_alloc()'ing a buffer for the data and supplying that 850Sstevel@tonic-gate * buffer to gesballoc(), described below. 860Sstevel@tonic-gate * 870Sstevel@tonic-gate * (3) The four flavors of [d]esballoc[a] are all implemented by a 880Sstevel@tonic-gate * common routine, gesballoc() ("generic esballoc"). gesballoc() 890Sstevel@tonic-gate * allocates a dblk from the global dblk_esb_cache and sets db_base, 900Sstevel@tonic-gate * db_lim and db_frtnp to describe the caller-supplied buffer. 910Sstevel@tonic-gate * 920Sstevel@tonic-gate * While there are several routines to allocate messages, there is only 930Sstevel@tonic-gate * one routine to free messages: freeb(). freeb() simply invokes the 940Sstevel@tonic-gate * dblk's free method, dbp->db_free(), which is set at allocation time. 
950Sstevel@tonic-gate * 960Sstevel@tonic-gate * dupb() creates a new reference to a message by allocating a new mblk, 970Sstevel@tonic-gate * incrementing the dblk reference count and setting the dblk's free 980Sstevel@tonic-gate * method to dblk_decref(). The dblk's original free method is retained 990Sstevel@tonic-gate * in db_lastfree. dblk_decref() decrements the reference count on each 1000Sstevel@tonic-gate * freeb(). If this is not the last reference it just frees the mblk; 1010Sstevel@tonic-gate * if this *is* the last reference, it restores db_free to db_lastfree, 1020Sstevel@tonic-gate * sets db_mblk to the current mblk (see below), and invokes db_lastfree. 1030Sstevel@tonic-gate * 1040Sstevel@tonic-gate * The implementation makes aggressive use of kmem object caching for 1050Sstevel@tonic-gate * maximum performance. This makes the code simple and compact, but 1060Sstevel@tonic-gate * also a bit abstruse in some places. The invariants that constitute a 1070Sstevel@tonic-gate * message's constructed state, described below, are more subtle than usual. 1080Sstevel@tonic-gate * 1090Sstevel@tonic-gate * Every dblk has an "attached mblk" as part of its constructed state. 1100Sstevel@tonic-gate * The mblk is allocated by the dblk's constructor and remains attached 1110Sstevel@tonic-gate * until the message is either dup'ed or pulled up. In the dupb() case 1120Sstevel@tonic-gate * the mblk association doesn't matter until the last free, at which time 1130Sstevel@tonic-gate * dblk_decref() attaches the last mblk to the dblk. pullupmsg() affects 1140Sstevel@tonic-gate * the mblk association because it swaps the leading mblks of two messages, 1150Sstevel@tonic-gate * so it is responsible for swapping their db_mblk pointers accordingly. 
1160Sstevel@tonic-gate * From a constructed-state viewpoint it doesn't matter that a dblk's 1170Sstevel@tonic-gate * attached mblk can change while the message is allocated; all that 1180Sstevel@tonic-gate * matters is that the dblk has *some* attached mblk when it's freed. 1190Sstevel@tonic-gate * 1200Sstevel@tonic-gate * The sizes of the allocb() small-message caches are not magical. 1210Sstevel@tonic-gate * They represent a good trade-off between internal and external 1220Sstevel@tonic-gate * fragmentation for current workloads. They should be reevaluated 1230Sstevel@tonic-gate * periodically, especially if allocations larger than DBLK_MAX_CACHE 1240Sstevel@tonic-gate * become common. We use 64-byte alignment so that dblks don't 1250Sstevel@tonic-gate * straddle cache lines unnecessarily. 1260Sstevel@tonic-gate */ 1270Sstevel@tonic-gate #define DBLK_MAX_CACHE 73728 1280Sstevel@tonic-gate #define DBLK_CACHE_ALIGN 64 1290Sstevel@tonic-gate #define DBLK_MIN_SIZE 8 1300Sstevel@tonic-gate #define DBLK_SIZE_SHIFT 3 1310Sstevel@tonic-gate 1320Sstevel@tonic-gate #ifdef _BIG_ENDIAN 1330Sstevel@tonic-gate #define DBLK_RTFU_SHIFT(field) \ 1340Sstevel@tonic-gate (8 * (&((dblk_t *)0)->db_struioflag - &((dblk_t *)0)->field)) 1350Sstevel@tonic-gate #else 1360Sstevel@tonic-gate #define DBLK_RTFU_SHIFT(field) \ 1370Sstevel@tonic-gate (8 * (&((dblk_t *)0)->field - &((dblk_t *)0)->db_ref)) 1380Sstevel@tonic-gate #endif 1390Sstevel@tonic-gate 1400Sstevel@tonic-gate #define DBLK_RTFU(ref, type, flags, uioflag) \ 1410Sstevel@tonic-gate (((ref) << DBLK_RTFU_SHIFT(db_ref)) | \ 1420Sstevel@tonic-gate ((type) << DBLK_RTFU_SHIFT(db_type)) | \ 1430Sstevel@tonic-gate (((flags) | (ref - 1)) << DBLK_RTFU_SHIFT(db_flags)) | \ 1440Sstevel@tonic-gate ((uioflag) << DBLK_RTFU_SHIFT(db_struioflag))) 1450Sstevel@tonic-gate #define DBLK_RTFU_REF_MASK (DBLK_REFMAX << DBLK_RTFU_SHIFT(db_ref)) 1460Sstevel@tonic-gate #define DBLK_RTFU_WORD(dbp) (*((uint32_t *)&(dbp)->db_ref)) 1470Sstevel@tonic-gate 
#define MBLK_BAND_FLAG_WORD(mp) (*((uint32_t *)&(mp)->b_band)) 1480Sstevel@tonic-gate 1490Sstevel@tonic-gate static size_t dblk_sizes[] = { 1500Sstevel@tonic-gate #ifdef _LP64 1516712Stomee 16, 80, 144, 208, 272, 336, 528, 1040, 1488, 1936, 2576, 3856, 1526712Stomee 8192, 12048, 16384, 20240, 24576, 28432, 32768, 36624, 1536712Stomee 40960, 44816, 49152, 53008, 57344, 61200, 65536, 69392, 1540Sstevel@tonic-gate #else 1556712Stomee 64, 128, 320, 576, 1088, 1536, 1984, 2624, 3904, 1566712Stomee 8192, 12096, 16384, 20288, 24576, 28480, 32768, 36672, 1576712Stomee 40960, 44864, 49152, 53056, 57344, 61248, 65536, 69440, 1580Sstevel@tonic-gate #endif 1590Sstevel@tonic-gate DBLK_MAX_CACHE, 0 1600Sstevel@tonic-gate }; 1610Sstevel@tonic-gate 1620Sstevel@tonic-gate static struct kmem_cache *dblk_cache[DBLK_MAX_CACHE / DBLK_MIN_SIZE]; 1630Sstevel@tonic-gate static struct kmem_cache *mblk_cache; 1640Sstevel@tonic-gate static struct kmem_cache *dblk_esb_cache; 1650Sstevel@tonic-gate static struct kmem_cache *fthdr_cache; 1660Sstevel@tonic-gate static struct kmem_cache *ftblk_cache; 1670Sstevel@tonic-gate 1680Sstevel@tonic-gate static void dblk_lastfree(mblk_t *mp, dblk_t *dbp); 1690Sstevel@tonic-gate static mblk_t *allocb_oversize(size_t size, int flags); 1700Sstevel@tonic-gate static int allocb_tryhard_fails; 1710Sstevel@tonic-gate static void frnop_func(void *arg); 1720Sstevel@tonic-gate frtn_t frnop = { frnop_func }; 1730Sstevel@tonic-gate static void bcache_dblk_lastfree(mblk_t *mp, dblk_t *dbp); 1740Sstevel@tonic-gate 1750Sstevel@tonic-gate static boolean_t rwnext_enter(queue_t *qp); 1760Sstevel@tonic-gate static void rwnext_exit(queue_t *qp); 1770Sstevel@tonic-gate 1780Sstevel@tonic-gate /* 1790Sstevel@tonic-gate * Patchable mblk/dblk kmem_cache flags. 
1800Sstevel@tonic-gate */ 1810Sstevel@tonic-gate int dblk_kmem_flags = 0; 1820Sstevel@tonic-gate int mblk_kmem_flags = 0; 1830Sstevel@tonic-gate 1840Sstevel@tonic-gate static int 1850Sstevel@tonic-gate dblk_constructor(void *buf, void *cdrarg, int kmflags) 1860Sstevel@tonic-gate { 1870Sstevel@tonic-gate dblk_t *dbp = buf; 1880Sstevel@tonic-gate ssize_t msg_size = (ssize_t)cdrarg; 1890Sstevel@tonic-gate size_t index; 1900Sstevel@tonic-gate 1910Sstevel@tonic-gate ASSERT(msg_size != 0); 1920Sstevel@tonic-gate 1930Sstevel@tonic-gate index = (msg_size - 1) >> DBLK_SIZE_SHIFT; 1940Sstevel@tonic-gate 195577Smeem ASSERT(index < (DBLK_MAX_CACHE >> DBLK_SIZE_SHIFT)); 1960Sstevel@tonic-gate 1970Sstevel@tonic-gate if ((dbp->db_mblk = kmem_cache_alloc(mblk_cache, kmflags)) == NULL) 1980Sstevel@tonic-gate return (-1); 1990Sstevel@tonic-gate if ((msg_size & PAGEOFFSET) == 0) { 2000Sstevel@tonic-gate dbp->db_base = kmem_alloc(msg_size, kmflags); 2010Sstevel@tonic-gate if (dbp->db_base == NULL) { 2020Sstevel@tonic-gate kmem_cache_free(mblk_cache, dbp->db_mblk); 2030Sstevel@tonic-gate return (-1); 2040Sstevel@tonic-gate } 2050Sstevel@tonic-gate } else { 2060Sstevel@tonic-gate dbp->db_base = (unsigned char *)&dbp[1]; 2070Sstevel@tonic-gate } 2080Sstevel@tonic-gate 2090Sstevel@tonic-gate dbp->db_mblk->b_datap = dbp; 2100Sstevel@tonic-gate dbp->db_cache = dblk_cache[index]; 2110Sstevel@tonic-gate dbp->db_lim = dbp->db_base + msg_size; 2120Sstevel@tonic-gate dbp->db_free = dbp->db_lastfree = dblk_lastfree; 2130Sstevel@tonic-gate dbp->db_frtnp = NULL; 2140Sstevel@tonic-gate dbp->db_fthdr = NULL; 2150Sstevel@tonic-gate dbp->db_credp = NULL; 2160Sstevel@tonic-gate dbp->db_cpid = -1; 2170Sstevel@tonic-gate dbp->db_struioflag = 0; 2180Sstevel@tonic-gate dbp->db_struioun.cksum.flags = 0; 2190Sstevel@tonic-gate return (0); 2200Sstevel@tonic-gate } 2210Sstevel@tonic-gate 2220Sstevel@tonic-gate /*ARGSUSED*/ 2230Sstevel@tonic-gate static int 2240Sstevel@tonic-gate dblk_esb_constructor(void *buf, 
void *cdrarg, int kmflags) 2250Sstevel@tonic-gate { 2260Sstevel@tonic-gate dblk_t *dbp = buf; 2270Sstevel@tonic-gate 2280Sstevel@tonic-gate if ((dbp->db_mblk = kmem_cache_alloc(mblk_cache, kmflags)) == NULL) 2290Sstevel@tonic-gate return (-1); 2300Sstevel@tonic-gate dbp->db_mblk->b_datap = dbp; 2310Sstevel@tonic-gate dbp->db_cache = dblk_esb_cache; 2320Sstevel@tonic-gate dbp->db_fthdr = NULL; 2330Sstevel@tonic-gate dbp->db_credp = NULL; 2340Sstevel@tonic-gate dbp->db_cpid = -1; 2350Sstevel@tonic-gate dbp->db_struioflag = 0; 2360Sstevel@tonic-gate dbp->db_struioun.cksum.flags = 0; 2370Sstevel@tonic-gate return (0); 2380Sstevel@tonic-gate } 2390Sstevel@tonic-gate 2400Sstevel@tonic-gate static int 2410Sstevel@tonic-gate bcache_dblk_constructor(void *buf, void *cdrarg, int kmflags) 2420Sstevel@tonic-gate { 2430Sstevel@tonic-gate dblk_t *dbp = buf; 244*8752SPeter.Memishian@Sun.COM bcache_t *bcp = cdrarg; 2450Sstevel@tonic-gate 2460Sstevel@tonic-gate if ((dbp->db_mblk = kmem_cache_alloc(mblk_cache, kmflags)) == NULL) 2470Sstevel@tonic-gate return (-1); 2480Sstevel@tonic-gate 249*8752SPeter.Memishian@Sun.COM dbp->db_base = kmem_cache_alloc(bcp->buffer_cache, kmflags); 250*8752SPeter.Memishian@Sun.COM if (dbp->db_base == NULL) { 2510Sstevel@tonic-gate kmem_cache_free(mblk_cache, dbp->db_mblk); 2520Sstevel@tonic-gate return (-1); 2530Sstevel@tonic-gate } 2540Sstevel@tonic-gate 2550Sstevel@tonic-gate dbp->db_mblk->b_datap = dbp; 2560Sstevel@tonic-gate dbp->db_cache = (void *)bcp; 2570Sstevel@tonic-gate dbp->db_lim = dbp->db_base + bcp->size; 2580Sstevel@tonic-gate dbp->db_free = dbp->db_lastfree = bcache_dblk_lastfree; 2590Sstevel@tonic-gate dbp->db_frtnp = NULL; 2600Sstevel@tonic-gate dbp->db_fthdr = NULL; 2610Sstevel@tonic-gate dbp->db_credp = NULL; 2620Sstevel@tonic-gate dbp->db_cpid = -1; 2630Sstevel@tonic-gate dbp->db_struioflag = 0; 2640Sstevel@tonic-gate dbp->db_struioun.cksum.flags = 0; 2650Sstevel@tonic-gate return (0); 2660Sstevel@tonic-gate } 
2670Sstevel@tonic-gate 2680Sstevel@tonic-gate /*ARGSUSED*/ 2690Sstevel@tonic-gate static void 2700Sstevel@tonic-gate dblk_destructor(void *buf, void *cdrarg) 2710Sstevel@tonic-gate { 2720Sstevel@tonic-gate dblk_t *dbp = buf; 2730Sstevel@tonic-gate ssize_t msg_size = (ssize_t)cdrarg; 2740Sstevel@tonic-gate 2750Sstevel@tonic-gate ASSERT(dbp->db_mblk->b_datap == dbp); 2760Sstevel@tonic-gate ASSERT(msg_size != 0); 2770Sstevel@tonic-gate ASSERT(dbp->db_struioflag == 0); 2780Sstevel@tonic-gate ASSERT(dbp->db_struioun.cksum.flags == 0); 2790Sstevel@tonic-gate 2800Sstevel@tonic-gate if ((msg_size & PAGEOFFSET) == 0) { 2810Sstevel@tonic-gate kmem_free(dbp->db_base, msg_size); 2820Sstevel@tonic-gate } 2830Sstevel@tonic-gate 2840Sstevel@tonic-gate kmem_cache_free(mblk_cache, dbp->db_mblk); 2850Sstevel@tonic-gate } 2860Sstevel@tonic-gate 2870Sstevel@tonic-gate static void 2880Sstevel@tonic-gate bcache_dblk_destructor(void *buf, void *cdrarg) 2890Sstevel@tonic-gate { 2900Sstevel@tonic-gate dblk_t *dbp = buf; 291*8752SPeter.Memishian@Sun.COM bcache_t *bcp = cdrarg; 2920Sstevel@tonic-gate 2930Sstevel@tonic-gate kmem_cache_free(bcp->buffer_cache, dbp->db_base); 2940Sstevel@tonic-gate 2950Sstevel@tonic-gate ASSERT(dbp->db_mblk->b_datap == dbp); 2960Sstevel@tonic-gate ASSERT(dbp->db_struioflag == 0); 2970Sstevel@tonic-gate ASSERT(dbp->db_struioun.cksum.flags == 0); 2980Sstevel@tonic-gate 2990Sstevel@tonic-gate kmem_cache_free(mblk_cache, dbp->db_mblk); 3000Sstevel@tonic-gate } 3010Sstevel@tonic-gate 302*8752SPeter.Memishian@Sun.COM /* ARGSUSED */ 303*8752SPeter.Memishian@Sun.COM static int 304*8752SPeter.Memishian@Sun.COM ftblk_constructor(void *buf, void *cdrarg, int kmflags) 305*8752SPeter.Memishian@Sun.COM { 306*8752SPeter.Memishian@Sun.COM ftblk_t *fbp = buf; 307*8752SPeter.Memishian@Sun.COM int i; 308*8752SPeter.Memishian@Sun.COM 309*8752SPeter.Memishian@Sun.COM bzero(fbp, sizeof (ftblk_t)); 310*8752SPeter.Memishian@Sun.COM if (str_ftstack != 0) { 
311*8752SPeter.Memishian@Sun.COM for (i = 0; i < FTBLK_EVNTS; i++) 312*8752SPeter.Memishian@Sun.COM fbp->ev[i].stk = kmem_alloc(sizeof (ftstk_t), kmflags); 313*8752SPeter.Memishian@Sun.COM } 314*8752SPeter.Memishian@Sun.COM 315*8752SPeter.Memishian@Sun.COM return (0); 316*8752SPeter.Memishian@Sun.COM } 317*8752SPeter.Memishian@Sun.COM 318*8752SPeter.Memishian@Sun.COM /* ARGSUSED */ 319*8752SPeter.Memishian@Sun.COM static void 320*8752SPeter.Memishian@Sun.COM ftblk_destructor(void *buf, void *cdrarg) 321*8752SPeter.Memishian@Sun.COM { 322*8752SPeter.Memishian@Sun.COM ftblk_t *fbp = buf; 323*8752SPeter.Memishian@Sun.COM int i; 324*8752SPeter.Memishian@Sun.COM 325*8752SPeter.Memishian@Sun.COM if (str_ftstack != 0) { 326*8752SPeter.Memishian@Sun.COM for (i = 0; i < FTBLK_EVNTS; i++) { 327*8752SPeter.Memishian@Sun.COM if (fbp->ev[i].stk != NULL) { 328*8752SPeter.Memishian@Sun.COM kmem_free(fbp->ev[i].stk, sizeof (ftstk_t)); 329*8752SPeter.Memishian@Sun.COM fbp->ev[i].stk = NULL; 330*8752SPeter.Memishian@Sun.COM } 331*8752SPeter.Memishian@Sun.COM } 332*8752SPeter.Memishian@Sun.COM } 333*8752SPeter.Memishian@Sun.COM } 334*8752SPeter.Memishian@Sun.COM 335*8752SPeter.Memishian@Sun.COM static int 336*8752SPeter.Memishian@Sun.COM fthdr_constructor(void *buf, void *cdrarg, int kmflags) 337*8752SPeter.Memishian@Sun.COM { 338*8752SPeter.Memishian@Sun.COM fthdr_t *fhp = buf; 339*8752SPeter.Memishian@Sun.COM 340*8752SPeter.Memishian@Sun.COM return (ftblk_constructor(&fhp->first, cdrarg, kmflags)); 341*8752SPeter.Memishian@Sun.COM } 342*8752SPeter.Memishian@Sun.COM 343*8752SPeter.Memishian@Sun.COM static void 344*8752SPeter.Memishian@Sun.COM fthdr_destructor(void *buf, void *cdrarg) 345*8752SPeter.Memishian@Sun.COM { 346*8752SPeter.Memishian@Sun.COM fthdr_t *fhp = buf; 347*8752SPeter.Memishian@Sun.COM 348*8752SPeter.Memishian@Sun.COM ftblk_destructor(&fhp->first, cdrarg); 349*8752SPeter.Memishian@Sun.COM } 350*8752SPeter.Memishian@Sun.COM 3510Sstevel@tonic-gate void 
3520Sstevel@tonic-gate streams_msg_init(void) 3530Sstevel@tonic-gate { 3540Sstevel@tonic-gate char name[40]; 3550Sstevel@tonic-gate size_t size; 3560Sstevel@tonic-gate size_t lastsize = DBLK_MIN_SIZE; 3570Sstevel@tonic-gate size_t *sizep; 3580Sstevel@tonic-gate struct kmem_cache *cp; 3590Sstevel@tonic-gate size_t tot_size; 3600Sstevel@tonic-gate int offset; 3610Sstevel@tonic-gate 362*8752SPeter.Memishian@Sun.COM mblk_cache = kmem_cache_create("streams_mblk", sizeof (mblk_t), 32, 363*8752SPeter.Memishian@Sun.COM NULL, NULL, NULL, NULL, NULL, mblk_kmem_flags); 3640Sstevel@tonic-gate 3650Sstevel@tonic-gate for (sizep = dblk_sizes; (size = *sizep) != 0; sizep++) { 3660Sstevel@tonic-gate 3670Sstevel@tonic-gate if ((offset = (size & PAGEOFFSET)) != 0) { 3680Sstevel@tonic-gate /* 3690Sstevel@tonic-gate * We are in the middle of a page, dblk should 3700Sstevel@tonic-gate * be allocated on the same page 3710Sstevel@tonic-gate */ 3720Sstevel@tonic-gate tot_size = size + sizeof (dblk_t); 3730Sstevel@tonic-gate ASSERT((offset + sizeof (dblk_t) + sizeof (kmem_slab_t)) 3746707Sbrutus < PAGESIZE); 3750Sstevel@tonic-gate ASSERT((tot_size & (DBLK_CACHE_ALIGN - 1)) == 0); 3760Sstevel@tonic-gate 3770Sstevel@tonic-gate } else { 3780Sstevel@tonic-gate 3790Sstevel@tonic-gate /* 3800Sstevel@tonic-gate * buf size is multiple of page size, dblk and 3810Sstevel@tonic-gate * buffer are allocated separately. 
3820Sstevel@tonic-gate */ 3830Sstevel@tonic-gate 3840Sstevel@tonic-gate ASSERT((size & (DBLK_CACHE_ALIGN - 1)) == 0); 3850Sstevel@tonic-gate tot_size = sizeof (dblk_t); 3860Sstevel@tonic-gate } 3870Sstevel@tonic-gate 3880Sstevel@tonic-gate (void) sprintf(name, "streams_dblk_%ld", size); 389*8752SPeter.Memishian@Sun.COM cp = kmem_cache_create(name, tot_size, DBLK_CACHE_ALIGN, 390*8752SPeter.Memishian@Sun.COM dblk_constructor, dblk_destructor, NULL, (void *)(size), 391*8752SPeter.Memishian@Sun.COM NULL, dblk_kmem_flags); 3920Sstevel@tonic-gate 3930Sstevel@tonic-gate while (lastsize <= size) { 3940Sstevel@tonic-gate dblk_cache[(lastsize - 1) >> DBLK_SIZE_SHIFT] = cp; 3950Sstevel@tonic-gate lastsize += DBLK_MIN_SIZE; 3960Sstevel@tonic-gate } 3970Sstevel@tonic-gate } 3980Sstevel@tonic-gate 399*8752SPeter.Memishian@Sun.COM dblk_esb_cache = kmem_cache_create("streams_dblk_esb", sizeof (dblk_t), 400*8752SPeter.Memishian@Sun.COM DBLK_CACHE_ALIGN, dblk_esb_constructor, dblk_destructor, NULL, 401*8752SPeter.Memishian@Sun.COM (void *)sizeof (dblk_t), NULL, dblk_kmem_flags); 402*8752SPeter.Memishian@Sun.COM fthdr_cache = kmem_cache_create("streams_fthdr", sizeof (fthdr_t), 32, 403*8752SPeter.Memishian@Sun.COM fthdr_constructor, fthdr_destructor, NULL, NULL, NULL, 0); 404*8752SPeter.Memishian@Sun.COM ftblk_cache = kmem_cache_create("streams_ftblk", sizeof (ftblk_t), 32, 405*8752SPeter.Memishian@Sun.COM ftblk_constructor, ftblk_destructor, NULL, NULL, NULL, 0); 4060Sstevel@tonic-gate 4070Sstevel@tonic-gate /* Initialize Multidata caches */ 4080Sstevel@tonic-gate mmd_init(); 4093932Sss146032 4103932Sss146032 /* initialize throttling queue for esballoc */ 4113932Sss146032 esballoc_queue_init(); 4120Sstevel@tonic-gate } 4130Sstevel@tonic-gate 4140Sstevel@tonic-gate /*ARGSUSED*/ 4150Sstevel@tonic-gate mblk_t * 4160Sstevel@tonic-gate allocb(size_t size, uint_t pri) 4170Sstevel@tonic-gate { 4180Sstevel@tonic-gate dblk_t *dbp; 4190Sstevel@tonic-gate mblk_t *mp; 4200Sstevel@tonic-gate 
size_t index; 4210Sstevel@tonic-gate 4220Sstevel@tonic-gate index = (size - 1) >> DBLK_SIZE_SHIFT; 4230Sstevel@tonic-gate 4240Sstevel@tonic-gate if (index >= (DBLK_MAX_CACHE >> DBLK_SIZE_SHIFT)) { 4250Sstevel@tonic-gate if (size != 0) { 4260Sstevel@tonic-gate mp = allocb_oversize(size, KM_NOSLEEP); 4270Sstevel@tonic-gate goto out; 4280Sstevel@tonic-gate } 4290Sstevel@tonic-gate index = 0; 4300Sstevel@tonic-gate } 4310Sstevel@tonic-gate 4320Sstevel@tonic-gate if ((dbp = kmem_cache_alloc(dblk_cache[index], KM_NOSLEEP)) == NULL) { 4330Sstevel@tonic-gate mp = NULL; 4340Sstevel@tonic-gate goto out; 4350Sstevel@tonic-gate } 4360Sstevel@tonic-gate 4370Sstevel@tonic-gate mp = dbp->db_mblk; 4380Sstevel@tonic-gate DBLK_RTFU_WORD(dbp) = DBLK_RTFU(1, M_DATA, 0, 0); 4390Sstevel@tonic-gate mp->b_next = mp->b_prev = mp->b_cont = NULL; 4400Sstevel@tonic-gate mp->b_rptr = mp->b_wptr = dbp->db_base; 4410Sstevel@tonic-gate mp->b_queue = NULL; 4420Sstevel@tonic-gate MBLK_BAND_FLAG_WORD(mp) = 0; 4430Sstevel@tonic-gate STR_FTALLOC(&dbp->db_fthdr, FTEV_ALLOCB, size); 4440Sstevel@tonic-gate out: 4450Sstevel@tonic-gate FTRACE_1("allocb(): mp=0x%p", (uintptr_t)mp); 4460Sstevel@tonic-gate 4470Sstevel@tonic-gate return (mp); 4480Sstevel@tonic-gate } 4490Sstevel@tonic-gate 4500Sstevel@tonic-gate mblk_t * 4510Sstevel@tonic-gate allocb_tmpl(size_t size, const mblk_t *tmpl) 4520Sstevel@tonic-gate { 4530Sstevel@tonic-gate mblk_t *mp = allocb(size, 0); 4540Sstevel@tonic-gate 4550Sstevel@tonic-gate if (mp != NULL) { 4560Sstevel@tonic-gate cred_t *cr = DB_CRED(tmpl); 4570Sstevel@tonic-gate if (cr != NULL) 4580Sstevel@tonic-gate crhold(mp->b_datap->db_credp = cr); 4590Sstevel@tonic-gate DB_CPID(mp) = DB_CPID(tmpl); 4600Sstevel@tonic-gate DB_TYPE(mp) = DB_TYPE(tmpl); 4610Sstevel@tonic-gate } 4620Sstevel@tonic-gate return (mp); 4630Sstevel@tonic-gate } 4640Sstevel@tonic-gate 4650Sstevel@tonic-gate mblk_t * 4660Sstevel@tonic-gate allocb_cred(size_t size, cred_t *cr) 4670Sstevel@tonic-gate { 
4680Sstevel@tonic-gate mblk_t *mp = allocb(size, 0); 4690Sstevel@tonic-gate 4700Sstevel@tonic-gate if (mp != NULL && cr != NULL) 4710Sstevel@tonic-gate crhold(mp->b_datap->db_credp = cr); 4720Sstevel@tonic-gate 4730Sstevel@tonic-gate return (mp); 4740Sstevel@tonic-gate } 4750Sstevel@tonic-gate 4760Sstevel@tonic-gate mblk_t * 4770Sstevel@tonic-gate allocb_cred_wait(size_t size, uint_t flags, int *error, cred_t *cr) 4780Sstevel@tonic-gate { 4790Sstevel@tonic-gate mblk_t *mp = allocb_wait(size, 0, flags, error); 4800Sstevel@tonic-gate 4810Sstevel@tonic-gate if (mp != NULL && cr != NULL) 4820Sstevel@tonic-gate crhold(mp->b_datap->db_credp = cr); 4830Sstevel@tonic-gate 4840Sstevel@tonic-gate return (mp); 4850Sstevel@tonic-gate } 4860Sstevel@tonic-gate 4870Sstevel@tonic-gate void 4880Sstevel@tonic-gate freeb(mblk_t *mp) 4890Sstevel@tonic-gate { 4900Sstevel@tonic-gate dblk_t *dbp = mp->b_datap; 4910Sstevel@tonic-gate 4920Sstevel@tonic-gate ASSERT(dbp->db_ref > 0); 4930Sstevel@tonic-gate ASSERT(mp->b_next == NULL && mp->b_prev == NULL); 4940Sstevel@tonic-gate FTRACE_1("freeb(): mp=0x%lx", (uintptr_t)mp); 4950Sstevel@tonic-gate 4960Sstevel@tonic-gate STR_FTEVENT_MBLK(mp, caller(), FTEV_FREEB, dbp->db_ref); 4970Sstevel@tonic-gate 4980Sstevel@tonic-gate dbp->db_free(mp, dbp); 4990Sstevel@tonic-gate } 5000Sstevel@tonic-gate 5010Sstevel@tonic-gate void 5020Sstevel@tonic-gate freemsg(mblk_t *mp) 5030Sstevel@tonic-gate { 5040Sstevel@tonic-gate FTRACE_1("freemsg(): mp=0x%lx", (uintptr_t)mp); 5050Sstevel@tonic-gate while (mp) { 5060Sstevel@tonic-gate dblk_t *dbp = mp->b_datap; 5070Sstevel@tonic-gate mblk_t *mp_cont = mp->b_cont; 5080Sstevel@tonic-gate 5090Sstevel@tonic-gate ASSERT(dbp->db_ref > 0); 5100Sstevel@tonic-gate ASSERT(mp->b_next == NULL && mp->b_prev == NULL); 5110Sstevel@tonic-gate 5120Sstevel@tonic-gate STR_FTEVENT_MBLK(mp, caller(), FTEV_FREEB, dbp->db_ref); 5130Sstevel@tonic-gate 5140Sstevel@tonic-gate dbp->db_free(mp, dbp); 5150Sstevel@tonic-gate mp = mp_cont; 
5160Sstevel@tonic-gate } 5170Sstevel@tonic-gate } 5180Sstevel@tonic-gate 5190Sstevel@tonic-gate /* 5200Sstevel@tonic-gate * Reallocate a block for another use. Try hard to use the old block. 5210Sstevel@tonic-gate * If the old data is wanted (copy), leave b_wptr at the end of the data, 5220Sstevel@tonic-gate * otherwise return b_wptr = b_rptr. 5230Sstevel@tonic-gate * 5240Sstevel@tonic-gate * This routine is private and unstable. 5250Sstevel@tonic-gate */ 5260Sstevel@tonic-gate mblk_t * 5270Sstevel@tonic-gate reallocb(mblk_t *mp, size_t size, uint_t copy) 5280Sstevel@tonic-gate { 5290Sstevel@tonic-gate mblk_t *mp1; 5300Sstevel@tonic-gate unsigned char *old_rptr; 5310Sstevel@tonic-gate ptrdiff_t cur_size; 5320Sstevel@tonic-gate 5330Sstevel@tonic-gate if (mp == NULL) 5340Sstevel@tonic-gate return (allocb(size, BPRI_HI)); 5350Sstevel@tonic-gate 5360Sstevel@tonic-gate cur_size = mp->b_wptr - mp->b_rptr; 5370Sstevel@tonic-gate old_rptr = mp->b_rptr; 5380Sstevel@tonic-gate 5390Sstevel@tonic-gate ASSERT(mp->b_datap->db_ref != 0); 5400Sstevel@tonic-gate 5410Sstevel@tonic-gate if (mp->b_datap->db_ref == 1 && MBLKSIZE(mp) >= size) { 5420Sstevel@tonic-gate /* 5430Sstevel@tonic-gate * If the data is wanted and it will fit where it is, no 5440Sstevel@tonic-gate * work is required. 5450Sstevel@tonic-gate */ 5460Sstevel@tonic-gate if (copy && mp->b_datap->db_lim - mp->b_rptr >= size) 5470Sstevel@tonic-gate return (mp); 5480Sstevel@tonic-gate 5490Sstevel@tonic-gate mp->b_wptr = mp->b_rptr = mp->b_datap->db_base; 5500Sstevel@tonic-gate mp1 = mp; 5510Sstevel@tonic-gate } else if ((mp1 = allocb_tmpl(size, mp)) != NULL) { 5520Sstevel@tonic-gate /* XXX other mp state could be copied too, db_flags ... ? 
*/ 5530Sstevel@tonic-gate mp1->b_cont = mp->b_cont; 5540Sstevel@tonic-gate } else { 5550Sstevel@tonic-gate return (NULL); 5560Sstevel@tonic-gate } 5570Sstevel@tonic-gate 5580Sstevel@tonic-gate if (copy) { 5590Sstevel@tonic-gate bcopy(old_rptr, mp1->b_rptr, cur_size); 5600Sstevel@tonic-gate mp1->b_wptr = mp1->b_rptr + cur_size; 5610Sstevel@tonic-gate } 5620Sstevel@tonic-gate 5630Sstevel@tonic-gate if (mp != mp1) 5640Sstevel@tonic-gate freeb(mp); 5650Sstevel@tonic-gate 5660Sstevel@tonic-gate return (mp1); 5670Sstevel@tonic-gate } 5680Sstevel@tonic-gate 5690Sstevel@tonic-gate static void 5700Sstevel@tonic-gate dblk_lastfree(mblk_t *mp, dblk_t *dbp) 5710Sstevel@tonic-gate { 5720Sstevel@tonic-gate ASSERT(dbp->db_mblk == mp); 5730Sstevel@tonic-gate if (dbp->db_fthdr != NULL) 5740Sstevel@tonic-gate str_ftfree(dbp); 5750Sstevel@tonic-gate 5760Sstevel@tonic-gate /* set credp and projid to be 'unspecified' before returning to cache */ 5770Sstevel@tonic-gate if (dbp->db_credp != NULL) { 5780Sstevel@tonic-gate crfree(dbp->db_credp); 5790Sstevel@tonic-gate dbp->db_credp = NULL; 5800Sstevel@tonic-gate } 5810Sstevel@tonic-gate dbp->db_cpid = -1; 5820Sstevel@tonic-gate 5830Sstevel@tonic-gate /* Reset the struioflag and the checksum flag fields */ 5840Sstevel@tonic-gate dbp->db_struioflag = 0; 5850Sstevel@tonic-gate dbp->db_struioun.cksum.flags = 0; 5860Sstevel@tonic-gate 5876707Sbrutus /* and the COOKED and/or UIOA flag(s) */ 5886707Sbrutus dbp->db_flags &= ~(DBLK_COOKED | DBLK_UIOA); 589898Skais 5900Sstevel@tonic-gate kmem_cache_free(dbp->db_cache, dbp); 5910Sstevel@tonic-gate } 5920Sstevel@tonic-gate 5930Sstevel@tonic-gate static void 5940Sstevel@tonic-gate dblk_decref(mblk_t *mp, dblk_t *dbp) 5950Sstevel@tonic-gate { 5960Sstevel@tonic-gate if (dbp->db_ref != 1) { 5970Sstevel@tonic-gate uint32_t rtfu = atomic_add_32_nv(&DBLK_RTFU_WORD(dbp), 5980Sstevel@tonic-gate -(1 << DBLK_RTFU_SHIFT(db_ref))); 5990Sstevel@tonic-gate /* 6000Sstevel@tonic-gate * atomic_add_32_nv() just 
decremented db_ref, so we no longer 6010Sstevel@tonic-gate * have a reference to the dblk, which means another thread 6020Sstevel@tonic-gate * could free it. Therefore we cannot examine the dblk to 6030Sstevel@tonic-gate * determine whether ours was the last reference. Instead, 6040Sstevel@tonic-gate * we extract the new and minimum reference counts from rtfu. 6050Sstevel@tonic-gate * Note that all we're really saying is "if (ref != refmin)". 6060Sstevel@tonic-gate */ 6070Sstevel@tonic-gate if (((rtfu >> DBLK_RTFU_SHIFT(db_ref)) & DBLK_REFMAX) != 6080Sstevel@tonic-gate ((rtfu >> DBLK_RTFU_SHIFT(db_flags)) & DBLK_REFMIN)) { 6090Sstevel@tonic-gate kmem_cache_free(mblk_cache, mp); 6100Sstevel@tonic-gate return; 6110Sstevel@tonic-gate } 6120Sstevel@tonic-gate } 6130Sstevel@tonic-gate dbp->db_mblk = mp; 6140Sstevel@tonic-gate dbp->db_free = dbp->db_lastfree; 6150Sstevel@tonic-gate dbp->db_lastfree(mp, dbp); 6160Sstevel@tonic-gate } 6170Sstevel@tonic-gate 6180Sstevel@tonic-gate mblk_t * 6190Sstevel@tonic-gate dupb(mblk_t *mp) 6200Sstevel@tonic-gate { 6210Sstevel@tonic-gate dblk_t *dbp = mp->b_datap; 6220Sstevel@tonic-gate mblk_t *new_mp; 6230Sstevel@tonic-gate uint32_t oldrtfu, newrtfu; 6240Sstevel@tonic-gate 6250Sstevel@tonic-gate if ((new_mp = kmem_cache_alloc(mblk_cache, KM_NOSLEEP)) == NULL) 6260Sstevel@tonic-gate goto out; 6270Sstevel@tonic-gate 6280Sstevel@tonic-gate new_mp->b_next = new_mp->b_prev = new_mp->b_cont = NULL; 6290Sstevel@tonic-gate new_mp->b_rptr = mp->b_rptr; 6300Sstevel@tonic-gate new_mp->b_wptr = mp->b_wptr; 6310Sstevel@tonic-gate new_mp->b_datap = dbp; 6320Sstevel@tonic-gate new_mp->b_queue = NULL; 6330Sstevel@tonic-gate MBLK_BAND_FLAG_WORD(new_mp) = MBLK_BAND_FLAG_WORD(mp); 6340Sstevel@tonic-gate 6350Sstevel@tonic-gate STR_FTEVENT_MBLK(mp, caller(), FTEV_DUPB, dbp->db_ref); 6360Sstevel@tonic-gate 6373163Sgeorges dbp->db_free = dblk_decref; 6380Sstevel@tonic-gate do { 6390Sstevel@tonic-gate ASSERT(dbp->db_ref > 0); 6400Sstevel@tonic-gate oldrtfu = 
DBLK_RTFU_WORD(dbp); 6410Sstevel@tonic-gate newrtfu = oldrtfu + (1 << DBLK_RTFU_SHIFT(db_ref)); 6420Sstevel@tonic-gate /* 6430Sstevel@tonic-gate * If db_ref is maxed out we can't dup this message anymore. 6440Sstevel@tonic-gate */ 6450Sstevel@tonic-gate if ((oldrtfu & DBLK_RTFU_REF_MASK) == DBLK_RTFU_REF_MASK) { 6460Sstevel@tonic-gate kmem_cache_free(mblk_cache, new_mp); 6470Sstevel@tonic-gate new_mp = NULL; 6480Sstevel@tonic-gate goto out; 6490Sstevel@tonic-gate } 6500Sstevel@tonic-gate } while (cas32(&DBLK_RTFU_WORD(dbp), oldrtfu, newrtfu) != oldrtfu); 6510Sstevel@tonic-gate 6520Sstevel@tonic-gate out: 6530Sstevel@tonic-gate FTRACE_1("dupb(): new_mp=0x%lx", (uintptr_t)new_mp); 6540Sstevel@tonic-gate return (new_mp); 6550Sstevel@tonic-gate } 6560Sstevel@tonic-gate 6570Sstevel@tonic-gate static void 6580Sstevel@tonic-gate dblk_lastfree_desb(mblk_t *mp, dblk_t *dbp) 6590Sstevel@tonic-gate { 6600Sstevel@tonic-gate frtn_t *frp = dbp->db_frtnp; 6610Sstevel@tonic-gate 6620Sstevel@tonic-gate ASSERT(dbp->db_mblk == mp); 6630Sstevel@tonic-gate frp->free_func(frp->free_arg); 6640Sstevel@tonic-gate if (dbp->db_fthdr != NULL) 6650Sstevel@tonic-gate str_ftfree(dbp); 6660Sstevel@tonic-gate 6670Sstevel@tonic-gate /* set credp and projid to be 'unspecified' before returning to cache */ 6680Sstevel@tonic-gate if (dbp->db_credp != NULL) { 6690Sstevel@tonic-gate crfree(dbp->db_credp); 6700Sstevel@tonic-gate dbp->db_credp = NULL; 6710Sstevel@tonic-gate } 6720Sstevel@tonic-gate dbp->db_cpid = -1; 6730Sstevel@tonic-gate dbp->db_struioflag = 0; 6740Sstevel@tonic-gate dbp->db_struioun.cksum.flags = 0; 6750Sstevel@tonic-gate 6760Sstevel@tonic-gate kmem_cache_free(dbp->db_cache, dbp); 6770Sstevel@tonic-gate } 6780Sstevel@tonic-gate 6790Sstevel@tonic-gate /*ARGSUSED*/ 6800Sstevel@tonic-gate static void 6810Sstevel@tonic-gate frnop_func(void *arg) 6820Sstevel@tonic-gate { 6830Sstevel@tonic-gate } 6840Sstevel@tonic-gate 6850Sstevel@tonic-gate /* 6860Sstevel@tonic-gate * Generic esballoc used 
to implement the four flavors: [d]esballoc[a]. 6870Sstevel@tonic-gate */ 6880Sstevel@tonic-gate static mblk_t * 6890Sstevel@tonic-gate gesballoc(unsigned char *base, size_t size, uint32_t db_rtfu, frtn_t *frp, 6900Sstevel@tonic-gate void (*lastfree)(mblk_t *, dblk_t *), int kmflags) 6910Sstevel@tonic-gate { 6920Sstevel@tonic-gate dblk_t *dbp; 6930Sstevel@tonic-gate mblk_t *mp; 6940Sstevel@tonic-gate 6950Sstevel@tonic-gate ASSERT(base != NULL && frp != NULL); 6960Sstevel@tonic-gate 6970Sstevel@tonic-gate if ((dbp = kmem_cache_alloc(dblk_esb_cache, kmflags)) == NULL) { 6980Sstevel@tonic-gate mp = NULL; 6990Sstevel@tonic-gate goto out; 7000Sstevel@tonic-gate } 7010Sstevel@tonic-gate 7020Sstevel@tonic-gate mp = dbp->db_mblk; 7030Sstevel@tonic-gate dbp->db_base = base; 7040Sstevel@tonic-gate dbp->db_lim = base + size; 7050Sstevel@tonic-gate dbp->db_free = dbp->db_lastfree = lastfree; 7060Sstevel@tonic-gate dbp->db_frtnp = frp; 7070Sstevel@tonic-gate DBLK_RTFU_WORD(dbp) = db_rtfu; 7080Sstevel@tonic-gate mp->b_next = mp->b_prev = mp->b_cont = NULL; 7090Sstevel@tonic-gate mp->b_rptr = mp->b_wptr = base; 7100Sstevel@tonic-gate mp->b_queue = NULL; 7110Sstevel@tonic-gate MBLK_BAND_FLAG_WORD(mp) = 0; 7120Sstevel@tonic-gate 7130Sstevel@tonic-gate out: 7140Sstevel@tonic-gate FTRACE_1("gesballoc(): mp=0x%lx", (uintptr_t)mp); 7150Sstevel@tonic-gate return (mp); 7160Sstevel@tonic-gate } 7170Sstevel@tonic-gate 7180Sstevel@tonic-gate /*ARGSUSED*/ 7190Sstevel@tonic-gate mblk_t * 7200Sstevel@tonic-gate esballoc(unsigned char *base, size_t size, uint_t pri, frtn_t *frp) 7210Sstevel@tonic-gate { 7220Sstevel@tonic-gate mblk_t *mp; 7230Sstevel@tonic-gate 7240Sstevel@tonic-gate /* 7250Sstevel@tonic-gate * Note that this is structured to allow the common case (i.e. 7260Sstevel@tonic-gate * STREAMS flowtracing disabled) to call gesballoc() with tail 7270Sstevel@tonic-gate * call optimization. 
7280Sstevel@tonic-gate */ 7290Sstevel@tonic-gate if (!str_ftnever) { 7300Sstevel@tonic-gate mp = gesballoc(base, size, DBLK_RTFU(1, M_DATA, 0, 0), 7310Sstevel@tonic-gate frp, freebs_enqueue, KM_NOSLEEP); 7320Sstevel@tonic-gate 7330Sstevel@tonic-gate if (mp != NULL) 7340Sstevel@tonic-gate STR_FTALLOC(&DB_FTHDR(mp), FTEV_ESBALLOC, size); 7350Sstevel@tonic-gate return (mp); 7360Sstevel@tonic-gate } 7370Sstevel@tonic-gate 7380Sstevel@tonic-gate return (gesballoc(base, size, DBLK_RTFU(1, M_DATA, 0, 0), 7390Sstevel@tonic-gate frp, freebs_enqueue, KM_NOSLEEP)); 7400Sstevel@tonic-gate } 7410Sstevel@tonic-gate 7420Sstevel@tonic-gate /* 7430Sstevel@tonic-gate * Same as esballoc() but sleeps waiting for memory. 7440Sstevel@tonic-gate */ 7450Sstevel@tonic-gate /*ARGSUSED*/ 7460Sstevel@tonic-gate mblk_t * 7470Sstevel@tonic-gate esballoc_wait(unsigned char *base, size_t size, uint_t pri, frtn_t *frp) 7480Sstevel@tonic-gate { 7490Sstevel@tonic-gate mblk_t *mp; 7500Sstevel@tonic-gate 7510Sstevel@tonic-gate /* 7520Sstevel@tonic-gate * Note that this is structured to allow the common case (i.e. 7530Sstevel@tonic-gate * STREAMS flowtracing disabled) to call gesballoc() with tail 7540Sstevel@tonic-gate * call optimization. 
7550Sstevel@tonic-gate */ 7560Sstevel@tonic-gate if (!str_ftnever) { 7570Sstevel@tonic-gate mp = gesballoc(base, size, DBLK_RTFU(1, M_DATA, 0, 0), 7580Sstevel@tonic-gate frp, freebs_enqueue, KM_SLEEP); 7590Sstevel@tonic-gate 7600Sstevel@tonic-gate STR_FTALLOC(&DB_FTHDR(mp), FTEV_ESBALLOC, size); 7610Sstevel@tonic-gate return (mp); 7620Sstevel@tonic-gate } 7630Sstevel@tonic-gate 7640Sstevel@tonic-gate return (gesballoc(base, size, DBLK_RTFU(1, M_DATA, 0, 0), 7650Sstevel@tonic-gate frp, freebs_enqueue, KM_SLEEP)); 7660Sstevel@tonic-gate } 7670Sstevel@tonic-gate 7680Sstevel@tonic-gate /*ARGSUSED*/ 7690Sstevel@tonic-gate mblk_t * 7700Sstevel@tonic-gate desballoc(unsigned char *base, size_t size, uint_t pri, frtn_t *frp) 7710Sstevel@tonic-gate { 7720Sstevel@tonic-gate mblk_t *mp; 7730Sstevel@tonic-gate 7740Sstevel@tonic-gate /* 7750Sstevel@tonic-gate * Note that this is structured to allow the common case (i.e. 7760Sstevel@tonic-gate * STREAMS flowtracing disabled) to call gesballoc() with tail 7770Sstevel@tonic-gate * call optimization. 
7780Sstevel@tonic-gate */ 7790Sstevel@tonic-gate if (!str_ftnever) { 7800Sstevel@tonic-gate mp = gesballoc(base, size, DBLK_RTFU(1, M_DATA, 0, 0), 7816707Sbrutus frp, dblk_lastfree_desb, KM_NOSLEEP); 7820Sstevel@tonic-gate 7830Sstevel@tonic-gate if (mp != NULL) 7840Sstevel@tonic-gate STR_FTALLOC(&DB_FTHDR(mp), FTEV_DESBALLOC, size); 7850Sstevel@tonic-gate return (mp); 7860Sstevel@tonic-gate } 7870Sstevel@tonic-gate 7880Sstevel@tonic-gate return (gesballoc(base, size, DBLK_RTFU(1, M_DATA, 0, 0), 7890Sstevel@tonic-gate frp, dblk_lastfree_desb, KM_NOSLEEP)); 7900Sstevel@tonic-gate } 7910Sstevel@tonic-gate 7920Sstevel@tonic-gate /*ARGSUSED*/ 7930Sstevel@tonic-gate mblk_t * 7940Sstevel@tonic-gate esballoca(unsigned char *base, size_t size, uint_t pri, frtn_t *frp) 7950Sstevel@tonic-gate { 7960Sstevel@tonic-gate mblk_t *mp; 7970Sstevel@tonic-gate 7980Sstevel@tonic-gate /* 7990Sstevel@tonic-gate * Note that this is structured to allow the common case (i.e. 8000Sstevel@tonic-gate * STREAMS flowtracing disabled) to call gesballoc() with tail 8010Sstevel@tonic-gate * call optimization. 
8020Sstevel@tonic-gate */ 8030Sstevel@tonic-gate if (!str_ftnever) { 8040Sstevel@tonic-gate mp = gesballoc(base, size, DBLK_RTFU(2, M_DATA, 0, 0), 8050Sstevel@tonic-gate frp, freebs_enqueue, KM_NOSLEEP); 8060Sstevel@tonic-gate 8070Sstevel@tonic-gate if (mp != NULL) 8080Sstevel@tonic-gate STR_FTALLOC(&DB_FTHDR(mp), FTEV_ESBALLOCA, size); 8090Sstevel@tonic-gate return (mp); 8100Sstevel@tonic-gate } 8110Sstevel@tonic-gate 8120Sstevel@tonic-gate return (gesballoc(base, size, DBLK_RTFU(2, M_DATA, 0, 0), 8130Sstevel@tonic-gate frp, freebs_enqueue, KM_NOSLEEP)); 8140Sstevel@tonic-gate } 8150Sstevel@tonic-gate 8160Sstevel@tonic-gate /*ARGSUSED*/ 8170Sstevel@tonic-gate mblk_t * 8180Sstevel@tonic-gate desballoca(unsigned char *base, size_t size, uint_t pri, frtn_t *frp) 8190Sstevel@tonic-gate { 8200Sstevel@tonic-gate mblk_t *mp; 8210Sstevel@tonic-gate 8220Sstevel@tonic-gate /* 8230Sstevel@tonic-gate * Note that this is structured to allow the common case (i.e. 8240Sstevel@tonic-gate * STREAMS flowtracing disabled) to call gesballoc() with tail 8250Sstevel@tonic-gate * call optimization. 
8260Sstevel@tonic-gate */ 8270Sstevel@tonic-gate if (!str_ftnever) { 8280Sstevel@tonic-gate mp = gesballoc(base, size, DBLK_RTFU(2, M_DATA, 0, 0), 8290Sstevel@tonic-gate frp, dblk_lastfree_desb, KM_NOSLEEP); 8300Sstevel@tonic-gate 8310Sstevel@tonic-gate if (mp != NULL) 8320Sstevel@tonic-gate STR_FTALLOC(&DB_FTHDR(mp), FTEV_DESBALLOCA, size); 8330Sstevel@tonic-gate return (mp); 8340Sstevel@tonic-gate } 8350Sstevel@tonic-gate 8360Sstevel@tonic-gate return (gesballoc(base, size, DBLK_RTFU(2, M_DATA, 0, 0), 8370Sstevel@tonic-gate frp, dblk_lastfree_desb, KM_NOSLEEP)); 8380Sstevel@tonic-gate } 8390Sstevel@tonic-gate 8400Sstevel@tonic-gate static void 8410Sstevel@tonic-gate bcache_dblk_lastfree(mblk_t *mp, dblk_t *dbp) 8420Sstevel@tonic-gate { 8430Sstevel@tonic-gate bcache_t *bcp = dbp->db_cache; 8440Sstevel@tonic-gate 8450Sstevel@tonic-gate ASSERT(dbp->db_mblk == mp); 8460Sstevel@tonic-gate if (dbp->db_fthdr != NULL) 8470Sstevel@tonic-gate str_ftfree(dbp); 8480Sstevel@tonic-gate 8490Sstevel@tonic-gate /* set credp and projid to be 'unspecified' before returning to cache */ 8500Sstevel@tonic-gate if (dbp->db_credp != NULL) { 8510Sstevel@tonic-gate crfree(dbp->db_credp); 8520Sstevel@tonic-gate dbp->db_credp = NULL; 8530Sstevel@tonic-gate } 8540Sstevel@tonic-gate dbp->db_cpid = -1; 8550Sstevel@tonic-gate dbp->db_struioflag = 0; 8560Sstevel@tonic-gate dbp->db_struioun.cksum.flags = 0; 8570Sstevel@tonic-gate 8580Sstevel@tonic-gate mutex_enter(&bcp->mutex); 8590Sstevel@tonic-gate kmem_cache_free(bcp->dblk_cache, dbp); 8600Sstevel@tonic-gate bcp->alloc--; 8610Sstevel@tonic-gate 8620Sstevel@tonic-gate if (bcp->alloc == 0 && bcp->destroy != 0) { 8630Sstevel@tonic-gate kmem_cache_destroy(bcp->dblk_cache); 8640Sstevel@tonic-gate kmem_cache_destroy(bcp->buffer_cache); 8650Sstevel@tonic-gate mutex_exit(&bcp->mutex); 8660Sstevel@tonic-gate mutex_destroy(&bcp->mutex); 8670Sstevel@tonic-gate kmem_free(bcp, sizeof (bcache_t)); 8680Sstevel@tonic-gate } else { 8690Sstevel@tonic-gate 
mutex_exit(&bcp->mutex); 8700Sstevel@tonic-gate } 8710Sstevel@tonic-gate } 8720Sstevel@tonic-gate 8730Sstevel@tonic-gate bcache_t * 8740Sstevel@tonic-gate bcache_create(char *name, size_t size, uint_t align) 8750Sstevel@tonic-gate { 8760Sstevel@tonic-gate bcache_t *bcp; 8770Sstevel@tonic-gate char buffer[255]; 8780Sstevel@tonic-gate 8790Sstevel@tonic-gate ASSERT((align & (align - 1)) == 0); 8800Sstevel@tonic-gate 881*8752SPeter.Memishian@Sun.COM if ((bcp = kmem_alloc(sizeof (bcache_t), KM_NOSLEEP)) == NULL) 8820Sstevel@tonic-gate return (NULL); 8830Sstevel@tonic-gate 8840Sstevel@tonic-gate bcp->size = size; 8850Sstevel@tonic-gate bcp->align = align; 8860Sstevel@tonic-gate bcp->alloc = 0; 8870Sstevel@tonic-gate bcp->destroy = 0; 8880Sstevel@tonic-gate 8890Sstevel@tonic-gate mutex_init(&bcp->mutex, NULL, MUTEX_DRIVER, NULL); 8900Sstevel@tonic-gate 8910Sstevel@tonic-gate (void) sprintf(buffer, "%s_buffer_cache", name); 8920Sstevel@tonic-gate bcp->buffer_cache = kmem_cache_create(buffer, size, align, NULL, NULL, 8930Sstevel@tonic-gate NULL, NULL, NULL, 0); 8940Sstevel@tonic-gate (void) sprintf(buffer, "%s_dblk_cache", name); 8950Sstevel@tonic-gate bcp->dblk_cache = kmem_cache_create(buffer, sizeof (dblk_t), 8960Sstevel@tonic-gate DBLK_CACHE_ALIGN, bcache_dblk_constructor, bcache_dblk_destructor, 8976707Sbrutus NULL, (void *)bcp, NULL, 0); 8980Sstevel@tonic-gate 8990Sstevel@tonic-gate return (bcp); 9000Sstevel@tonic-gate } 9010Sstevel@tonic-gate 9020Sstevel@tonic-gate void 9030Sstevel@tonic-gate bcache_destroy(bcache_t *bcp) 9040Sstevel@tonic-gate { 9050Sstevel@tonic-gate ASSERT(bcp != NULL); 9060Sstevel@tonic-gate 9070Sstevel@tonic-gate mutex_enter(&bcp->mutex); 9080Sstevel@tonic-gate if (bcp->alloc == 0) { 9090Sstevel@tonic-gate kmem_cache_destroy(bcp->dblk_cache); 9100Sstevel@tonic-gate kmem_cache_destroy(bcp->buffer_cache); 9110Sstevel@tonic-gate mutex_exit(&bcp->mutex); 9120Sstevel@tonic-gate mutex_destroy(&bcp->mutex); 9130Sstevel@tonic-gate kmem_free(bcp, sizeof 
(bcache_t)); 9140Sstevel@tonic-gate } else { 9150Sstevel@tonic-gate bcp->destroy++; 9160Sstevel@tonic-gate mutex_exit(&bcp->mutex); 9170Sstevel@tonic-gate } 9180Sstevel@tonic-gate } 9190Sstevel@tonic-gate 9200Sstevel@tonic-gate /*ARGSUSED*/ 9210Sstevel@tonic-gate mblk_t * 9220Sstevel@tonic-gate bcache_allocb(bcache_t *bcp, uint_t pri) 9230Sstevel@tonic-gate { 9240Sstevel@tonic-gate dblk_t *dbp; 9250Sstevel@tonic-gate mblk_t *mp = NULL; 9260Sstevel@tonic-gate 9270Sstevel@tonic-gate ASSERT(bcp != NULL); 9280Sstevel@tonic-gate 9290Sstevel@tonic-gate mutex_enter(&bcp->mutex); 9300Sstevel@tonic-gate if (bcp->destroy != 0) { 9310Sstevel@tonic-gate mutex_exit(&bcp->mutex); 9320Sstevel@tonic-gate goto out; 9330Sstevel@tonic-gate } 9340Sstevel@tonic-gate 9350Sstevel@tonic-gate if ((dbp = kmem_cache_alloc(bcp->dblk_cache, KM_NOSLEEP)) == NULL) { 9360Sstevel@tonic-gate mutex_exit(&bcp->mutex); 9370Sstevel@tonic-gate goto out; 9380Sstevel@tonic-gate } 9390Sstevel@tonic-gate bcp->alloc++; 9400Sstevel@tonic-gate mutex_exit(&bcp->mutex); 9410Sstevel@tonic-gate 9420Sstevel@tonic-gate ASSERT(((uintptr_t)(dbp->db_base) & (bcp->align - 1)) == 0); 9430Sstevel@tonic-gate 9440Sstevel@tonic-gate mp = dbp->db_mblk; 9450Sstevel@tonic-gate DBLK_RTFU_WORD(dbp) = DBLK_RTFU(1, M_DATA, 0, 0); 9460Sstevel@tonic-gate mp->b_next = mp->b_prev = mp->b_cont = NULL; 9470Sstevel@tonic-gate mp->b_rptr = mp->b_wptr = dbp->db_base; 9480Sstevel@tonic-gate mp->b_queue = NULL; 9490Sstevel@tonic-gate MBLK_BAND_FLAG_WORD(mp) = 0; 9500Sstevel@tonic-gate STR_FTALLOC(&dbp->db_fthdr, FTEV_BCALLOCB, bcp->size); 9510Sstevel@tonic-gate out: 9520Sstevel@tonic-gate FTRACE_1("bcache_allocb(): mp=0x%p", (uintptr_t)mp); 9530Sstevel@tonic-gate 9540Sstevel@tonic-gate return (mp); 9550Sstevel@tonic-gate } 9560Sstevel@tonic-gate 9570Sstevel@tonic-gate static void 9580Sstevel@tonic-gate dblk_lastfree_oversize(mblk_t *mp, dblk_t *dbp) 9590Sstevel@tonic-gate { 9600Sstevel@tonic-gate ASSERT(dbp->db_mblk == mp); 
9610Sstevel@tonic-gate if (dbp->db_fthdr != NULL) 9620Sstevel@tonic-gate str_ftfree(dbp); 9630Sstevel@tonic-gate 9640Sstevel@tonic-gate /* set credp and projid to be 'unspecified' before returning to cache */ 9650Sstevel@tonic-gate if (dbp->db_credp != NULL) { 9660Sstevel@tonic-gate crfree(dbp->db_credp); 9670Sstevel@tonic-gate dbp->db_credp = NULL; 9680Sstevel@tonic-gate } 9690Sstevel@tonic-gate dbp->db_cpid = -1; 9700Sstevel@tonic-gate dbp->db_struioflag = 0; 9710Sstevel@tonic-gate dbp->db_struioun.cksum.flags = 0; 9720Sstevel@tonic-gate 9730Sstevel@tonic-gate kmem_free(dbp->db_base, dbp->db_lim - dbp->db_base); 9740Sstevel@tonic-gate kmem_cache_free(dbp->db_cache, dbp); 9750Sstevel@tonic-gate } 9760Sstevel@tonic-gate 9770Sstevel@tonic-gate static mblk_t * 9780Sstevel@tonic-gate allocb_oversize(size_t size, int kmflags) 9790Sstevel@tonic-gate { 9800Sstevel@tonic-gate mblk_t *mp; 9810Sstevel@tonic-gate void *buf; 9820Sstevel@tonic-gate 9830Sstevel@tonic-gate size = P2ROUNDUP(size, DBLK_CACHE_ALIGN); 9840Sstevel@tonic-gate if ((buf = kmem_alloc(size, kmflags)) == NULL) 9850Sstevel@tonic-gate return (NULL); 9860Sstevel@tonic-gate if ((mp = gesballoc(buf, size, DBLK_RTFU(1, M_DATA, 0, 0), 9870Sstevel@tonic-gate &frnop, dblk_lastfree_oversize, kmflags)) == NULL) 9880Sstevel@tonic-gate kmem_free(buf, size); 9890Sstevel@tonic-gate 9900Sstevel@tonic-gate if (mp != NULL) 9910Sstevel@tonic-gate STR_FTALLOC(&DB_FTHDR(mp), FTEV_ALLOCBIG, size); 9920Sstevel@tonic-gate 9930Sstevel@tonic-gate return (mp); 9940Sstevel@tonic-gate } 9950Sstevel@tonic-gate 9960Sstevel@tonic-gate mblk_t * 9970Sstevel@tonic-gate allocb_tryhard(size_t target_size) 9980Sstevel@tonic-gate { 9990Sstevel@tonic-gate size_t size; 10000Sstevel@tonic-gate mblk_t *bp; 10010Sstevel@tonic-gate 10020Sstevel@tonic-gate for (size = target_size; size < target_size + 512; 10030Sstevel@tonic-gate size += DBLK_CACHE_ALIGN) 10040Sstevel@tonic-gate if ((bp = allocb(size, BPRI_HI)) != NULL) 10050Sstevel@tonic-gate return 
(bp); 10060Sstevel@tonic-gate allocb_tryhard_fails++; 10070Sstevel@tonic-gate return (NULL); 10080Sstevel@tonic-gate } 10090Sstevel@tonic-gate 10100Sstevel@tonic-gate /* 10110Sstevel@tonic-gate * This routine is consolidation private for STREAMS internal use 10120Sstevel@tonic-gate * This routine may only be called from sync routines (i.e., not 10130Sstevel@tonic-gate * from put or service procedures). It is located here (rather 10140Sstevel@tonic-gate * than strsubr.c) so that we don't have to expose all of the 10150Sstevel@tonic-gate * allocb() implementation details in header files. 10160Sstevel@tonic-gate */ 10170Sstevel@tonic-gate mblk_t * 10180Sstevel@tonic-gate allocb_wait(size_t size, uint_t pri, uint_t flags, int *error) 10190Sstevel@tonic-gate { 10200Sstevel@tonic-gate dblk_t *dbp; 10210Sstevel@tonic-gate mblk_t *mp; 10220Sstevel@tonic-gate size_t index; 10230Sstevel@tonic-gate 10240Sstevel@tonic-gate index = (size -1) >> DBLK_SIZE_SHIFT; 10250Sstevel@tonic-gate 10260Sstevel@tonic-gate if (flags & STR_NOSIG) { 10270Sstevel@tonic-gate if (index >= (DBLK_MAX_CACHE >> DBLK_SIZE_SHIFT)) { 10280Sstevel@tonic-gate if (size != 0) { 10290Sstevel@tonic-gate mp = allocb_oversize(size, KM_SLEEP); 10300Sstevel@tonic-gate FTRACE_1("allocb_wait (NOSIG): mp=0x%lx", 10310Sstevel@tonic-gate (uintptr_t)mp); 10320Sstevel@tonic-gate return (mp); 10330Sstevel@tonic-gate } 10340Sstevel@tonic-gate index = 0; 10350Sstevel@tonic-gate } 10360Sstevel@tonic-gate 10370Sstevel@tonic-gate dbp = kmem_cache_alloc(dblk_cache[index], KM_SLEEP); 10380Sstevel@tonic-gate mp = dbp->db_mblk; 10390Sstevel@tonic-gate DBLK_RTFU_WORD(dbp) = DBLK_RTFU(1, M_DATA, 0, 0); 10400Sstevel@tonic-gate mp->b_next = mp->b_prev = mp->b_cont = NULL; 10410Sstevel@tonic-gate mp->b_rptr = mp->b_wptr = dbp->db_base; 10420Sstevel@tonic-gate mp->b_queue = NULL; 10430Sstevel@tonic-gate MBLK_BAND_FLAG_WORD(mp) = 0; 10440Sstevel@tonic-gate STR_FTALLOC(&DB_FTHDR(mp), FTEV_ALLOCBW, size); 10450Sstevel@tonic-gate 
10460Sstevel@tonic-gate FTRACE_1("allocb_wait (NOSIG): mp=0x%lx", (uintptr_t)mp); 10470Sstevel@tonic-gate 10480Sstevel@tonic-gate } else { 10490Sstevel@tonic-gate while ((mp = allocb(size, pri)) == NULL) { 10500Sstevel@tonic-gate if ((*error = strwaitbuf(size, BPRI_HI)) != 0) 10510Sstevel@tonic-gate return (NULL); 10520Sstevel@tonic-gate } 10530Sstevel@tonic-gate } 10540Sstevel@tonic-gate 10550Sstevel@tonic-gate return (mp); 10560Sstevel@tonic-gate } 10570Sstevel@tonic-gate 10580Sstevel@tonic-gate /* 10590Sstevel@tonic-gate * Call function 'func' with 'arg' when a class zero block can 10600Sstevel@tonic-gate * be allocated with priority 'pri'. 10610Sstevel@tonic-gate */ 10620Sstevel@tonic-gate bufcall_id_t 10630Sstevel@tonic-gate esbbcall(uint_t pri, void (*func)(void *), void *arg) 10640Sstevel@tonic-gate { 10650Sstevel@tonic-gate return (bufcall(1, pri, func, arg)); 10660Sstevel@tonic-gate } 10670Sstevel@tonic-gate 10680Sstevel@tonic-gate /* 10690Sstevel@tonic-gate * Allocates an iocblk (M_IOCTL) block. Properly sets the credentials 10700Sstevel@tonic-gate * ioc_id, rval and error of the struct ioctl to set up an ioctl call. 10710Sstevel@tonic-gate * This provides consistency for all internal allocators of ioctl. 10720Sstevel@tonic-gate */ 10730Sstevel@tonic-gate mblk_t * 10740Sstevel@tonic-gate mkiocb(uint_t cmd) 10750Sstevel@tonic-gate { 10760Sstevel@tonic-gate struct iocblk *ioc; 10770Sstevel@tonic-gate mblk_t *mp; 10780Sstevel@tonic-gate 10790Sstevel@tonic-gate /* 10800Sstevel@tonic-gate * Allocate enough space for any of the ioctl related messages. 10810Sstevel@tonic-gate */ 10820Sstevel@tonic-gate if ((mp = allocb(sizeof (union ioctypes), BPRI_MED)) == NULL) 10830Sstevel@tonic-gate return (NULL); 10840Sstevel@tonic-gate 10850Sstevel@tonic-gate bzero(mp->b_rptr, sizeof (union ioctypes)); 10860Sstevel@tonic-gate 10870Sstevel@tonic-gate /* 10880Sstevel@tonic-gate * Set the mblk_t information and ptrs correctly. 
10890Sstevel@tonic-gate */ 10900Sstevel@tonic-gate mp->b_wptr += sizeof (struct iocblk); 10910Sstevel@tonic-gate mp->b_datap->db_type = M_IOCTL; 10920Sstevel@tonic-gate 10930Sstevel@tonic-gate /* 10940Sstevel@tonic-gate * Fill in the fields. 10950Sstevel@tonic-gate */ 10960Sstevel@tonic-gate ioc = (struct iocblk *)mp->b_rptr; 10970Sstevel@tonic-gate ioc->ioc_cmd = cmd; 10980Sstevel@tonic-gate ioc->ioc_cr = kcred; 10990Sstevel@tonic-gate ioc->ioc_id = getiocseqno(); 11000Sstevel@tonic-gate ioc->ioc_flag = IOC_NATIVE; 11010Sstevel@tonic-gate return (mp); 11020Sstevel@tonic-gate } 11030Sstevel@tonic-gate 11040Sstevel@tonic-gate /* 11050Sstevel@tonic-gate * test if block of given size can be allocated with a request of 11060Sstevel@tonic-gate * the given priority. 11070Sstevel@tonic-gate * 'pri' is no longer used, but is retained for compatibility. 11080Sstevel@tonic-gate */ 11090Sstevel@tonic-gate /* ARGSUSED */ 11100Sstevel@tonic-gate int 11110Sstevel@tonic-gate testb(size_t size, uint_t pri) 11120Sstevel@tonic-gate { 11130Sstevel@tonic-gate return ((size + sizeof (dblk_t)) <= kmem_avail()); 11140Sstevel@tonic-gate } 11150Sstevel@tonic-gate 11160Sstevel@tonic-gate /* 11170Sstevel@tonic-gate * Call function 'func' with argument 'arg' when there is a reasonably 11180Sstevel@tonic-gate * good chance that a block of size 'size' can be allocated. 11190Sstevel@tonic-gate * 'pri' is no longer used, but is retained for compatibility. 
11200Sstevel@tonic-gate */ 11210Sstevel@tonic-gate /* ARGSUSED */ 11220Sstevel@tonic-gate bufcall_id_t 11230Sstevel@tonic-gate bufcall(size_t size, uint_t pri, void (*func)(void *), void *arg) 11240Sstevel@tonic-gate { 11250Sstevel@tonic-gate static long bid = 1; /* always odd to save checking for zero */ 11260Sstevel@tonic-gate bufcall_id_t bc_id; 11270Sstevel@tonic-gate struct strbufcall *bcp; 11280Sstevel@tonic-gate 11290Sstevel@tonic-gate if ((bcp = kmem_alloc(sizeof (strbufcall_t), KM_NOSLEEP)) == NULL) 11300Sstevel@tonic-gate return (0); 11310Sstevel@tonic-gate 11320Sstevel@tonic-gate bcp->bc_func = func; 11330Sstevel@tonic-gate bcp->bc_arg = arg; 11340Sstevel@tonic-gate bcp->bc_size = size; 11350Sstevel@tonic-gate bcp->bc_next = NULL; 11360Sstevel@tonic-gate bcp->bc_executor = NULL; 11370Sstevel@tonic-gate 11380Sstevel@tonic-gate mutex_enter(&strbcall_lock); 11390Sstevel@tonic-gate /* 11400Sstevel@tonic-gate * After bcp is linked into strbcalls and strbcall_lock is dropped there 11410Sstevel@tonic-gate * should be no references to bcp since it may be freed by 11420Sstevel@tonic-gate * runbufcalls(). Since bcp_id field is returned, we save its value in 11430Sstevel@tonic-gate * the local var. 11440Sstevel@tonic-gate */ 11450Sstevel@tonic-gate bc_id = bcp->bc_id = (bufcall_id_t)(bid += 2); /* keep it odd */ 11460Sstevel@tonic-gate 11470Sstevel@tonic-gate /* 11480Sstevel@tonic-gate * add newly allocated stream event to existing 11490Sstevel@tonic-gate * linked list of events. 
11500Sstevel@tonic-gate */ 11510Sstevel@tonic-gate if (strbcalls.bc_head == NULL) { 11520Sstevel@tonic-gate strbcalls.bc_head = strbcalls.bc_tail = bcp; 11530Sstevel@tonic-gate } else { 11540Sstevel@tonic-gate strbcalls.bc_tail->bc_next = bcp; 11550Sstevel@tonic-gate strbcalls.bc_tail = bcp; 11560Sstevel@tonic-gate } 11570Sstevel@tonic-gate 11580Sstevel@tonic-gate cv_signal(&strbcall_cv); 11590Sstevel@tonic-gate mutex_exit(&strbcall_lock); 11600Sstevel@tonic-gate return (bc_id); 11610Sstevel@tonic-gate } 11620Sstevel@tonic-gate 11630Sstevel@tonic-gate /* 11640Sstevel@tonic-gate * Cancel a bufcall request. 11650Sstevel@tonic-gate */ 11660Sstevel@tonic-gate void 11670Sstevel@tonic-gate unbufcall(bufcall_id_t id) 11680Sstevel@tonic-gate { 11690Sstevel@tonic-gate strbufcall_t *bcp, *pbcp; 11700Sstevel@tonic-gate 11710Sstevel@tonic-gate mutex_enter(&strbcall_lock); 11720Sstevel@tonic-gate again: 11730Sstevel@tonic-gate pbcp = NULL; 11740Sstevel@tonic-gate for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next) { 11750Sstevel@tonic-gate if (id == bcp->bc_id) 11760Sstevel@tonic-gate break; 11770Sstevel@tonic-gate pbcp = bcp; 11780Sstevel@tonic-gate } 11790Sstevel@tonic-gate if (bcp) { 11800Sstevel@tonic-gate if (bcp->bc_executor != NULL) { 11810Sstevel@tonic-gate if (bcp->bc_executor != curthread) { 11820Sstevel@tonic-gate cv_wait(&bcall_cv, &strbcall_lock); 11830Sstevel@tonic-gate goto again; 11840Sstevel@tonic-gate } 11850Sstevel@tonic-gate } else { 11860Sstevel@tonic-gate if (pbcp) 11870Sstevel@tonic-gate pbcp->bc_next = bcp->bc_next; 11880Sstevel@tonic-gate else 11890Sstevel@tonic-gate strbcalls.bc_head = bcp->bc_next; 11900Sstevel@tonic-gate if (bcp == strbcalls.bc_tail) 11910Sstevel@tonic-gate strbcalls.bc_tail = pbcp; 11920Sstevel@tonic-gate kmem_free(bcp, sizeof (strbufcall_t)); 11930Sstevel@tonic-gate } 11940Sstevel@tonic-gate } 11950Sstevel@tonic-gate mutex_exit(&strbcall_lock); 11960Sstevel@tonic-gate } 11970Sstevel@tonic-gate 11980Sstevel@tonic-gate /* 
11990Sstevel@tonic-gate * Duplicate a message block by block (uses dupb), returning 12000Sstevel@tonic-gate * a pointer to the duplicate message. 12010Sstevel@tonic-gate * Returns a non-NULL value only if the entire message 12020Sstevel@tonic-gate * was dup'd. 12030Sstevel@tonic-gate */ 12040Sstevel@tonic-gate mblk_t * 12050Sstevel@tonic-gate dupmsg(mblk_t *bp) 12060Sstevel@tonic-gate { 12070Sstevel@tonic-gate mblk_t *head, *nbp; 12080Sstevel@tonic-gate 12090Sstevel@tonic-gate if (!bp || !(nbp = head = dupb(bp))) 12100Sstevel@tonic-gate return (NULL); 12110Sstevel@tonic-gate 12120Sstevel@tonic-gate while (bp->b_cont) { 12130Sstevel@tonic-gate if (!(nbp->b_cont = dupb(bp->b_cont))) { 12140Sstevel@tonic-gate freemsg(head); 12150Sstevel@tonic-gate return (NULL); 12160Sstevel@tonic-gate } 12170Sstevel@tonic-gate nbp = nbp->b_cont; 12180Sstevel@tonic-gate bp = bp->b_cont; 12190Sstevel@tonic-gate } 12200Sstevel@tonic-gate return (head); 12210Sstevel@tonic-gate } 12220Sstevel@tonic-gate 12230Sstevel@tonic-gate #define DUPB_NOLOAN(bp) \ 12240Sstevel@tonic-gate ((((bp)->b_datap->db_struioflag & STRUIO_ZC) != 0) ? 
\ 12250Sstevel@tonic-gate copyb((bp)) : dupb((bp))) 12260Sstevel@tonic-gate 12270Sstevel@tonic-gate mblk_t * 12280Sstevel@tonic-gate dupmsg_noloan(mblk_t *bp) 12290Sstevel@tonic-gate { 12300Sstevel@tonic-gate mblk_t *head, *nbp; 12310Sstevel@tonic-gate 12320Sstevel@tonic-gate if (bp == NULL || DB_TYPE(bp) != M_DATA || 12330Sstevel@tonic-gate ((nbp = head = DUPB_NOLOAN(bp)) == NULL)) 12340Sstevel@tonic-gate return (NULL); 12350Sstevel@tonic-gate 12360Sstevel@tonic-gate while (bp->b_cont) { 12370Sstevel@tonic-gate if ((nbp->b_cont = DUPB_NOLOAN(bp->b_cont)) == NULL) { 12380Sstevel@tonic-gate freemsg(head); 12390Sstevel@tonic-gate return (NULL); 12400Sstevel@tonic-gate } 12410Sstevel@tonic-gate nbp = nbp->b_cont; 12420Sstevel@tonic-gate bp = bp->b_cont; 12430Sstevel@tonic-gate } 12440Sstevel@tonic-gate return (head); 12450Sstevel@tonic-gate } 12460Sstevel@tonic-gate 12470Sstevel@tonic-gate /* 12480Sstevel@tonic-gate * Copy data from message and data block to newly allocated message and 12490Sstevel@tonic-gate * data block. Returns new message block pointer, or NULL if error. 12500Sstevel@tonic-gate * The alignment of rptr (w.r.t. word alignment) will be the same in the copy 12510Sstevel@tonic-gate * as in the original even when db_base is not word aligned. 
(bug 1052877) 12520Sstevel@tonic-gate */ 12530Sstevel@tonic-gate mblk_t * 12540Sstevel@tonic-gate copyb(mblk_t *bp) 12550Sstevel@tonic-gate { 12560Sstevel@tonic-gate mblk_t *nbp; 12570Sstevel@tonic-gate dblk_t *dp, *ndp; 12580Sstevel@tonic-gate uchar_t *base; 12590Sstevel@tonic-gate size_t size; 12600Sstevel@tonic-gate size_t unaligned; 12610Sstevel@tonic-gate 12620Sstevel@tonic-gate ASSERT(bp->b_wptr >= bp->b_rptr); 12630Sstevel@tonic-gate 12640Sstevel@tonic-gate dp = bp->b_datap; 12650Sstevel@tonic-gate if (dp->db_fthdr != NULL) 12660Sstevel@tonic-gate STR_FTEVENT_MBLK(bp, caller(), FTEV_COPYB, 0); 12670Sstevel@tonic-gate 12680Sstevel@tonic-gate /* 12690Sstevel@tonic-gate * Special handling for Multidata message; this should be 12700Sstevel@tonic-gate * removed once a copy-callback routine is made available. 12710Sstevel@tonic-gate */ 12720Sstevel@tonic-gate if (dp->db_type == M_MULTIDATA) { 12730Sstevel@tonic-gate cred_t *cr; 12740Sstevel@tonic-gate 12750Sstevel@tonic-gate if ((nbp = mmd_copy(bp, KM_NOSLEEP)) == NULL) 12760Sstevel@tonic-gate return (NULL); 12770Sstevel@tonic-gate 12780Sstevel@tonic-gate nbp->b_flag = bp->b_flag; 12790Sstevel@tonic-gate nbp->b_band = bp->b_band; 12800Sstevel@tonic-gate ndp = nbp->b_datap; 12810Sstevel@tonic-gate 12820Sstevel@tonic-gate /* See comments below on potential issues. 
*/ 12830Sstevel@tonic-gate STR_FTEVENT_MBLK(nbp, caller(), FTEV_COPYB, 1); 12840Sstevel@tonic-gate 12850Sstevel@tonic-gate ASSERT(ndp->db_type == dp->db_type); 12860Sstevel@tonic-gate cr = dp->db_credp; 12870Sstevel@tonic-gate if (cr != NULL) 12880Sstevel@tonic-gate crhold(ndp->db_credp = cr); 12890Sstevel@tonic-gate ndp->db_cpid = dp->db_cpid; 12900Sstevel@tonic-gate return (nbp); 12910Sstevel@tonic-gate } 12920Sstevel@tonic-gate 12930Sstevel@tonic-gate size = dp->db_lim - dp->db_base; 12940Sstevel@tonic-gate unaligned = P2PHASE((uintptr_t)dp->db_base, sizeof (uint_t)); 12950Sstevel@tonic-gate if ((nbp = allocb_tmpl(size + unaligned, bp)) == NULL) 12960Sstevel@tonic-gate return (NULL); 12970Sstevel@tonic-gate nbp->b_flag = bp->b_flag; 12980Sstevel@tonic-gate nbp->b_band = bp->b_band; 12990Sstevel@tonic-gate ndp = nbp->b_datap; 13000Sstevel@tonic-gate 13010Sstevel@tonic-gate /* 13020Sstevel@tonic-gate * Well, here is a potential issue. If we are trying to 13030Sstevel@tonic-gate * trace a flow, and we copy the message, we might lose 13040Sstevel@tonic-gate * information about where this message might have been. 13050Sstevel@tonic-gate * So we should inherit the FT data. On the other hand, 13060Sstevel@tonic-gate * a user might be interested only in alloc to free data. 13070Sstevel@tonic-gate * So I guess the real answer is to provide a tunable. 
13080Sstevel@tonic-gate */ 13090Sstevel@tonic-gate STR_FTEVENT_MBLK(nbp, caller(), FTEV_COPYB, 1); 13100Sstevel@tonic-gate 13110Sstevel@tonic-gate base = ndp->db_base + unaligned; 13120Sstevel@tonic-gate bcopy(dp->db_base, ndp->db_base + unaligned, size); 13130Sstevel@tonic-gate 13140Sstevel@tonic-gate nbp->b_rptr = base + (bp->b_rptr - dp->db_base); 13150Sstevel@tonic-gate nbp->b_wptr = nbp->b_rptr + MBLKL(bp); 13160Sstevel@tonic-gate 13170Sstevel@tonic-gate return (nbp); 13180Sstevel@tonic-gate } 13190Sstevel@tonic-gate 13200Sstevel@tonic-gate /* 13210Sstevel@tonic-gate * Copy data from message to newly allocated message using new 13220Sstevel@tonic-gate * data blocks. Returns a pointer to the new message, or NULL if error. 13230Sstevel@tonic-gate */ 13240Sstevel@tonic-gate mblk_t * 13250Sstevel@tonic-gate copymsg(mblk_t *bp) 13260Sstevel@tonic-gate { 13270Sstevel@tonic-gate mblk_t *head, *nbp; 13280Sstevel@tonic-gate 13290Sstevel@tonic-gate if (!bp || !(nbp = head = copyb(bp))) 13300Sstevel@tonic-gate return (NULL); 13310Sstevel@tonic-gate 13320Sstevel@tonic-gate while (bp->b_cont) { 13330Sstevel@tonic-gate if (!(nbp->b_cont = copyb(bp->b_cont))) { 13340Sstevel@tonic-gate freemsg(head); 13350Sstevel@tonic-gate return (NULL); 13360Sstevel@tonic-gate } 13370Sstevel@tonic-gate nbp = nbp->b_cont; 13380Sstevel@tonic-gate bp = bp->b_cont; 13390Sstevel@tonic-gate } 13400Sstevel@tonic-gate return (head); 13410Sstevel@tonic-gate } 13420Sstevel@tonic-gate 13430Sstevel@tonic-gate /* 13440Sstevel@tonic-gate * link a message block to tail of message 13450Sstevel@tonic-gate */ 13460Sstevel@tonic-gate void 13470Sstevel@tonic-gate linkb(mblk_t *mp, mblk_t *bp) 13480Sstevel@tonic-gate { 13490Sstevel@tonic-gate ASSERT(mp && bp); 13500Sstevel@tonic-gate 13510Sstevel@tonic-gate for (; mp->b_cont; mp = mp->b_cont) 13520Sstevel@tonic-gate ; 13530Sstevel@tonic-gate mp->b_cont = bp; 13540Sstevel@tonic-gate } 13550Sstevel@tonic-gate 13560Sstevel@tonic-gate /* 13570Sstevel@tonic-gate * 
unlink a message block from head of message 13580Sstevel@tonic-gate * return pointer to new message. 13590Sstevel@tonic-gate * NULL if message becomes empty. 13600Sstevel@tonic-gate */ 13610Sstevel@tonic-gate mblk_t * 13620Sstevel@tonic-gate unlinkb(mblk_t *bp) 13630Sstevel@tonic-gate { 13640Sstevel@tonic-gate mblk_t *bp1; 13650Sstevel@tonic-gate 13660Sstevel@tonic-gate bp1 = bp->b_cont; 13670Sstevel@tonic-gate bp->b_cont = NULL; 13680Sstevel@tonic-gate return (bp1); 13690Sstevel@tonic-gate } 13700Sstevel@tonic-gate 13710Sstevel@tonic-gate /* 13720Sstevel@tonic-gate * remove a message block "bp" from message "mp" 13730Sstevel@tonic-gate * 13740Sstevel@tonic-gate * Return pointer to new message or NULL if no message remains. 13750Sstevel@tonic-gate * Return -1 if bp is not found in message. 13760Sstevel@tonic-gate */ 13770Sstevel@tonic-gate mblk_t * 13780Sstevel@tonic-gate rmvb(mblk_t *mp, mblk_t *bp) 13790Sstevel@tonic-gate { 13800Sstevel@tonic-gate mblk_t *tmp; 13810Sstevel@tonic-gate mblk_t *lastp = NULL; 13820Sstevel@tonic-gate 13830Sstevel@tonic-gate ASSERT(mp && bp); 13840Sstevel@tonic-gate for (tmp = mp; tmp; tmp = tmp->b_cont) { 13850Sstevel@tonic-gate if (tmp == bp) { 13860Sstevel@tonic-gate if (lastp) 13870Sstevel@tonic-gate lastp->b_cont = tmp->b_cont; 13880Sstevel@tonic-gate else 13890Sstevel@tonic-gate mp = tmp->b_cont; 13900Sstevel@tonic-gate tmp->b_cont = NULL; 13910Sstevel@tonic-gate return (mp); 13920Sstevel@tonic-gate } 13930Sstevel@tonic-gate lastp = tmp; 13940Sstevel@tonic-gate } 13950Sstevel@tonic-gate return ((mblk_t *)-1); 13960Sstevel@tonic-gate } 13970Sstevel@tonic-gate 13980Sstevel@tonic-gate /* 13990Sstevel@tonic-gate * Concatenate and align first len bytes of common 14000Sstevel@tonic-gate * message type. Len == -1, means concat everything. 14010Sstevel@tonic-gate * Returns 1 on success, 0 on failure 14020Sstevel@tonic-gate * After the pullup, mp points to the pulled up data. 
 */
int
pullupmsg(mblk_t *mp, ssize_t len)
{
	mblk_t *bp, *b_cont;
	dblk_t *dbp;
	ssize_t n;

	ASSERT(mp->b_datap->db_ref > 0);
	ASSERT(mp->b_next == NULL && mp->b_prev == NULL);

	/*
	 * We won't handle Multidata message, since it contains
	 * metadata which this function has no knowledge of; we
	 * assert on DEBUG, and return failure otherwise.
	 */
	ASSERT(mp->b_datap->db_type != M_MULTIDATA);
	if (mp->b_datap->db_type == M_MULTIDATA)
		return (0);

	if (len == -1) {
		/* Already a single, aligned mblk: nothing to do */
		if (mp->b_cont == NULL && str_aligned(mp->b_rptr))
			return (1);
		len = xmsgsize(mp);
	} else {
		ssize_t first_mblk_len = mp->b_wptr - mp->b_rptr;
		ASSERT(first_mblk_len >= 0);
		/*
		 * If the length is less than that of the first mblk,
		 * we want to pull up the message into an aligned mblk.
		 * Though not part of the spec, some callers assume it.
		 */
		if (len <= first_mblk_len) {
			if (str_aligned(mp->b_rptr))
				return (1);
			len = first_mblk_len;
		} else if (xmsgsize(mp) < len)
			return (0);
	}

	if ((bp = allocb_tmpl(len, mp)) == NULL)
		return (0);

	/*
	 * Swap the newly allocated dblk into "mp" and let "bp" head the
	 * old chain, so that "mp" (the caller's pointer) ends up pointing
	 * at the pulled-up data.
	 */
	dbp = bp->b_datap;
	*bp = *mp;		/* swap mblks so bp heads the old msg... */
	mp->b_datap = dbp;	/* ... and mp heads the new message */
	mp->b_datap->db_mblk = mp;
	bp->b_datap->db_mblk = bp;
	mp->b_rptr = mp->b_wptr = dbp->db_base;

	/*
	 * Copy up to len bytes from the old chain into the new dblk,
	 * freeing each source mblk once it has been fully drained.
	 */
	do {
		ASSERT(bp->b_datap->db_ref > 0);
		ASSERT(bp->b_wptr >= bp->b_rptr);
		n = MIN(bp->b_wptr - bp->b_rptr, len);
		bcopy(bp->b_rptr, mp->b_wptr, (size_t)n);
		mp->b_wptr += n;
		bp->b_rptr += n;
		len -= n;
		if (bp->b_rptr != bp->b_wptr)
			break;
		b_cont = bp->b_cont;
		freeb(bp);
		bp = b_cont;
	} while (len && bp);

	mp->b_cont = bp;	/* tack on whatever wasn't pulled up */

	return (1);
}

/*
 * Concatenate and align at least the first len bytes of common message
 * type.  Len == -1 means concatenate everything.  The original message is
 * unaltered. 
Returns a pointer to a new message on success, otherwise 14770Sstevel@tonic-gate * returns NULL. 14780Sstevel@tonic-gate */ 14790Sstevel@tonic-gate mblk_t * 14800Sstevel@tonic-gate msgpullup(mblk_t *mp, ssize_t len) 14810Sstevel@tonic-gate { 14820Sstevel@tonic-gate mblk_t *newmp; 14830Sstevel@tonic-gate ssize_t totlen; 14840Sstevel@tonic-gate ssize_t n; 14850Sstevel@tonic-gate 14860Sstevel@tonic-gate /* 14870Sstevel@tonic-gate * We won't handle Multidata message, since it contains 14880Sstevel@tonic-gate * metadata which this function has no knowledge of; we 14890Sstevel@tonic-gate * assert on DEBUG, and return failure otherwise. 14900Sstevel@tonic-gate */ 14910Sstevel@tonic-gate ASSERT(mp->b_datap->db_type != M_MULTIDATA); 14920Sstevel@tonic-gate if (mp->b_datap->db_type == M_MULTIDATA) 14930Sstevel@tonic-gate return (NULL); 14940Sstevel@tonic-gate 14950Sstevel@tonic-gate totlen = xmsgsize(mp); 14960Sstevel@tonic-gate 14970Sstevel@tonic-gate if ((len > 0) && (len > totlen)) 14980Sstevel@tonic-gate return (NULL); 14990Sstevel@tonic-gate 15000Sstevel@tonic-gate /* 15010Sstevel@tonic-gate * Copy all of the first msg type into one new mblk, then dupmsg 15020Sstevel@tonic-gate * and link the rest onto this. 
15030Sstevel@tonic-gate */ 15040Sstevel@tonic-gate 15050Sstevel@tonic-gate len = totlen; 15060Sstevel@tonic-gate 15070Sstevel@tonic-gate if ((newmp = allocb_tmpl(len, mp)) == NULL) 15080Sstevel@tonic-gate return (NULL); 15090Sstevel@tonic-gate 15100Sstevel@tonic-gate newmp->b_flag = mp->b_flag; 15110Sstevel@tonic-gate newmp->b_band = mp->b_band; 15120Sstevel@tonic-gate 15130Sstevel@tonic-gate while (len > 0) { 15140Sstevel@tonic-gate n = mp->b_wptr - mp->b_rptr; 15150Sstevel@tonic-gate ASSERT(n >= 0); /* allow zero-length mblk_t's */ 15160Sstevel@tonic-gate if (n > 0) 15170Sstevel@tonic-gate bcopy(mp->b_rptr, newmp->b_wptr, n); 15180Sstevel@tonic-gate newmp->b_wptr += n; 15190Sstevel@tonic-gate len -= n; 15200Sstevel@tonic-gate mp = mp->b_cont; 15210Sstevel@tonic-gate } 15220Sstevel@tonic-gate 15230Sstevel@tonic-gate if (mp != NULL) { 15240Sstevel@tonic-gate newmp->b_cont = dupmsg(mp); 15250Sstevel@tonic-gate if (newmp->b_cont == NULL) { 15260Sstevel@tonic-gate freemsg(newmp); 15270Sstevel@tonic-gate return (NULL); 15280Sstevel@tonic-gate } 15290Sstevel@tonic-gate } 15300Sstevel@tonic-gate 15310Sstevel@tonic-gate return (newmp); 15320Sstevel@tonic-gate } 15330Sstevel@tonic-gate 15340Sstevel@tonic-gate /* 15350Sstevel@tonic-gate * Trim bytes from message 15360Sstevel@tonic-gate * len > 0, trim from head 15370Sstevel@tonic-gate * len < 0, trim from tail 15380Sstevel@tonic-gate * Returns 1 on success, 0 on failure. 
 */
int
adjmsg(mblk_t *mp, ssize_t len)
{
	mblk_t *bp;
	mblk_t *save_bp = NULL;
	mblk_t *prev_bp;
	mblk_t *bcont;
	unsigned char type;
	ssize_t n;
	int fromhead;
	int first;

	ASSERT(mp != NULL);
	/*
	 * We won't handle Multidata message, since it contains
	 * metadata which this function has no knowledge of; we
	 * assert on DEBUG, and return failure otherwise.
	 */
	ASSERT(mp->b_datap->db_type != M_MULTIDATA);
	if (mp->b_datap->db_type == M_MULTIDATA)
		return (0);

	if (len < 0) {
		fromhead = 0;
		len = -len;
	} else {
		fromhead = 1;
	}

	/* Refuse to trim more than the message holds */
	if (xmsgsize(mp) < len)
		return (0);

	if (fromhead) {
		first = 1;
		while (len) {
			ASSERT(mp->b_wptr >= mp->b_rptr);
			n = MIN(mp->b_wptr - mp->b_rptr, len);
			mp->b_rptr += n;
			len -= n;

			/*
			 * If this is not the first zero length
			 * message remove it
			 */
			if (!first && (mp->b_wptr == mp->b_rptr)) {
				bcont = mp->b_cont;
				freeb(mp);
				mp = save_bp->b_cont = bcont;
			} else {
				save_bp = mp;
				mp = mp->b_cont;
			}
			first = 0;
		}
	} else {
		/*
		 * Trimming from the tail only considers the leading run of
		 * mblks whose db_type matches the first mblk's type; each
		 * pass re-walks to the last such mblk and trims it.
		 */
		type = mp->b_datap->db_type;
		while (len) {
			bp = mp;
			save_bp = NULL;

			/*
			 * Find the last message of same type
			 */
			while (bp && bp->b_datap->db_type == type) {
				ASSERT(bp->b_wptr >= bp->b_rptr);
				prev_bp = save_bp;
				save_bp = bp;
				bp = bp->b_cont;
			}
			if (save_bp == NULL)
				break;
			n = MIN(save_bp->b_wptr - save_bp->b_rptr, len);
			save_bp->b_wptr -= n;
			len -= n;

			/*
			 * If this is not the first message
			 * and we have taken away everything
			 * from this message, remove it
			 */

			if ((save_bp != mp) &&
			    (save_bp->b_wptr == save_bp->b_rptr)) {
				bcont = save_bp->b_cont;
				freeb(save_bp);
				prev_bp->b_cont = bcont;
			}
		}
	}
	return (1);
}

/*
 * Get the number of data bytes in a message: the sum of the lengths
 * of all M_DATA mblks in the b_cont chain.
 */
size_t
msgdsize(mblk_t *bp)
{
	size_t count = 0;

	for (; bp; bp = bp->b_cont)
		if (bp->b_datap->db_type == M_DATA) {
			ASSERT(bp->b_wptr >= bp->b_rptr);
			count += bp->b_wptr - bp->b_rptr;
		}
	return (count);
}

/*
 * Get a message off head of queue
 *
 * If queue has no buffers then mark queue
 * with QWANTR. (queue wants to be read by
 * someone when data becomes available)
 *
 * If there is something to take off then do so.
 * If queue falls below hi water mark turn off QFULL
 * flag.  Decrement weighted count of queue.
 * Also turn off QWANTR because queue is being read.
 *
 * The queue count is maintained on a per-band basis.
 * Priority band 0 (normal messages) uses q_count,
 * q_lowat, etc.  Non-zero priority bands use the
 * fields in their respective qband structures
 * (qb_count, qb_lowat, etc.)  All messages appear
 * on the same list, linked via their b_next pointers.
 * q_first is the head of the list.  q_count does
 * not reflect the size of all the messages on the
 * queue.  It only reflects those messages in the
 * normal band of flow.  The one exception to this
 * deals with high priority messages.  They are in
 * their own conceptual "band", but are accounted
 * against q_count.
 *
 * If the queue count is below the low-water mark and QWANTW
 * is set, enable the closest backq which has a service
 * procedure and turn off the QWANTW flag.
 *
 * getq could be built on top of rmvq, but isn't because
 * of performance considerations.
 *
 * A note on the use of q_count and q_mblkcnt:
 * q_count is the traditional byte count for messages that
 * have been put on a queue.  Documentation tells us that
 * we shouldn't rely on that count, but some drivers/modules
 * do.  What was needed, however, is a mechanism to prevent
 * runaway streams from consuming all of the resources, and
 * particularly to be able to flow-control zero-length
 * messages.  q_mblkcnt is used for this purpose.  It
 * counts the number of mblk's that are being put on
 * the queue.  The intention here is that each mblk should
 * contain one byte of data and, for the purpose of
 * flow-control, logically does.  A queue will become
 * full when EITHER of these values (q_count and q_mblkcnt)
 * reaches the high-water mark.  It will clear when BOTH
 * of them drop below the high-water mark.  And it will
 * backenable when BOTH of them drop below the low-water
 * mark.
 * With this algorithm, a driver/module might be able
 * to find a reasonably accurate q_count, and the
 * framework can still try to limit resource usage.
17010Sstevel@tonic-gate */ 17020Sstevel@tonic-gate mblk_t * 17030Sstevel@tonic-gate getq(queue_t *q) 17040Sstevel@tonic-gate { 17050Sstevel@tonic-gate mblk_t *bp; 1706235Smicheng uchar_t band = 0; 17070Sstevel@tonic-gate 17086769Sja97890 bp = getq_noenab(q, 0); 17090Sstevel@tonic-gate if (bp != NULL) 17100Sstevel@tonic-gate band = bp->b_band; 17110Sstevel@tonic-gate 17120Sstevel@tonic-gate /* 17130Sstevel@tonic-gate * Inlined from qbackenable(). 17140Sstevel@tonic-gate * Quick check without holding the lock. 17150Sstevel@tonic-gate */ 17160Sstevel@tonic-gate if (band == 0 && (q->q_flag & (QWANTW|QWANTWSYNC)) == 0) 17170Sstevel@tonic-gate return (bp); 17180Sstevel@tonic-gate 17190Sstevel@tonic-gate qbackenable(q, band); 17200Sstevel@tonic-gate return (bp); 17210Sstevel@tonic-gate } 17220Sstevel@tonic-gate 17230Sstevel@tonic-gate /* 1724741Smasputra * Calculate number of data bytes in a single data message block taking 1725741Smasputra * multidata messages into account. 1726741Smasputra */ 1727741Smasputra 1728741Smasputra #define ADD_MBLK_SIZE(mp, size) \ 1729741Smasputra if (DB_TYPE(mp) != M_MULTIDATA) { \ 1730741Smasputra (size) += MBLKL(mp); \ 1731741Smasputra } else { \ 1732741Smasputra uint_t pinuse; \ 1733741Smasputra \ 1734741Smasputra mmd_getsize(mmd_getmultidata(mp), NULL, &pinuse); \ 1735741Smasputra (size) += pinuse; \ 1736741Smasputra } 1737741Smasputra 1738741Smasputra /* 17396769Sja97890 * Returns the number of bytes in a message (a message is defined as a 17406769Sja97890 * chain of mblks linked by b_cont). If a non-NULL mblkcnt is supplied we 17416769Sja97890 * also return the number of distinct mblks in the message. 
17426769Sja97890 */ 17436769Sja97890 int 17446769Sja97890 mp_cont_len(mblk_t *bp, int *mblkcnt) 17456769Sja97890 { 17466769Sja97890 mblk_t *mp; 17476769Sja97890 int mblks = 0; 17486769Sja97890 int bytes = 0; 17496769Sja97890 17506769Sja97890 for (mp = bp; mp != NULL; mp = mp->b_cont) { 17516769Sja97890 ADD_MBLK_SIZE(mp, bytes); 17526769Sja97890 mblks++; 17536769Sja97890 } 17546769Sja97890 17556769Sja97890 if (mblkcnt != NULL) 17566769Sja97890 *mblkcnt = mblks; 17576769Sja97890 17586769Sja97890 return (bytes); 17596769Sja97890 } 17606769Sja97890 17616769Sja97890 /* 17620Sstevel@tonic-gate * Like getq() but does not backenable. This is used by the stream 17630Sstevel@tonic-gate * head when a putback() is likely. The caller must call qbackenable() 17640Sstevel@tonic-gate * after it is done with accessing the queue. 17656769Sja97890 * The rbytes arguments to getq_noneab() allows callers to specify a 17666769Sja97890 * the maximum number of bytes to return. If the current amount on the 17676769Sja97890 * queue is less than this then the entire message will be returned. 17686769Sja97890 * A value of 0 returns the entire message and is equivalent to the old 17696769Sja97890 * default behaviour prior to the addition of the rbytes argument. 
 */
mblk_t *
getq_noenab(queue_t *q, ssize_t rbytes)
{
	mblk_t *bp, *mp1;
	mblk_t *mp2 = NULL;
	qband_t *qbp;
	kthread_id_t freezer;
	int bytecnt = 0, mblkcnt = 0;

	/* freezestr should allow its caller to call getq/putq */
	freezer = STREAM(q)->sd_freezer;
	if (freezer == curthread) {
		ASSERT(frozenstr(q));
		ASSERT(MUTEX_HELD(QLOCK(q)));
	} else
		mutex_enter(QLOCK(q));

	if ((bp = q->q_first) == 0) {
		q->q_flag |= QWANTR;
	} else {
		/*
		 * If the caller supplied a byte threshold and there is
		 * more than this amount on the queue then break up the
		 * message appropriately.  We can only safely do
		 * this for M_DATA messages.
		 */
		if ((DB_TYPE(bp) == M_DATA) && (rbytes > 0) &&
		    (q->q_count > rbytes)) {
			/*
			 * Inline version of mp_cont_len() which terminates
			 * when we meet or exceed rbytes.
			 */
			for (mp1 = bp; mp1 != NULL; mp1 = mp1->b_cont) {
				mblkcnt++;
				ADD_MBLK_SIZE(mp1, bytecnt);
				if (bytecnt >= rbytes)
					break;
			}
			/*
			 * We need to account for the following scenarios:
			 *
			 * 1) Too much data in the first message:
			 *	mp1 will be the mblk which puts us over our
			 *	byte limit.
			 * 2) Not enough data in the first message:
			 *	mp1 will be NULL.
			 * 3) Exactly the right amount of data contained within
			 *    whole mblks:
			 *	mp1->b_cont will be where we break the message.
			 */
			if (bytecnt > rbytes) {
				/*
				 * Dup/copy mp1 and put what we don't need
				 * back onto the queue.  Adjust the read/write
				 * and continuation pointers appropriately
				 * and decrement the current mblk count to
				 * reflect we are putting an mblk back onto
				 * the queue.
				 * When adjusting the message pointers, it's
				 * OK to use the existing bytecnt and the
				 * requested amount (rbytes) to calculate
				 * the new write offset (b_wptr) of what we
				 * are taking.  However, we cannot use these
				 * values when calculating the read offset of
				 * the mblk we are putting back on the queue.
				 * This is because the beginning (b_rptr) of
				 * the mblk represents some arbitrary point
				 * within the message.
				 * It's simplest to do this by advancing b_rptr
				 * by the new length of mp1 as we don't have to
				 * remember any intermediate state.
				 */
				ASSERT(mp1 != NULL);
				mblkcnt--;
				if ((mp2 = dupb(mp1)) == NULL &&
				    (mp2 = copyb(mp1)) == NULL) {
					bytecnt = mblkcnt = 0;
					goto dup_failed;
				}
				mp2->b_cont = mp1->b_cont;
				mp1->b_wptr -= bytecnt - rbytes;
				mp2->b_rptr += mp1->b_wptr - mp1->b_rptr;
				mp1->b_cont = NULL;
				bytecnt = rbytes;
			} else {
				/*
				 * Either there is not enough data in the first
				 * message or there is no excess data to deal
				 * with.  If mp1 is NULL, we are taking the
				 * whole message.  No need to do anything.
				 * Otherwise we assign mp1->b_cont to mp2 as
				 * we will be putting this back onto the head
				 * of the queue.
				 */
				if (mp1 != NULL) {
					mp2 = mp1->b_cont;
					mp1->b_cont = NULL;
				}
			}
			/*
			 * If mp2 is not NULL then we have part of the message
			 * to put back onto the queue.
			 */
			if (mp2 != NULL) {
				if ((mp2->b_next = bp->b_next) == NULL)
					q->q_last = mp2;
				else
					bp->b_next->b_prev = mp2;
				q->q_first = mp2;
			} else {
				if ((q->q_first = bp->b_next) == NULL)
					q->q_last = NULL;
				else
					q->q_first->b_prev = NULL;
			}
		} else {
			/*
			 * Either no byte threshold was supplied, there is
			 * not enough on the queue or we failed to
			 * duplicate/copy a data block.  In these cases we
			 * just take the entire first message.
			 */
dup_failed:
			bytecnt = mp_cont_len(bp, &mblkcnt);
			if ((q->q_first = bp->b_next) == NULL)
				q->q_last = NULL;
			else
				q->q_first->b_prev = NULL;
		}
		if (bp->b_band == 0) {
			/* Normal band: adjust q_count/q_mblkcnt accounting */
			q->q_count -= bytecnt;
			q->q_mblkcnt -= mblkcnt;
			if (q->q_mblkcnt == 0 || ((q->q_count < q->q_hiwat) &&
			    (q->q_mblkcnt < q->q_hiwat))) {
				q->q_flag &= ~QFULL;
			}
		} else {
			/* Priority band: adjust the qband_t accounting */
			int i;

			ASSERT(bp->b_band <= q->q_nband);
			ASSERT(q->q_bandp != NULL);
			ASSERT(MUTEX_HELD(QLOCK(q)));
			qbp = q->q_bandp;
			i = bp->b_band;
			while (--i > 0)
				qbp = qbp->qb_next;
			if (qbp->qb_first == qbp->qb_last) {
				qbp->qb_first = NULL;
				qbp->qb_last = NULL;
			} else {
				qbp->qb_first = bp->b_next;
			}
			qbp->qb_count -= bytecnt;
			qbp->qb_mblkcnt -= mblkcnt;
			if (qbp->qb_mblkcnt == 0 ||
			    ((qbp->qb_count < qbp->qb_hiwat) &&
			    (qbp->qb_mblkcnt < qbp->qb_hiwat))) {
				qbp->qb_flag &= ~QB_FULL;
			}
		}
		q->q_flag &= ~QWANTR;
		bp->b_next = NULL;
		bp->b_prev = NULL;
	}
	if (freezer != curthread)
		mutex_exit(QLOCK(q));

	STR_FTEVENT_MSG(bp, q, FTEV_GETQ, NULL);

	return (bp);
}

/*
 * Determine if a backenable is needed after removing a message in the
 * specified band.
 * NOTE: This routine assumes that something like getq_noenab() has been
 * already called.
 *
 * For the read side it is ok to hold sd_lock across calling this (and the
 * stream head often does).
 * But for the write side strwakeq might be invoked and it acquires sd_lock.
 */
void
qbackenable(queue_t *q, uchar_t band)
{
	int backenab = 0;
	qband_t *qbp;
	kthread_id_t freezer;

	ASSERT(q);
	ASSERT((q->q_flag & QREADR) || MUTEX_NOT_HELD(&STREAM(q)->sd_lock));

	/*
	 * Quick check without holding the lock.
	 * OK since after getq() has lowered the q_count these flags
	 * would not change unless either the qbackenable() is done by
	 * another thread (which is ok) or the queue has gotten QFULL
	 * in which case another backenable will take place when the queue
	 * drops below q_lowat.
 */
	if (band == 0 && (q->q_flag & (QWANTW|QWANTWSYNC)) == 0)
		return;

	/* freezestr should allow its caller to call getq/putq */
	freezer = STREAM(q)->sd_freezer;
	if (freezer == curthread) {
		ASSERT(frozenstr(q));
		ASSERT(MUTEX_HELD(QLOCK(q)));
	} else
		mutex_enter(QLOCK(q));

	if (band == 0) {
		/* Backenable if the normal band is at/below low water */
		if (q->q_lowat == 0 || (q->q_count < q->q_lowat &&
		    q->q_mblkcnt < q->q_lowat)) {
			backenab = q->q_flag & (QWANTW|QWANTWSYNC);
		}
	} else {
		int i;

		ASSERT((unsigned)band <= q->q_nband);
		ASSERT(q->q_bandp != NULL);

		/* Walk to the qband_t for this priority band */
		qbp = q->q_bandp;
		i = band;
		while (--i > 0)
			qbp = qbp->qb_next;

		if (qbp->qb_lowat == 0 || (qbp->qb_count < qbp->qb_lowat &&
		    qbp->qb_mblkcnt < qbp->qb_lowat)) {
			backenab = qbp->qb_flag & QB_WANTW;
		}
	}

	if (backenab == 0) {
		if (freezer != curthread)
			mutex_exit(QLOCK(q));
		return;
	}

	/* Have to drop the lock across strwakeq and backenable */
	if (backenab & QWANTWSYNC)
		q->q_flag &= ~QWANTWSYNC;
	if (backenab & (QWANTW|QB_WANTW)) {
		if (band != 0)
			qbp->qb_flag &= ~QB_WANTW;
		else {
			q->q_flag &= ~QWANTW;
		}
	}

	if (freezer != curthread)
		mutex_exit(QLOCK(q));

	if (backenab & QWANTWSYNC)
		strwakeq(q, QWANTWSYNC);
	if (backenab & (QWANTW|QB_WANTW))
		backenable(q, band);
}

/*
 * Remove a message from a queue.  The queue count and other
 * flow control parameters are adjusted and the back queue
 * enabled if necessary.
 *
 * rmvq can be called with the stream frozen, but other utility functions
 * holding QLOCK, and by streams modules without any locks/frozen.
 */
void
rmvq(queue_t *q, mblk_t *mp)
{
	ASSERT(mp != NULL);

	rmvq_noenab(q, mp);
	if (curthread != STREAM(q)->sd_freezer && MUTEX_HELD(QLOCK(q))) {
		/*
		 * qbackenable can handle a frozen stream but not a "random"
		 * qlock being held.  Drop lock across qbackenable.
 */
		mutex_exit(QLOCK(q));
		qbackenable(q, mp->b_band);
		mutex_enter(QLOCK(q));
	} else {
		qbackenable(q, mp->b_band);
	}
}

/*
 * Like rmvq() but without any backenabling.
 * This exists to handle SR_CONSOL_DATA in strrput().
 */
void
rmvq_noenab(queue_t *q, mblk_t *mp)
{
	int i;
	qband_t *qbp = NULL;
	kthread_id_t freezer;
	int bytecnt = 0, mblkcnt = 0;

	/*
	 * Take QLOCK unless the stream is frozen by us or the caller
	 * already holds it; in those cases do not drop it on exit.
	 */
	freezer = STREAM(q)->sd_freezer;
	if (freezer == curthread) {
		ASSERT(frozenstr(q));
		ASSERT(MUTEX_HELD(QLOCK(q)));
	} else if (MUTEX_HELD(QLOCK(q))) {
		/* Don't drop lock on exit */
		freezer = curthread;
	} else
		mutex_enter(QLOCK(q));

	ASSERT(mp->b_band <= q->q_nband);
	if (mp->b_band != 0) {		/* Adjust band pointers */
		ASSERT(q->q_bandp != NULL);
		qbp = q->q_bandp;
		i = mp->b_band;
		while (--i > 0)
			qbp = qbp->qb_next;
		if (mp == qbp->qb_first) {
			if (mp->b_next && mp->b_band == mp->b_next->b_band)
				qbp->qb_first = mp->b_next;
			else
				qbp->qb_first = NULL;
		}
		if (mp == qbp->qb_last) {
			if (mp->b_prev && mp->b_band == mp->b_prev->b_band)
				qbp->qb_last = mp->b_prev;
			else
				qbp->qb_last = NULL;
		}
	}

	/*
	 * Remove the message from the list.
	 */
	if (mp->b_prev)
		mp->b_prev->b_next = mp->b_next;
	else
		q->q_first = mp->b_next;
	if (mp->b_next)
		mp->b_next->b_prev = mp->b_prev;
	else
		q->q_last = mp->b_prev;
	mp->b_next = NULL;
	mp->b_prev = NULL;

	/* Get the size of the message for q_count accounting */
	bytecnt = mp_cont_len(mp, &mblkcnt);

	if (mp->b_band == 0) {		/* Perform q_count accounting */
		q->q_count -= bytecnt;
		q->q_mblkcnt -= mblkcnt;
		if (q->q_mblkcnt == 0 || ((q->q_count < q->q_hiwat) &&
		    (q->q_mblkcnt < q->q_hiwat))) {
			q->q_flag &= ~QFULL;
		}
	} else {			/* Perform qb_count accounting */
		qbp->qb_count -= bytecnt;
		qbp->qb_mblkcnt -= mblkcnt;
		if (qbp->qb_mblkcnt == 0 || ((qbp->qb_count < qbp->qb_hiwat) &&
		    (qbp->qb_mblkcnt < qbp->qb_hiwat))) {
			qbp->qb_flag &= ~QB_FULL;
		}
	}
	if (freezer != curthread)
		mutex_exit(QLOCK(q));

	STR_FTEVENT_MSG(mp, q, FTEV_RMVQ, NULL);
}

/*
 * Empty a queue.
 * If flag is set, remove all messages.  Otherwise, remove
 * only non-control messages.  If queue falls below its low
 * water mark, and QWANTW is set, enable the nearest upstream
 * service procedure.
 *
 * Historical note: when merging the M_FLUSH code in strrput with this
 * code one difference was discovered. flushq did not have a check
 * for q_lowat == 0 in the backenabling test.
 *
 * pcproto_flag specifies whether or not a M_PCPROTO message should be flushed
 * if one exists on the queue.
 */
void
flushq_common(queue_t *q, int flag, int pcproto_flag)
{
	mblk_t *mp, *nmp;
	qband_t *qbp;
	int backenab = 0;
	unsigned char bpri;
	unsigned char qbf[NBAND];	/* band flushing backenable flags */

	if (q->q_first == NULL)
		return;

	/*
	 * Detach the whole message chain and zero all flow-control
	 * counters under QLOCK, then process the chain unlocked (freemsg
	 * and putq must not be called with QLOCK held).
	 */
	mutex_enter(QLOCK(q));
	mp = q->q_first;
	q->q_first = NULL;
	q->q_last = NULL;
	q->q_count = 0;
	q->q_mblkcnt = 0;
	for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next) {
		qbp->qb_first = NULL;
		qbp->qb_last = NULL;
		qbp->qb_count = 0;
		qbp->qb_mblkcnt = 0;
		qbp->qb_flag &= ~QB_FULL;
	}
	q->q_flag &= ~QFULL;
	mutex_exit(QLOCK(q));
	while (mp) {
		nmp = mp->b_next;
		mp->b_next = mp->b_prev = NULL;

		STR_FTEVENT_MBLK(mp, q, FTEV_FLUSHQ, NULL);

		/* Survivors are re-queued via putq(); the rest are freed. */
		if (pcproto_flag && (mp->b_datap->db_type == M_PCPROTO))
			(void) putq(q, mp);
		else if (flag || datamsg(mp->b_datap->db_type))
			freemsg(mp);
		else
			(void) putq(q, mp);
		mp = nmp;
	}
	/*
	 * Second pass under QLOCK: record, per band (qbf[1..]) and for the
	 * normal band (qbf[0]), whether a waiting writer can be backenabled.
	 */
	bpri = 1;
	mutex_enter(QLOCK(q));
	for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next) {
		if ((qbp->qb_flag & QB_WANTW) &&
		    (((qbp->qb_count < qbp->qb_lowat) &&
		    (qbp->qb_mblkcnt < qbp->qb_lowat)) ||
		    qbp->qb_lowat == 0)) {
			qbp->qb_flag &= ~QB_WANTW;
			backenab = 1;
			qbf[bpri] = 1;
		} else
			qbf[bpri] = 0;
		bpri++;
	}
	ASSERT(bpri == (unsigned char)(q->q_nband + 1));
	if ((q->q_flag & QWANTW) &&
	    (((q->q_count < q->q_lowat) &&
	    (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) {
		q->q_flag &= ~QWANTW;
		backenab = 1;
		qbf[0] = 1;
	} else
		qbf[0] = 0;

	/*
	 * If any band can now be written to, and there is a writer
	 * for that band, then backenable the closest service procedure.
	 */
	if (backenab) {
		mutex_exit(QLOCK(q));
		for (bpri = q->q_nband; bpri != 0; bpri--)
			if (qbf[bpri])
				backenable(q, bpri);
		if (qbf[0])
			backenable(q, 0);
	} else
		mutex_exit(QLOCK(q));
}

/*
 * The real flushing takes place in flushq_common.  This wrapper exists so
 * that a flag can specify whether or not M_PCPROTO messages should be
 * preserved.  Currently the only place that uses this flag is the stream
 * head, so flushq() never preserves M_PCPROTO.
 */
void
flushq(queue_t *q, int flag)
{
	flushq_common(q, flag, 0);
}

/*
 * Flush the queue of messages of the given priority band.
 * There is some duplication of code between flushq and flushband.
 * This is because we want to optimize the code as much as possible.
 * The assumption is that there will be more messages in the normal
 * (priority 0) band than in any other.
 *
 * Historical note: when merging the M_FLUSH code in strrput with this
 * code one difference was discovered. flushband had an extra check for
 * (mp->b_datap->db_type < QPCTL) in the band 0 case. That check does not
 * match the man page for flushband and was not in the strrput flush code,
 * hence it was removed.
 */
void
flushband(queue_t *q, unsigned char pri, int flag)
{
	mblk_t *mp;
	mblk_t *nmp;
	mblk_t *last;
	qband_t *qbp;
	int band;

	ASSERT((flag == FLUSHDATA) || (flag == FLUSHALL));
	if (pri > q->q_nband) {
		/* No such band was ever allocated; nothing to flush. */
		return;
	}
	mutex_enter(QLOCK(q));
	if (pri == 0) {
		/*
		 * Band 0: detach the entire chain and zero the counters
		 * under QLOCK, then re-queue the non-band-0 (and, for
		 * FLUSHDATA, non-data) messages via putq() unlocked.
		 */
		mp = q->q_first;
		q->q_first = NULL;
		q->q_last = NULL;
		q->q_count = 0;
		q->q_mblkcnt = 0;
		for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next) {
			qbp->qb_first = NULL;
			qbp->qb_last = NULL;
			qbp->qb_count = 0;
			qbp->qb_mblkcnt = 0;
			qbp->qb_flag &= ~QB_FULL;
		}
		q->q_flag &= ~QFULL;
		mutex_exit(QLOCK(q));
		while (mp) {
			nmp = mp->b_next;
			mp->b_next = mp->b_prev = NULL;
			if ((mp->b_band == 0) &&
			    ((flag == FLUSHALL) ||
			    datamsg(mp->b_datap->db_type)))
				freemsg(mp);
			else
				(void) putq(q, mp);
			mp = nmp;
		}
		mutex_enter(QLOCK(q));
		/* Backenable the writer if we dropped below low water. */
		if ((q->q_flag & QWANTW) &&
		    (((q->q_count < q->q_lowat) &&
		    (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) {
			q->q_flag &= ~QWANTW;
			mutex_exit(QLOCK(q));

			backenable(q, pri);
		} else
			mutex_exit(QLOCK(q));
	} else {	/* pri != 0 */
		boolean_t flushed = B_FALSE;
		band = pri;

		ASSERT(MUTEX_HELD(QLOCK(q)));
		qbp = q->q_bandp;
		while (--band > 0)
			qbp = qbp->qb_next;
		mp = qbp->qb_first;
		if (mp == NULL) {
			mutex_exit(QLOCK(q));
			return;
		}
		/* One past the band's last message bounds the loop below. */
		last = qbp->qb_last->b_next;
		/*
		 * rmvq_noenab() and freemsg() are called for each mblk that
		 * meets the criteria.  The loop is executed until the last
		 * mblk has been processed.
		 */
		while (mp != last) {
			ASSERT(mp->b_band == pri);
			nmp = mp->b_next;
			if (flag == FLUSHALL || datamsg(mp->b_datap->db_type)) {
				rmvq_noenab(q, mp);
				freemsg(mp);
				flushed = B_TRUE;
			}
			mp = nmp;
		}
		mutex_exit(QLOCK(q));

		/*
		 * If any mblk(s) has been freed, we know that qbackenable()
		 * will need to be called.
		 */
		if (flushed)
			qbackenable(q, pri);
	}
}

/*
 * Return 1 if the queue is not full.  If the queue is full, return
 * 0 (may not put message) and set QWANTW flag (caller wants to write
 * to the queue).
 */
int
canput(queue_t *q)
{
	TRACE_1(TR_FAC_STREAMS_FR, TR_CANPUT_IN, "canput:%p", q);

	/* this is for loopback transports, they should not do a canput */
	ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(q->q_nfsrv));

	/* Find next forward module that has a service procedure */
	q = q->q_nfsrv;

	/* Fast path: unlocked read; QFULL clear means OK to put. */
	if (!(q->q_flag & QFULL)) {
		TRACE_2(TR_FAC_STREAMS_FR, TR_CANPUT_OUT, "canput:%p %d", q, 1);
		return (1);
	}
	/* Re-check under QLOCK before recording the writer's interest. */
	mutex_enter(QLOCK(q));
	if (q->q_flag & QFULL) {
		q->q_flag |= QWANTW;
		mutex_exit(QLOCK(q));
		TRACE_2(TR_FAC_STREAMS_FR, TR_CANPUT_OUT, "canput:%p %d", q, 0);
		return (0);
	}
	mutex_exit(QLOCK(q));
	TRACE_2(TR_FAC_STREAMS_FR, TR_CANPUT_OUT, "canput:%p %d", q, 1);
	return (1);
}

/*
 * This is the new canput for use with priority bands.  Return 1 if the
 * band is not full.  If the band is full, return 0 (may not put message)
 * and set QWANTW(QB_WANTW) flag for zero(non-zero) band (caller wants to
 * write to the queue).
 */
int
bcanput(queue_t *q, unsigned char pri)
{
	qband_t *qbp;

	/*
	 * NOTE(review): the second %p below formats the unsigned char
	 * 'pri'; %X (as used in the OUT traces) would match better —
	 * confirm against the TRACE_2 macro's argument handling.
	 */
	TRACE_2(TR_FAC_STREAMS_FR, TR_BCANPUT_IN, "bcanput:%p %p", q, pri);
	if (!q)
		return (0);

	/* Find next forward module that has a service procedure */
	q = q->q_nfsrv;

	mutex_enter(QLOCK(q));
	if (pri == 0) {
		if (q->q_flag & QFULL) {
			/* Record writer's interest so it gets backenabled. */
			q->q_flag |= QWANTW;
			mutex_exit(QLOCK(q));
			TRACE_3(TR_FAC_STREAMS_FR, TR_BCANPUT_OUT,
			    "bcanput:%p %X %d", q, pri, 0);
			return (0);
		}
	} else {	/* pri != 0 */
		if (pri > q->q_nband) {
			/*
			 * No band exists yet, so return success.
			 */
			mutex_exit(QLOCK(q));
			TRACE_3(TR_FAC_STREAMS_FR, TR_BCANPUT_OUT,
			    "bcanput:%p %X %d", q, pri, 1);
			return (1);
		}
		/* Walk the qband list to the requested band. */
		qbp = q->q_bandp;
		while (--pri)
			qbp = qbp->qb_next;
		if (qbp->qb_flag & QB_FULL) {
			qbp->qb_flag |= QB_WANTW;
			mutex_exit(QLOCK(q));
			TRACE_3(TR_FAC_STREAMS_FR, TR_BCANPUT_OUT,
			    "bcanput:%p %X %d", q, pri, 0);
			return (0);
		}
	}
	mutex_exit(QLOCK(q));
	TRACE_3(TR_FAC_STREAMS_FR, TR_BCANPUT_OUT,
	    "bcanput:%p %X %d", q, pri, 1);
	return (1);
}

/*
 * Put a message on a queue.
 *
 * Messages are enqueued on a priority basis.  The priority classes
 * are HIGH PRIORITY (type >= QPCTL), PRIORITY (type < QPCTL && band > 0),
 * and B_NORMAL (type < QPCTL && band == 0).
 *
 * Add appropriate weighted data block sizes to queue count.
 * If queue hits high water mark then set QFULL flag.
 *
 * If QNOENAB is not set (putq is allowed to enable the queue),
 * enable the queue only if the message is PRIORITY,
 * or the QWANTR flag is set (indicating that the service procedure
 * is ready to read the queue).
 * This implies that a service
 * procedure must NEVER put a high priority message back on its own
 * queue, as this would result in an infinite loop (!).
 *
 * Returns 1 on success, or 0 if a needed qband structure could not
 * be allocated.
 */
int
putq(queue_t *q, mblk_t *bp)
{
	mblk_t *tmp;
	qband_t *qbp = NULL;
	int mcls = (int)queclass(bp);
	kthread_id_t freezer;
	int bytecnt = 0, mblkcnt = 0;

	/* If the caller froze the stream it already holds QLOCK. */
	freezer = STREAM(q)->sd_freezer;
	if (freezer == curthread) {
		ASSERT(frozenstr(q));
		ASSERT(MUTEX_HELD(QLOCK(q)));
	} else
		mutex_enter(QLOCK(q));

	/*
	 * Make sanity checks and if qband structure is not yet
	 * allocated, do so.
	 */
	if (mcls == QPCTL) {
		if (bp->b_band != 0)
			bp->b_band = 0;		/* force to be correct */
	} else if (bp->b_band != 0) {
		int i;
		qband_t **qbpp;

		if (bp->b_band > q->q_nband) {

			/*
			 * The qband structure for this priority band is
			 * not on the queue yet, so we have to allocate
			 * one on the fly.  It would be wasteful to
			 * associate the qband structures with every
			 * queue when the queues are allocated.  This is
			 * because most queues will only need the normal
			 * band of flow which can be described entirely
			 * by the queue itself.
			 */
			qbpp = &q->q_bandp;
			while (*qbpp)
				qbpp = &(*qbpp)->qb_next;
			while (bp->b_band > q->q_nband) {
				if ((*qbpp = allocband()) == NULL) {
					if (freezer != curthread)
						mutex_exit(QLOCK(q));
					return (0);
				}
				(*qbpp)->qb_hiwat = q->q_hiwat;
				(*qbpp)->qb_lowat = q->q_lowat;
				q->q_nband++;
				qbpp = &(*qbpp)->qb_next;
			}
		}
		ASSERT(MUTEX_HELD(QLOCK(q)));
		/* Walk to the qband for this message's band. */
		qbp = q->q_bandp;
		i = bp->b_band;
		while (--i)
			qbp = qbp->qb_next;
	}

	/*
	 * If queue is empty, add the message and initialize the pointers.
	 * Otherwise, adjust message pointers and queue pointers based on
	 * the type of the message and where it belongs on the queue.  Some
	 * code is duplicated to minimize the number of conditionals and
	 * hopefully minimize the amount of time this routine takes.
	 */
	if (!q->q_first) {
		bp->b_next = NULL;
		bp->b_prev = NULL;
		q->q_first = bp;
		q->q_last = bp;
		if (qbp) {
			qbp->qb_first = bp;
			qbp->qb_last = bp;
		}
	} else if (!qbp) {	/* bp->b_band == 0 */

		/*
		 * If queue class of message is less than or equal to
		 * that of the last one on the queue, tack on to the end.
		 */
		tmp = q->q_last;
		if (mcls <= (int)queclass(tmp)) {
			bp->b_next = NULL;
			bp->b_prev = tmp;
			tmp->b_next = bp;
			q->q_last = bp;
		} else {
			/* Skip past the higher-class messages at the head. */
			tmp = q->q_first;
			while ((int)queclass(tmp) >= mcls)
				tmp = tmp->b_next;

			/*
			 * Insert bp before tmp.
			 */
			bp->b_next = tmp;
			bp->b_prev = tmp->b_prev;
			if (tmp->b_prev)
				tmp->b_prev->b_next = bp;
			else
				q->q_first = bp;
			tmp->b_prev = bp;
		}
	} else {		/* bp->b_band != 0 */
		if (qbp->qb_first) {
			tmp = qbp->qb_last;

			/*
			 * Insert bp after the last message in this band.
			 */
			bp->b_next = tmp->b_next;
			if (tmp->b_next)
				tmp->b_next->b_prev = bp;
			else
				q->q_last = bp;
			bp->b_prev = tmp;
			tmp->b_next = bp;
		} else {
			/* Band currently empty: find the insertion point. */
			tmp = q->q_last;
			if ((mcls < (int)queclass(tmp)) ||
			    (bp->b_band <= tmp->b_band)) {

				/*
				 * Tack bp on end of queue.
				 */
				bp->b_next = NULL;
				bp->b_prev = tmp;
				tmp->b_next = bp;
				q->q_last = bp;
			} else {
				/*
				 * Skip high-priority messages, then any
				 * messages of an equal or higher band.
				 */
				tmp = q->q_first;
				while (tmp->b_datap->db_type >= QPCTL)
					tmp = tmp->b_next;
				while (tmp->b_band >= bp->b_band)
					tmp = tmp->b_next;

				/*
				 * Insert bp before tmp.
				 */
				bp->b_next = tmp;
				bp->b_prev = tmp->b_prev;
				if (tmp->b_prev)
					tmp->b_prev->b_next = bp;
				else
					q->q_first = bp;
				tmp->b_prev = bp;
			}
			qbp->qb_first = bp;
		}
		qbp->qb_last = bp;
	}

	/* Get message byte count for q_count accounting */
	bytecnt = mp_cont_len(bp, &mblkcnt);

	if (qbp) {
		qbp->qb_count += bytecnt;
		qbp->qb_mblkcnt += mblkcnt;
		if ((qbp->qb_count >= qbp->qb_hiwat) ||
		    (qbp->qb_mblkcnt >= qbp->qb_hiwat)) {
			qbp->qb_flag |= QB_FULL;
		}
	} else {
		q->q_count += bytecnt;
		q->q_mblkcnt += mblkcnt;
		if ((q->q_count >= q->q_hiwat) ||
		    (q->q_mblkcnt >= q->q_hiwat)) {
			q->q_flag |= QFULL;
		}
	}

	STR_FTEVENT_MSG(bp, q, FTEV_PUTQ, NULL);

	/* Enable the queue for high-priority, banded, or wanted reads. */
	if ((mcls > QNORM) ||
	    (canenable(q) && (q->q_flag & QWANTR || bp->b_band)))
		qenable_locked(q);
	ASSERT(MUTEX_HELD(QLOCK(q)));
	if (freezer != curthread)
		mutex_exit(QLOCK(q));

	return (1);
}

/*
 * Put stuff back at beginning of Q according to priority order.
 * See comment on putq above for details.
 *
 * Returns 1 on success, or 0 if a needed qband structure could not
 * be allocated.
 */
int
putbq(queue_t *q, mblk_t *bp)
{
	mblk_t *tmp;
	qband_t *qbp = NULL;
	int mcls = (int)queclass(bp);
	kthread_id_t freezer;
	int bytecnt = 0, mblkcnt = 0;

	ASSERT(q && bp);
	ASSERT(bp->b_next == NULL);
	/* If the caller froze the stream it already holds QLOCK. */
	freezer = STREAM(q)->sd_freezer;
	if (freezer == curthread) {
		ASSERT(frozenstr(q));
		ASSERT(MUTEX_HELD(QLOCK(q)));
	} else
		mutex_enter(QLOCK(q));

	/*
	 * Make sanity checks and if qband structure is not yet
	 * allocated, do so.
	 */
	if (mcls == QPCTL) {
		if (bp->b_band != 0)
			bp->b_band = 0;		/* force to be correct */
	} else if (bp->b_band != 0) {
		int i;
		qband_t **qbpp;

		if (bp->b_band > q->q_nband) {
			/* Allocate qband structures up to bp's band. */
			qbpp = &q->q_bandp;
			while (*qbpp)
				qbpp = &(*qbpp)->qb_next;
			while (bp->b_band > q->q_nband) {
				if ((*qbpp = allocband()) == NULL) {
					if (freezer != curthread)
						mutex_exit(QLOCK(q));
					return (0);
				}
				(*qbpp)->qb_hiwat = q->q_hiwat;
				(*qbpp)->qb_lowat = q->q_lowat;
				q->q_nband++;
				qbpp = &(*qbpp)->qb_next;
			}
		}
		/* Walk to the qband for this message's band. */
		qbp = q->q_bandp;
		i = bp->b_band;
		while (--i)
			qbp = qbp->qb_next;
	}

	/*
	 * If queue is empty or if message is high priority,
	 * place on the front of the queue.
	 */
	tmp = q->q_first;
	if ((!tmp) || (mcls == QPCTL)) {
		bp->b_next = tmp;
		if (tmp)
			tmp->b_prev = bp;
		else
			q->q_last = bp;
		q->q_first = bp;
		bp->b_prev = NULL;
		if (qbp) {
			qbp->qb_first = bp;
			qbp->qb_last = bp;
		}
	} else if (qbp) {	/* bp->b_band != 0 */
		tmp = qbp->qb_first;
		if (tmp) {

			/*
			 * Insert bp before the first message in this band.
			 */
			bp->b_next = tmp;
			bp->b_prev = tmp->b_prev;
			if (tmp->b_prev)
				tmp->b_prev->b_next = bp;
			else
				q->q_first = bp;
			tmp->b_prev = bp;
		} else {
			/* Band currently empty: find the insertion point. */
			tmp = q->q_last;
			if ((mcls < (int)queclass(tmp)) ||
			    (bp->b_band < tmp->b_band)) {

				/*
				 * Tack bp on end of queue.
				 */
				bp->b_next = NULL;
				bp->b_prev = tmp;
				tmp->b_next = bp;
				q->q_last = bp;
			} else {
				/*
				 * Skip high-priority messages, then any
				 * messages of a strictly higher band.
				 */
				tmp = q->q_first;
				while (tmp->b_datap->db_type >= QPCTL)
					tmp = tmp->b_next;
				while (tmp->b_band > bp->b_band)
					tmp = tmp->b_next;

				/*
				 * Insert bp before tmp.
				 */
				bp->b_next = tmp;
				bp->b_prev = tmp->b_prev;
				if (tmp->b_prev)
					tmp->b_prev->b_next = bp;
				else
					q->q_first = bp;
				tmp->b_prev = bp;
			}
			qbp->qb_last = bp;
		}
		qbp->qb_first = bp;
	} else {		/* bp->b_band == 0 && !QPCTL */

		/*
		 * If the queue class or band is less than that of the last
		 * message on the queue, tack bp on the end of the queue.
		 */
		tmp = q->q_last;
		if ((mcls < (int)queclass(tmp)) || (bp->b_band < tmp->b_band)) {
			bp->b_next = NULL;
			bp->b_prev = tmp;
			tmp->b_next = bp;
			q->q_last = bp;
		} else {
			/* Skip high-priority and higher-band messages. */
			tmp = q->q_first;
			while (tmp->b_datap->db_type >= QPCTL)
				tmp = tmp->b_next;
			while (tmp->b_band > bp->b_band)
				tmp = tmp->b_next;

			/*
			 * Insert bp before tmp.
			 */
			bp->b_next = tmp;
			bp->b_prev = tmp->b_prev;
			if (tmp->b_prev)
				tmp->b_prev->b_next = bp;
			else
				q->q_first = bp;
			tmp->b_prev = bp;
		}
	}

	/* Get message byte count for q_count accounting */
	bytecnt = mp_cont_len(bp, &mblkcnt);

	if (qbp) {
		qbp->qb_count += bytecnt;
		qbp->qb_mblkcnt += mblkcnt;
		if ((qbp->qb_count >= qbp->qb_hiwat) ||
		    (qbp->qb_mblkcnt >= qbp->qb_hiwat)) {
			qbp->qb_flag |= QB_FULL;
		}
	} else {
		q->q_count += bytecnt;
		q->q_mblkcnt += mblkcnt;
		if ((q->q_count >= q->q_hiwat) ||
		    (q->q_mblkcnt >= q->q_hiwat)) {
			q->q_flag |= QFULL;
		}
	}

	STR_FTEVENT_MSG(bp, q, FTEV_PUTBQ, NULL);

	if ((mcls > QNORM) || (canenable(q) && (q->q_flag & QWANTR)))
		qenable_locked(q);
	ASSERT(MUTEX_HELD(QLOCK(q)));
	if (freezer != curthread)
		mutex_exit(QLOCK(q));

	return (1);
}

/*
 * Insert a message before an existing message on the queue.  If the
 * existing message is NULL, the new message is placed on the end of
 * the queue.
 * The queue class of the new message is ignored.  However,
 * the priority band of the new message must adhere to the following
 * ordering:
 *
 *	emp->b_prev->b_band >= mp->b_band >= emp->b_band.
 *
 * All flow control parameters are updated.
 *
 * insq can be called with the stream frozen, but other utility functions
 * holding QLOCK, and by streams modules without any locks/frozen.
 *
 * Returns 1 on success, 0 on failure (out-of-order insertion attempt or
 * qband allocation failure).
 */
int
insq(queue_t *q, mblk_t *emp, mblk_t *mp)
{
	mblk_t *tmp;
	qband_t *qbp = NULL;
	int mcls = (int)queclass(mp);
	kthread_id_t freezer;
	int bytecnt = 0, mblkcnt = 0;

	/*
	 * If the stream is frozen by this thread, or QLOCK is already
	 * held by the caller, do not drop the lock on exit; otherwise
	 * acquire it here and release it before returning.
	 */
	freezer = STREAM(q)->sd_freezer;
	if (freezer == curthread) {
		ASSERT(frozenstr(q));
		ASSERT(MUTEX_HELD(QLOCK(q)));
	} else if (MUTEX_HELD(QLOCK(q))) {
		/* Don't drop lock on exit */
		freezer = curthread;
	} else
		mutex_enter(QLOCK(q));

	if (mcls == QPCTL) {
		if (mp->b_band != 0)
			mp->b_band = 0;		/* force to be correct */
		/* High-priority messages may not follow ordinary ones. */
		if (emp && emp->b_prev &&
		    (emp->b_prev->b_datap->db_type < QPCTL))
			goto badord;
	}
	if (emp) {
		/*
		 * Enforce emp->b_prev->b_band >= mp->b_band >= emp->b_band
		 * for ordinary messages (see block comment above).
		 */
		if (((mcls == QNORM) && (mp->b_band < emp->b_band)) ||
		    (emp->b_prev && (emp->b_prev->b_datap->db_type < QPCTL) &&
		    (emp->b_prev->b_band < mp->b_band))) {
			goto badord;
		}
	} else {
		/* Appending: the new message may not outrank the last one. */
		tmp = q->q_last;
		if (tmp && (mcls == QNORM) && (mp->b_band > tmp->b_band)) {
badord:
			cmn_err(CE_WARN,
			    "insq: attempt to insert message out of order "
			    "on q %p", (void *)q);
			if (freezer != curthread)
				mutex_exit(QLOCK(q));
			return (0);
		}
	}

	/*
	 * Allocate any missing qband structures up to mp's band on
	 * demand, then walk the list to the qband for mp->b_band.
	 */
	if (mp->b_band != 0) {
		int i;
		qband_t **qbpp;

		if (mp->b_band > q->q_nband) {
			qbpp = &q->q_bandp;
			while (*qbpp)
				qbpp = &(*qbpp)->qb_next;
			while (mp->b_band > q->q_nband) {
				if ((*qbpp = allocband()) == NULL) {
					if (freezer != curthread)
						mutex_exit(QLOCK(q));
					return (0);
				}
				(*qbpp)->qb_hiwat = q->q_hiwat;
				(*qbpp)->qb_lowat = q->q_lowat;
				q->q_nband++;
				qbpp = &(*qbpp)->qb_next;
			}
		}
		qbp = q->q_bandp;
		i = mp->b_band;
		while (--i)
			qbp = qbp->qb_next;
	}

	/* Link mp in before emp, or at the tail if emp is NULL. */
	if ((mp->b_next = emp) != NULL) {
		if ((mp->b_prev = emp->b_prev) != NULL)
			emp->b_prev->b_next = mp;
		else
			q->q_first = mp;
		emp->b_prev = mp;
	} else {
		if ((mp->b_prev = q->q_last) != NULL)
			q->q_last->b_next = mp;
		else
			q->q_first = mp;
		q->q_last = mp;
	}

	/* Get mblk and byte count for q_count accounting */
	bytecnt = mp_cont_len(mp, &mblkcnt);

	if (qbp) {	/* adjust qband pointers and count */
		if (!qbp->qb_first) {
			qbp->qb_first = mp;
			qbp->qb_last = mp;
		} else {
			/* mp may have become the first/last of its band. */
			if (mp->b_prev == NULL || (mp->b_prev != NULL &&
			    (mp->b_prev->b_band != mp->b_band)))
				qbp->qb_first = mp;
			else if (mp->b_next == NULL || (mp->b_next != NULL &&
			    (mp->b_next->b_band != mp->b_band)))
				qbp->qb_last = mp;
		}
		qbp->qb_count += bytecnt;
		qbp->qb_mblkcnt += mblkcnt;
		if ((qbp->qb_count >= qbp->qb_hiwat) ||
		    (qbp->qb_mblkcnt >= qbp->qb_hiwat)) {
			qbp->qb_flag |= QB_FULL;
		}
	} else {
		q->q_count += bytecnt;
		q->q_mblkcnt += mblkcnt;
		if ((q->q_count >= q->q_hiwat) ||
		    (q->q_mblkcnt >= q->q_hiwat)) {
			q->q_flag |= QFULL;
		}
	}

	STR_FTEVENT_MSG(mp, q, FTEV_INSQ, NULL);

	if (canenable(q) && (q->q_flag & QWANTR))
		qenable_locked(q);

	ASSERT(MUTEX_HELD(QLOCK(q)));
	if (freezer != curthread)
		mutex_exit(QLOCK(q));

	return (1);
}

/*
 * Create and put a control message on queue.
 * Returns 1 on success, 0 if `type' is a data message type (other than
 * M_DELAY) or the zero-length message could not be allocated.
 */
int
putctl(queue_t *q, int type)
{
	mblk_t *bp;

	if ((datamsg(type) && (type != M_DELAY)) ||
	    (bp = allocb_tryhard(0)) == NULL)
		return (0);
	bp->b_datap->db_type = (unsigned char) type;

	put(q, bp);

	return (1);
}

/*
 * Control message with a single-byte parameter
 */
int
putctl1(queue_t *q, int type, int param)
{
	mblk_t *bp;

	if ((datamsg(type) && (type != M_DELAY)) ||
	    (bp = allocb_tryhard(1)) == NULL)
		return (0);
	bp->b_datap->db_type = (unsigned char)type;
	*bp->b_wptr++ = (unsigned char)param;

	put(q, bp);

	return (1);
}

/*
 * As putctl1(), but the message is handed to the next queue via
 * putnext() instead of being placed on q itself.
 */
int
putnextctl1(queue_t *q, int type, int param)
{
	mblk_t *bp;

	if ((datamsg(type) && (type != M_DELAY)) ||
	    ((bp = allocb_tryhard(1)) == NULL))
		return (0);

	bp->b_datap->db_type = (unsigned char)type;
	*bp->b_wptr++ = (unsigned char)param;

	putnext(q, bp);

	return (1);
}

/*
 * As putctl(), but the message is handed to the next queue via putnext().
 */
int
putnextctl(queue_t *q, int type)
{
	mblk_t *bp;

	if ((datamsg(type) && (type != M_DELAY)) ||
	    ((bp = allocb_tryhard(0)) == NULL))
		return (0);
	bp->b_datap->db_type = (unsigned char)type;

	putnext(q, bp);

	return (1);
}

/*
 * Return the queue upstream from this one
 * (the queue whose q_next leads to this queue's partner),
 * or NULL if there is none.
 */
queue_t *
backq(queue_t *q)
{
	q = _OTHERQ(q);
	if (q->q_next) {
		q = q->q_next;
		return (_OTHERQ(q));
	}
	return (NULL);
}

/*
 * Send a block back up the queue in reverse from this
 * one (e.g.
to respond to ioctls) 30420Sstevel@tonic-gate */ 30430Sstevel@tonic-gate void 30440Sstevel@tonic-gate qreply(queue_t *q, mblk_t *bp) 30450Sstevel@tonic-gate { 30460Sstevel@tonic-gate ASSERT(q && bp); 30470Sstevel@tonic-gate 30480Sstevel@tonic-gate putnext(_OTHERQ(q), bp); 30490Sstevel@tonic-gate } 30500Sstevel@tonic-gate 30510Sstevel@tonic-gate /* 30520Sstevel@tonic-gate * Streams Queue Scheduling 30530Sstevel@tonic-gate * 30540Sstevel@tonic-gate * Queues are enabled through qenable() when they have messages to 30550Sstevel@tonic-gate * process. They are serviced by queuerun(), which runs each enabled 30560Sstevel@tonic-gate * queue's service procedure. The call to queuerun() is processor 30570Sstevel@tonic-gate * dependent - the general principle is that it be run whenever a queue 30580Sstevel@tonic-gate * is enabled but before returning to user level. For system calls, 30590Sstevel@tonic-gate * the function runqueues() is called if their action causes a queue 30600Sstevel@tonic-gate * to be enabled. For device interrupts, queuerun() should be 30610Sstevel@tonic-gate * called before returning from the last level of interrupt. Beyond 30620Sstevel@tonic-gate * this, no timing assumptions should be made about queue scheduling. 30630Sstevel@tonic-gate */ 30640Sstevel@tonic-gate 30650Sstevel@tonic-gate /* 30660Sstevel@tonic-gate * Enable a queue: put it on list of those whose service procedures are 30670Sstevel@tonic-gate * ready to run and set up the scheduling mechanism. 30680Sstevel@tonic-gate * The broadcast is done outside the mutex -> to avoid the woken thread 30690Sstevel@tonic-gate * from contending with the mutex. This is OK 'cos the queue has been 30700Sstevel@tonic-gate * enqueued on the runlist and flagged safely at this point. 
30710Sstevel@tonic-gate */ 30720Sstevel@tonic-gate void 30730Sstevel@tonic-gate qenable(queue_t *q) 30740Sstevel@tonic-gate { 30750Sstevel@tonic-gate mutex_enter(QLOCK(q)); 30760Sstevel@tonic-gate qenable_locked(q); 30770Sstevel@tonic-gate mutex_exit(QLOCK(q)); 30780Sstevel@tonic-gate } 30790Sstevel@tonic-gate /* 30800Sstevel@tonic-gate * Return number of messages on queue 30810Sstevel@tonic-gate */ 30820Sstevel@tonic-gate int 30830Sstevel@tonic-gate qsize(queue_t *qp) 30840Sstevel@tonic-gate { 30850Sstevel@tonic-gate int count = 0; 30860Sstevel@tonic-gate mblk_t *mp; 30870Sstevel@tonic-gate 30880Sstevel@tonic-gate mutex_enter(QLOCK(qp)); 30890Sstevel@tonic-gate for (mp = qp->q_first; mp; mp = mp->b_next) 30900Sstevel@tonic-gate count++; 30910Sstevel@tonic-gate mutex_exit(QLOCK(qp)); 30920Sstevel@tonic-gate return (count); 30930Sstevel@tonic-gate } 30940Sstevel@tonic-gate 30950Sstevel@tonic-gate /* 30960Sstevel@tonic-gate * noenable - set queue so that putq() will not enable it. 30970Sstevel@tonic-gate * enableok - set queue so that putq() can enable it. 30980Sstevel@tonic-gate */ 30990Sstevel@tonic-gate void 31000Sstevel@tonic-gate noenable(queue_t *q) 31010Sstevel@tonic-gate { 31020Sstevel@tonic-gate mutex_enter(QLOCK(q)); 31030Sstevel@tonic-gate q->q_flag |= QNOENB; 31040Sstevel@tonic-gate mutex_exit(QLOCK(q)); 31050Sstevel@tonic-gate } 31060Sstevel@tonic-gate 31070Sstevel@tonic-gate void 31080Sstevel@tonic-gate enableok(queue_t *q) 31090Sstevel@tonic-gate { 31100Sstevel@tonic-gate mutex_enter(QLOCK(q)); 31110Sstevel@tonic-gate q->q_flag &= ~QNOENB; 31120Sstevel@tonic-gate mutex_exit(QLOCK(q)); 31130Sstevel@tonic-gate } 31140Sstevel@tonic-gate 31150Sstevel@tonic-gate /* 31160Sstevel@tonic-gate * Set queue fields. 
 */
int
strqset(queue_t *q, qfields_t what, unsigned char pri, intptr_t val)
{
	qband_t *qbp = NULL;
	queue_t *wrq;
	int error = 0;
	kthread_id_t freezer;

	/* If the stream is frozen by this thread QLOCK is already held. */
	freezer = STREAM(q)->sd_freezer;
	if (freezer == curthread) {
		ASSERT(frozenstr(q));
		ASSERT(MUTEX_HELD(QLOCK(q)));
	} else
		mutex_enter(QLOCK(q));

	if (what >= QBAD) {
		error = EINVAL;
		goto done;
	}
	/*
	 * Locate the qband for band `pri', allocating any missing
	 * qband structures up to that band on demand.
	 */
	if (pri != 0) {
		int i;
		qband_t **qbpp;

		if (pri > q->q_nband) {
			qbpp = &q->q_bandp;
			while (*qbpp)
				qbpp = &(*qbpp)->qb_next;
			while (pri > q->q_nband) {
				if ((*qbpp = allocband()) == NULL) {
					error = EAGAIN;
					goto done;
				}
				(*qbpp)->qb_hiwat = q->q_hiwat;
				(*qbpp)->qb_lowat = q->q_lowat;
				q->q_nband++;
				qbpp = &(*qbpp)->qb_next;
			}
		}
		qbp = q->q_bandp;
		i = pri;
		while (--i)
			qbp = qbp->qb_next;
	}
	switch (what) {

	case QHIWAT:
		if (qbp)
			qbp->qb_hiwat = (size_t)val;
		else
			q->q_hiwat = (size_t)val;
		break;

	case QLOWAT:
		if (qbp)
			qbp->qb_lowat = (size_t)val;
		else
			q->q_lowat = (size_t)val;
		break;

	case QMAXPSZ:
		if (qbp)
			error = EINVAL;
		else
			q->q_maxpsz = (ssize_t)val;

		/*
		 * Performance concern, strwrite looks at the module below
		 * the stream head for the maxpsz each time it does a write
		 * we now cache it at the stream head.  Check to see if this
		 * queue is sitting directly below the stream head.
		 */
		wrq = STREAM(q)->sd_wrq;
		if (q != wrq->q_next)
			break;

		/*
		 * If the stream is not frozen drop the current QLOCK and
		 * acquire the sd_wrq QLOCK which protects sd_qn_*
		 */
		if (freezer != curthread) {
			mutex_exit(QLOCK(q));
			mutex_enter(QLOCK(wrq));
		}
		ASSERT(MUTEX_HELD(QLOCK(wrq)));

		/* Clamp the cached value by the system-wide strmsgsz. */
		if (strmsgsz != 0) {
			if (val == INFPSZ)
				val = strmsgsz;
			else {
				if (STREAM(q)->sd_vnode->v_type == VFIFO)
					val = MIN(PIPE_BUF, val);
				else
					val = MIN(strmsgsz, val);
			}
		}

		STREAM(q)->sd_qn_maxpsz = val;
		if (freezer != curthread) {
			mutex_exit(QLOCK(wrq));
			mutex_enter(QLOCK(q));
		}
		break;

	case QMINPSZ:
		if (qbp)
			error = EINVAL;
		else
			q->q_minpsz = (ssize_t)val;

		/*
		 * Performance concern, strwrite looks at the module below
		 * the stream head for the maxpsz each time it does a write
		 * we now cache it at the stream head.  Check to see if this
		 * queue is sitting directly below the stream head.
		 */
		wrq = STREAM(q)->sd_wrq;
		if (q != wrq->q_next)
			break;

		/*
		 * If the stream is not frozen drop the current QLOCK and
		 * acquire the sd_wrq QLOCK which protects sd_qn_*
		 */
		if (freezer != curthread) {
			mutex_exit(QLOCK(q));
			mutex_enter(QLOCK(wrq));
		}
		STREAM(q)->sd_qn_minpsz = (ssize_t)val;

		if (freezer != curthread) {
			mutex_exit(QLOCK(wrq));
			mutex_enter(QLOCK(q));
		}
		break;

	case QSTRUIOT:
		if (qbp)
			error = EINVAL;
		else
			q->q_struiot = (ushort_t)val;
		break;

	case QCOUNT:
	case QFIRST:
	case QLAST:
	case QFLAG:
		/* These fields are read-only; use strqget() instead. */
		error = EPERM;
		break;

	default:
		error = EINVAL;
		break;
	}
done:
	if (freezer != curthread)
		mutex_exit(QLOCK(q));
	return (error);
}

/*
 * Get queue fields.
 */
int
strqget(queue_t *q, qfields_t what, unsigned char pri, void *valp)
{
	qband_t *qbp = NULL;
	int error = 0;
	kthread_id_t freezer;

	/* If the stream is frozen by this thread QLOCK is already held. */
	freezer = STREAM(q)->sd_freezer;
	if (freezer == curthread) {
		ASSERT(frozenstr(q));
		ASSERT(MUTEX_HELD(QLOCK(q)));
	} else
		mutex_enter(QLOCK(q));
	if (what >= QBAD) {
		error = EINVAL;
		goto done;
	}
	/*
	 * Locate the qband for band `pri', allocating any missing
	 * qband structures up to that band on demand (same logic as
	 * strqset() above).
	 */
	if (pri != 0) {
		int i;
		qband_t **qbpp;

		if (pri > q->q_nband) {
			qbpp = &q->q_bandp;
			while (*qbpp)
				qbpp = &(*qbpp)->qb_next;
			while (pri > q->q_nband) {
				if ((*qbpp = allocband()) == NULL) {
					error = EAGAIN;
					goto done;
				}
				(*qbpp)->qb_hiwat = q->q_hiwat;
				(*qbpp)->qb_lowat = q->q_lowat;
				q->q_nband++;
				qbpp = &(*qbpp)->qb_next;
			}
		}
		qbp = q->q_bandp;
		i = pri;
		while (--i)
			qbp = qbp->qb_next;
	}
	switch (what) {
	case QHIWAT:
		if (qbp)
			*(size_t *)valp = qbp->qb_hiwat;
		else
			*(size_t *)valp = q->q_hiwat;
		break;

	case QLOWAT:
		if (qbp)
			*(size_t *)valp = qbp->qb_lowat;
		else
			*(size_t *)valp = q->q_lowat;
		break;

	case QMAXPSZ:
		if (qbp)
			error = EINVAL;
		else
			*(ssize_t *)valp = q->q_maxpsz;
		break;

	case QMINPSZ:
		if (qbp)
			error = EINVAL;
		else
			*(ssize_t *)valp = q->q_minpsz;
		break;

	case QCOUNT:
		if (qbp)
			*(size_t *)valp = qbp->qb_count;
		else
			*(size_t *)valp = q->q_count;
		break;

	case QFIRST:
		if (qbp)
			*(mblk_t **)valp = qbp->qb_first;
		else
			*(mblk_t **)valp = q->q_first;
		break;

	case QLAST:
		if (qbp)
			*(mblk_t **)valp = qbp->qb_last;
		else
			*(mblk_t **)valp = q->q_last;
		break;

	case QFLAG:
		if (qbp)
			*(uint_t *)valp = qbp->qb_flag;
		else
			*(uint_t *)valp = q->q_flag;
		break;

	case QSTRUIOT:
		if (qbp)
			error = EINVAL;
		else
			*(short *)valp = q->q_struiot;
		break;

	default:
		error = EINVAL;
		break;
	}
done:
	if (freezer != curthread)
		mutex_exit(QLOCK(q));
	return (error);
}

/*
 * Function awakes all in cvwait/sigwait/pollwait, on one of:
 *	QWANTWSYNC or QWANTR or QWANTW,
 *
 * Note: for QWANTWSYNC/QWANTW and QWANTR, if no WSLEEPer or RSLEEPer then a
 * deferred wakeup will be done. Also if strpoll() in progress then a
 * deferred pollwakeup will be done.
 */
void
strwakeq(queue_t *q, int flag)
{
	stdata_t *stp = STREAM(q);
	pollhead_t *pl;

	mutex_enter(&stp->sd_lock);
	pl = &stp->sd_pollist;
	if (flag & QWANTWSYNC) {
		/* Synchronous-write wakeup: write side only. */
		ASSERT(!(q->q_flag & QREADR));
		if (stp->sd_flag & WSLEEP) {
			stp->sd_flag &= ~WSLEEP;
			cv_broadcast(&stp->sd_wrq->q_wait);
		} else {
			/* Nobody sleeping yet; record a deferred wakeup. */
			stp->sd_wakeq |= WSLEEP;
		}

		/* pollwakeup() must be called without sd_lock held. */
		mutex_exit(&stp->sd_lock);
		pollwakeup(pl, POLLWRNORM);
		mutex_enter(&stp->sd_lock);

		if (stp->sd_sigflags & S_WRNORM)
			strsendsig(stp->sd_siglist, S_WRNORM, 0, 0);
	} else if (flag & QWANTR) {
		/* Reader wakeup. */
		if (stp->sd_flag & RSLEEP) {
			stp->sd_flag &= ~RSLEEP;
			cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
		} else {
			/* Nobody sleeping yet; record a deferred wakeup. */
			stp->sd_wakeq |= RSLEEP;
		}

		mutex_exit(&stp->sd_lock);
		pollwakeup(pl, POLLIN | POLLRDNORM);
		mutex_enter(&stp->sd_lock);

		{
			int events = stp->sd_sigflags & (S_INPUT | S_RDNORM);

			if (events)
				strsendsig(stp->sd_siglist, events, 0, 0);
		}
	} else {
		/* Ordinary writer wakeup (QWANTW). */
		if (stp->sd_flag & WSLEEP) {
			stp->sd_flag &= ~WSLEEP;
			cv_broadcast(&stp->sd_wrq->q_wait);
		}

		mutex_exit(&stp->sd_lock);
		pollwakeup(pl, POLLWRNORM);
		mutex_enter(&stp->sd_lock);

		if (stp->sd_sigflags & S_WRNORM)
			strsendsig(stp->sd_siglist, S_WRNORM, 0, 0);
	}
	mutex_exit(&stp->sd_lock);
}

/*
 * Copy data from the uio in `dp' into the STRUIO_SPEC-marked mblks of
 * the chain `mp'.  If `noblock' is set, guard the copy with on_trap()
 * and return EWOULDBLOCK on a data access fault instead of blocking.
 * Returns 0 on success or an errno value.
 */
int
struioget(queue_t *q, mblk_t *mp, struiod_t *dp, int noblock)
{
	stdata_t *stp = STREAM(q);
	int typ = STRUIOT_STANDARD;
	uio_t *uiop = &dp->d_uio;
	dblk_t *dbp;
	ssize_t uiocnt;
	ssize_t cnt;
	unsigned char *ptr;
	ssize_t resid;
	int error = 0;
	on_trap_data_t otd;
	queue_t *stwrq;

	/*
	 * Plumbing may change while taking the type so store the
	 * queue in a temporary variable. It doesn't matter even
	 * if we take the type from the previous plumbing,
	 * that's because if the plumbing has changed when we were
	 * holding the queue in a temporary variable, we can continue
	 * processing the message the way it would have been processed
	 * in the old plumbing, without any side effects but a bit
	 * extra processing for partial ip header checksum.
	 *
	 * This has been done to avoid holding the sd_lock which is
	 * very hot.
	 */

	stwrq = stp->sd_struiowrq;
	if (stwrq)
		typ = stwrq->q_struiot;

	for (; (resid = uiop->uio_resid) > 0 && mp; mp = mp->b_cont) {
		dbp = mp->b_datap;
		ptr = (uchar_t *)(mp->b_rptr + dbp->db_cksumstuff);
		uiocnt = dbp->db_cksumend - dbp->db_cksumstuff;
		cnt = MIN(uiocnt, uiop->uio_resid);
		if (!(dbp->db_struioflag & STRUIO_SPEC) ||
		    (dbp->db_struioflag & STRUIO_DONE) || cnt == 0) {
			/*
			 * Either this mblk has already been processed
			 * or there is no more room in this mblk (?).
			 */
			continue;
		}
		switch (typ) {
		case STRUIOT_STANDARD:
			if (noblock) {
				/* Trap data faults instead of blocking. */
				if (on_trap(&otd, OT_DATA_ACCESS)) {
					no_trap();
					error = EWOULDBLOCK;
					goto out;
				}
			}
			if (error = uiomove(ptr, cnt, UIO_WRITE, uiop)) {
				if (noblock)
					no_trap();
				goto out;
			}
			if (noblock)
				no_trap();
			break;

		default:
			error = EIO;
			goto out;
		}
		dbp->db_struioflag |= STRUIO_DONE;
		dbp->db_cksumstuff += cnt;
	}
out:
	if (error == EWOULDBLOCK && (resid -= uiop->uio_resid) > 0) {
		/*
		 * A fault has occurred and some bytes were moved to the
		 * current mblk, the uio_t has already been updated by
		 * the appropriate uio routine, so also update the mblk
		 * to reflect this in case this same mblk chain is used
		 * again (after the fault has been handled).
		 */
		uiocnt = dbp->db_cksumend - dbp->db_cksumstuff;
		if (uiocnt >= resid)
			dbp->db_cksumstuff += resid;
	}
	return (error);
}

/*
 * Try to enter queue synchronously. Any attempt to enter a closing queue will
 * fail. The qp->q_rwcnt keeps track of the number of successful entries so
 * that removeq() will not try to close the queue while a thread is inside the
 * queue.
 */
static boolean_t
rwnext_enter(queue_t *qp)
{
	mutex_enter(QLOCK(qp));
	if (qp->q_flag & QWCLOSE) {
		mutex_exit(QLOCK(qp));
		return (B_FALSE);
	}
	qp->q_rwcnt++;
	ASSERT(qp->q_rwcnt != 0);	/* guard against counter wraparound */
	mutex_exit(QLOCK(qp));
	return (B_TRUE);
}

/*
 * Decrease the count of threads running in sync stream queue and wake up any
 * threads blocked in removeq().
 */
static void
rwnext_exit(queue_t *qp)
{
	mutex_enter(QLOCK(qp));
	qp->q_rwcnt--;
	if (qp->q_flag & QWANTRMQSYNC) {
		/* removeq() is waiting for the last entrant to leave. */
		qp->q_flag &= ~QWANTRMQSYNC;
		cv_broadcast(&qp->q_wait);
	}
	mutex_exit(QLOCK(qp));
}

/*
 * The purpose of rwnext() is to call the rw procedure of the next
 * (downstream) module's queue.
 *
 * treated as put entrypoint for perimeter synchronization.
 *
 * There's no need to grab sq_putlocks here (which only exist for CIPUT
 * sync queues). If it is CIPUT sync queue sq_count is incremented and it does
 * not matter if any regular put entrypoints have been already entered. We
 * can't increment one of the sq_putcounts (instead of sq_count) because
 * qwait_rw won't know which counter to decrement.
 *
 * It would be reasonable to add the lockless FASTPUT logic.
35950Sstevel@tonic-gate */ 35960Sstevel@tonic-gate int 35970Sstevel@tonic-gate rwnext(queue_t *qp, struiod_t *dp) 35980Sstevel@tonic-gate { 35990Sstevel@tonic-gate queue_t *nqp; 36000Sstevel@tonic-gate syncq_t *sq; 36010Sstevel@tonic-gate uint16_t count; 36020Sstevel@tonic-gate uint16_t flags; 36030Sstevel@tonic-gate struct qinit *qi; 36040Sstevel@tonic-gate int (*proc)(); 36050Sstevel@tonic-gate struct stdata *stp; 36060Sstevel@tonic-gate int isread; 36070Sstevel@tonic-gate int rval; 36080Sstevel@tonic-gate 36090Sstevel@tonic-gate stp = STREAM(qp); 36100Sstevel@tonic-gate /* 36110Sstevel@tonic-gate * Prevent q_next from changing by holding sd_lock until acquiring 36120Sstevel@tonic-gate * SQLOCK. Note that a read-side rwnext from the streamhead will 36130Sstevel@tonic-gate * already have sd_lock acquired. In either case sd_lock is always 36140Sstevel@tonic-gate * released after acquiring SQLOCK. 36150Sstevel@tonic-gate * 36160Sstevel@tonic-gate * The streamhead read-side holding sd_lock when calling rwnext is 36170Sstevel@tonic-gate * required to prevent a race condition were M_DATA mblks flowing 36180Sstevel@tonic-gate * up the read-side of the stream could be bypassed by a rwnext() 36190Sstevel@tonic-gate * down-call. In this case sd_lock acts as the streamhead perimeter. 36200Sstevel@tonic-gate */ 36210Sstevel@tonic-gate if ((nqp = _WR(qp)) == qp) { 36220Sstevel@tonic-gate isread = 0; 36230Sstevel@tonic-gate mutex_enter(&stp->sd_lock); 36240Sstevel@tonic-gate qp = nqp->q_next; 36250Sstevel@tonic-gate } else { 36260Sstevel@tonic-gate isread = 1; 36270Sstevel@tonic-gate if (nqp != stp->sd_wrq) 36280Sstevel@tonic-gate /* Not streamhead */ 36290Sstevel@tonic-gate mutex_enter(&stp->sd_lock); 36300Sstevel@tonic-gate qp = _RD(nqp->q_next); 36310Sstevel@tonic-gate } 36320Sstevel@tonic-gate qi = qp->q_qinfo; 36330Sstevel@tonic-gate if (qp->q_struiot == STRUIOT_NONE || ! 
(proc = qi->qi_rwp)) { 36340Sstevel@tonic-gate /* 36350Sstevel@tonic-gate * Not a synchronous module or no r/w procedure for this 36360Sstevel@tonic-gate * queue, so just return EINVAL and let the caller handle it. 36370Sstevel@tonic-gate */ 36380Sstevel@tonic-gate mutex_exit(&stp->sd_lock); 36390Sstevel@tonic-gate return (EINVAL); 36400Sstevel@tonic-gate } 36410Sstevel@tonic-gate 36420Sstevel@tonic-gate if (rwnext_enter(qp) == B_FALSE) { 36430Sstevel@tonic-gate mutex_exit(&stp->sd_lock); 36440Sstevel@tonic-gate return (EINVAL); 36450Sstevel@tonic-gate } 36460Sstevel@tonic-gate 36470Sstevel@tonic-gate sq = qp->q_syncq; 36480Sstevel@tonic-gate mutex_enter(SQLOCK(sq)); 36490Sstevel@tonic-gate mutex_exit(&stp->sd_lock); 36500Sstevel@tonic-gate count = sq->sq_count; 36510Sstevel@tonic-gate flags = sq->sq_flags; 36520Sstevel@tonic-gate ASSERT(sq->sq_ciputctrl == NULL || (flags & SQ_CIPUT)); 36530Sstevel@tonic-gate 36540Sstevel@tonic-gate while ((flags & SQ_GOAWAY) || (!(flags & SQ_CIPUT) && count != 0)) { 36550Sstevel@tonic-gate /* 36560Sstevel@tonic-gate * if this queue is being closed, return. 36570Sstevel@tonic-gate */ 36580Sstevel@tonic-gate if (qp->q_flag & QWCLOSE) { 36590Sstevel@tonic-gate mutex_exit(SQLOCK(sq)); 36600Sstevel@tonic-gate rwnext_exit(qp); 36610Sstevel@tonic-gate return (EINVAL); 36620Sstevel@tonic-gate } 36630Sstevel@tonic-gate 36640Sstevel@tonic-gate /* 36650Sstevel@tonic-gate * Wait until we can enter the inner perimeter. 
36660Sstevel@tonic-gate */ 36670Sstevel@tonic-gate sq->sq_flags = flags | SQ_WANTWAKEUP; 36680Sstevel@tonic-gate cv_wait(&sq->sq_wait, SQLOCK(sq)); 36690Sstevel@tonic-gate count = sq->sq_count; 36700Sstevel@tonic-gate flags = sq->sq_flags; 36710Sstevel@tonic-gate } 36720Sstevel@tonic-gate 36730Sstevel@tonic-gate if (isread == 0 && stp->sd_struiowrq == NULL || 36740Sstevel@tonic-gate isread == 1 && stp->sd_struiordq == NULL) { 36750Sstevel@tonic-gate /* 36760Sstevel@tonic-gate * Stream plumbing changed while waiting for inner perimeter 36770Sstevel@tonic-gate * so just return EINVAL and let the caller handle it. 36780Sstevel@tonic-gate */ 36790Sstevel@tonic-gate mutex_exit(SQLOCK(sq)); 36800Sstevel@tonic-gate rwnext_exit(qp); 36810Sstevel@tonic-gate return (EINVAL); 36820Sstevel@tonic-gate } 36830Sstevel@tonic-gate if (!(flags & SQ_CIPUT)) 36840Sstevel@tonic-gate sq->sq_flags = flags | SQ_EXCL; 36850Sstevel@tonic-gate sq->sq_count = count + 1; 36860Sstevel@tonic-gate ASSERT(sq->sq_count != 0); /* Wraparound */ 36870Sstevel@tonic-gate /* 36880Sstevel@tonic-gate * Note: The only message ordering guarantee that rwnext() makes is 36890Sstevel@tonic-gate * for the write queue flow-control case. All others (r/w queue 36900Sstevel@tonic-gate * with q_count > 0 (or q_first != 0)) are the resposibilty of 36910Sstevel@tonic-gate * the queue's rw procedure. This could be genralized here buy 36920Sstevel@tonic-gate * running the queue's service procedure, but that wouldn't be 36930Sstevel@tonic-gate * the most efficent for all cases. 36940Sstevel@tonic-gate */ 36950Sstevel@tonic-gate mutex_exit(SQLOCK(sq)); 36960Sstevel@tonic-gate if (! isread && (qp->q_flag & QFULL)) { 36970Sstevel@tonic-gate /* 36980Sstevel@tonic-gate * Write queue may be flow controlled. If so, 36990Sstevel@tonic-gate * mark the queue for wakeup when it's not. 
37000Sstevel@tonic-gate */ 37010Sstevel@tonic-gate mutex_enter(QLOCK(qp)); 37020Sstevel@tonic-gate if (qp->q_flag & QFULL) { 37030Sstevel@tonic-gate qp->q_flag |= QWANTWSYNC; 37040Sstevel@tonic-gate mutex_exit(QLOCK(qp)); 37050Sstevel@tonic-gate rval = EWOULDBLOCK; 37060Sstevel@tonic-gate goto out; 37070Sstevel@tonic-gate } 37080Sstevel@tonic-gate mutex_exit(QLOCK(qp)); 37090Sstevel@tonic-gate } 37100Sstevel@tonic-gate 37110Sstevel@tonic-gate if (! isread && dp->d_mp) 37120Sstevel@tonic-gate STR_FTEVENT_MSG(dp->d_mp, nqp, FTEV_RWNEXT, dp->d_mp->b_rptr - 37130Sstevel@tonic-gate dp->d_mp->b_datap->db_base); 37140Sstevel@tonic-gate 37150Sstevel@tonic-gate rval = (*proc)(qp, dp); 37160Sstevel@tonic-gate 37170Sstevel@tonic-gate if (isread && dp->d_mp) 37180Sstevel@tonic-gate STR_FTEVENT_MSG(dp->d_mp, _RD(nqp), FTEV_RWNEXT, 37190Sstevel@tonic-gate dp->d_mp->b_rptr - dp->d_mp->b_datap->db_base); 37200Sstevel@tonic-gate out: 37210Sstevel@tonic-gate /* 37220Sstevel@tonic-gate * The queue is protected from being freed by sq_count, so it is 37230Sstevel@tonic-gate * safe to call rwnext_exit and reacquire SQLOCK(sq). 37240Sstevel@tonic-gate */ 37250Sstevel@tonic-gate rwnext_exit(qp); 37260Sstevel@tonic-gate 37270Sstevel@tonic-gate mutex_enter(SQLOCK(sq)); 37280Sstevel@tonic-gate flags = sq->sq_flags; 37290Sstevel@tonic-gate ASSERT(sq->sq_count != 0); 37300Sstevel@tonic-gate sq->sq_count--; 37310Sstevel@tonic-gate if (flags & SQ_TAIL) { 37320Sstevel@tonic-gate putnext_tail(sq, qp, flags); 37330Sstevel@tonic-gate /* 37340Sstevel@tonic-gate * The only purpose of this ASSERT is to preserve calling stack 37350Sstevel@tonic-gate * in DEBUG kernel. 
37360Sstevel@tonic-gate */ 37370Sstevel@tonic-gate ASSERT(flags & SQ_TAIL); 37380Sstevel@tonic-gate return (rval); 37390Sstevel@tonic-gate } 37400Sstevel@tonic-gate ASSERT(flags & (SQ_EXCL|SQ_CIPUT)); 37410Sstevel@tonic-gate /* 37420Sstevel@tonic-gate * Safe to always drop SQ_EXCL: 37430Sstevel@tonic-gate * Not SQ_CIPUT means we set SQ_EXCL above 37440Sstevel@tonic-gate * For SQ_CIPUT SQ_EXCL will only be set if the put procedure 37450Sstevel@tonic-gate * did a qwriter(INNER) in which case nobody else 37460Sstevel@tonic-gate * is in the inner perimeter and we are exiting. 37470Sstevel@tonic-gate * 37480Sstevel@tonic-gate * I would like to make the following assertion: 37490Sstevel@tonic-gate * 37500Sstevel@tonic-gate * ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) != (SQ_EXCL|SQ_CIPUT) || 37510Sstevel@tonic-gate * sq->sq_count == 0); 37520Sstevel@tonic-gate * 37530Sstevel@tonic-gate * which indicates that if we are both putshared and exclusive, 37540Sstevel@tonic-gate * we became exclusive while executing the putproc, and the only 37550Sstevel@tonic-gate * claim on the syncq was the one we dropped a few lines above. 37560Sstevel@tonic-gate * But other threads that enter putnext while the syncq is exclusive 37570Sstevel@tonic-gate * need to make a claim as they may need to drop SQLOCK in the 37580Sstevel@tonic-gate * has_writers case to avoid deadlocks. If these threads are 37590Sstevel@tonic-gate * delayed or preempted, it is possible that the writer thread can 37600Sstevel@tonic-gate * find out that there are other claims making the (sq_count == 0) 37610Sstevel@tonic-gate * test invalid. 
 */

	sq->sq_flags = flags & ~SQ_EXCL;
	if (sq->sq_flags & SQ_WANTWAKEUP) {
		sq->sq_flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}
	mutex_exit(SQLOCK(sq));
	return (rval);
}

/*
 * The purpose of infonext() is to call the info procedure of the next
 * (downstream) module's queue.
 *
 * treated as put entrypoint for perimeter synchronization.
 *
 * There's no need to grab sq_putlocks here (which only exist for CIPUT
 * sync queues). If it is CIPUT sync queue regular sq_count is incremented and
 * it does not matter if any regular put entrypoints have been already
 * entered.
 */
int
infonext(queue_t *qp, infod_t *idp)
{
	queue_t *nqp;
	syncq_t *sq;
	uint16_t count;
	uint16_t flags;
	struct qinit *qi;
	int (*proc)();
	struct stdata *stp;
	int rval;

	stp = STREAM(qp);
	/*
	 * Prevent q_next from changing by holding sd_lock until
	 * acquiring SQLOCK.
	 */
	mutex_enter(&stp->sd_lock);
	if ((nqp = _WR(qp)) == qp) {
		qp = nqp->q_next;
	} else {
		qp = _RD(nqp->q_next);
	}
	qi = qp->q_qinfo;
	if (qp->q_struiot == STRUIOT_NONE || ! (proc = qi->qi_infop)) {
		/*
		 * Not a synchronous module or no info procedure for this
		 * queue; let the caller handle EINVAL.
		 */
		mutex_exit(&stp->sd_lock);
		return (EINVAL);
	}
	sq = qp->q_syncq;
	mutex_enter(SQLOCK(sq));
	mutex_exit(&stp->sd_lock);
	count = sq->sq_count;
	flags = sq->sq_flags;
	ASSERT(sq->sq_ciputctrl == NULL || (flags & SQ_CIPUT));

	while ((flags & SQ_GOAWAY) || (!(flags & SQ_CIPUT) && count != 0)) {
		/*
		 * Wait until we can enter the inner perimeter.
		 */
		sq->sq_flags = flags | SQ_WANTWAKEUP;
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		count = sq->sq_count;
		flags = sq->sq_flags;
	}

	if (! (flags & SQ_CIPUT))
		sq->sq_flags = flags | SQ_EXCL;
	sq->sq_count = count + 1;
	ASSERT(sq->sq_count != 0);		/* Wraparound */
	mutex_exit(SQLOCK(sq));

	rval = (*proc)(qp, idp);

	mutex_enter(SQLOCK(sq));
	flags = sq->sq_flags;
	ASSERT(sq->sq_count != 0);
	sq->sq_count--;
	if (flags & SQ_TAIL) {
		putnext_tail(sq, qp, flags);
		/*
		 * The only purpose of this ASSERT is to preserve calling stack
		 * in DEBUG kernel.
		 */
		ASSERT(flags & SQ_TAIL);
		return (rval);
	}
	ASSERT(flags & (SQ_EXCL|SQ_CIPUT));
	/*
	 * XXXX
	 * I am not certain the next comment is correct here. I need to consider
	 * why the infonext is called, and if dropping SQ_EXCL unless non-CIPUT
	 * might cause other problems. It just might be safer to drop it if
	 * !SQ_CIPUT because that is when we set it.
	 */
	/*
	 * Safe to always drop SQ_EXCL:
	 *	Not SQ_CIPUT means we set SQ_EXCL above
	 *	For SQ_CIPUT SQ_EXCL will only be set if the put procedure
	 *	did a qwriter(INNER) in which case nobody else
	 *	is in the inner perimeter and we are exiting.
	 *
	 * I would like to make the following assertion:
	 *
	 * ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) != (SQ_EXCL|SQ_CIPUT) ||
	 *	sq->sq_count == 0);
	 *
	 * which indicates that if we are both putshared and exclusive,
	 * we became exclusive while executing the putproc, and the only
	 * claim on the syncq was the one we dropped a few lines above.
	 * But other threads that enter putnext while the syncq is exclusive
	 * need to make a claim as they may need to drop SQLOCK in the
	 * has_writers case to avoid deadlocks. If these threads are
	 * delayed or preempted, it is possible that the writer thread can
	 * find out that there are other claims making the (sq_count == 0)
	 * test invalid.
	 */

	sq->sq_flags = flags & ~SQ_EXCL;
	mutex_exit(SQLOCK(sq));
	return (rval);
}

/*
 * Return nonzero if the queue is responsible for struio(), else return 0.
 */
int
isuioq(queue_t *q)
{
	if (q->q_flag & QREADR)
		return (STREAM(q)->sd_struiordq == q);
	else
		return (STREAM(q)->sd_struiowrq == q);
}

#if defined(__sparc)
int disable_putlocks = 0;
#else
int disable_putlocks = 1;
#endif

/*
 * Called by create_putlocks(). Allocate and install the per-cpu ciputctrl
 * structure for the syncq of q (and of its partner queue when the two
 * queues have distinct per-queue syncqs, i.e. QPERQ is set).
 */
static void
create_syncq_putlocks(queue_t *q)
{
	syncq_t *sq = q->q_syncq;
	ciputctrl_t *cip;
	int i;

	ASSERT(sq != NULL);

	ASSERT(disable_putlocks == 0);
	ASSERT(n_ciputctrl >= min_n_ciputctrl);
	ASSERT(ciputctrl_cache != NULL);

	if (!(sq->sq_type & SQ_CIPUT))
		return;

	/* At most two iterations: this queue's syncq and its partner's. */
	for (i = 0; i <= 1; i++) {
		if (sq->sq_ciputctrl == NULL) {
			cip = kmem_cache_alloc(ciputctrl_cache, KM_SLEEP);
			SUMCHECK_CIPUTCTRL_COUNTS(cip, n_ciputctrl - 1, 0);
			mutex_enter(SQLOCK(sq));
			if (sq->sq_ciputctrl != NULL) {
				/* Lost the race; discard our allocation. */
				mutex_exit(SQLOCK(sq));
				kmem_cache_free(ciputctrl_cache, cip);
			} else {
				ASSERT(sq->sq_nciputctrl == 0);
				sq->sq_nciputctrl = n_ciputctrl - 1;
				/*
				 * putnext checks sq_ciputctrl without holding
				 * SQLOCK. if it is not NULL putnext assumes
				 * sq_nciputctrl is initialized. membar below
				 * ensures that.
				 */
				membar_producer();
				sq->sq_ciputctrl = cip;
				mutex_exit(SQLOCK(sq));
			}
		}
		ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
		if (i == 1)
			break;
		q = _OTHERQ(q);
		if (!(q->q_flag & QPERQ)) {
			/* Both queues share one syncq; nothing more to do. */
			ASSERT(sq == q->q_syncq);
			break;
		}
		ASSERT(q->q_syncq != NULL);
		ASSERT(sq != q->q_syncq);
		sq = q->q_syncq;
		ASSERT(sq->sq_type & SQ_CIPUT);
	}
}

/*
 * If stream argument is 0 only create per cpu sq_putlocks/sq_putcounts for
 * syncq of q. If stream argument is not 0 create per cpu stream_putlocks for
 * the stream of q and per cpu sq_putlocks/sq_putcounts for all syncq's
 * starting from q and down to the driver.
 *
 * This should be called after the affected queues are part of stream
 * geometry. It should be called from driver/module open routine after
 * qprocson() call. It is also called from nfs syscall where it is known that
 * stream is configured and won't change its geometry during create_putlock
 * call.
 *
 * caller normally uses 0 value for the stream argument to speed up MT putnext
 * into the perimeter of q for example because its perimeter is per module
 * (e.g. IP).
 *
 * caller normally uses non 0 value for the stream argument to hint the system
 * that the stream of q is a very contended global system stream
 * (e.g. NFS/UDP) and the part of the stream from q to the driver is
 * particularly MT hot.
 *
 * Caller ensures stream plumbing won't happen while we are here and therefore
 * q_next can be safely used.
 */

void
create_putlocks(queue_t *q, int stream)
{
	ciputctrl_t	*cip;
	struct stdata	*stp = STREAM(q);

	q = _WR(q);
	ASSERT(stp != NULL);

	if (disable_putlocks != 0)
		return;

	if (n_ciputctrl < min_n_ciputctrl)
		return;

	ASSERT(ciputctrl_cache != NULL);

	if (stream != 0 && stp->sd_ciputctrl == NULL) {
		cip = kmem_cache_alloc(ciputctrl_cache, KM_SLEEP);
		SUMCHECK_CIPUTCTRL_COUNTS(cip, n_ciputctrl - 1, 0);
		mutex_enter(&stp->sd_lock);
		if (stp->sd_ciputctrl != NULL) {
			/* Lost the race; discard our allocation. */
			mutex_exit(&stp->sd_lock);
			kmem_cache_free(ciputctrl_cache, cip);
		} else {
			ASSERT(stp->sd_nciputctrl == 0);
			stp->sd_nciputctrl = n_ciputctrl - 1;
			/*
			 * putnext checks sd_ciputctrl without holding
			 * sd_lock. if it is not NULL putnext assumes
			 * sd_nciputctrl is initialized. membar below
			 * ensures that.
			 */
			membar_producer();
			stp->sd_ciputctrl = cip;
			mutex_exit(&stp->sd_lock);
		}
	}

	ASSERT(stream == 0 || stp->sd_nciputctrl == n_ciputctrl - 1);

	/* Walk the stream toward the driver, stopping early if stream == 0. */
	while (_SAMESTR(q)) {
		create_syncq_putlocks(q);
		if (stream == 0)
			return;
		q = q->q_next;
	}
	ASSERT(q != NULL);
	create_syncq_putlocks(q);
}

/*
 * STREAMS Flow Trace - record STREAMS Flow Trace events as an mblk flows
 * through a stream.
 *
 * Data currently recorded per-event is a timestamp, module/driver name,
 * downstream module/driver name, optional callstack, event type and a per
 * type datum. Much of the STREAMS framework is instrumented for automatic
 * flow tracing (when enabled). Events can be defined and used by STREAMS
 * modules and drivers.
 *
 * Global objects:
 *
 *	str_ftevent() - Add a flow-trace event to a dblk.
40490Sstevel@tonic-gate * str_ftfree() - Free flow-trace data 40500Sstevel@tonic-gate * 40510Sstevel@tonic-gate * Local objects: 40520Sstevel@tonic-gate * 40530Sstevel@tonic-gate * fthdr_cache - pointer to the kmem cache for trace header. 40540Sstevel@tonic-gate * ftblk_cache - pointer to the kmem cache for trace data blocks. 40550Sstevel@tonic-gate */ 40560Sstevel@tonic-gate 40570Sstevel@tonic-gate int str_ftnever = 1; /* Don't do STREAMS flow tracing */ 4058*8752SPeter.Memishian@Sun.COM int str_ftstack = 0; /* Don't record event call stacks */ 40590Sstevel@tonic-gate 40600Sstevel@tonic-gate void 40610Sstevel@tonic-gate str_ftevent(fthdr_t *hp, void *p, ushort_t evnt, ushort_t data) 40620Sstevel@tonic-gate { 40630Sstevel@tonic-gate ftblk_t *bp = hp->tail; 40640Sstevel@tonic-gate ftblk_t *nbp; 40650Sstevel@tonic-gate ftevnt_t *ep; 40660Sstevel@tonic-gate int ix, nix; 40670Sstevel@tonic-gate 40680Sstevel@tonic-gate ASSERT(hp != NULL); 40690Sstevel@tonic-gate 40700Sstevel@tonic-gate for (;;) { 40710Sstevel@tonic-gate if ((ix = bp->ix) == FTBLK_EVNTS) { 40720Sstevel@tonic-gate /* 40730Sstevel@tonic-gate * Tail doesn't have room, so need a new tail. 40740Sstevel@tonic-gate * 40750Sstevel@tonic-gate * To make this MT safe, first, allocate a new 40760Sstevel@tonic-gate * ftblk, and initialize it. To make life a 40770Sstevel@tonic-gate * little easier, reserve the first slot (mostly 40780Sstevel@tonic-gate * by making ix = 1). When we are finished with 40790Sstevel@tonic-gate * the initialization, CAS this pointer to the 40800Sstevel@tonic-gate * tail. If this succeeds, this is the new 40810Sstevel@tonic-gate * "next" block. Otherwise, another thread 40820Sstevel@tonic-gate * got here first, so free the block and start 40830Sstevel@tonic-gate * again. 
40840Sstevel@tonic-gate */ 4085*8752SPeter.Memishian@Sun.COM nbp = kmem_cache_alloc(ftblk_cache, KM_NOSLEEP); 4086*8752SPeter.Memishian@Sun.COM if (nbp == NULL) { 40870Sstevel@tonic-gate /* no mem, so punt */ 40880Sstevel@tonic-gate str_ftnever++; 40890Sstevel@tonic-gate /* free up all flow data? */ 40900Sstevel@tonic-gate return; 40910Sstevel@tonic-gate } 40920Sstevel@tonic-gate nbp->nxt = NULL; 40930Sstevel@tonic-gate nbp->ix = 1; 40940Sstevel@tonic-gate /* 40950Sstevel@tonic-gate * Just in case there is another thread about 40960Sstevel@tonic-gate * to get the next index, we need to make sure 40970Sstevel@tonic-gate * the value is there for it. 40980Sstevel@tonic-gate */ 40990Sstevel@tonic-gate membar_producer(); 41000Sstevel@tonic-gate if (casptr(&hp->tail, bp, nbp) == bp) { 41010Sstevel@tonic-gate /* CAS was successful */ 41020Sstevel@tonic-gate bp->nxt = nbp; 41030Sstevel@tonic-gate membar_producer(); 41040Sstevel@tonic-gate bp = nbp; 41050Sstevel@tonic-gate ix = 0; 41060Sstevel@tonic-gate goto cas_good; 41070Sstevel@tonic-gate } else { 41080Sstevel@tonic-gate kmem_cache_free(ftblk_cache, nbp); 41090Sstevel@tonic-gate bp = hp->tail; 41100Sstevel@tonic-gate continue; 41110Sstevel@tonic-gate } 41120Sstevel@tonic-gate } 41130Sstevel@tonic-gate nix = ix + 1; 41140Sstevel@tonic-gate if (cas32((uint32_t *)&bp->ix, ix, nix) == ix) { 41150Sstevel@tonic-gate cas_good: 41160Sstevel@tonic-gate if (curthread != hp->thread) { 41170Sstevel@tonic-gate hp->thread = curthread; 41180Sstevel@tonic-gate evnt |= FTEV_CS; 41190Sstevel@tonic-gate } 41200Sstevel@tonic-gate if (CPU->cpu_seqid != hp->cpu_seqid) { 41210Sstevel@tonic-gate hp->cpu_seqid = CPU->cpu_seqid; 41220Sstevel@tonic-gate evnt |= FTEV_PS; 41230Sstevel@tonic-gate } 41240Sstevel@tonic-gate ep = &bp->ev[ix]; 41250Sstevel@tonic-gate break; 41260Sstevel@tonic-gate } 41270Sstevel@tonic-gate } 41280Sstevel@tonic-gate 41290Sstevel@tonic-gate if (evnt & FTEV_QMASK) { 41300Sstevel@tonic-gate queue_t *qp = p; 
41310Sstevel@tonic-gate 41320Sstevel@tonic-gate if (!(qp->q_flag & QREADR)) 41330Sstevel@tonic-gate evnt |= FTEV_ISWR; 4134*8752SPeter.Memishian@Sun.COM 4135*8752SPeter.Memishian@Sun.COM ep->mid = Q2NAME(qp); 4136*8752SPeter.Memishian@Sun.COM 4137*8752SPeter.Memishian@Sun.COM /* 4138*8752SPeter.Memishian@Sun.COM * We only record the next queue name for FTEV_PUTNEXT since 4139*8752SPeter.Memishian@Sun.COM * that's the only time we *really* need it, and the putnext() 4140*8752SPeter.Memishian@Sun.COM * code ensures that qp->q_next won't vanish. (We could use 4141*8752SPeter.Memishian@Sun.COM * claimstr()/releasestr() but at a performance cost.) 4142*8752SPeter.Memishian@Sun.COM */ 4143*8752SPeter.Memishian@Sun.COM if ((evnt & FTEV_MASK) == FTEV_PUTNEXT && qp->q_next != NULL) 4144*8752SPeter.Memishian@Sun.COM ep->midnext = Q2NAME(qp->q_next); 4145*8752SPeter.Memishian@Sun.COM else 4146*8752SPeter.Memishian@Sun.COM ep->midnext = NULL; 41470Sstevel@tonic-gate } else { 4148*8752SPeter.Memishian@Sun.COM ep->mid = p; 4149*8752SPeter.Memishian@Sun.COM ep->midnext = NULL; 41500Sstevel@tonic-gate } 41510Sstevel@tonic-gate 4152*8752SPeter.Memishian@Sun.COM if (ep->stk != NULL) 4153*8752SPeter.Memishian@Sun.COM ep->stk->fs_depth = getpcstack(ep->stk->fs_stk, FTSTK_DEPTH); 4154*8752SPeter.Memishian@Sun.COM 41550Sstevel@tonic-gate ep->ts = gethrtime(); 41560Sstevel@tonic-gate ep->evnt = evnt; 41570Sstevel@tonic-gate ep->data = data; 41580Sstevel@tonic-gate hp->hash = (hp->hash << 9) + hp->hash; 41590Sstevel@tonic-gate hp->hash += (evnt << 16) | data; 41600Sstevel@tonic-gate hp->hash += (uintptr_t)ep->mid; 41610Sstevel@tonic-gate } 41620Sstevel@tonic-gate 41630Sstevel@tonic-gate /* 41640Sstevel@tonic-gate * Free flow-trace data. 
41650Sstevel@tonic-gate */ 41660Sstevel@tonic-gate void 41670Sstevel@tonic-gate str_ftfree(dblk_t *dbp) 41680Sstevel@tonic-gate { 41690Sstevel@tonic-gate fthdr_t *hp = dbp->db_fthdr; 41700Sstevel@tonic-gate ftblk_t *bp = &hp->first; 41710Sstevel@tonic-gate ftblk_t *nbp; 41720Sstevel@tonic-gate 41730Sstevel@tonic-gate if (bp != hp->tail || bp->ix != 0) { 41740Sstevel@tonic-gate /* 41750Sstevel@tonic-gate * Clear out the hash, have the tail point to itself, and free 41760Sstevel@tonic-gate * any continuation blocks. 41770Sstevel@tonic-gate */ 41780Sstevel@tonic-gate bp = hp->first.nxt; 41790Sstevel@tonic-gate hp->tail = &hp->first; 41800Sstevel@tonic-gate hp->hash = 0; 41810Sstevel@tonic-gate hp->first.nxt = NULL; 41820Sstevel@tonic-gate hp->first.ix = 0; 41830Sstevel@tonic-gate while (bp != NULL) { 41840Sstevel@tonic-gate nbp = bp->nxt; 41850Sstevel@tonic-gate kmem_cache_free(ftblk_cache, bp); 41860Sstevel@tonic-gate bp = nbp; 41870Sstevel@tonic-gate } 41880Sstevel@tonic-gate } 41890Sstevel@tonic-gate kmem_cache_free(fthdr_cache, hp); 41900Sstevel@tonic-gate dbp->db_fthdr = NULL; 41910Sstevel@tonic-gate } 4192