1*0Sstevel@tonic-gate /* 2*0Sstevel@tonic-gate * CDDL HEADER START 3*0Sstevel@tonic-gate * 4*0Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5*0Sstevel@tonic-gate * Common Development and Distribution License, Version 1.0 only 6*0Sstevel@tonic-gate * (the "License"). You may not use this file except in compliance 7*0Sstevel@tonic-gate * with the License. 8*0Sstevel@tonic-gate * 9*0Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10*0Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 11*0Sstevel@tonic-gate * See the License for the specific language governing permissions 12*0Sstevel@tonic-gate * and limitations under the License. 13*0Sstevel@tonic-gate * 14*0Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 15*0Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16*0Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 17*0Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 18*0Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 19*0Sstevel@tonic-gate * 20*0Sstevel@tonic-gate * CDDL HEADER END 21*0Sstevel@tonic-gate */ 22*0Sstevel@tonic-gate /* 23*0Sstevel@tonic-gate * Copyright 2004 Sun Microsystems, Inc. All rights reserved. 24*0Sstevel@tonic-gate * Use is subject to license terms. 25*0Sstevel@tonic-gate */ 26*0Sstevel@tonic-gate 27*0Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 28*0Sstevel@tonic-gate 29*0Sstevel@tonic-gate /* 30*0Sstevel@tonic-gate * STREAMS Buffering module 31*0Sstevel@tonic-gate * 32*0Sstevel@tonic-gate * This streams module collects incoming messages from modules below 33*0Sstevel@tonic-gate * it on the stream and buffers them up into a smaller number of 34*0Sstevel@tonic-gate * aggregated messages. 
Its main purpose is to reduce overhead by 35*0Sstevel@tonic-gate * cutting down on the number of read (or getmsg) calls its client 36*0Sstevel@tonic-gate * user process makes. 37*0Sstevel@tonic-gate * - only M_DATA is buffered. 38*0Sstevel@tonic-gate * - multithreading assumes configured as D_MTQPAIR 39*0Sstevel@tonic-gate * - packets are lost only if flag SB_NO_HEADER is clear and buffer 40*0Sstevel@tonic-gate * allocation fails. 41*0Sstevel@tonic-gate * - in order message transmission. This is enforced for messages other 42*0Sstevel@tonic-gate * than high priority messages. 43*0Sstevel@tonic-gate * - zero length messages on the read side are not passed up the 44*0Sstevel@tonic-gate * stream but used internally for synchronization. 45*0Sstevel@tonic-gate * FLAGS: 46*0Sstevel@tonic-gate * - SB_NO_PROTO_CVT - no conversion of M_PROTO messages to M_DATA. 47*0Sstevel@tonic-gate * (conversion is the default for backwards compatibility 48*0Sstevel@tonic-gate * hence the negative logic). 49*0Sstevel@tonic-gate * - SB_NO_HEADER - no headers in buffered data. 50*0Sstevel@tonic-gate * (adding headers is the default for backwards compatibility 51*0Sstevel@tonic-gate * hence the negative logic). 52*0Sstevel@tonic-gate * - SB_DEFER_CHUNK - provides improved response time in question-answer 53*0Sstevel@tonic-gate * applications. Buffering is not enabled until the second message 54*0Sstevel@tonic-gate * is received on the read side within the sb_ticks interval. 55*0Sstevel@tonic-gate * This option will often be used in combination with flag SB_SEND_ON_WRITE. 56*0Sstevel@tonic-gate * - SB_SEND_ON_WRITE - a write message results in any pending buffered read 57*0Sstevel@tonic-gate * data being immediately sent upstream. 58*0Sstevel@tonic-gate * - SB_NO_DROPS - bufmod behaves transparently in flow control and propagates 59*0Sstevel@tonic-gate * the blocked flow condition downstream. 
If this flag is clear (default) 60*0Sstevel@tonic-gate * messages will be dropped if the upstream flow is blocked. 61*0Sstevel@tonic-gate */ 62*0Sstevel@tonic-gate 63*0Sstevel@tonic-gate 64*0Sstevel@tonic-gate #include <sys/types.h> 65*0Sstevel@tonic-gate #include <sys/errno.h> 66*0Sstevel@tonic-gate #include <sys/debug.h> 67*0Sstevel@tonic-gate #include <sys/stropts.h> 68*0Sstevel@tonic-gate #include <sys/time.h> 69*0Sstevel@tonic-gate #include <sys/stream.h> 70*0Sstevel@tonic-gate #include <sys/conf.h> 71*0Sstevel@tonic-gate #include <sys/ddi.h> 72*0Sstevel@tonic-gate #include <sys/sunddi.h> 73*0Sstevel@tonic-gate #include <sys/kmem.h> 74*0Sstevel@tonic-gate #include <sys/strsun.h> 75*0Sstevel@tonic-gate #include <sys/bufmod.h> 76*0Sstevel@tonic-gate #include <sys/modctl.h> 77*0Sstevel@tonic-gate #include <sys/isa_defs.h> 78*0Sstevel@tonic-gate 79*0Sstevel@tonic-gate /* 80*0Sstevel@tonic-gate * Per-Stream state information. 81*0Sstevel@tonic-gate * 82*0Sstevel@tonic-gate * If sb_ticks is negative, we don't deliver chunks until they're 83*0Sstevel@tonic-gate * full. If it's zero, we deliver every packet as it arrives. (In 84*0Sstevel@tonic-gate * this case we force sb_chunk to zero, to make the implementation 85*0Sstevel@tonic-gate * easier.) Otherwise, sb_ticks gives the number of ticks in a 86*0Sstevel@tonic-gate * buffering interval. The interval begins when the a read side data 87*0Sstevel@tonic-gate * message is received and a timeout is not active. If sb_snap is 88*0Sstevel@tonic-gate * zero, no truncation of the msg is done. 
 */
struct sb {
	queue_t	*sb_rq;		/* our read queue */
	mblk_t	*sb_mp;		/* partial chunk being accumulated */
	mblk_t	*sb_head;	/* pre-allocated space for the next header */
	mblk_t	*sb_tail;	/* first mblk of last message appended */
	uint_t	sb_mlen;	/* sb_mp length (bytes) */
	uint_t	sb_mcount;	/* input msg count in sb_mp */
	uint_t	sb_chunk;	/* max chunk size (0 = no chunking) */
	clock_t	sb_ticks;	/* timeout interval; <0 = never, 0 = deliver each pkt */
	timeout_id_t sb_timeoutid; /* qtimeout() id; 0 when no timeout pending */
	uint_t	sb_drops;	/* cumulative # discarded msgs */
	uint_t	sb_snap;	/* snapshot (truncation) length; 0 = no snipping */
	uint_t	sb_flags;	/* SB_* option flags (see sys/bufmod.h) */
	uint_t	sb_state;	/* state variable (e.g. SB_FRCVD for defer-chunk) */
};

/*
 * Function prototypes.
 */
static	int	sbopen(queue_t *, dev_t *, int, int, cred_t *);
static	int	sbclose(queue_t *, int, cred_t *);
static	void	sbwput(queue_t *, mblk_t *);
static	void	sbrput(queue_t *, mblk_t *);
static	void	sbrsrv(queue_t *);
static	void	sbioctl(queue_t *, mblk_t *);
static	void	sbaddmsg(queue_t *, mblk_t *);
static	void	sbtick(void *);
static	void	sbclosechunk(struct sb *);
static	void	sbsendit(queue_t *, mblk_t *);

static struct module_info	sb_minfo = {
	21,		/* mi_idnum */
	"bufmod",	/* mi_idname */
	0,		/* mi_minpsz */
	INFPSZ,		/* mi_maxpsz */
	1,		/* mi_hiwat */
	0		/* mi_lowat */
};

/*
 * Read-side queue procedures.  The open/close entry points live here,
 * as is conventional for a module's read-side qinit.
 */
static struct qinit	sb_rinit = {
	(int (*)())sbrput,	/* qi_putp */
	(int (*)())sbrsrv,	/* qi_srvp */
	sbopen,			/* qi_qopen */
	sbclose,		/* qi_qclose */
	NULL,			/* qi_qadmin */
	&sb_minfo,		/* qi_minfo */
	NULL			/* qi_mstat */
};

/*
 * Write-side queue procedures.  No service procedure: sbwput() handles
 * (or passes on) every message inline.
 */
static struct qinit	sb_winit = {
	(int (*)())sbwput,	/* qi_putp */
	NULL,			/* qi_srvp */
	NULL,			/* qi_qopen */
	NULL,			/* qi_qclose */
	NULL,			/* qi_qadmin */
	&sb_minfo,		/* qi_minfo */
	NULL			/* qi_mstat */
};

static struct streamtab	sb_info = {
	&sb_rinit,	/* st_rdinit */
	&sb_winit,	/* st_wrinit */
	NULL,		/* st_muxrinit */
	NULL		/* st_muxwinit */
};


/*
 * This is the loadable module wrapper.
 */

static struct fmodsw fsw = {
	"bufmod",
	&sb_info,
	D_MTQPAIR | D_MP	/* inner perimeter around the queue pair */
};

/*
 * Module linkage information for the kernel.
 */

static struct modlstrmod modlstrmod = {
	&mod_strmodops, "streams buffer mod", &fsw
};

static struct modlinkage modlinkage = {
	MODREV_1, &modlstrmod, NULL
};


int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


/*
 * Module open routine.  Accepts only module pushes (MODOPEN) and
 * allocates/initializes the per-Stream state hung off both q_ptrs.
 */
/* ARGSUSED */
static int
sbopen(queue_t *rq, dev_t *dev, int oflag, int sflag, cred_t *crp)
{
	struct sb *sbp;
	ASSERT(rq);

	/* only opened as a pushed module, never as a device */
	if (sflag != MODOPEN)
		return (EINVAL);

	/* already open (re-open of an existing instance): nothing to do */
	if (rq->q_ptr)
		return (0);

	/*
	 * Allocate and initialize per-Stream structure.
	 * Defaults: infinite timeout (sb_ticks == -1, i.e. deliver only
	 * full chunks), default chunk size, no snapshot truncation.
	 */
	sbp = kmem_alloc(sizeof (struct sb), KM_SLEEP);
	sbp->sb_rq = rq;
	sbp->sb_ticks = -1;
	sbp->sb_chunk = SB_DFLT_CHUNK;
	sbp->sb_tail = sbp->sb_mp = sbp->sb_head = NULL;
	sbp->sb_mlen = 0;
	sbp->sb_mcount = 0;
	sbp->sb_timeoutid = 0;
	sbp->sb_drops = 0;
	sbp->sb_snap = 0;
	sbp->sb_flags = 0;
	sbp->sb_state = 0;

	/* both sides share the same per-Stream state */
	rq->q_ptr = WR(rq)->q_ptr = sbp;

	qprocson(rq);


	return (0);
}

/*
 * Module close routine.  Turns off queue processing first so no new
 * messages arrive, then cancels any pending timeout, frees the partial
 * chunk, and releases the per-Stream state.
 */
/* ARGSUSED1 */
static int
sbclose(queue_t *rq, int flag, cred_t *credp)
{
	struct sb	*sbp = (struct sb *)rq->q_ptr;

	ASSERT(sbp);

	qprocsoff(rq);
	/*
	 * Cancel an outstanding timeout
	 */
	if (sbp->sb_timeoutid != 0) {
		(void) quntimeout(rq, sbp->sb_timeoutid);
		sbp->sb_timeoutid = 0;
	}
	/*
	 * Free the current chunk.
	 */
	if (sbp->sb_mp) {
		freemsg(sbp->sb_mp);
		sbp->sb_tail = sbp->sb_mp = sbp->sb_head = NULL;
		sbp->sb_mlen = 0;
	}

	/*
	 * Free the per-Stream structure.
	 */
	kmem_free((caddr_t)sbp, sizeof (struct sb));
	rq->q_ptr = WR(rq)->q_ptr = NULL;

	return (0);
}

/*
 * the correction factor is introduced to compensate for
 * whatever assumptions the modules below have made about
 * how much traffic is flowing through the stream and the fact
 * that bufmod may be snipping messages with the sb_snap length.
 */
#define	SNIT_HIWAT(msgsize, fudge)	((4 * msgsize * fudge) + 512)
#define	SNIT_LOWAT(msgsize, fudge)	((2 * msgsize * fudge) + 256)


/*
 * Second-stage handler for transparent ioctls: called from sbwput() on
 * the M_IOCDATA message that carries the result of the mcopyin/mcopyout
 * that sbioctl() set up.  Only the SBIOC[SG]* commands listed in
 * sbwput() can reach here.
 */
static void
sbioc(queue_t *wq, mblk_t *mp)
{
	struct iocblk *iocp;
	struct sb *sbp = (struct sb *)wq->q_ptr;
	clock_t	ticks;
	mblk_t	*mop;

	iocp = (struct iocblk *)mp->b_rptr;

	switch (iocp->ioc_cmd) {
	case SBIOCGCHUNK:
	case SBIOCGSNAP:
	case SBIOCGFLAGS:
	case SBIOCGTIME:
		/* copyout already completed; just acknowledge */
		miocack(wq, mp, 0, 0);
		return;

	case SBIOCSTIME:
#ifdef _SYSCALL32_IMPL
		if ((iocp->ioc_flag & IOC_MODELS) != IOC_NATIVE) {
			/* 32-bit caller on a 64-bit kernel */
			struct timeval32 *t32;

			t32 = (struct timeval32 *)mp->b_cont->b_rptr;
			if (t32->tv_sec < 0 || t32->tv_usec < 0) {
				miocnak(wq, mp, 0, EINVAL);
				break;
			}
			ticks = TIMEVAL_TO_TICK(t32);
		} else
#endif /* _SYSCALL32_IMPL */
		{
			struct timeval *tb;

			tb = (struct timeval *)mp->b_cont->b_rptr;

			if (tb->tv_sec < 0 || tb->tv_usec < 0) {
				miocnak(wq, mp, 0, EINVAL);
				break;
			}
			ticks = TIMEVAL_TO_TICK(tb);
		}
		sbp->sb_ticks = ticks;
		/* a zero interval means "deliver immediately": no chunking */
		if (ticks == 0)
			sbp->sb_chunk = 0;
		miocack(wq, mp, 0, 0);
		/* flush out anything buffered under the old parameters */
		sbclosechunk(sbp);
		return;

	case SBIOCSCHUNK:
		/*
		 * set up hi/lo water marks on stream head read queue.
		 * unlikely to run out of resources. Fix at later date.
		 */
		if ((mop = allocb(sizeof (struct stroptions),
		    BPRI_MED)) != NULL) {
			struct stroptions *sop;
			uint_t chunk;

			chunk = *(uint_t *)mp->b_cont->b_rptr;
			mop->b_datap->db_type = M_SETOPTS;
			mop->b_wptr += sizeof (struct stroptions);
			sop = (struct stroptions *)mop->b_rptr;
			sop->so_flags = SO_HIWAT | SO_LOWAT;
			sop->so_hiwat = SNIT_HIWAT(chunk, 1);
			sop->so_lowat = SNIT_LOWAT(chunk, 1);
			qreply(wq, mop);
		}

		sbp->sb_chunk = *(uint_t *)mp->b_cont->b_rptr;
		miocack(wq, mp, 0, 0);
		sbclosechunk(sbp);
		return;

	case SBIOCSFLAGS:
		sbp->sb_flags = *(uint_t *)mp->b_cont->b_rptr;
		miocack(wq, mp, 0, 0);
		return;

	case SBIOCSSNAP:
		/*
		 * if chunking don't worry about effects of
		 * snipping of message size on head flow control
		 * since it has a relatively small bearing on the
		 * data rate onto the stream head.
		 */
		if (!sbp->sb_chunk) {
			/*
			 * set up hi/lo water marks on stream head read queue.
			 * unlikely to run out of resources. Fix at later date.
			 */
			if ((mop = allocb(sizeof (struct stroptions),
			    BPRI_MED)) != NULL) {
				struct stroptions *sop;
				uint_t snap;
				int fudge;

				snap = *(uint_t *)mp->b_cont->b_rptr;
				mop->b_datap->db_type = M_SETOPTS;
				mop->b_wptr += sizeof (struct stroptions);
				sop = (struct stroptions *)mop->b_rptr;
				sop->so_flags = SO_HIWAT | SO_LOWAT;
				/* smaller snapshots get a larger fudge factor */
				fudge = snap <= 100 ?	4 :
				    snap <= 400 ?	2 :
				    1;
				sop->so_hiwat = SNIT_HIWAT(snap, fudge);
				sop->so_lowat = SNIT_LOWAT(snap, fudge);
				qreply(wq, mop);
			}
		}

		sbp->sb_snap = *(uint_t *)mp->b_cont->b_rptr;
		miocack(wq, mp, 0, 0);
		return;

	default:
		/*
		 * Unreachable: sbwput() only dispatches the commands
		 * handled above.  NOTE(review): on a non-DEBUG kernel
		 * an unexpected command would leak mp here — relies on
		 * the sbwput() filter; confirm if that filter changes.
		 */
		ASSERT(0);
		return;
	}
}

/*
 * Write-side put procedure.  Its main task is to detect ioctls
 * for manipulating the buffering state and hand them to sbioctl.
 * Other message types are passed on through.
405*0Sstevel@tonic-gate */ 406*0Sstevel@tonic-gate static void 407*0Sstevel@tonic-gate sbwput(queue_t *wq, mblk_t *mp) 408*0Sstevel@tonic-gate { 409*0Sstevel@tonic-gate struct sb *sbp = (struct sb *)wq->q_ptr; 410*0Sstevel@tonic-gate struct copyresp *resp; 411*0Sstevel@tonic-gate 412*0Sstevel@tonic-gate if (sbp->sb_flags & SB_SEND_ON_WRITE) 413*0Sstevel@tonic-gate sbclosechunk(sbp); 414*0Sstevel@tonic-gate switch (mp->b_datap->db_type) { 415*0Sstevel@tonic-gate case M_IOCTL: 416*0Sstevel@tonic-gate sbioctl(wq, mp); 417*0Sstevel@tonic-gate break; 418*0Sstevel@tonic-gate 419*0Sstevel@tonic-gate case M_IOCDATA: 420*0Sstevel@tonic-gate resp = (struct copyresp *)mp->b_rptr; 421*0Sstevel@tonic-gate if (resp->cp_rval) { 422*0Sstevel@tonic-gate /* 423*0Sstevel@tonic-gate * Just free message on failure. 424*0Sstevel@tonic-gate */ 425*0Sstevel@tonic-gate freemsg(mp); 426*0Sstevel@tonic-gate break; 427*0Sstevel@tonic-gate } 428*0Sstevel@tonic-gate 429*0Sstevel@tonic-gate switch (resp->cp_cmd) { 430*0Sstevel@tonic-gate case SBIOCSTIME: 431*0Sstevel@tonic-gate case SBIOCSCHUNK: 432*0Sstevel@tonic-gate case SBIOCSFLAGS: 433*0Sstevel@tonic-gate case SBIOCSSNAP: 434*0Sstevel@tonic-gate case SBIOCGTIME: 435*0Sstevel@tonic-gate case SBIOCGCHUNK: 436*0Sstevel@tonic-gate case SBIOCGSNAP: 437*0Sstevel@tonic-gate case SBIOCGFLAGS: 438*0Sstevel@tonic-gate sbioc(wq, mp); 439*0Sstevel@tonic-gate break; 440*0Sstevel@tonic-gate 441*0Sstevel@tonic-gate default: 442*0Sstevel@tonic-gate putnext(wq, mp); 443*0Sstevel@tonic-gate break; 444*0Sstevel@tonic-gate } 445*0Sstevel@tonic-gate break; 446*0Sstevel@tonic-gate 447*0Sstevel@tonic-gate default: 448*0Sstevel@tonic-gate putnext(wq, mp); 449*0Sstevel@tonic-gate break; 450*0Sstevel@tonic-gate } 451*0Sstevel@tonic-gate } 452*0Sstevel@tonic-gate 453*0Sstevel@tonic-gate /* 454*0Sstevel@tonic-gate * Read-side put procedure. 
 It's responsible for buffering up incoming
 * messages and grouping them into aggregates according to the current
 * buffering parameters.
 */
static void
sbrput(queue_t *rq, mblk_t *mp)
{
	struct sb	*sbp = (struct sb *)rq->q_ptr;

	ASSERT(sbp);

	switch (mp->b_datap->db_type) {
	case M_PROTO:
		if (sbp->sb_flags & SB_NO_PROTO_CVT) {
			/* pass M_PROTO through untouched, unbuffered */
			sbclosechunk(sbp);
			sbsendit(rq, mp);
			break;
		} else {
			/*
			 * Convert M_PROTO to M_DATA.
			 */
			mp->b_datap->db_type = M_DATA;
		}
		/* FALLTHRU */

	case M_DATA:
		if ((sbp->sb_flags & SB_DEFER_CHUNK) &&
		    !(sbp->sb_state & SB_FRCVD)) {
			/*
			 * Defer-chunk mode: the first message after an
			 * idle period is sent up immediately; buffering
			 * starts with the second (SB_FRCVD set).
			 */
			sbclosechunk(sbp);
			sbsendit(rq, mp);
			sbp->sb_state |= SB_FRCVD;
		} else
			sbaddmsg(rq, mp);

		/* arm the buffering-interval timeout if not already pending */
		if ((sbp->sb_ticks > 0) && !(sbp->sb_timeoutid))
			sbp->sb_timeoutid = qtimeout(sbp->sb_rq, sbtick,
			    sbp, sbp->sb_ticks);

		break;

	case M_FLUSH:
		if (*mp->b_rptr & FLUSHR) {
			/*
			 * Reset timeout, flush the chunk currently in
			 * progress, and start a new chunk.
			 */
			if (sbp->sb_timeoutid) {
				(void) quntimeout(sbp->sb_rq,
				    sbp->sb_timeoutid);
				sbp->sb_timeoutid = 0;
			}
			if (sbp->sb_mp) {
				freemsg(sbp->sb_mp);
				sbp->sb_tail = sbp->sb_mp = sbp->sb_head = NULL;
				sbp->sb_mlen = 0;
				sbp->sb_mcount = 0;
			}
			flushq(rq, FLUSHALL);
		}
		putnext(rq, mp);
		break;

	case M_CTL:
		/*
		 * Zero-length M_CTL means our timeout() popped.
		 * (Zero-length messages are used internally for
		 * synchronization and are never passed upstream.)
		 */
		if (MBLKL(mp) == 0) {
			freemsg(mp);
			sbclosechunk(sbp);
		} else {
			sbclosechunk(sbp);
			sbsendit(rq, mp);
		}
		break;

	default:
		if (mp->b_datap->db_type <= QPCTL) {
			/* ordinary message: keep ordering with buffered data */
			sbclosechunk(sbp);
			sbsendit(rq, mp);
		} else {
			/* Note: out of band — high priority, bypass buffer */
			putnext(rq, mp);
		}
		break;
	}
}

/*
 * read service procedure.
 */
/* ARGSUSED */
static void
sbrsrv(queue_t *rq)
{
	mblk_t	*mp;

	/*
	 * High priority messages shouldn't get here but if
	 * one does, jam it through to avoid infinite loop.
	 */
	while ((mp = getq(rq)) != NULL) {
		if (!canputnext(rq) && (mp->b_datap->db_type <= QPCTL)) {
			/* should only get here if SB_NO_DROPS */
			(void) putbq(rq, mp);
			return;
		}
		putnext(rq, mp);
	}
}

/*
 * Handle write-side M_IOCTL messages.  First stage: for transparent
 * ioctls, set up the mcopyin/mcopyout; sbioc() finishes the job when
 * the M_IOCDATA comes back.  I_STR-style ioctls are handled in full.
 */
static void
sbioctl(queue_t *wq, mblk_t *mp)
{
	struct sb	*sbp = (struct sb *)wq->q_ptr;
	struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
	struct timeval	*t;
	clock_t	ticks;
	mblk_t	*mop;
	int	transparent = iocp->ioc_count;
	mblk_t	*datamp;
	int	error;

	switch (iocp->ioc_cmd) {
	case SBIOCSTIME:
		if (iocp->ioc_count == TRANSPARENT) {
#ifdef _SYSCALL32_IMPL
			if ((iocp->ioc_flag & IOC_MODELS) != IOC_NATIVE) {
				mcopyin(mp, NULL, sizeof (struct timeval32),
				    NULL);
			} else
#endif /* _SYSCALL32_IMPL */
			{
589*0Sstevel@tonic-gate mcopyin(mp, NULL, sizeof (*t), NULL); 590*0Sstevel@tonic-gate } 591*0Sstevel@tonic-gate qreply(wq, mp); 592*0Sstevel@tonic-gate } else { 593*0Sstevel@tonic-gate /* 594*0Sstevel@tonic-gate * Verify argument length. 595*0Sstevel@tonic-gate */ 596*0Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL 597*0Sstevel@tonic-gate if ((iocp->ioc_flag & IOC_MODELS) != IOC_NATIVE) { 598*0Sstevel@tonic-gate struct timeval32 *t32; 599*0Sstevel@tonic-gate 600*0Sstevel@tonic-gate error = miocpullup(mp, 601*0Sstevel@tonic-gate sizeof (struct timeval32)); 602*0Sstevel@tonic-gate if (error != 0) { 603*0Sstevel@tonic-gate miocnak(wq, mp, 0, error); 604*0Sstevel@tonic-gate break; 605*0Sstevel@tonic-gate } 606*0Sstevel@tonic-gate t32 = (struct timeval32 *)mp->b_cont->b_rptr; 607*0Sstevel@tonic-gate if (t32->tv_sec < 0 || t32->tv_usec < 0) { 608*0Sstevel@tonic-gate miocnak(wq, mp, 0, EINVAL); 609*0Sstevel@tonic-gate break; 610*0Sstevel@tonic-gate } 611*0Sstevel@tonic-gate ticks = TIMEVAL_TO_TICK(t32); 612*0Sstevel@tonic-gate } else 613*0Sstevel@tonic-gate #endif /* _SYSCALL32_IMPL */ 614*0Sstevel@tonic-gate { 615*0Sstevel@tonic-gate error = miocpullup(mp, sizeof (struct timeval)); 616*0Sstevel@tonic-gate if (error != 0) { 617*0Sstevel@tonic-gate miocnak(wq, mp, 0, error); 618*0Sstevel@tonic-gate break; 619*0Sstevel@tonic-gate } 620*0Sstevel@tonic-gate 621*0Sstevel@tonic-gate t = (struct timeval *)mp->b_cont->b_rptr; 622*0Sstevel@tonic-gate if (t->tv_sec < 0 || t->tv_usec < 0) { 623*0Sstevel@tonic-gate miocnak(wq, mp, 0, EINVAL); 624*0Sstevel@tonic-gate break; 625*0Sstevel@tonic-gate } 626*0Sstevel@tonic-gate ticks = TIMEVAL_TO_TICK(t); 627*0Sstevel@tonic-gate } 628*0Sstevel@tonic-gate sbp->sb_ticks = ticks; 629*0Sstevel@tonic-gate if (ticks == 0) 630*0Sstevel@tonic-gate sbp->sb_chunk = 0; 631*0Sstevel@tonic-gate miocack(wq, mp, 0, 0); 632*0Sstevel@tonic-gate sbclosechunk(sbp); 633*0Sstevel@tonic-gate } 634*0Sstevel@tonic-gate break; 635*0Sstevel@tonic-gate 
636*0Sstevel@tonic-gate case SBIOCGTIME: { 637*0Sstevel@tonic-gate struct timeval *t; 638*0Sstevel@tonic-gate 639*0Sstevel@tonic-gate /* 640*0Sstevel@tonic-gate * Verify argument length. 641*0Sstevel@tonic-gate */ 642*0Sstevel@tonic-gate if (transparent != TRANSPARENT) { 643*0Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL 644*0Sstevel@tonic-gate if ((iocp->ioc_flag & IOC_MODELS) != IOC_NATIVE) { 645*0Sstevel@tonic-gate error = miocpullup(mp, 646*0Sstevel@tonic-gate sizeof (struct timeval32)); 647*0Sstevel@tonic-gate if (error != 0) { 648*0Sstevel@tonic-gate miocnak(wq, mp, 0, error); 649*0Sstevel@tonic-gate break; 650*0Sstevel@tonic-gate } 651*0Sstevel@tonic-gate } else 652*0Sstevel@tonic-gate #endif /* _SYSCALL32_IMPL */ 653*0Sstevel@tonic-gate error = miocpullup(mp, sizeof (struct timeval)); 654*0Sstevel@tonic-gate if (error != 0) { 655*0Sstevel@tonic-gate miocnak(wq, mp, 0, error); 656*0Sstevel@tonic-gate break; 657*0Sstevel@tonic-gate } 658*0Sstevel@tonic-gate } 659*0Sstevel@tonic-gate 660*0Sstevel@tonic-gate /* 661*0Sstevel@tonic-gate * If infinite timeout, return range error 662*0Sstevel@tonic-gate * for the ioctl. 
663*0Sstevel@tonic-gate */ 664*0Sstevel@tonic-gate if (sbp->sb_ticks < 0) { 665*0Sstevel@tonic-gate miocnak(wq, mp, 0, ERANGE); 666*0Sstevel@tonic-gate break; 667*0Sstevel@tonic-gate } 668*0Sstevel@tonic-gate 669*0Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL 670*0Sstevel@tonic-gate if ((iocp->ioc_flag & IOC_MODELS) != IOC_NATIVE) { 671*0Sstevel@tonic-gate struct timeval32 *t32; 672*0Sstevel@tonic-gate 673*0Sstevel@tonic-gate if (transparent == TRANSPARENT) { 674*0Sstevel@tonic-gate datamp = allocb(sizeof (*t32), BPRI_MED); 675*0Sstevel@tonic-gate if (datamp == NULL) { 676*0Sstevel@tonic-gate miocnak(wq, mp, 0, EAGAIN); 677*0Sstevel@tonic-gate break; 678*0Sstevel@tonic-gate } 679*0Sstevel@tonic-gate mcopyout(mp, NULL, sizeof (*t32), NULL, datamp); 680*0Sstevel@tonic-gate } 681*0Sstevel@tonic-gate 682*0Sstevel@tonic-gate t32 = (struct timeval32 *)mp->b_cont->b_rptr; 683*0Sstevel@tonic-gate TICK_TO_TIMEVAL32(sbp->sb_ticks, t32); 684*0Sstevel@tonic-gate 685*0Sstevel@tonic-gate if (transparent == TRANSPARENT) 686*0Sstevel@tonic-gate qreply(wq, mp); 687*0Sstevel@tonic-gate else 688*0Sstevel@tonic-gate miocack(wq, mp, sizeof (*t32), 0); 689*0Sstevel@tonic-gate } else 690*0Sstevel@tonic-gate #endif /* _SYSCALL32_IMPL */ 691*0Sstevel@tonic-gate { 692*0Sstevel@tonic-gate if (transparent == TRANSPARENT) { 693*0Sstevel@tonic-gate datamp = allocb(sizeof (*t), BPRI_MED); 694*0Sstevel@tonic-gate if (datamp == NULL) { 695*0Sstevel@tonic-gate miocnak(wq, mp, 0, EAGAIN); 696*0Sstevel@tonic-gate break; 697*0Sstevel@tonic-gate } 698*0Sstevel@tonic-gate mcopyout(mp, NULL, sizeof (*t), NULL, datamp); 699*0Sstevel@tonic-gate } 700*0Sstevel@tonic-gate 701*0Sstevel@tonic-gate t = (struct timeval *)mp->b_cont->b_rptr; 702*0Sstevel@tonic-gate TICK_TO_TIMEVAL(sbp->sb_ticks, t); 703*0Sstevel@tonic-gate 704*0Sstevel@tonic-gate if (transparent == TRANSPARENT) 705*0Sstevel@tonic-gate qreply(wq, mp); 706*0Sstevel@tonic-gate else 707*0Sstevel@tonic-gate miocack(wq, mp, sizeof (*t), 0); 
708*0Sstevel@tonic-gate } 709*0Sstevel@tonic-gate break; 710*0Sstevel@tonic-gate } 711*0Sstevel@tonic-gate 712*0Sstevel@tonic-gate case SBIOCCTIME: 713*0Sstevel@tonic-gate sbp->sb_ticks = -1; 714*0Sstevel@tonic-gate miocack(wq, mp, 0, 0); 715*0Sstevel@tonic-gate break; 716*0Sstevel@tonic-gate 717*0Sstevel@tonic-gate case SBIOCSCHUNK: 718*0Sstevel@tonic-gate if (iocp->ioc_count == TRANSPARENT) { 719*0Sstevel@tonic-gate mcopyin(mp, NULL, sizeof (uint_t), NULL); 720*0Sstevel@tonic-gate qreply(wq, mp); 721*0Sstevel@tonic-gate } else { 722*0Sstevel@tonic-gate /* 723*0Sstevel@tonic-gate * Verify argument length. 724*0Sstevel@tonic-gate */ 725*0Sstevel@tonic-gate error = miocpullup(mp, sizeof (uint_t)); 726*0Sstevel@tonic-gate if (error != 0) { 727*0Sstevel@tonic-gate miocnak(wq, mp, 0, error); 728*0Sstevel@tonic-gate break; 729*0Sstevel@tonic-gate } 730*0Sstevel@tonic-gate 731*0Sstevel@tonic-gate /* 732*0Sstevel@tonic-gate * set up hi/lo water marks on stream head read queue. 733*0Sstevel@tonic-gate * unlikely to run out of resources. Fix at later date. 
734*0Sstevel@tonic-gate */ 735*0Sstevel@tonic-gate if ((mop = allocb(sizeof (struct stroptions), 736*0Sstevel@tonic-gate BPRI_MED)) != NULL) { 737*0Sstevel@tonic-gate struct stroptions *sop; 738*0Sstevel@tonic-gate uint_t chunk; 739*0Sstevel@tonic-gate 740*0Sstevel@tonic-gate chunk = *(uint_t *)mp->b_cont->b_rptr; 741*0Sstevel@tonic-gate mop->b_datap->db_type = M_SETOPTS; 742*0Sstevel@tonic-gate mop->b_wptr += sizeof (struct stroptions); 743*0Sstevel@tonic-gate sop = (struct stroptions *)mop->b_rptr; 744*0Sstevel@tonic-gate sop->so_flags = SO_HIWAT | SO_LOWAT; 745*0Sstevel@tonic-gate sop->so_hiwat = SNIT_HIWAT(chunk, 1); 746*0Sstevel@tonic-gate sop->so_lowat = SNIT_LOWAT(chunk, 1); 747*0Sstevel@tonic-gate qreply(wq, mop); 748*0Sstevel@tonic-gate } 749*0Sstevel@tonic-gate 750*0Sstevel@tonic-gate sbp->sb_chunk = *(uint_t *)mp->b_cont->b_rptr; 751*0Sstevel@tonic-gate miocack(wq, mp, 0, 0); 752*0Sstevel@tonic-gate sbclosechunk(sbp); 753*0Sstevel@tonic-gate } 754*0Sstevel@tonic-gate break; 755*0Sstevel@tonic-gate 756*0Sstevel@tonic-gate case SBIOCGCHUNK: 757*0Sstevel@tonic-gate /* 758*0Sstevel@tonic-gate * Verify argument length. 
759*0Sstevel@tonic-gate */ 760*0Sstevel@tonic-gate if (transparent != TRANSPARENT) { 761*0Sstevel@tonic-gate error = miocpullup(mp, sizeof (uint_t)); 762*0Sstevel@tonic-gate if (error != 0) { 763*0Sstevel@tonic-gate miocnak(wq, mp, 0, error); 764*0Sstevel@tonic-gate break; 765*0Sstevel@tonic-gate } 766*0Sstevel@tonic-gate } 767*0Sstevel@tonic-gate 768*0Sstevel@tonic-gate if (transparent == TRANSPARENT) { 769*0Sstevel@tonic-gate datamp = allocb(sizeof (uint_t), BPRI_MED); 770*0Sstevel@tonic-gate if (datamp == NULL) { 771*0Sstevel@tonic-gate miocnak(wq, mp, 0, EAGAIN); 772*0Sstevel@tonic-gate break; 773*0Sstevel@tonic-gate } 774*0Sstevel@tonic-gate mcopyout(mp, NULL, sizeof (uint_t), NULL, datamp); 775*0Sstevel@tonic-gate } 776*0Sstevel@tonic-gate 777*0Sstevel@tonic-gate *(uint_t *)mp->b_cont->b_rptr = sbp->sb_chunk; 778*0Sstevel@tonic-gate 779*0Sstevel@tonic-gate if (transparent == TRANSPARENT) 780*0Sstevel@tonic-gate qreply(wq, mp); 781*0Sstevel@tonic-gate else 782*0Sstevel@tonic-gate miocack(wq, mp, sizeof (uint_t), 0); 783*0Sstevel@tonic-gate break; 784*0Sstevel@tonic-gate 785*0Sstevel@tonic-gate case SBIOCSSNAP: 786*0Sstevel@tonic-gate if (iocp->ioc_count == TRANSPARENT) { 787*0Sstevel@tonic-gate mcopyin(mp, NULL, sizeof (uint_t), NULL); 788*0Sstevel@tonic-gate qreply(wq, mp); 789*0Sstevel@tonic-gate } else { 790*0Sstevel@tonic-gate /* 791*0Sstevel@tonic-gate * Verify argument length. 792*0Sstevel@tonic-gate */ 793*0Sstevel@tonic-gate error = miocpullup(mp, sizeof (uint_t)); 794*0Sstevel@tonic-gate if (error != 0) { 795*0Sstevel@tonic-gate miocnak(wq, mp, 0, error); 796*0Sstevel@tonic-gate break; 797*0Sstevel@tonic-gate } 798*0Sstevel@tonic-gate 799*0Sstevel@tonic-gate /* 800*0Sstevel@tonic-gate * if chunking dont worry about effects of 801*0Sstevel@tonic-gate * snipping of message size on head flow control 802*0Sstevel@tonic-gate * since it has a relatively small bearing on the 803*0Sstevel@tonic-gate * data rate onto the streamn head. 
804*0Sstevel@tonic-gate */ 805*0Sstevel@tonic-gate if (!sbp->sb_chunk) { 806*0Sstevel@tonic-gate /* 807*0Sstevel@tonic-gate * set up hi/lo water marks on stream 808*0Sstevel@tonic-gate * head read queue. unlikely to run out 809*0Sstevel@tonic-gate * of resources. Fix at later date. 810*0Sstevel@tonic-gate */ 811*0Sstevel@tonic-gate if ((mop = allocb(sizeof (struct stroptions), 812*0Sstevel@tonic-gate BPRI_MED)) != NULL) { 813*0Sstevel@tonic-gate struct stroptions *sop; 814*0Sstevel@tonic-gate uint_t snap; 815*0Sstevel@tonic-gate int fudge; 816*0Sstevel@tonic-gate 817*0Sstevel@tonic-gate snap = *(uint_t *)mp->b_cont->b_rptr; 818*0Sstevel@tonic-gate mop->b_datap->db_type = M_SETOPTS; 819*0Sstevel@tonic-gate mop->b_wptr += sizeof (*sop); 820*0Sstevel@tonic-gate sop = (struct stroptions *)mop->b_rptr; 821*0Sstevel@tonic-gate sop->so_flags = SO_HIWAT | SO_LOWAT; 822*0Sstevel@tonic-gate fudge = (snap <= 100) ? 4 : 823*0Sstevel@tonic-gate (snap <= 400) ? 2 : 1; 824*0Sstevel@tonic-gate sop->so_hiwat = SNIT_HIWAT(snap, fudge); 825*0Sstevel@tonic-gate sop->so_lowat = SNIT_LOWAT(snap, fudge); 826*0Sstevel@tonic-gate qreply(wq, mop); 827*0Sstevel@tonic-gate } 828*0Sstevel@tonic-gate } 829*0Sstevel@tonic-gate 830*0Sstevel@tonic-gate sbp->sb_snap = *(uint_t *)mp->b_cont->b_rptr; 831*0Sstevel@tonic-gate 832*0Sstevel@tonic-gate miocack(wq, mp, 0, 0); 833*0Sstevel@tonic-gate } 834*0Sstevel@tonic-gate break; 835*0Sstevel@tonic-gate 836*0Sstevel@tonic-gate case SBIOCGSNAP: 837*0Sstevel@tonic-gate /* 838*0Sstevel@tonic-gate * Verify argument length 839*0Sstevel@tonic-gate */ 840*0Sstevel@tonic-gate if (transparent != TRANSPARENT) { 841*0Sstevel@tonic-gate error = miocpullup(mp, sizeof (uint_t)); 842*0Sstevel@tonic-gate if (error != 0) { 843*0Sstevel@tonic-gate miocnak(wq, mp, 0, error); 844*0Sstevel@tonic-gate break; 845*0Sstevel@tonic-gate } 846*0Sstevel@tonic-gate } 847*0Sstevel@tonic-gate 848*0Sstevel@tonic-gate if (transparent == TRANSPARENT) { 849*0Sstevel@tonic-gate datamp = 
allocb(sizeof (uint_t), BPRI_MED); 850*0Sstevel@tonic-gate if (datamp == NULL) { 851*0Sstevel@tonic-gate miocnak(wq, mp, 0, EAGAIN); 852*0Sstevel@tonic-gate break; 853*0Sstevel@tonic-gate } 854*0Sstevel@tonic-gate mcopyout(mp, NULL, sizeof (uint_t), NULL, datamp); 855*0Sstevel@tonic-gate } 856*0Sstevel@tonic-gate 857*0Sstevel@tonic-gate *(uint_t *)mp->b_cont->b_rptr = sbp->sb_snap; 858*0Sstevel@tonic-gate 859*0Sstevel@tonic-gate if (transparent == TRANSPARENT) 860*0Sstevel@tonic-gate qreply(wq, mp); 861*0Sstevel@tonic-gate else 862*0Sstevel@tonic-gate miocack(wq, mp, sizeof (uint_t), 0); 863*0Sstevel@tonic-gate break; 864*0Sstevel@tonic-gate 865*0Sstevel@tonic-gate case SBIOCSFLAGS: 866*0Sstevel@tonic-gate /* 867*0Sstevel@tonic-gate * set the flags. 868*0Sstevel@tonic-gate */ 869*0Sstevel@tonic-gate if (iocp->ioc_count == TRANSPARENT) { 870*0Sstevel@tonic-gate mcopyin(mp, NULL, sizeof (uint_t), NULL); 871*0Sstevel@tonic-gate qreply(wq, mp); 872*0Sstevel@tonic-gate } else { 873*0Sstevel@tonic-gate error = miocpullup(mp, sizeof (uint_t)); 874*0Sstevel@tonic-gate if (error != 0) { 875*0Sstevel@tonic-gate miocnak(wq, mp, 0, error); 876*0Sstevel@tonic-gate break; 877*0Sstevel@tonic-gate } 878*0Sstevel@tonic-gate sbp->sb_flags = *(uint_t *)mp->b_cont->b_rptr; 879*0Sstevel@tonic-gate miocack(wq, mp, 0, 0); 880*0Sstevel@tonic-gate } 881*0Sstevel@tonic-gate break; 882*0Sstevel@tonic-gate 883*0Sstevel@tonic-gate case SBIOCGFLAGS: 884*0Sstevel@tonic-gate /* 885*0Sstevel@tonic-gate * Verify argument length 886*0Sstevel@tonic-gate */ 887*0Sstevel@tonic-gate if (transparent != TRANSPARENT) { 888*0Sstevel@tonic-gate error = miocpullup(mp, sizeof (uint_t)); 889*0Sstevel@tonic-gate if (error != 0) { 890*0Sstevel@tonic-gate miocnak(wq, mp, 0, error); 891*0Sstevel@tonic-gate break; 892*0Sstevel@tonic-gate } 893*0Sstevel@tonic-gate } 894*0Sstevel@tonic-gate 895*0Sstevel@tonic-gate if (transparent == TRANSPARENT) { 896*0Sstevel@tonic-gate datamp = allocb(sizeof (uint_t), BPRI_MED); 
897*0Sstevel@tonic-gate if (datamp == NULL) { 898*0Sstevel@tonic-gate miocnak(wq, mp, 0, EAGAIN); 899*0Sstevel@tonic-gate break; 900*0Sstevel@tonic-gate } 901*0Sstevel@tonic-gate mcopyout(mp, NULL, sizeof (uint_t), NULL, datamp); 902*0Sstevel@tonic-gate } 903*0Sstevel@tonic-gate 904*0Sstevel@tonic-gate *(uint_t *)mp->b_cont->b_rptr = sbp->sb_flags; 905*0Sstevel@tonic-gate 906*0Sstevel@tonic-gate if (transparent == TRANSPARENT) 907*0Sstevel@tonic-gate qreply(wq, mp); 908*0Sstevel@tonic-gate else 909*0Sstevel@tonic-gate miocack(wq, mp, sizeof (uint_t), 0); 910*0Sstevel@tonic-gate break; 911*0Sstevel@tonic-gate 912*0Sstevel@tonic-gate 913*0Sstevel@tonic-gate default: 914*0Sstevel@tonic-gate putnext(wq, mp); 915*0Sstevel@tonic-gate break; 916*0Sstevel@tonic-gate } 917*0Sstevel@tonic-gate } 918*0Sstevel@tonic-gate 919*0Sstevel@tonic-gate /* 920*0Sstevel@tonic-gate * Given a length l, calculate the amount of extra storage 921*0Sstevel@tonic-gate * required to round it up to the next multiple of the alignment a. 922*0Sstevel@tonic-gate */ 923*0Sstevel@tonic-gate #define RoundUpAmt(l, a) ((l) % (a) ? (a) - ((l) % (a)) : 0) 924*0Sstevel@tonic-gate /* 925*0Sstevel@tonic-gate * Calculate additional amount of space required for alignment. 926*0Sstevel@tonic-gate */ 927*0Sstevel@tonic-gate #define Align(l) RoundUpAmt(l, sizeof (ulong_t)) 928*0Sstevel@tonic-gate /* 929*0Sstevel@tonic-gate * Smallest possible message size when headers are enabled. 930*0Sstevel@tonic-gate * This is used to calculate whether a chunk is nearly full. 931*0Sstevel@tonic-gate */ 932*0Sstevel@tonic-gate #define SMALLEST_MESSAGE sizeof (struct sb_hdr) + _POINTER_ALIGNMENT 933*0Sstevel@tonic-gate 934*0Sstevel@tonic-gate /* 935*0Sstevel@tonic-gate * Process a read-side M_DATA message. 
936*0Sstevel@tonic-gate * 937*0Sstevel@tonic-gate * If the currently accumulating chunk doesn't have enough room 938*0Sstevel@tonic-gate * for the message, close off the chunk, pass it upward, and start 939*0Sstevel@tonic-gate * a new one. Then add the message to the current chunk, taking 940*0Sstevel@tonic-gate * account of the possibility that the message's size exceeds the 941*0Sstevel@tonic-gate * chunk size. 942*0Sstevel@tonic-gate * 943*0Sstevel@tonic-gate * If headers are enabled add an sb_hdr header and trailing alignment padding. 944*0Sstevel@tonic-gate * 945*0Sstevel@tonic-gate * To optimise performance the total number of msgbs should be kept 946*0Sstevel@tonic-gate * to a minimum. This is achieved by using any remaining space in message N 947*0Sstevel@tonic-gate * for both its own padding as well as the header of message N+1 if possible. 948*0Sstevel@tonic-gate * If there's insufficient space we allocate one message to hold this 'wrapper'. 949*0Sstevel@tonic-gate * (there's likely to be space beyond message N, since allocb would have 950*0Sstevel@tonic-gate * rounded up the required size to one of the dblk_sizes). 
 *
 */
static void
sbaddmsg(queue_t *rq, mblk_t *mp)
{
	struct sb	*sbp;
	struct timeval	t;
	struct sb_hdr	hp;
	mblk_t *wrapper;	/* padding for msg N, header for msg N+1 */
	mblk_t *last;		/* last mblk of current message */
	size_t wrapperlen;	/* length of header + padding */
	size_t origlen;		/* data length before truncation */
	size_t pad;		/* bytes required to align header */

	sbp = (struct sb *)rq->q_ptr;

	origlen = msgdsize(mp);

	/*
	 * Truncate the message to the snapshot length (sb_snap) if one is
	 * set.  adjmsg() with a negative length trims from the tail and
	 * returns 1 on success; if the trim fails we fall through and
	 * account for the full, untruncated length instead.
	 */
	if ((sbp->sb_snap > 0) && (origlen > sbp->sb_snap) &&
	    (adjmsg(mp, -(origlen - sbp->sb_snap)) == 1))
		hp.sbh_totlen = hp.sbh_msglen = sbp->sb_snap;
	else
		hp.sbh_totlen = hp.sbh_msglen = origlen;

	if (sbp->sb_flags & SB_NO_HEADER) {

		/*
		 * Would the inclusion of this message overflow the current
		 * chunk? If so close the chunk off and start a new one.
		 */
		if ((hp.sbh_totlen + sbp->sb_mlen) > sbp->sb_chunk)
			sbclosechunk(sbp);
		/*
		 * First message too big for chunk - just send it up.
		 * This will always be true when we're not chunking.
		 */
		if (hp.sbh_totlen > sbp->sb_chunk) {
			sbsendit(rq, mp);
			return;
		}

		/*
		 * We now know that the msg will fit in the chunk.
		 * Link it onto the end of the chunk.
		 * Since linkb() walks the entire chain, we keep a pointer to
		 * the first mblk of the last msgb added and call linkb on that
		 * that last message, rather than performing the
		 * O(n) linkb() operation on the whole chain.
		 * sb_head isn't needed in this SB_NO_HEADER mode.
		 */
		if (sbp->sb_mp)
			linkb(sbp->sb_tail, mp);
		else
			sbp->sb_mp = mp;

		sbp->sb_tail = mp;
		sbp->sb_mlen += hp.sbh_totlen;
		sbp->sb_mcount++;
	} else {
		/* Timestamp must be done immediately */
		uniqtime(&t);
		TIMEVAL_TO_TIMEVAL32(&hp.sbh_timestamp, &t);

		/*
		 * With headers enabled the chunk accounting (sbh_totlen)
		 * covers the header and the trailing alignment padding as
		 * well as the data itself.
		 */
		pad = Align(hp.sbh_totlen);
		hp.sbh_totlen += sizeof (hp);
		hp.sbh_totlen += pad;

		/*
		 * Would the inclusion of this message overflow the current
		 * chunk? If so close the chunk off and start a new one.
		 */
		if ((hp.sbh_totlen + sbp->sb_mlen) > sbp->sb_chunk)
			sbclosechunk(sbp);

		if (sbp->sb_head == NULL) {
			/* Allocate leading header of new chunk */
			sbp->sb_head = allocb(sizeof (hp), BPRI_MED);
			if (sbp->sb_head == NULL) {
				/*
				 * Memory allocation failure.
				 * This will need to be revisited
				 * since using certain flag combinations
				 * can result in messages being dropped
				 * silently.
				 */
				freemsg(mp);
				sbp->sb_drops++;
				return;
			}
			sbp->sb_mp = sbp->sb_head;
		}

		/*
		 * Copy header into message.  sb_head points at the mblk
		 * reserved for this message's header: either the freshly
		 * allocated leading block above, or the wrapper space left
		 * behind by the previous message (see below).
		 */
		hp.sbh_drops = sbp->sb_drops;
		hp.sbh_origlen = origlen;
		(void) memcpy(sbp->sb_head->b_wptr, (char *)&hp, sizeof (hp));
		sbp->sb_head->b_wptr += sizeof (hp);

		ASSERT(sbp->sb_head->b_wptr <= sbp->sb_head->b_datap->db_lim);

		/*
		 * Join message to the chunk
		 */
		linkb(sbp->sb_head, mp);

		sbp->sb_mcount++;
		sbp->sb_mlen += hp.sbh_totlen;

		/*
		 * If the first message alone is too big for the chunk close
		 * the chunk now.
		 * If the next message would immediately cause the chunk to
		 * overflow we may as well close the chunk now. The next
		 * message is certain to be at least SMALLEST_MESSAGE size.
		 */
		if (hp.sbh_totlen + SMALLEST_MESSAGE > sbp->sb_chunk) {
			sbclosechunk(sbp);
			return;
		}

		/*
		 * Find space for the wrapper. The wrapper consists of:
		 *
		 * 1) Padding for this message (this is to ensure each header
		 * begins on an 8 byte boundary in the userland buffer).
		 *
		 * 2) Space for the next message's header, in case the next
		 * next message will fit in this chunk.
		 *
		 * It may be possible to append the wrapper to the last mblk
		 * of the message, but only if we 'own' the data. If the dblk
		 * has been shared through dupmsg() we mustn't alter it.
		 */

		wrapperlen = (sizeof (hp) + pad);

		/* Is there space for the wrapper beyond the message's data ? */
		for (last = mp; last->b_cont; last = last->b_cont)
			;

		if ((wrapperlen <= MBLKTAIL(last)) &&
		    (last->b_datap->db_ref == 1)) {
			if (pad > 0) {
				/*
				 * Pad with zeroes to the next pointer boundary
				 * (we don't want to disclose kernel data to
				 * users), then advance wptr.
				 */
				(void) memset(last->b_wptr, 0, pad);
				last->b_wptr += pad;
			}
			/* Remember where to write the header information */
			sbp->sb_head = last;
		} else {
			/* Have to allocate additional space for the wrapper */
			wrapper = allocb(wrapperlen, BPRI_MED);
			if (wrapper == NULL) {
				/*
				 * No memory for the wrapper: flush the chunk
				 * upstream as-is rather than dropping data.
				 */
				sbclosechunk(sbp);
				return;
			}
			if (pad > 0) {
				/*
				 * Pad with zeroes (we don't want to disclose
				 * kernel data to users).
				 */
				(void) memset(wrapper->b_wptr, 0, pad);
				wrapper->b_wptr += pad;
			}
			/* Link the wrapper msg onto the end of the chunk */
			linkb(mp, wrapper);
			/* Remember to write the next header in this wrapper */
			sbp->sb_head = wrapper;
		}
	}
}

/*
 * Called from timeout().
 * Signal a timeout by passing a zero-length M_CTL msg in the read-side
 * to synchronize with any active module threads (open, close, wput, rput).
 */
static void
sbtick(void *arg)
{
	struct sb *sbp = arg;
	queue_t *rq;

	ASSERT(sbp);

	rq = sbp->sb_rq;
	sbp->sb_timeoutid = 0;		/* timeout has fired */

	/*
	 * putctl() returns 0 on allocation failure; in that case re-arm
	 * the timer and try again after another sb_ticks interval.
	 */
	if (putctl(rq, M_CTL) == 0)	/* failure */
		sbp->sb_timeoutid = qtimeout(rq, sbtick, sbp, sbp->sb_ticks);
}

/*
 * Close off the currently accumulating chunk and pass
 * it upward. Takes care of resetting timers as well.
 *
 * This routine is called both directly and as a result
 * of the chunk timeout expiring.
 */
static void
sbclosechunk(struct sb *sbp)
{
	mblk_t	*mp;
	queue_t	*rq;

	ASSERT(sbp);

	/* Cancel any pending chunk timeout before touching the chunk. */
	if (sbp->sb_timeoutid) {
		(void) quntimeout(sbp->sb_rq, sbp->sb_timeoutid);
		sbp->sb_timeoutid = 0;
	}

	mp = sbp->sb_mp;
	rq = sbp->sb_rq;

	/*
	 * If there's currently a chunk in progress, close it off
	 * and try to send it up.
	 */
	if (mp) {
		sbsendit(rq, mp);
	}

	/*
	 * Clear old chunk. Ready for new msgs.
	 */
	sbp->sb_tail = sbp->sb_mp = sbp->sb_head = NULL;
	sbp->sb_mlen = 0;
	sbp->sb_mcount = 0;
	/*
	 * In SB_DEFER_CHUNK mode, clearing SB_FRCVD means the next
	 * message received starts a fresh "first message" interval.
	 */
	if (sbp->sb_flags & SB_DEFER_CHUNK)
		sbp->sb_state &= ~SB_FRCVD;

}

/*
 * Pass a completed chunk upstream on the read side, honoring flow
 * control.  If the stream above is flow-controlled the chunk is either
 * queued (SB_NO_DROPS) or freed and counted in sb_drops.  Messages
 * already sitting on our queue are kept in order by queueing behind them.
 */
static void
sbsendit(queue_t *rq, mblk_t *mp)
{
	struct sb	*sbp = (struct sb *)rq->q_ptr;

	if (!canputnext(rq)) {
		if (sbp->sb_flags & SB_NO_DROPS)
			(void) putq(rq, mp);
		else {
			freemsg(mp);
			/* a chunk holds sb_mcount buffered messages */
			sbp->sb_drops += sbp->sb_mcount;
		}
		return;
	}
	/*
	 * If there are messages on the q already, keep
	 * queueing them since they need to be processed in order.
	 */
	if (qsize(rq) > 0) {
		/* should only get here if SB_NO_DROPS */
		(void) putq(rq, mp);
	} else
		putnext(rq, mp);
}