/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 1991-2003 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	All Rights Reserved	*/


#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * UNIX Device Driver Interface functions
 * This file contains the C-versions of putnext() and put().
 * Assembly language versions exist for some architectures.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/debug.h>
#include <sys/t_lock.h>
#include <sys/stream.h>
#include <sys/thread.h>
#include <sys/strsubr.h>
#include <sys/ddi.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/strft.h>
#include <sys/stack.h>
#include <sys/archsystm.h>

/*
 * Streams with many modules may create long chains of calls via putnext()
 * which may exhaust stack space.  When putnext detects that the stack space
 * left is too small (less than PUT_STACK_NEEDED), the call chain is broken
 * and further processing is delegated to the background thread via a call to
 * putnext_tail().  Unfortunately there is no generic solution with a fixed
 * stack size, and putnext() is a recursive function, so this hack is a
 * necessary evil.
 *
 * The redzone value is chosen dependent on the default stack size, which is
 * 8K on 32-bit kernels and on x86, and 16K on 64-bit kernels.  The values
 * are chosen empirically: for 64-bit kernels it is 5000 and for 32-bit
 * kernels it is 2500.  Experiments showed that 2500 is not enough for 64-bit
 * kernels and 2048 is not enough for 32-bit kernels.
 *
 * The redzone value is a tunable rather than a constant to allow adjustments
 * in the field.
 *
 * The check in PUT_STACK_NOTENOUGH is taken from the segkp_map_red()
 * function.  It is possible to define it as a generic function exported by
 * seg_kp, but
 *
 * a) It may sound like an open invitation to use the facility
 *    indiscriminately.
 * b) It adds an extra function call in the putnext path.
 *
 * We keep a global counter `put_stack_notenough' which keeps track of how
 * many times the stack switching hack was used.
 */

static ulong_t put_stack_notenough;

#ifdef	_LP64
#define	PUT_STACK_NEEDED	5000
#else
#define	PUT_STACK_NEEDED	2500
#endif

int put_stack_needed = PUT_STACK_NEEDED;

#if defined(STACK_GROWTH_DOWN)
#define	PUT_STACK_NOTENOUGH()					\
	(((STACK_BIAS + (uintptr_t)getfp() -			\
	    (uintptr_t)curthread->t_stkbase) < put_stack_needed) && \
	    ++put_stack_notenough)
#else
#error	"STACK_GROWTH_DOWN undefined"
#endif

boolean_t	UseFastlocks = B_FALSE;

/*
 * function: putnext()
 * purpose:  call the put routine of the queue linked to qp
 *
 * Note: this function is written to perform well on modern computer
 * architectures by e.g. preloading values into registers and "smearing" out
 * code.
 *
 * A note on the fastput mechanism.  The most significant bit of a
 * putcount is considered the "FASTPUT" bit.  If set, then there is
 * nothing stopping a concurrent put from occurring (note that putcounts
 * are only allowed on CIPUT perimeters).  If, however, it is cleared,
 * then we need to take the normal lock path by acquiring the SQLOCK.
 * This is a slowlock.  When a thread starts exclusiveness, e.g. wants
 * writer access, it will clear the FASTPUT bit, causing new threads
 * to take the slowlock path.  This assures that putcounts will not
 * increase in value, so the want-writer does not need to constantly
 * acquire the putlocks to sum the putcounts.
 * This does have the
 * possibility of having the count drop right after reading, but that
 * is no different than acquiring, reading and then releasing.  However,
 * in this mode, it cannot go up, so eventually they will drop to zero
 * and the want-writer can proceed.
 *
 * If the FASTPUT bit is set, or in the slowlock path we see that there
 * are no writers or want-writers, we make the choice of calling the
 * putproc, or a "fast-fill_syncq".  The fast-fill is a fill with
 * immediate intention to drain.  This is done because there are
 * messages already at the queue waiting to drain.  To preserve message
 * ordering, we need to put this message at the end, and pick up the
 * messages at the beginning.  We call the macro that actually
 * enqueues the message on the queue, and then call qdrain_syncq.  If
 * there is already a drainer, we just return.  We could make that
 * check before calling qdrain_syncq, but it is a little more clear
 * to have qdrain_syncq do this (we might try the above optimization
 * as this behavior evolves).  qdrain_syncq assumes that SQ_EXCL is set
 * already if this is a non-CIPUT perimeter, and that an appropriate
 * claim has been made.  So we do all that work before dropping the
 * SQLOCK with our claim.
 *
 * If we cannot proceed with the putproc/fast-fill, we just fall
 * through to the qfill_syncq, and then tail processing.  If state
 * has changed in that cycle, or wakeups are needed, it will occur
 * there.
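 *
 * As a rough sketch (an informal summary of the paragraphs above, not a
 * literal trace of the code below), the decision flow is:
 *
 *	putnext(qp, mp):
 *	    FASTPUT bit set and no SQ_STAYAWAY/SQ_EXCL/SQ_EVENTS?
 *		-> bump the per-CPU ciputctrl count and proceed
 *	    otherwise (slowlock path):
 *		-> grab SQLOCK and make a claim (sq_count++)
 *		-> writers, exclusive waiters, or not enough stack left?
 *			-> qfill_syncq() and let putnext_tail()/a background
 *			   thread finish the work
 *		-> non-CIPUT perimeter? set SQ_EXCL before dropping SQLOCK
 *	    then: no messages queued ahead of us? call the putproc()
 *		  else SQPUT_MP() + qdrain_syncq() to preserve ordering
 *	    finally: release the claim, and hand off to putnext_tail() if
 *		     SQ_TAIL or want-writer wakeups are pending.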
 */
void
putnext(queue_t *qp, mblk_t *mp)
{
	queue_t		*fqp = qp; /* For strft tracing */
	syncq_t		*sq;
	uint16_t	flags;
	uint16_t	drain_mask;
	struct qinit	*qi;
	int		(*putproc)();
	struct stdata	*stp;
	int		ix;
	boolean_t	queued = B_FALSE;
	kmutex_t	*sdlock = NULL;
	kmutex_t	*sqciplock = NULL;
	ushort_t	*sqcipcount = NULL;

	TRACE_2(TR_FAC_STREAMS_FR, TR_PUTNEXT_START,
	    "putnext_start:(%p, %p)", qp, mp);

	ASSERT(mp->b_datap->db_ref != 0);
	ASSERT(mp->b_next == NULL && mp->b_prev == NULL);
	stp = STREAM(qp);
	ASSERT(stp != NULL);
	if (stp->sd_ciputctrl != NULL) {
		ix = CPU->cpu_seqid & stp->sd_nciputctrl;
		sdlock = &stp->sd_ciputctrl[ix].ciputctrl_lock;
		mutex_enter(sdlock);
	} else {
		mutex_enter(sdlock = &stp->sd_lock);
	}
	qp = qp->q_next;
	sq = qp->q_syncq;
	ASSERT(sq != NULL);
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
	qi = qp->q_qinfo;

	if (sq->sq_ciputctrl != NULL) {
		/* fastlock: */
		ASSERT(sq->sq_flags & SQ_CIPUT);
		ix = CPU->cpu_seqid & sq->sq_nciputctrl;
		sqciplock = &sq->sq_ciputctrl[ix].ciputctrl_lock;
		sqcipcount = &sq->sq_ciputctrl[ix].ciputctrl_count;
		mutex_enter(sqciplock);
		if (!((*sqcipcount) & SQ_FASTPUT) ||
		    (sq->sq_flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS))) {
			mutex_exit(sqciplock);
			sqciplock = NULL;
			goto slowlock;
		}
		mutex_exit(sdlock);
		(*sqcipcount)++;
		ASSERT(*sqcipcount != 0);
		queued = qp->q_sqflags & Q_SQQUEUED;
		mutex_exit(sqciplock);
	} else {
slowlock:
		ASSERT(sqciplock == NULL);
		mutex_enter(SQLOCK(sq));
		mutex_exit(sdlock);
		flags = sq->sq_flags;
		/*
		 * We are going to drop SQLOCK, so make a claim to prevent
		 * syncq from closing.
		 */
		sq->sq_count++;
		ASSERT(sq->sq_count != 0);		/* Wraparound */
		/*
		 * If there are writers or exclusive waiters, there is not much
		 * we can do.  Place the message on the syncq and schedule a
		 * background thread to drain it.
		 *
		 * Also if we are approaching the end of the stack, fill the
		 * syncq and switch processing to a background thread - see
		 * the comments at the top of this file.
		 */
		if ((flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS)) ||
		    (sq->sq_needexcl != 0) || PUT_STACK_NOTENOUGH()) {

			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			    "putnext_end:(%p, %p, %p) SQ_EXCL fill",
			    qp, mp, sq);

			/*
			 * NOTE: qfill_syncq will need QLOCK.  It is safe to
			 * drop SQLOCK because a positive sq_count keeps the
			 * syncq from closing.
			 */
			mutex_exit(SQLOCK(sq));

			qfill_syncq(sq, qp, mp);
			/*
			 * NOTE: after the call to qfill_syncq() qp may be
			 * closed; neither qp nor sq should be referenced at
			 * this point.
			 *
			 * This ASSERT is located here to prevent stack frame
			 * consumption in the DEBUG code.
			 */
			ASSERT(sqciplock == NULL);
			return;
		}

		queued = qp->q_sqflags & Q_SQQUEUED;
		/*
		 * If this is not a concurrent perimeter, we need to acquire
		 * it exclusively.  It could not have been previously
		 * set since we held the SQLOCK before testing
		 * SQ_GOAWAY above (which includes SQ_EXCL).
		 * We do this here because we hold the SQLOCK, and need
		 * to make this state change BEFORE dropping it.
		 */
		if (!(flags & SQ_CIPUT)) {
			ASSERT((sq->sq_flags & SQ_EXCL) == 0);
			ASSERT(!(sq->sq_type & SQ_CIPUT));
			sq->sq_flags |= SQ_EXCL;
		}
		mutex_exit(SQLOCK(sq));
	}

	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));

	/*
	 * We now have a claim on the syncq; we are either going to
	 * put the message on the syncq and then drain it, or we are
	 * going to call the putproc().
	 */
	putproc = qi->qi_putp;
	if (!queued) {
		STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
		    mp->b_datap->db_base);
		(*putproc)(qp, mp);
		ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
	} else {
		mutex_enter(QLOCK(qp));
		/*
		 * If there are no messages in front of us, just call
		 * putproc(), otherwise enqueue the message and drain
		 * the queue.
		 */
		if (qp->q_syncqmsgs == 0) {
			mutex_exit(QLOCK(qp));
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
			    mp->b_datap->db_base);
			(*putproc)(qp, mp);
			ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		} else {
			/*
			 * We are doing a fill with the intent to
			 * drain (meaning we are filling because
			 * there are messages in front of us and we
			 * need to preserve message ordering).
			 * Therefore, put the message on the queue
			 * and call qdrain_syncq (must be done with
			 * the QLOCK held).
			 */
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT,
			    mp->b_rptr - mp->b_datap->db_base);

#ifdef DEBUG
			/*
			 * These two values were in the original code for
			 * all syncq messages.  This is unnecessary in
			 * the current implementation, but was retained
			 * in debug mode as it is useful to know where
			 * problems occur.
			 */
			mp->b_queue = qp;
			mp->b_prev = (mblk_t *)putproc;
#endif
			SQPUT_MP(qp, mp);
			qdrain_syncq(sq, qp);
			ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
		}
	}
	/*
	 * Before we release our claim, we need to see if any
	 * events were posted.  If the syncq is SQ_EXCL && SQ_QUEUED,
	 * we were responsible for going exclusive and, therefore,
	 * are responsible for draining.
	 */
	if (sq->sq_flags & (SQ_EXCL)) {
		drain_mask = 0;
	} else {
		drain_mask = SQ_QUEUED;
	}

	if (sqciplock != NULL) {
		mutex_enter(sqciplock);
		flags = sq->sq_flags;
		ASSERT(flags & SQ_CIPUT);
		/* SQ_EXCL could have been set by qwriter_inner */
		if ((flags & (SQ_EXCL|SQ_TAIL)) || sq->sq_needexcl) {
			/*
			 * We need SQLOCK to handle
			 * wakeups/drains/flags change.  sqciplock
			 * is needed to decrement sqcipcount.
			 * SQLOCK has to be grabbed before sqciplock
			 * for lock ordering purposes.
			 * After sqcipcount is decremented some lock
			 * still needs to be held to make sure the
			 * syncq won't get freed on us.
			 *
			 * To prevent deadlocks we try to grab SQLOCK and if it
			 * is held already we drop sqciplock, acquire SQLOCK
			 * and reacquire sqciplock again.
			 */
			if (mutex_tryenter(SQLOCK(sq)) == 0) {
				mutex_exit(sqciplock);
				mutex_enter(SQLOCK(sq));
				mutex_enter(sqciplock);
			}
			flags = sq->sq_flags;
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
		} else {
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			    "putnext_end:(%p, %p, %p) done", qp, mp, sq);
			return;
		}
	} else {
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		ASSERT(sq->sq_count != 0);
		sq->sq_count--;
	}
	if ((flags & (SQ_TAIL)) || sq->sq_needexcl) {
		putnext_tail(sq, qp, (flags & ~drain_mask));
		/*
		 * The only purpose of this ASSERT is to preserve the calling
		 * stack in a DEBUG kernel.
		 */
		ASSERT(sq != NULL);
		return;
	}
	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	/*
	 * Safe to always drop SQ_EXCL:
	 *	Not SQ_CIPUT means we set SQ_EXCL above.
	 *	For SQ_CIPUT, SQ_EXCL will only be set if the put
	 *	procedure did a qwriter(INNER), in which case
	 *	nobody else is in the inner perimeter and we
	 *	are exiting.
	 *
	 * I would like to make the following assertion:
	 *
	 *	ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) != (SQ_EXCL|SQ_CIPUT) ||
	 *	    sq->sq_count == 0);
	 *
	 * which indicates that if we are both putshared and exclusive,
	 * we became exclusive while executing the putproc, and the only
	 * claim on the syncq was the one we dropped a few lines above.
	 * But other threads that enter putnext while the syncq is exclusive
	 * need to make a claim as they may need to drop SQLOCK in the
	 * has_writers case to avoid deadlocks.  If these threads are
	 * delayed or preempted, it is possible that the writer thread can
	 * find out that there are other claims, making the (sq_count == 0)
	 * test invalid.
	 */

	sq->sq_flags = flags & ~SQ_EXCL;
	mutex_exit(SQLOCK(sq));
	TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
	    "putnext_end:(%p, %p, %p) done", qp, mp, sq);
}


/*
 * Wrapper for the qi_putp entry in the module ops vector.
 * Implements an asynchronous putnext().
 * Note that, unlike putnext(), this routine is NOT optimized for the
 * fastpath.  Calling this routine will grab whatever locks are necessary
 * to protect the stream head, q_next, and syncq's.
 * Since it is in the normal lock path, we do not use putlocks even if
 * they exist (though this can be changed by swapping the value of
 * UseFastlocks).
 */
void
put(queue_t *qp, mblk_t *mp)
{
	queue_t		*fqp = qp; /* For strft tracing */
	syncq_t		*sq;
	uint16_t	flags;
	uint16_t	drain_mask;
	struct qinit	*qi;
	int		(*putproc)();
	int		ix;
	boolean_t	queued = B_FALSE;
	kmutex_t	*sqciplock = NULL;
	ushort_t	*sqcipcount = NULL;

	TRACE_2(TR_FAC_STREAMS_FR, TR_PUT_START,
	    "put:(%X, %X)", qp, mp);
	ASSERT(mp->b_datap->db_ref != 0);
	ASSERT(mp->b_next == NULL && mp->b_prev == NULL);

	sq = qp->q_syncq;
	ASSERT(sq != NULL);
	qi = qp->q_qinfo;

	if (UseFastlocks && sq->sq_ciputctrl != NULL) {
		/* fastlock: */
		ASSERT(sq->sq_flags & SQ_CIPUT);
		ix = CPU->cpu_seqid & sq->sq_nciputctrl;
		sqciplock = &sq->sq_ciputctrl[ix].ciputctrl_lock;
		sqcipcount = &sq->sq_ciputctrl[ix].ciputctrl_count;
		mutex_enter(sqciplock);
		if (!((*sqcipcount) & SQ_FASTPUT) ||
		    (sq->sq_flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS))) {
			mutex_exit(sqciplock);
			sqciplock = NULL;
			goto slowlock;
		}
		(*sqcipcount)++;
		ASSERT(*sqcipcount != 0);
		queued = qp->q_sqflags & Q_SQQUEUED;
		mutex_exit(sqciplock);
	} else {
slowlock:
		ASSERT(sqciplock == NULL);
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		/*
		 * We are going to drop SQLOCK, so make a claim to prevent
		 * syncq from closing.
		 */
		sq->sq_count++;
		ASSERT(sq->sq_count != 0);		/* Wraparound */
		/*
		 * If there are writers or exclusive waiters, there is not much
		 * we can do.  Place the message on the syncq and schedule a
		 * background thread to drain it.
		 *
		 * Also if we are approaching the end of the stack, fill the
		 * syncq and switch processing to a background thread - see
		 * the comments at the top of this file.
		 */
		if ((flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS)) ||
		    (sq->sq_needexcl != 0) || PUT_STACK_NOTENOUGH()) {

			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			    "putnext_end:(%p, %p, %p) SQ_EXCL fill",
			    qp, mp, sq);

			/*
			 * NOTE: qfill_syncq will need QLOCK.  It is safe to
			 * drop SQLOCK because a positive sq_count keeps the
			 * syncq from closing.
			 */
			mutex_exit(SQLOCK(sq));

			qfill_syncq(sq, qp, mp);
			/*
			 * NOTE: after the call to qfill_syncq() qp may be
			 * closed; neither qp nor sq should be referenced at
			 * this point.
			 *
			 * This ASSERT is located here to prevent stack frame
			 * consumption in the DEBUG code.
			 */
			ASSERT(sqciplock == NULL);
			return;
		}

		queued = qp->q_sqflags & Q_SQQUEUED;
		/*
		 * If this is not a concurrent perimeter, we need to acquire
		 * it exclusively.  It could not have been previously
		 * set since we held the SQLOCK before testing
		 * SQ_GOAWAY above (which includes SQ_EXCL).
		 * We do this here because we hold the SQLOCK, and need
		 * to make this state change BEFORE dropping it.
		 */
		if (!(flags & SQ_CIPUT)) {
			ASSERT((sq->sq_flags & SQ_EXCL) == 0);
			ASSERT(!(sq->sq_type & SQ_CIPUT));
			sq->sq_flags |= SQ_EXCL;
		}
		mutex_exit(SQLOCK(sq));
	}

	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));

	/*
	 * We now have a claim on the syncq; we are either going to
	 * put the message on the syncq and then drain it, or we are
	 * going to call the putproc().
	 */
	putproc = qi->qi_putp;
	if (!queued) {
		STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
		    mp->b_datap->db_base);
		(*putproc)(qp, mp);
		ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
	} else {
		mutex_enter(QLOCK(qp));
		/*
		 * If there are no messages in front of us, just call
		 * putproc(), otherwise enqueue the message and drain
		 * the queue.
		 */
		if (qp->q_syncqmsgs == 0) {
			mutex_exit(QLOCK(qp));
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
			    mp->b_datap->db_base);
			(*putproc)(qp, mp);
			ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		} else {
			/*
			 * We are doing a fill with the intent to
			 * drain (meaning we are filling because
			 * there are messages in front of us and we
			 * need to preserve message ordering).
			 * Therefore, put the message on the queue
			 * and call qdrain_syncq (must be done with
			 * the QLOCK held).
			 */
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT,
			    mp->b_rptr - mp->b_datap->db_base);

#ifdef DEBUG
			/*
			 * These two values were in the original code for
			 * all syncq messages.  This is unnecessary in
			 * the current implementation, but was retained
			 * in debug mode as it is useful to know where
			 * problems occur.
			 */
			mp->b_queue = qp;
			mp->b_prev = (mblk_t *)putproc;
#endif
			SQPUT_MP(qp, mp);
			qdrain_syncq(sq, qp);
			ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
		}
	}
	/*
	 * Before we release our claim, we need to see if any
	 * events were posted.  If the syncq is SQ_EXCL && SQ_QUEUED,
	 * we were responsible for going exclusive and, therefore,
	 * are responsible for draining.
	 */
	if (sq->sq_flags & (SQ_EXCL)) {
		drain_mask = 0;
	} else {
		drain_mask = SQ_QUEUED;
	}

	if (sqciplock != NULL) {
		mutex_enter(sqciplock);
		flags = sq->sq_flags;
		ASSERT(flags & SQ_CIPUT);
		/* SQ_EXCL could have been set by qwriter_inner */
		if ((flags & (SQ_EXCL|SQ_TAIL)) || sq->sq_needexcl) {
			/*
			 * We need SQLOCK to handle
			 * wakeups/drains/flags change.  sqciplock
			 * is needed to decrement sqcipcount.
			 * SQLOCK has to be grabbed before sqciplock
			 * for lock ordering purposes.
			 * After sqcipcount is decremented some lock
			 * still needs to be held to make sure the
			 * syncq won't get freed on us.
			 *
			 * To prevent deadlocks we try to grab SQLOCK and if it
			 * is held already we drop sqciplock, acquire SQLOCK
			 * and reacquire sqciplock again.
			 */
			if (mutex_tryenter(SQLOCK(sq)) == 0) {
				mutex_exit(sqciplock);
				mutex_enter(SQLOCK(sq));
				mutex_enter(sqciplock);
			}
			flags = sq->sq_flags;
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
		} else {
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			    "putnext_end:(%p, %p, %p) done", qp, mp, sq);
			return;
		}
	} else {
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		ASSERT(sq->sq_count != 0);
		sq->sq_count--;
	}
	if ((flags & (SQ_TAIL)) || sq->sq_needexcl) {
		putnext_tail(sq, qp, (flags & ~drain_mask));
		/*
		 * The only purpose of this ASSERT is to preserve the calling
		 * stack in a DEBUG kernel.
		 */
		ASSERT(sq != NULL);
		return;
	}
	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	/*
	 * Safe to always drop SQ_EXCL:
	 *	Not SQ_CIPUT means we set SQ_EXCL above.
	 *	For SQ_CIPUT, SQ_EXCL will only be set if the put
	 *	procedure did a qwriter(INNER), in which case
	 *	nobody else is in the inner perimeter and we
	 *	are exiting.
	 *
	 * I would like to make the following assertion:
	 *
	 *	ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) != (SQ_EXCL|SQ_CIPUT) ||
	 *	    sq->sq_count == 0);
	 *
	 * which indicates that if we are both putshared and exclusive,
	 * we became exclusive while executing the putproc, and the only
	 * claim on the syncq was the one we dropped a few lines above.
	 * But other threads that enter putnext while the syncq is exclusive
	 * need to make a claim as they may need to drop SQLOCK in the
	 * has_writers case to avoid deadlocks.  If these threads are
	 * delayed or preempted, it is possible that the writer thread can
	 * find out that there are other claims, making the (sq_count == 0)
	 * test invalid.
	 */

	sq->sq_flags = flags & ~SQ_EXCL;
	mutex_exit(SQLOCK(sq));
	TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
	    "putnext_end:(%p, %p, %p) done", qp, mp, sq);
}
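
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * module write-side put procedure that does its own processing for M_DATA
 * messages and passes everything else downstream with putnext().  The
 * xxwput and xx_process names are made up for the example.
 *
 *	static int
 *	xxwput(queue_t *q, mblk_t *mp)
 *	{
 *		if (mp->b_datap->db_type == M_DATA) {
 *			xx_process(q, mp);
 *		} else {
 *			putnext(q, mp);
 *		}
 *		return (0);
 *	}
 */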