/*
 * Copyright (c) 1982, 1986, 1988, 1990 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)uipc_socket2.c	7.17 (Berkeley) 5/4/91
 *	$Id: uipc_socket2.c,v 1.3 1993/05/18 18:19:38 cgd Exp $
 */

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "file.h"
#include "buf.h"
#include "malloc.h"
#include "select.h"
#include "mbuf.h"
#include "protosw.h"
#include "socket.h"
#include "socketvar.h"

/*
 * Primitive routines for operating on sockets and socket buffers
 */

/* strings for sleep message: */
char	netio[] = "netio";
char	netcon[] = "netcon";
char	netcls[] = "netcls";

u_long	sb_max = SB_MAX;		/* patchable */

/*
 * Procedures to manipulate state flags of socket
 * and do appropriate wakeups.  Normal sequence from the
 * active (originating) side is that soisconnecting() is
 * called during processing of connect() call,
 * resulting in an eventual call to soisconnected() if/when the
 * connection is established.  When the connection is torn down
 * soisdisconnecting() is called during processing of disconnect() call,
 * and soisdisconnected() is called when the connection to the peer
 * is totally severed.  The semantics of these routines are such that
 * connectionless protocols can call soisconnected() and soisdisconnected()
 * only, bypassing the in-progress calls when setting up a ``connection''
 * takes no time.
 *
 * From the passive side, a socket is created with
 * two queues of sockets: so_q0 for connections in progress
 * and so_q for connections already made and awaiting user acceptance.
 * As a protocol is preparing incoming connections, it creates a socket
 * structure queued on so_q0 by calling sonewconn().  When the connection
 * is established, soisconnected() is called, and transfers the
 * socket structure to so_q, making it available to accept().
 *
 * If a socket is closed with sockets on either
 * so_q0 or so_q, these sockets are dropped.
 *
 * If higher level protocols are implemented in
 * the kernel, the wakeups done here will sometimes
 * cause software-interrupt process scheduling.
 */

soisconnecting(so)
	register struct socket *so;
{

	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;
}

soisconnected(so)
	register struct socket *so;
{
	register struct socket *head = so->so_head;

	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;
	if (head && soqremque(so, 0)) {
		soqinsque(head, so, 1);
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
	} else {
		wakeup((caddr_t)&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
	}
}

soisdisconnecting(so)
	register struct socket *so;
{

	so->so_state &= ~SS_ISCONNECTING;
	so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

soisdisconnected(so)
	register struct socket *so;
{

	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}
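
#ifdef notdef
/*
 * Illustrative sketch only (not part of the original file): the sequence
 * of state-flag calls a connection-oriented protocol would typically make
 * for an actively opened connection, per the comment above.  The function
 * name and the handshake placeholder are hypothetical.
 */
example_active_connection(so)
	register struct socket *so;
{

	soisconnecting(so);	/* connect() is now in progress */
	/* ... the protocol completes its handshake with the peer ... */
	soisconnected(so);	/* wakes the process sleeping in connect() */
	/* ... later, when the connection is shut down ... */
	soisdisconnecting(so);	/* disconnect() started */
	soisdisconnected(so);	/* peer completely gone */
}
#endif /* notdef */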

/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
 *
 * Currently, sonewconn() is defined as sonewconn1() in socketvar.h
 * to catch calls that are missing the (new) second parameter.
 */
struct socket *
sonewconn1(head, connstatus)
	register struct socket *head;
	int connstatus;
{
	register struct socket *so;
	int soqueue = connstatus ? 1 : 0;

	if (head->so_qlen + head->so_q0len > 3 * head->so_qlimit / 2)
		return ((struct socket *)0);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_DONTWAIT);
	if (so == NULL)
		return ((struct socket *)0);
	bzero((caddr_t)so, sizeof(*so));
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_pgid = head->so_pgid;
	(void) soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat);
	soqinsque(head, so, soqueue);
	if ((*so->so_proto->pr_usrreq)(so, PRU_ATTACH,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0)) {
		(void) soqremque(so, soqueue);
		(void) free((caddr_t)so, M_SOCKET);
		return ((struct socket *)0);
	}
	if (connstatus) {
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
		so->so_state |= connstatus;
	}
	return (so);
}

soqinsque(head, so, q)
	register struct socket *head, *so;
	int q;
{

	register struct socket **prev;
	so->so_head = head;
	if (q == 0) {
		head->so_q0len++;
		so->so_q0 = 0;
		for (prev = &(head->so_q0); *prev; )
			prev = &((*prev)->so_q0);
	} else {
		head->so_qlen++;
		so->so_q = 0;
		for (prev = &(head->so_q); *prev; )
			prev = &((*prev)->so_q);
	}
	*prev = so;
}

soqremque(so, q)
	register struct socket *so;
	int q;
{
	register struct socket *head, *prev, *next;

	head = so->so_head;
	prev = head;
	for (;;) {
		next = q ? prev->so_q : prev->so_q0;
		if (next == so)
			break;
		if (next == 0)
			return (0);
		prev = next;
	}
	if (q == 0) {
		prev->so_q0 = next->so_q0;
		head->so_q0len--;
	} else {
		prev->so_q = next->so_q;
		head->so_qlen--;
	}
	next->so_q0 = next->so_q = 0;
	next->so_head = 0;
	return (1);
}

/*
 * Socantsendmore indicates that no more data will be sent on the
 * socket; it would normally be applied to a socket when the user
 * informs the system that no more data is to be sent, by the protocol
 * code (in case PRU_SHUTDOWN).  Socantrcvmore indicates that no more data
 * will be received, and will normally be applied to the socket by a
 * protocol when it detects that the peer will send no more data.
 * Data queued for reading in the socket may yet be read.
 */

socantsendmore(so)
	struct socket *so;
{

	so->so_state |= SS_CANTSENDMORE;
	sowwakeup(so);
}

socantrcvmore(so)
	struct socket *so;
{

	so->so_state |= SS_CANTRCVMORE;
	sorwakeup(so);
}

/*
 * Socket select/wakeup routines.
 */

/*
 * Queue a process for a select on a socket buffer.
 */
sbselqueue(sb, cp)
	struct sockbuf *sb;
	struct proc *cp;
{
	selrecord(cp, &sb->sb_sel);
	sb->sb_flags |= SB_SEL;
}

/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
sbwait(sb)
	struct sockbuf *sb;
{

	sb->sb_flags |= SB_WAIT;
	return (tsleep((caddr_t)&sb->sb_cc,
	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, netio,
	    sb->sb_timeo));
}
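
#ifdef notdef
/*
 * Illustrative sketch only: how a consumer such as soreceive() typically
 * blocks with sbwait() until data shows up, rechecking its condition after
 * every wakeup.  Locking and error handling are abbreviated; the function
 * name is a hypothetical placeholder.
 */
example_wait_for_data(so)
	register struct socket *so;
{
	int error = 0;

	while (so->so_rcv.sb_cc == 0 && error == 0 &&
	    (so->so_state & SS_CANTRCVMORE) == 0)
		error = sbwait(&so->so_rcv);
	return (error);
}
#endif /* notdef */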

/*
 * Lock a sockbuf already known to be locked;
 * return any error returned from sleep (EINTR).
 */
sb_lock(sb)
	register struct sockbuf *sb;
{
	int error;

	while (sb->sb_flags & SB_LOCK) {
		sb->sb_flags |= SB_WANT;
		if (error = tsleep((caddr_t)&sb->sb_flags,
		    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK|PCATCH,
		    netio, 0))
			return (error);
	}
	sb->sb_flags |= SB_LOCK;
	return (0);
}

/*
 * Wakeup processes waiting on a socket buffer.
 * Do asynchronous notification via SIGIO
 * if the socket has the SS_ASYNC flag set.
 */
sowakeup(so, sb)
	register struct socket *so;
	register struct sockbuf *sb;
{
	struct proc *p;

	selwakeup(&sb->sb_sel);
	sb->sb_flags &= ~SB_SEL;
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup((caddr_t)&sb->sb_cc);
	}
	if (so->so_state & SS_ASYNC) {
		if (so->so_pgid < 0)
			gsignal(-so->so_pgid, SIGIO);
		else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
			psignal(p, SIGIO);
	}
}

/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and
 * one for receiving data.  Each buffer contains a queue of mbufs,
 * information about the number of mbufs and amount of data in the
 * queue, and other fields allowing select() statements and notification
 * on data availability to be implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.
 * Each record is a list of mbufs chained together with the m_next
 * field.  Records are chained together with the m_nextpkt field.  The upper
 * level routine soreceive() expects the following conventions to be
 * observed when placing information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's
 *    name, then a record containing that name must be present before
 *    any associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really
 *    just additional data associated with the message), and there are
 *    ``rights'' to be received, then a record containing this data
 *    should be present (mbuf's must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by
 *    a data record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space to the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space
 * should be released by calling sbrelease() when the socket is destroyed.
 */

soreserve(so, sndcc, rcvcc)
	register struct socket *so;
	u_long sndcc, rcvcc;
{

	if (sbreserve(&so->so_snd, sndcc) == 0)
		goto bad;
	if (sbreserve(&so->so_rcv, rcvcc) == 0)
		goto bad2;
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	return (0);
bad2:
	sbrelease(&so->so_snd);
bad:
	return (ENOBUFS);
}
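
#ifdef notdef
/*
 * Illustrative sketch only: a protocol's PRU_ATTACH handler would normally
 * commit send and receive space with soreserve() before the socket is used,
 * as described above.  The space constants and the function name are
 * hypothetical placeholders.
 */
#define	EXAMPLE_SENDSPACE	(8 * 1024)
#define	EXAMPLE_RECVSPACE	(8 * 1024)

example_attach(so)
	register struct socket *so;
{
	int error;

	error = soreserve(so, (u_long)EXAMPLE_SENDSPACE,
	    (u_long)EXAMPLE_RECVSPACE);
	if (error)
		return (error);
	/* allocate and link in the protocol control block here */
	return (0);
}
#endif /* notdef */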

/*
 * Allot mbufs to a sockbuf.
 * Attempt to scale mbmax so that mbcnt doesn't become limiting
 * if buffering efficiency is near the normal case.
 */
sbreserve(sb, cc)
	struct sockbuf *sb;
	u_long cc;
{

	if (cc > sb_max * MCLBYTES / (MSIZE + MCLBYTES))
		return (0);
	sb->sb_hiwat = cc;
	sb->sb_mbmax = min(cc * 2, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
sbrelease(sb)
	struct sockbuf *sb;
{

	sbflush(sb);
	sb->sb_hiwat = sb->sb_mbmax = 0;
}

/*
 * Routines to add and remove
 * data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to
 * append new mbufs to a socket buffer, after checking that adequate
 * space is available, comparing the function sbspace() with the amount
 * of data to be added.  sbappendrecord() differs from sbappend() in
 * that data supplied is treated as the beginning of a new record.
 * To place a sender's address, optional access rights, and data in a
 * socket receive buffer, sbappendaddr() should be used.  To place
 * access rights and data in a socket receive buffer, sbappendcontrol()
 * should be used.  In either case, the new data begins a new record.
 * Note that unlike sbappend() and sbappendrecord(), these routines check
 * for the caller that there will be enough space to store the data.
 * Each fails if there is not enough space, or if it cannot find mbufs
 * to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data
 * awaiting acknowledgement.  Data is normally copied from a socket
 * send buffer in a protocol with m_copy for output to a peer,
 * and then removing the data from the socket buffer with sbdrop()
 * or sbdroprecord() when the data is acknowledged by the peer.
 */

/*
 * Append mbuf chain m to the last record in the
 * socket buffer sb.  The additional space associated with
 * the mbuf chain is recorded in sb.  Empty mbufs are
 * discarded and mbufs are compacted where possible.
 */
sbappend(sb, m)
	struct sockbuf *sb;
	struct mbuf *m;
{
	register struct mbuf *n;

	if (m == 0)
		return;
	if (n = sb->sb_mb) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		do {
			if (n->m_flags & M_EOR) {
				sbappendrecord(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	}
	sbcompress(sb, m, n);
}

#ifdef SOCKBUF_DEBUG
sbcheck(sb)
	register struct sockbuf *sb;
{
	register struct mbuf *m;
	register int len = 0, mbcnt = 0;

	for (m = sb->sb_mb; m; m = m->m_next) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		if (m->m_nextpkt)
			panic("sbcheck nextpkt");
	}
	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
		printf("cc %d != %d || mbcnt %d != %d\n", len, sb->sb_cc,
		    mbcnt, sb->sb_mbcnt);
		panic("sbcheck");
	}
}
#endif
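
#ifdef notdef
/*
 * Illustrative sketch only: a stream protocol's input path would typically
 * check sbspace() before queueing incoming data with sbappend() and then
 * wake any readers, as the comment above describes.  The function name is
 * hypothetical and m is assumed to carry a packet header.
 */
example_deliver_stream(so, m)
	register struct socket *so;
	register struct mbuf *m;
{

	if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
		m_freem(m);	/* no room; a real protocol would back off */
		return;
	}
	sbappend(&so->so_rcv, m);
	sorwakeup(so);
}
#endif /* notdef */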

/*
 * As above, except the mbuf chain
 * begins a new record.
 */
sbappendrecord(sb, m0)
	register struct sockbuf *sb;
	register struct mbuf *m0;
{
	register struct mbuf *m;

	if (m0 == 0)
		return;
	if (m = sb->sb_mb)
		while (m->m_nextpkt)
			m = m->m_nextpkt;
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	if (m)
		m->m_nextpkt = m0;
	else
		sb->sb_mb = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
}

/*
 * As above except that OOB data
 * is inserted at the beginning of the sockbuf,
 * but after any other OOB data.
 */
sbinsertoob(sb, m0)
	register struct sockbuf *sb;
	register struct mbuf *m0;
{
	register struct mbuf *m;
	register struct mbuf **mp;

	if (m0 == 0)
		return;
	for (mp = &sb->sb_mb; m = *mp; mp = &((*mp)->m_nextpkt)) {
	    again:
		switch (m->m_type) {

		case MT_OOBDATA:
			continue;		/* WANT next train */

		case MT_CONTROL:
			if (m = m->m_next)
				goto again;	/* inspect THIS train further */
		}
		break;
	}
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	m0->m_nextpkt = *mp;
	*mp = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
}

/*
 * Append address and data, and optionally, control (ancillary) data
 * to the receive queue of a socket.  If present,
 * m0 must include a packet header with total length.
 * Returns 0 if no space in sockbuf or insufficient mbufs.
 */
sbappendaddr(sb, asa, m0, control)
	register struct sockbuf *sb;
	struct sockaddr *asa;
	struct mbuf *m0, *control;
{
	register struct mbuf *m, *n;
	int space = asa->sa_len;

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr");
	if (m0)
		space += m0->m_pkthdr.len;
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		if (n->m_next == 0)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	if (asa->sa_len > MLEN)
		return (0);
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	m->m_len = asa->sa_len;
	bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	for (n = m; n; n = n->m_next)
		sballoc(sb, n);
	if (n = sb->sb_mb) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = m;
	} else
		sb->sb_mb = m;
	return (1);
}

sbappendcontrol(sb, m0, control)
	struct sockbuf *sb;
	struct mbuf *control, *m0;
{
	register struct mbuf *m, *n;
	int space = 0;

	if (control == 0)
		panic("sbappendcontrol");
	for (m = control; ; m = m->m_next) {
		space += m->m_len;
		if (m->m_next == 0)
			break;
	}
	n = m;			/* save pointer to last control buffer */
	for (m = m0; m; m = m->m_next)
		space += m->m_len;
	if (space > sbspace(sb))
		return (0);
	n->m_next = m0;			/* concatenate data to control */
	for (m = control; m; m = m->m_next)
		sballoc(sb, m);
	if (n = sb->sb_mb) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = control;
	} else
		sb->sb_mb = control;
	return (1);
}
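
#ifdef notdef
/*
 * Illustrative sketch only: a datagram protocol would typically queue each
 * incoming packet together with its sender's address via sbappendaddr(),
 * dropping the packet when the receive buffer is full.  The function name
 * is a hypothetical placeholder.
 */
example_deliver_datagram(so, from, m)
	register struct socket *so;
	struct sockaddr *from;
	register struct mbuf *m;
{

	if (sbappendaddr(&so->so_rcv, from, m, (struct mbuf *)0) == 0) {
		m_freem(m);		/* receive buffer full */
		return;
	}
	sorwakeup(so);
}
#endif /* notdef */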

/*
 * Compress mbuf chain m into the socket
 * buffer sb following mbuf n.  If n
 * is null, the buffer is presumed empty.
 */
sbcompress(sb, m, n)
	register struct sockbuf *sb;
	register struct mbuf *m, *n;
{
	register int eor = 0;
	register struct mbuf *o;

	while (m) {
		eor |= m->m_flags & M_EOR;
		if (m->m_len == 0 &&
		    (eor == 0 ||
		     (((o = m->m_next) || (o = n)) &&
		      o->m_type == m->m_type))) {
			m = m_free(m);
			continue;
		}
		if (n && (n->m_flags & (M_EXT | M_EOR)) == 0 &&
		    (n->m_data + n->m_len + m->m_len) < &n->m_dat[MLEN] &&
		    n->m_type == m->m_type) {
			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			m = m_free(m);
			continue;
		}
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = 0;
	}
	if (eor) {
		if (n)
			n->m_flags |= eor;
		else
			printf("semi-panic: sbcompress\n");
	}
}

/*
 * Free all mbufs in a sockbuf.
 * Check that all resources are reclaimed.
 */
sbflush(sb)
	register struct sockbuf *sb;
{

	if (sb->sb_flags & SB_LOCK)
		panic("sbflush");
	while (sb->sb_mbcnt)
		sbdrop(sb, (int)sb->sb_cc);
	if (sb->sb_cc || sb->sb_mb)
		panic("sbflush 2");
}

/*
 * Drop data from (the front of) a sockbuf.
 */
sbdrop(sb, len)
	register struct sockbuf *sb;
	register int len;
{
	register struct mbuf *m, *mn;
	struct mbuf *next;

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	while (len > 0) {
		if (m == 0) {
			if (next == 0)
				panic("sbdrop");
			m = next;
			next = m->m_nextpkt;
			continue;
		}
		if (m->m_len > len) {
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	while (m && m->m_len == 0) {
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
}

/*
 * Drop a record off the front of a sockbuf
 * and move the next record to the front.
 */
sbdroprecord(sb)
	register struct sockbuf *sb;
{
	register struct mbuf *m, *mn;

	m = sb->sb_mb;
	if (m) {
		sb->sb_mb = m->m_nextpkt;
		do {
			sbfree(sb, m);
			MFREE(m, mn);
		} while (m = mn);
	}
}
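
#ifdef notdef
/*
 * Illustrative sketch only: a reliable protocol that keeps unacknowledged
 * data in the send buffer would discard the acknowledged prefix with
 * sbdrop() and wake any blocked writers, as noted in the comment above
 * sbappend().  The function name and the "acked" count are hypothetical.
 */
example_ack_received(so, acked)
	register struct socket *so;
	int acked;
{

	if (acked > so->so_snd.sb_cc)
		acked = so->so_snd.sb_cc;
	sbdrop(&so->so_snd, acked);
	sowwakeup(so);
}
#endif /* notdef */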