1 /* 2 * Copyright (c) 1982, 1986 Regents of the University of California. 3 * All rights reserved. The Berkeley software License Agreement 4 * specifies the terms and conditions for redistribution. 5 * 6 * @(#)uipc_socket.c 7.6 (Berkeley) 10/30/87 7 */ 8 9 #include "param.h" 10 #include "dir.h" 11 #include "user.h" 12 #include "proc.h" 13 #include "file.h" 14 #include "mbuf.h" 15 #include "domain.h" 16 #include "protosw.h" 17 #include "socket.h" 18 #include "socketvar.h" 19 20 /* 21 * Socket operation routines. 22 * These routines are called by the routines in 23 * sys_socket.c or from a system process, and 24 * implement the semantics of socket operations by 25 * switching out to the protocol specific routines. 26 * 27 * TODO: 28 * test socketpair 29 * clean up async 30 * out-of-band is a kludge 31 */ 32 /*ARGSUSED*/ 33 socreate(dom, aso, type, proto) 34 struct socket **aso; 35 register int type; 36 int proto; 37 { 38 register struct protosw *prp; 39 register struct socket *so; 40 register struct mbuf *m; 41 register int error; 42 43 if (proto) 44 prp = pffindproto(dom, proto, type); 45 else 46 prp = pffindtype(dom, type); 47 if (prp == 0) 48 return (EPROTONOSUPPORT); 49 if (prp->pr_type != type) 50 return (EPROTOTYPE); 51 m = m_getclr(M_WAIT, MT_SOCKET); 52 so = mtod(m, struct socket *); 53 so->so_options = 0; 54 so->so_state = 0; 55 so->so_type = type; 56 if (u.u_uid == 0) 57 so->so_state = SS_PRIV; 58 so->so_proto = prp; 59 error = 60 (*prp->pr_usrreq)(so, PRU_ATTACH, 61 (struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0); 62 if (error) { 63 so->so_state |= SS_NOFDREF; 64 sofree(so); 65 return (error); 66 } 67 *aso = so; 68 return (0); 69 } 70 71 sobind(so, nam) 72 struct socket *so; 73 struct mbuf *nam; 74 { 75 int s = splnet(); 76 int error; 77 78 error = 79 (*so->so_proto->pr_usrreq)(so, PRU_BIND, 80 (struct mbuf *)0, nam, (struct mbuf *)0); 81 splx(s); 82 return (error); 83 } 84 85 solisten(so, backlog) 86 register struct socket *so; 87 int backlog; 
88 { 89 int s = splnet(), error; 90 91 error = 92 (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, 93 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0); 94 if (error) { 95 splx(s); 96 return (error); 97 } 98 if (so->so_q == 0) { 99 so->so_q = so; 100 so->so_q0 = so; 101 so->so_options |= SO_ACCEPTCONN; 102 } 103 if (backlog < 0) 104 backlog = 0; 105 so->so_qlimit = MIN(backlog, SOMAXCONN); 106 splx(s); 107 return (0); 108 } 109 110 sofree(so) 111 register struct socket *so; 112 { 113 114 if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) 115 return; 116 if (so->so_head) { 117 if (!soqremque(so, 0) && !soqremque(so, 1)) 118 panic("sofree dq"); 119 so->so_head = 0; 120 } 121 sbrelease(&so->so_snd); 122 sorflush(so); 123 (void) m_free(dtom(so)); 124 } 125 126 /* 127 * Close a socket on last file table reference removal. 128 * Initiate disconnect if connected. 129 * Free socket when disconnect complete. 130 */ 131 soclose(so) 132 register struct socket *so; 133 { 134 int s = splnet(); /* conservative */ 135 int error; 136 137 if (so->so_options & SO_ACCEPTCONN) { 138 while (so->so_q0 != so) 139 (void) soabort(so->so_q0); 140 while (so->so_q != so) 141 (void) soabort(so->so_q); 142 } 143 if (so->so_pcb == 0) 144 goto discard; 145 if (so->so_state & SS_ISCONNECTED) { 146 if ((so->so_state & SS_ISDISCONNECTING) == 0) { 147 error = sodisconnect(so); 148 if (error) 149 goto drop; 150 } 151 if (so->so_options & SO_LINGER) { 152 if ((so->so_state & SS_ISDISCONNECTING) && 153 (so->so_state & SS_NBIO)) 154 goto drop; 155 while (so->so_state & SS_ISCONNECTED) 156 sleep((caddr_t)&so->so_timeo, PZERO+1); 157 } 158 } 159 drop: 160 if (so->so_pcb) { 161 int error2 = 162 (*so->so_proto->pr_usrreq)(so, PRU_DETACH, 163 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0); 164 if (error == 0) 165 error = error2; 166 } 167 discard: 168 if (so->so_state & SS_NOFDREF) 169 panic("soclose: NOFDREF"); 170 so->so_state |= SS_NOFDREF; 171 sofree(so); 172 splx(s); 173 return (error); 174 } 175 
/*
 * Must be called at splnet...
 */
soabort(so)
	struct socket *so;
{

	/* Hand the socket to the protocol for immediate destruction. */
	return (
	    (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}

/*
 * Accept a queued connection on so; the protocol fills in the
 * peer's name in the mbuf nam.  The caller must still hold the
 * "no file descriptor reference" state from the listener queue.
 */
soaccept(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;	/* the new descriptor now references us */
	error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
	    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

/*
 * Connect socket so to the address held in mbuf nam.
 */
soconnect(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);	/* a listening socket cannot connect */
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

/*
 * Connect two sockets to each other (used by socketpair).
 * Note so2 is passed through the address argument slot.
 */
soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
	    (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
	splx(s);
	return (error);
}

/*
 * Initiate a disconnect on a connected socket.
 */
sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;	/* disconnect already in progress */
		goto bad;
	}
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
	splx(s);
	return (error);
}

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 */
sosend(so, nam, uio, flags, rights)
	register struct socket *so;
	struct mbuf *nam;
	register struct uio *uio;
	int flags;
	struct mbuf *rights;
{
	struct mbuf *top = 0;
	register struct mbuf *m, **mp;
	register int space;
	int len, rlen = 0, error = 0, s, dontroute, first = 1;

	/* atomic protocols can never take a message bigger than the buffer */
	if (sosendallatonce(so) && uio->uio_resid > so->so_snd.sb_hiwat)
		return (EMSGSIZE);
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	u.u_ru.ru_msgsnd++;
	if (rights)
		rlen = rights->m_len;	/* rights count against buffer space */
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	sblock(&so->so_snd);		/* lock against other senders */
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;			/* ??? */
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
				snderr(ENOTCONN);
			if (nam == 0)
				snderr(EDESTADDRREQ);
		}
		if (flags & MSG_OOB)
			space = 1024;	/* OOB bypasses normal space check */
		else {
			space = sbspace(&so->so_snd);
			/*
			 * Block (or fail, if non-blocking) when there is no
			 * room for the rights, when an atomic send cannot
			 * fit in full, or when a large transfer would be
			 * chopped into small mbufs for lack of cluster space.
			 */
			if (space <= rlen ||
			   (sosendallatonce(so) &&
				space < uio->uio_resid + rlen) ||
			   (uio->uio_resid >= CLBYTES && space < CLBYTES &&
			   so->so_snd.sb_cc >= CLBYTES &&
			   (so->so_state & SS_NBIO) == 0)) {
				if (so->so_state & SS_NBIO) {
					/* report EWOULDBLOCK only if nothing
					 * has been sent yet */
					if (first)
						error = EWOULDBLOCK;
					splx(s);
					goto release;
				}
				sbunlock(&so->so_snd);
				sbwait(&so->so_snd);
				splx(s);
				goto restart;
			}
		}
		splx(s);
		mp = &top;
		space -= rlen;
		/* copy user data into an mbuf chain headed by top */
		while (space > 0) {
			MGET(m, M_WAIT, MT_DATA);
			if (uio->uio_resid >= CLBYTES / 2 && space >= CLBYTES) {
				MCLGET(m);
				if (m->m_len != CLBYTES)
					goto nopages;	/* no cluster available */
				len = MIN(CLBYTES, uio->uio_resid);
				space -= CLBYTES;
			} else {
nopages:
				len = MIN(MIN(MLEN, uio->uio_resid), space);
				space -= len;
			}
			error = uiomove(mtod(m, caddr_t), len, UIO_WRITE, uio);
			m->m_len = len;
			*mp = m;	/* link before error check so top frees it */
			if (error)
				goto release;
			mp = &m->m_next;
			if (uio->uio_resid <= 0)
				break;
		}
		if (dontroute)
			so->so_options |= SO_DONTROUTE;
		s = splnet();					/* XXX */
		error = (*so->so_proto->pr_usrreq)(so,
		    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
		    top, (caddr_t)nam, rights);
		splx(s);
		if (dontroute)
			so->so_options &= ~SO_DONTROUTE;
		rights = 0;	/* rights and chain now owned by the protocol */
		rlen = 0;
		top = 0;
		first = 0;
		if (error)
			break;
	} while (uio->uio_resid);

release:
	sbunlock(&so->so_snd);
	if (top)
		m_freem(top);	/* free any chain the protocol never saw */
	if (error == EPIPE)
		psignal(u.u_procp, SIGPIPE);
	return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf containing access rights if supported
 * by the protocol, and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 */
soreceive(so, aname, uio, flags, rightsp)
	register struct socket *so;
	struct mbuf **aname;
	register struct uio *uio;
	int flags;
	struct mbuf **rightsp;
{
	register struct mbuf *m;
	register int len, error = 0, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff;

	if (rightsp)
		*rightsp = 0;
	if (aname)
		*aname = 0;
	if (flags & MSG_OOB) {
		/* out-of-band data comes straight from the protocol */
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
		    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
		if (error)
			goto bad;
		do {
			len = uio->uio_resid;
			if (len > m->m_len)
				len = m->m_len;
			error =
			    uiomove(mtod(m, caddr_t), (int)len, UIO_READ, uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}

restart:
	sblock(&so->so_rcv);		/* lock against other receivers */
	s = splnet();

	if (so->so_rcv.sb_cc == 0) {
		/* buffer empty: report error/EOF, fail, or wait for data */
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE)
			goto release;		/* EOF: return 0 bytes */
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if (so->so_state & SS_NBIO) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		sbwait(&so->so_rcv);
		splx(s);
		goto restart;
	}
	u.u_ru.ru_msgrcv++;
	m = so->so_rcv.sb_mb;
	if (m == 0)
		panic("receive 1");
	nextrecord = m->m_act;		/* remember the following record */
	if (pr->pr_flags & PR_ADDR) {
		/* first mbuf of the record must be the sender's address */
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
		if (flags & MSG_PEEK) {
			if (aname)
				*aname = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (aname) {
				*aname = m;	/* hand the mbuf to the caller */
				m = m->m_next;
				(*aname)->m_next = 0;
				so->so_rcv.sb_mb = m;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
			if (m)
				m->m_act = nextrecord;	/* keep record links */
		}
	}
	if (m && m->m_type == MT_RIGHTS) {
		/* optional access-rights mbuf follows the address */
		if ((pr->pr_flags & PR_RIGHTS) == 0)
			panic("receive 2");
		if (flags & MSG_PEEK) {
			if (rightsp)
				*rightsp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (rightsp) {
				*rightsp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
			if (m)
				m->m_act = nextrecord;
		}
	}
	moff = 0;	/* offset into current mbuf while peeking */
	offset = 0;	/* distance consumed toward the OOB mark while peeking */
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
		len = uio->uio_resid;
		so->so_state &= ~SS_RCVATMARK;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;	/* stop at the mark */
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * Copy to user space at low spl; the sockbuf is locked,
		 * but appends may still happen at interrupt level, so the
		 * sockbuf must stay consistent across this window.
		 */
		splx(s);
		error =
		    uiomove(mtod(m, caddr_t) + moff, (int)len, UIO_READ, uio);
		s = splnet();
		if (len == m->m_len - moff) {
			/* consumed the whole mbuf */
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_act;
				sbfree(&so->so_rcv, m);
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
				if (m)
					m->m_act = nextrecord;
			}
		} else {
			/* partial mbuf: trim the consumed front */
			if (flags & MSG_PEEK)
				moff += len;
			else {
				m->m_off += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;	/* never read past the mark */
				}
			} else
				offset += len;
		}
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		else if (pr->pr_flags & PR_ATOMIC)
			/* atomic protocol: discard the unread remainder */
			(void) sbdroprecord(&so->so_rcv);
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
			    (struct mbuf *)0, (struct mbuf *)0);
		/* convert any received rights into descriptors for the caller */
		if (error == 0 && rightsp && *rightsp &&
		    pr->pr_domain->dom_externalize)
			error = (*pr->pr_domain->dom_externalize)(*rightsp);
	}
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}

/*
 * Shut down part of a full-duplex connection: how is 0 (no more
 * receives), 1 (no more sends) or 2 (neither); how+1 maps these
 * onto the FREAD/FWRITE bits.
 */
soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;

	how++;
	if (how & FREAD)
		sorflush(so);
	if (how & FWRITE)
		return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
		    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
	return (0);
}

/*
 * Throw away the contents of a receive buffer.  The buffer is
 * snapshotted and zeroed under splimp so that any access rights
 * it holds can be disposed of outside the critical section.
 */
sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sblock(sb);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;			/* snapshot, then clear in place */
	bzero((caddr_t)sb, sizeof (*sb));
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb);
}

/*
 * Set a socket option; the value arrives in mbuf m0, which is
 * always freed here (the protocol consumes it via &m0 for
 * non-socket-level options).
 */
sosetopt(so, level, optname, m0)
	register struct socket *so;
	int level, optname;
	struct mbuf *m0;
{
	int error = 0;
	register struct mbuf *m = m0;

	if (level != SOL_SOCKET) {
		/* not a socket-level option; pass down to the protocol */
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger)) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* fall thru... */

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_OOBINLINE:
			/* boolean options: set or clear the option bit */
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			switch (optname) {

			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(optname == SO_SNDBUF ? &so->so_snd :
				    &so->so_rcv, *mtod(m, int *)) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat = *mtod(m, int *);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat = *mtod(m, int *);
				break;
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = *mtod(m, int *);
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = *mtod(m, int *);
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}

/*
 * Get a socket option; the value is returned to the caller in a
 * freshly allocated mbuf via *mp (ownership passes to the caller).
 */
sogetopt(so, level, optname, mp)
	register struct socket *so;
	int level, optname;
	struct mbuf **mp;
{
	register struct mbuf *m;

	if (level != SOL_SOCKET) {
		/* not a socket-level option; ask the protocol */
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof (int);	/* default; SO_LINGER overrides */

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_BROADCAST:
		case SO_OOBINLINE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;	/* reading the error clears it */
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
			*mtod(m, int *) = so->so_snd.sb_timeo;
			break;

		case SO_RCVTIMEO:
			*mtod(m, int *) = so->so_rcv.sb_timeo;
			break;

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}

/*
 * Notify the owning process or process group that urgent
 * (out-of-band) data has arrived, and wake up any selectors.
 */
sohasoutofband(so)
	register struct socket *so;
{
	struct proc *p;

	if (so->so_pgrp < 0)
		gsignal(-so->so_pgrp, SIGURG);	/* negative pgrp: whole group */
	else if (so->so_pgrp > 0 && (p = pfind(so->so_pgrp)) != 0)
		psignal(p, SIGURG);
	if (so->so_rcv.sb_sel) {
		selwakeup(so->so_rcv.sb_sel, so->so_rcv.sb_flags & SB_COLL);
		so->so_rcv.sb_sel = 0;
		so->so_rcv.sb_flags &= ~SB_COLL;
	}
}