/*
 * Copyright (c) 1982 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)uipc_socket.c	6.22 (Berkeley) 02/19/86
 */

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "file.h"
#include "inode.h"
#include "buf.h"
#include "mbuf.h"
#include "un.h"
#include "domain.h"
#include "protosw.h"
#include "socket.h"
#include "socketvar.h"
#include "stat.h"
#include "ioctl.h"
#include "uio.h"
#include "../net/route.h"
#include "../netinet/in.h"
#include "../net/if.h"

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 *
 * TODO:
 *	test socketpair
 *	clean up async
 *	out-of-band is a kludge
 */

/*
 * Create a socket of the requested type in domain dom.
 * The protocol is selected by number if proto is nonzero,
 * otherwise by type, and is then asked to attach to the new socket.
 */
/*ARGSUSED*/
socreate(dom, aso, type, proto)
	struct socket **aso;
	register int type;
	int proto;
{
	register struct protosw *prp;
	register struct socket *so;
	register struct mbuf *m;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	m = m_getclr(M_WAIT, MT_SOCKET);
	so = mtod(m, struct socket *);
	so->so_options = 0;
	so->so_state = 0;
	so->so_type = type;
	if (u.u_uid == 0)
		so->so_state = SS_PRIV;
	so->so_proto = prp;
	error =
	    (*prp->pr_usrreq)(so, PRU_ATTACH,
		(struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}

/*
 * Bind a name to a socket via the protocol's PRU_BIND request.
 */
sobind(so, nam)
	struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_BIND,
		(struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

/*
 * Mark a socket as willing to accept connections
 * and set its connection queue limit.
 */
solisten(so, backlog)
	register struct socket *so;
	int backlog;
{
	int s = splnet(), error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
	if (error) {
		splx(s);
		return (error);
	}
	if (so->so_q == 0) {
		so->so_q = so;
		so->so_q0 = so;
		so->so_options |= SO_ACCEPTCONN;
	}
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = MIN(backlog, SOMAXCONN);
	splx(s);
	return (0);
}

/*
 * Free a socket that holds neither a protocol control block
 * nor a file descriptor reference, removing it from any
 * accept queue first.
 */
sofree(so)
	register struct socket *so;
{

	if (so->so_head) {
		if (!soqremque(so, 0) && !soqremque(so, 1))
			panic("sofree dq");
		so->so_head = 0;
	}
	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	sbrelease(&so->so_snd);
	sorflush(so);
	(void) m_free(dtom(so));
}
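
/*
 * Illustrative sketch only (not part of the original source): roughly how
 * a caller such as the socket system-call layer might drive socreate(),
 * sobind() and solisten() above to set up a passive stream socket.
 * The function and its argument names are hypothetical, SOCK_STREAM is
 * just an example type, nam is assumed to be an mbuf already holding a
 * sockaddr, and error handling is abbreviated.  Compiled out via "notdef".
 */
#ifdef notdef
example_passive_open(dom, nam, backlog, aso)
	int dom;
	struct mbuf *nam;
	int backlog;
	struct socket **aso;
{
	struct socket *so;
	int error;

	error = socreate(dom, &so, SOCK_STREAM, 0);
	if (error)
		return (error);
	error = sobind(so, nam);
	if (error == 0)
		error = solisten(so, backlog);
	if (error) {
		(void) soclose(so);
		return (error);
	}
	*aso = so;
	return (0);
}
#endif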

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	if (so->so_options & SO_ACCEPTCONN) {
		while (so->so_q0 != so)
			(void) soabort(so->so_q0);
		while (so->so_q != so)
			(void) soabort(so->so_q);
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED)
				sleep((caddr_t)&so->so_timeo, PZERO+1);
		}
	}
drop:
	if (so->so_pcb) {
		int error2 =
		    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
			(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}

/*
 * Must be called at splnet...
 */
soabort(so)
	struct socket *so;
{

	return (
	    (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}

soaccept(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
	    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

soconnect(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
	    (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
	splx(s);
	return (error);
}

sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
	splx(s);
	return (error);
}
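
/*
 * Illustrative sketch only (not part of the original source): how a caller
 * of soconnect() above might wait for the connection to complete, in the
 * style used elsewhere in this file (sleeping on so_timeo, as soclose()
 * does for SO_LINGER).  The function name is hypothetical and non-blocking
 * sockets are not handled.  Compiled out via "notdef".
 */
#ifdef notdef
example_connect_wait(so, nam)
	struct socket *so;
	struct mbuf *nam;		/* mbuf holding the peer's address */
{
	int s, error;

	error = soconnect(so, nam);
	if (error)
		return (error);
	s = splnet();
	while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0)
		sleep((caddr_t)&so->so_timeo, PZERO+1);
	error = so->so_error;
	so->so_error = 0;
	splx(s);
	return (error);
}
#endif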

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 */
sosend(so, nam, uio, flags, rights)
	register struct socket *so;
	struct mbuf *nam;
	register struct uio *uio;
	int flags;
	struct mbuf *rights;
{
	struct mbuf *top = 0;
	register struct mbuf *m, **mp;
	register int space;
	int len, rlen = 0, error = 0, s, dontroute, first = 1;

	if (sosendallatonce(so) && uio->uio_resid > so->so_snd.sb_hiwat)
		return (EMSGSIZE);
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	u.u_ru.ru_msgsnd++;
	if (rights)
		rlen = rights->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	sblock(&so->so_snd);
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;		/* ??? */
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
				snderr(ENOTCONN);
			if (nam == 0)
				snderr(EDESTADDRREQ);
		}
		if (flags & MSG_OOB)
			space = 1024;
		else {
			space = sbspace(&so->so_snd);
			if (space <= rlen ||
			    (sosendallatonce(so) &&
			     space < uio->uio_resid + rlen) ||
			    (uio->uio_resid >= CLBYTES && space < CLBYTES &&
			     so->so_snd.sb_cc >= CLBYTES &&
			     (so->so_state & SS_NBIO) == 0)) {
				if (so->so_state & SS_NBIO) {
					if (first)
						error = EWOULDBLOCK;
					splx(s);
					goto release;
				}
				sbunlock(&so->so_snd);
				sbwait(&so->so_snd);
				splx(s);
				goto restart;
			}
		}
		splx(s);
		mp = &top;
		space -= rlen;
		while (space > 0) {
			register struct iovec *iov = uio->uio_iov;

			MGET(m, M_WAIT, MT_DATA);
			if (iov->iov_len >= NBPG && space >= CLBYTES) {
				MCLGET(m);
				if (m->m_len != CLBYTES)
					goto nopages;
				len = MIN(CLBYTES, iov->iov_len);
				space -= CLBYTES;
			} else {
nopages:
				len = MIN(MIN(MLEN, iov->iov_len), space);
				space -= len;
			}
			error = uiomove(mtod(m, caddr_t), len, UIO_WRITE, uio);
			m->m_len = len;
			*mp = m;
			if (error)
				goto release;
			mp = &m->m_next;
			if (uio->uio_resid <= 0)
				break;
			while (uio->uio_iov->iov_len == 0) {
				uio->uio_iov++;
				uio->uio_iovcnt--;
				if (uio->uio_iovcnt <= 0)
					panic("sosend");
			}
		}
		if (dontroute)
			so->so_options |= SO_DONTROUTE;
		s = splnet();				/* XXX */
		error = (*so->so_proto->pr_usrreq)(so,
		    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
		    top, (caddr_t)nam, rights);
		splx(s);
		if (dontroute)
			so->so_options &= ~SO_DONTROUTE;
		rights = 0;
		rlen = 0;
		top = 0;
		first = 0;
		if (error)
			break;
	} while (uio->uio_resid);

release:
	sbunlock(&so->so_snd);
	if (top)
		m_freem(top);
	if (error == EPIPE)
		psignal(u.u_procp, SIGPIPE);
	return (error);
}
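
/*
 * Illustrative sketch only (not part of the original source): how a
 * kernel-level caller might package a contiguous buffer into a uio and
 * hand it to sosend() above on a connected socket.  The function name is
 * hypothetical and the buffer is assumed to be a kernel address, hence
 * the UIO_SYSSPACE segment flag from uio.h.  Compiled out via "notdef".
 */
#ifdef notdef
example_send_buf(so, buf, len)
	struct socket *so;
	caddr_t buf;
	int len;
{
	struct uio auio;
	struct iovec aiov;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	return (sosend(so, (struct mbuf *)0, &auio, 0, (struct mbuf *)0));
}
#endif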

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf containing access rights if supported
 * by the protocol, and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 */
soreceive(so, aname, uio, flags, rightsp)
	register struct socket *so;
	struct mbuf **aname;
	register struct uio *uio;
	int flags;
	struct mbuf **rightsp;
{
	register struct mbuf *m, *n;
	register int len, error = 0, s, tomark;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff;

	if (rightsp)
		*rightsp = 0;
	if (aname)
		*aname = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
		    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
		if (error)
			goto bad;
		do {
			len = uio->uio_resid;
			if (len > m->m_len)
				len = m->m_len;
			error =
			    uiomove(mtod(m, caddr_t), (int)len, UIO_READ, uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}

restart:
	sblock(&so->so_rcv);
	s = splnet();

#define	rcverr(errno)	{ error = errno; splx(s); goto release; }
	if (so->so_rcv.sb_cc == 0) {
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED))
			rcverr(ENOTCONN);
		if (uio->uio_resid == 0)
			goto release;
		if (so->so_state & SS_NBIO)
			rcverr(EWOULDBLOCK);
		sbunlock(&so->so_rcv);
		sbwait(&so->so_rcv);
		splx(s);
		goto restart;
	}
	u.u_ru.ru_msgrcv++;
	m = so->so_rcv.sb_mb;
	if (m == 0)
		panic("receive 1");
	nextrecord = m->m_act;
	if (pr->pr_flags & PR_ADDR) {
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
		if (flags & MSG_PEEK) {
			if (aname)
				*aname = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (aname) {
				*aname = m;
				m = m->m_next;
				(*aname)->m_next = 0;
			} else {
				nextrecord = m->m_act;	/* record link, taken before the mbuf is freed */
				MFREE(m, n);
				m = n;
			}
		}
	}
	if (m && m->m_type == MT_RIGHTS) {
		if ((pr->pr_flags & PR_RIGHTS) == 0)
			panic("receive 2a");
		if (flags & MSG_PEEK) {
			if (rightsp)
				*rightsp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (rightsp) {
				*rightsp = m;
				n = m->m_next;
				m->m_next = 0;
				m = n;
			} else {
				MFREE(m, n);
				m = n;
			}
		}
	}
	moff = 0;
	tomark = so->so_oobmark;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
		len = uio->uio_resid;
		so->so_state &= ~SS_RCVATMARK;
		if (tomark && len > tomark)
			len = tomark;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		if ((flags & MSG_PEEK) == 0) {
			so->so_rcv.sb_mb = m;
			m->m_act = nextrecord;
		}
		splx(s);
		error =
		    uiomove(mtod(m, caddr_t) + moff, (int)len, UIO_READ, uio);
		s = splnet();
		if (len == m->m_len - moff) {
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				sbfree(&so->so_rcv, m);
				nextrecord = m->m_act;
				MFREE(m, n);
				so->so_rcv.sb_mb = m = n;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				m->m_off += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if ((flags & MSG_PEEK) == 0 && so->so_oobmark) {
			so->so_oobmark -= len;
			if (so->so_oobmark == 0) {
				so->so_state |= SS_RCVATMARK;
				break;
			}
		}
		if (tomark) {
			tomark -= len;
			if (tomark == 0)
				break;
		}
	}
	if ((flags & MSG_PEEK) == 0) {
		if (so->so_rcv.sb_mb == 0)
			so->so_rcv.sb_mb = nextrecord;
		else if (pr->pr_flags & PR_ATOMIC)
			(void) sbdroprecord(&so->so_rcv);
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
			    (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0 && rightsp && *rightsp &&
		    pr->pr_domain->dom_externalize)
			error = (*pr->pr_domain->dom_externalize)(*rightsp);
	}
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}
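
/*
 * Illustrative sketch only (not part of the original source): the
 * complementary receive path, reading into a kernel buffer through a uio
 * and reporting how much soreceive() above actually delivered.  The
 * function name is hypothetical and the buffer is assumed to be a kernel
 * address (UIO_SYSSPACE).  Compiled out via "notdef".
 */
#ifdef notdef
example_recv_buf(so, buf, len, lenp)
	struct socket *so;
	caddr_t buf;
	int len;
	int *lenp;
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	error = soreceive(so, (struct mbuf **)0, &auio, 0, (struct mbuf **)0);
	*lenp = len - auio.uio_resid;	/* bytes actually received */
	return (error);
}
#endif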

soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;

	how++;
	if (how & FREAD)
		sorflush(so);
	if (how & FWRITE)
		return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
		    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
	return (0);
}

sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sblock(sb);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb);
}

sosetopt(so, level, optname, m0)
	register struct socket *so;
	int level, optname;
	struct mbuf *m0;
{
	int error = 0;
	register struct mbuf *m = m0;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger)) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* fall thru... */

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			switch (optname) {

			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(optname == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv,
				    *mtod(m, int *)) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat = *mtod(m, int *);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat = *mtod(m, int *);
				break;
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = *mtod(m, int *);
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = *mtod(m, int *);
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}
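
/*
 * Illustrative sketch only (not part of the original source): setting an
 * integer-valued option through sosetopt() above.  The value is carried
 * in an MT_SOOPTS mbuf, which sosetopt() itself frees.  The function
 * name is hypothetical.  Compiled out via "notdef".
 */
#ifdef notdef
example_set_sndbuf(so, size)
	struct socket *so;
	int size;
{
	register struct mbuf *m;

	m = m_get(M_WAIT, MT_SOOPTS);
	m->m_len = sizeof (int);
	*mtod(m, int *) = size;
	return (sosetopt(so, SOL_SOCKET, SO_SNDBUF, m));
}
#endif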

sogetopt(so, level, optname, mp)
	register struct socket *so;
	int level, optname;
	struct mbuf **mp;
{
	register struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_BROADCAST:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
			*mtod(m, int *) = so->so_snd.sb_timeo;
			break;

		case SO_RCVTIMEO:
			*mtod(m, int *) = so->so_rcv.sb_timeo;
			break;

		default:
			m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}

sohasoutofband(so)
	register struct socket *so;
{
	struct proc *p;

	if (so->so_pgrp < 0)
		gsignal(-so->so_pgrp, SIGURG);
	else if (so->so_pgrp > 0 && (p = pfind(so->so_pgrp)) != 0)
		psignal(p, SIGURG);
	if (so->so_rcv.sb_sel) {
		selwakeup(so->so_rcv.sb_sel, so->so_rcv.sb_flags & SB_COLL);
		so->so_rcv.sb_sel = 0;
		so->so_rcv.sb_flags &= ~SB_COLL;
	}
}
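
/*
 * Illustrative sketch only (not part of the original source): fetching the
 * pending socket error through sogetopt() above (SO_ERROR), then releasing
 * the option mbuf that sogetopt() hands back.  The function name is
 * hypothetical.  Compiled out via "notdef".
 */
#ifdef notdef
example_get_error(so, errp)
	struct socket *so;
	int *errp;
{
	struct mbuf *m;
	int error;

	error = sogetopt(so, SOL_SOCKET, SO_ERROR, &m);
	if (error)
		return (error);
	*errp = *mtod(m, int *);
	(void) m_free(m);
	return (0);
}
#endif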