/*
 * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)uipc_socket.c	7.16 (Berkeley) 03/25/90
 */

#include "param.h"
#include "user.h"
#include "proc.h"
#include "file.h"
#include "malloc.h"
#include "mbuf.h"
#include "domain.h"
#include "protosw.h"
#include "socket.h"
#include "socketvar.h"
#include "tsleep.h"

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 *
 * TODO:
 *	test socketpair
 *	clean up async
 *	out-of-band is a kludge
 */
/*ARGSUSED*/
socreate(dom, aso, type, proto)
	struct socket **aso;
	register int type;
	int proto;
{
	register struct protosw *prp;
	register struct socket *so;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
	bzero((caddr_t)so, sizeof(*so));
	so->so_type = type;
	if (u.u_uid == 0)
		so->so_state = SS_PRIV;
	so->so_proto = prp;
	error =
	    (*prp->pr_usrreq)(so, PRU_ATTACH,
		(struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}

sobind(so, nam)
	struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_BIND,
		(struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

solisten(so, backlog)
	register struct socket *so;
	int backlog;
{
	int s = splnet(), error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
	if (error) {
		splx(s);
		return (error);
	}
	if (so->so_q == 0)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = min(backlog, SOMAXCONN);
	splx(s);
	return (0);
}

sofree(so)
	register struct socket *so;
{

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (so->so_head) {
		if (!soqremque(so, 0) && !soqremque(so, 1))
			panic("sofree dq");
		so->so_head = 0;
	}
	sbrelease(&so->so_snd);
	sorflush(so);
	FREE(so, M_SOCKET);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	if (so->so_options & SO_ACCEPTCONN) {
		while (so->so_q0)
			(void) soabort(so->so_q0);
		while (so->so_q)
			(void) soabort(so->so_q);
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED)
				tsleep((caddr_t)&so->so_timeo, PZERO+1,
				    SLP_SO_LINGER, 0);
		}
	}
drop:
	if (so->so_pcb) {
		int error2 =
		    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
			(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}

/*
 * Must be called at splnet...
 */
soabort(so)
	struct socket *so;
{

	return (
	    (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}

soaccept(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
	    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

soconnect(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
	    (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
	splx(s);
	return (error);
}

sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
	splx(s);
	return (error);
}

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 */
sosend(so, nam, uio, flags, control)
	register struct socket *so;
	struct mbuf *nam;
	register struct uio *uio;
	int flags;
	struct mbuf *control;
{
	struct mbuf *top = 0, **mp;
	register struct mbuf *m;
	register int space, len;
	int rlen = 0, error = 0, s, dontroute, first = 1, mlen;
	int atomic = sosendallatonce(so);

	if (atomic && uio->uio_resid > so->so_snd.sb_hiwat)
		return (EMSGSIZE);
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	u.u_ru.ru_msgsnd++;
	if (control)
		rlen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	sblock(&so->so_snd);
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error)
			snderr(so->so_error);
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if ((so->so_state & SS_ISCONFIRMING) == 0)
					snderr(ENOTCONN);
			} else if (nam == 0)
				snderr(EDESTADDRREQ);
		}
		if (flags & MSG_OOB)
			space = 1024;
		else {
			space = sbspace(&so->so_snd);
			if (space <= rlen ||
			    (atomic && space < uio->uio_resid + rlen) ||
			    (uio->uio_resid >= MCLBYTES && space < MCLBYTES &&
			    so->so_snd.sb_cc >= MCLBYTES &&
			    (so->so_state & SS_NBIO) == 0)) {
				if (so->so_state & SS_NBIO) {
					if (first)
						error = EWOULDBLOCK;
					splx(s);
					goto release;
				}
				sbunlock(&so->so_snd);
				sbwait(&so->so_snd);
				splx(s);
				goto restart;
			}
		}
		splx(s);
		mp = &top;
		space -= rlen;
		do {
			do {
				if (top == 0) {
					MGETHDR(m, M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = (struct ifnet *)0;
				} else {
					MGET(m, M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				if (uio->uio_resid >= MINCLSIZE &&
				    space >= MCLBYTES) {
					MCLGET(m, M_WAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
#ifdef	MAPPED_MBUFS
					len = min(MCLBYTES, uio->uio_resid);
					if (len < mlen - max_hdr)
						m->m_data += max_hdr;
#else
					len = min(MCLBYTES - max_hdr,
					    uio->uio_resid);
					m->m_data += max_hdr;
#endif
					space -= MCLBYTES;
				} else {
nopages:
					len = min(min(mlen, uio->uio_resid),
					    space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				error = uiomove(mtod(m, caddr_t), len, uio);
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (uio->uio_resid <= 0) {
					if ((flags & MSG_EOR) && top)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			s = splnet();				/* XXX */
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, (caddr_t)nam, control);
			splx(s);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			rlen = 0;
			top = 0;
			mp = &top;
			first = 0;
			if (error)
				goto release;
		} while (uio->uio_resid && space > 0);
	} while (uio->uio_resid);

release:
	sbunlock(&so->so_snd);
	if (top)
		m_freem(top);
	if (error == EPIPE)
		psignal(u.u_procp, SIGPIPE);
	return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf containing access rights if supported
 * by the protocol, and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 */
soreceive(so, aname, uio, flagsp, rightsp, controlp)
	register struct socket *so;
	struct mbuf **aname;
	register struct uio *uio;
	int *flagsp;
	struct mbuf **rightsp, **controlp;
{
	register struct mbuf *m;
	register int flags, len, error = 0, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord, *m_with_eor;
	int moff;

	if (rightsp)
		*rightsp = 0;
	if (aname)
		*aname = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
		    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
		if (error)
			goto bad;
		do {
			len = uio->uio_resid;
			if (len > m->m_len)
				len = m->m_len;
			error = uiomove(mtod(m, caddr_t), (int)len, uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
		    (struct mbuf *)0, (struct mbuf *)0);

restart:
	sblock(&so->so_rcv);
	s = splnet();

	m = so->so_rcv.sb_mb;
	if (m == 0) {
		if (so->so_rcv.sb_cc)
			panic("receive 1");
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE)
			goto release;
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if (so->so_state & SS_NBIO) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		sbwait(&so->so_rcv);
		splx(s);
		goto restart;
	}
	u.u_ru.ru_msgrcv++;
	if (m->m_type == 0)
		panic("receive 3a");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
		if (flags & MSG_PEEK) {
			if (aname)
				*aname = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (aname) {
				*aname = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	if (m && m->m_type == MT_RIGHTS) {
		if ((pr->pr_flags & PR_RIGHTS) == 0)
			panic("receive 2");
		if (flags & MSG_PEEK) {
			if (rightsp)
				*rightsp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (rightsp) {
				*rightsp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
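	/*
	 * An optional mbuf of protocol control information (MT_CONTROL)
	 * may follow; for MSG_PEEK it is copied out, otherwise it is
	 * removed from the record, just like the access rights above.
	 */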
	if (m && m->m_type == MT_CONTROL) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	if (m)
		m->m_nextpkt = nextrecord;
	moff = 0;
	offset = 0;
	m_with_eor = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA)
			flags |= MSG_OOB;
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
		if (m->m_flags & M_EOR)
			m_with_eor = m;
		len = uio->uio_resid;
		so->so_state &= ~SS_RCVATMARK;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		splx(s);
		error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
		s = splnet();
		if (len == m->m_len - moff) {
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else
				offset += len;
		}
		if (m_with_eor)
			break;
	}
	if (m_with_eor) {
		if (m != m_with_eor)
			flags |= MSG_EOR;
		/* else data not consumed from mbuf */
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		else if (pr->pr_flags & PR_ATOMIC) {
			flags |= MSG_TRUNC;
			(void) sbdroprecord(&so->so_rcv);
		}
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
			    (struct mbuf *)flags, (struct mbuf *)0,
			    (struct mbuf *)0);
		if (error == 0 && rightsp && *rightsp &&
		    pr->pr_domain->dom_externalize)
			error = (*pr->pr_domain->dom_externalize)(*rightsp);
	}
	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}

soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;

	how++;
	if (how & FREAD)
		sorflush(so);
	if (how & FWRITE)
		return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
		    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
	return (0);
}

sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sblock(sb);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb);
}

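/*
 * Set a socket option.  Options at the socket level (SOL_SOCKET) are
 * handled here; options at any other level are passed down to the
 * protocol through its pr_ctloutput entry.
 */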
sosetopt(so, level, optname, m0)
	register struct socket *so;
	int level, optname;
	struct mbuf *m0;
{
	int error = 0;
	register struct mbuf *m = m0;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger)) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* fall thru... */

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_OOBINLINE:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			switch (optname) {

			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(optname == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv,
				    (u_long) *mtod(m, int *)) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat = *mtod(m, int *);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat = *mtod(m, int *);
				break;
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = *mtod(m, int *);
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = *mtod(m, int *);
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}

sogetopt(so, level, optname, mp)
	register struct socket *so;
	int level, optname;
	struct mbuf **mp;
{
	register struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_BROADCAST:
		case SO_OOBINLINE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
			*mtod(m, int *) = so->so_snd.sb_timeo;
			break;

		case SO_RCVTIMEO:
			*mtod(m, int *) = so->so_rcv.sb_timeo;
			break;

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}

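/*
 * Out-of-band data has arrived: post SIGURG to the owning process or
 * process group and wake up any process selecting on the socket.
 */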
sohasoutofband(so)
	register struct socket *so;
{
	struct proc *p;

	if (so->so_pgid < 0)
		gsignal(-so->so_pgid, SIGURG);
	else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
		psignal(p, SIGURG);
	if (so->so_rcv.sb_sel) {
		selwakeup(so->so_rcv.sb_sel, so->so_rcv.sb_flags & SB_COLL);
		so->so_rcv.sb_sel = 0;
		so->so_rcv.sb_flags &= ~SB_COLL;
	}
}