/*
 * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)uipc_socket.c	7.13 (Berkeley) 04/22/89
 */

#include "param.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "file.h"
#include "malloc.h"
#include "mbuf.h"
#include "domain.h"
#include "protosw.h"
#include "socket.h"
#include "socketvar.h"

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 *
 * TODO:
 *	test socketpair
 *	clean up async
 *	out-of-band is a kludge
 */
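
/*
 * (Added overview, not part of the original comments.)  Every routine
 * in this file funnels its real work through the protocol switch: it
 * picks up so->so_proto->pr_usrreq and calls it with a PRU_* request
 * code, passing addresses, data and access rights as mbufs.  sobind()
 * below is a representative instance of the pattern:
 *
 *	error = (*so->so_proto->pr_usrreq)(so, PRU_BIND,
 *	    (struct mbuf *)0, nam, (struct mbuf *)0);
 *
 * This keeps the socket layer protocol independent; each protocol
 * supplies the actual semantics behind the request codes.
 */
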
/*ARGSUSED*/
socreate(dom, aso, type, proto)
	struct socket **aso;
	register int type;
	int proto;
{
	register struct protosw *prp;
	register struct socket *so;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
	bzero((caddr_t)so, sizeof(*so));
	so->so_type = type;
	if (u.u_uid == 0)
		so->so_state = SS_PRIV;
	so->so_proto = prp;
	error =
	    (*prp->pr_usrreq)(so, PRU_ATTACH,
		(struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}

sobind(so, nam)
	struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_BIND,
		(struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

solisten(so, backlog)
	register struct socket *so;
	int backlog;
{
	int s = splnet(), error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
	if (error) {
		splx(s);
		return (error);
	}
	if (so->so_q == 0) {
		so->so_q = so;
		so->so_q0 = so;
		so->so_options |= SO_ACCEPTCONN;
	}
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = min(backlog, SOMAXCONN);
	splx(s);
	return (0);
}
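
/*
 * (Added note on the accept queues.)  A listening socket keeps two
 * circular queues headed at the socket itself: so_q0 for connections
 * still being established and so_q for connections ready to be
 * accepted.  That is why solisten() above initializes both pointers
 * to the socket, why soclose() drains them with
 * "while (so->so_q0 != so)", and why sofree() below must take an
 * embryonic socket off whichever queue it is on (soqremque) before
 * releasing its storage.
 */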

sofree(so)
	register struct socket *so;
{

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (so->so_head) {
		if (!soqremque(so, 0) && !soqremque(so, 1))
			panic("sofree dq");
		so->so_head = 0;
	}
	sbrelease(&so->so_snd);
	sorflush(so);
	FREE(so, M_SOCKET);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	if (so->so_options & SO_ACCEPTCONN) {
		while (so->so_q0 != so)
			(void) soabort(so->so_q0);
		while (so->so_q != so)
			(void) soabort(so->so_q);
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED)
				sleep((caddr_t)&so->so_timeo, PZERO+1);
		}
	}
drop:
	if (so->so_pcb) {
		int error2 =
		    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
			(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}

/*
 * Must be called at splnet...
 */
soabort(so)
	struct socket *so;
{

	return (
	    (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}

soaccept(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
	    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

soconnect(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
	    (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
	splx(s);
	return (error);
}

sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
	splx(s);
	return (error);
}

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 */
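
/*
 * (Added note on the structure of sosend() below, not part of the
 * original header.)  Senders are serialized with sblock() on so_snd.
 * The outer loop produces one record (or one buffer-full, for stream
 * protocols) per iteration; the inner loop allocates mbufs -- cluster
 * mbufs when at least MINCLSIZE bytes remain and space allows -- and
 * copies user data into them with uiomove().  When sbspace() reports
 * too little room, a nonblocking sender returns EWOULDBLOCK (if
 * nothing has been sent yet); otherwise the sender sleeps in sbwait()
 * and restarts.
 */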
sosend(so, nam, uio, flags, rights, control)
	register struct socket *so;
	struct mbuf *nam;
	register struct uio *uio;
	int flags;
	struct mbuf *rights, *control;
{
	struct mbuf *top = 0, **mp;
	register struct mbuf *m;
	register int space, len;
	int rlen = 0, error = 0, s, dontroute, first = 1, mlen;
	int atomic = sosendallatonce(so);

	if (atomic && uio->uio_resid > so->so_snd.sb_hiwat)
		return (EMSGSIZE);
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	u.u_ru.ru_msgsnd++;
	if (rights)
		rlen = rights->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	sblock(&so->so_snd);
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error)
			snderr(so->so_error);
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if (!uio->uio_resid && !rights && control) {
					snderr((*so->so_proto->pr_usrreq)(so,
					    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
					    top, (caddr_t)0, rights, control));
				} else if (so->so_state & SS_ISCONFIRMING)
					/* is ok */;
				else
					snderr(ENOTCONN);
			} else if (nam == 0)
				snderr(EDESTADDRREQ);
		}
		if (flags & MSG_OOB)
			space = 1024;
		else {
			space = sbspace(&so->so_snd);
			if (space <= rlen ||
			    (atomic && space < uio->uio_resid + rlen) ||
			    (uio->uio_resid >= MCLBYTES && space < MCLBYTES &&
			    so->so_snd.sb_cc >= MCLBYTES &&
			    (so->so_state & SS_NBIO) == 0)) {
				if (so->so_state & SS_NBIO) {
					if (first)
						error = EWOULDBLOCK;
					splx(s);
					goto release;
				}
				sbunlock(&so->so_snd);
				sbwait(&so->so_snd);
				splx(s);
				goto restart;
			}
		}
		splx(s);
		mp = &top;
		space -= rlen;
		do {
			do {
				if (top == 0) {
					MGETHDR(m, M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = (struct ifnet *)0;
				} else {
					MGET(m, M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				if (uio->uio_resid >= MINCLSIZE && space >= MCLBYTES) {
					MCLGET(m, M_WAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
#ifdef	MAPPED_MBUFS
					len = min(MCLBYTES, uio->uio_resid);
					if (len < mlen - max_hdr)
						m->m_data += max_hdr;
#else
					len = min(MCLBYTES - max_hdr, uio->uio_resid);
					m->m_data += max_hdr;
#endif
					space -= MCLBYTES;
				} else {
nopages:
					len = min(min(mlen, uio->uio_resid), space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				error = uiomove(mtod(m, caddr_t), len, UIO_WRITE, uio);
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (uio->uio_resid <= 0) {
					if ((flags & MSG_EOR) && top)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			s = splnet();				/* XXX */
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, (caddr_t)nam, rights, control);
			splx(s);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			rights = 0;
			rlen = 0;
			top = 0;
			mp = &top;
			first = 0;
			if (error)
				goto release;
		} while (uio->uio_resid && space > 0);
	} while (uio->uio_resid);

release:
	sbunlock(&so->so_snd);
	if (top)
		m_freem(top);
	if (error == EPIPE)
		psignal(u.u_procp, SIGPIPE);
	return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf containing access rights if supported
 * by the protocol, and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 */
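
/*
 * (Added sketch of the record layout described above, not part of the
 * original comment.)  Records hang off so_rcv.sb_mb and are chained
 * through m_nextpkt of each record's first mbuf; the mbufs making up
 * one record are chained through m_next:
 *
 *	sb_mb -> MT_SONAME -> MT_RIGHTS -> MT_CONTROL -> MT_DATA -> ...
 *	            |
 *	        m_nextpkt
 *	            |
 *	            v
 *	        next record (same shape)
 *
 * The address, rights and control mbufs are each optional, depending
 * on the protocol's PR_ADDR and PR_RIGHTS flags and on what sbappend*
 * queued; soreceive() below peels them off in exactly that order.
 */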
soreceive(so, aname, uio, flagsp, rightsp, controlp)
	register struct socket *so;
	struct mbuf **aname;
	register struct uio *uio;
	int *flagsp;
	struct mbuf **rightsp, **controlp;
{
	register struct mbuf *m;
	register int flags, len, error = 0, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff;

	if (rightsp)
		*rightsp = 0;
	if (aname)
		*aname = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
		    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
		if (error)
			goto bad;
		do {
			len = uio->uio_resid;
			if (len > m->m_len)
				len = m->m_len;
			error =
			    uiomove(mtod(m, caddr_t), (int)len, UIO_READ, uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
		    (struct mbuf *)0, (struct mbuf *)0);

restart:
	sblock(&so->so_rcv);
	s = splnet();

	m = so->so_rcv.sb_mb;
	if (m == 0) {
		if (so->so_rcv.sb_cc)
			panic("receive 1");
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE)
			goto release;
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if (so->so_state & SS_NBIO) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		sbwait(&so->so_rcv);
		splx(s);
		goto restart;
	}
	u.u_ru.ru_msgrcv++;
	if (m->m_type == 0)
		panic("receive 3a");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
		if (flags & MSG_PEEK) {
			if (aname)
				*aname = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (aname) {
				*aname = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	if (m && m->m_type == MT_RIGHTS) {
		if ((pr->pr_flags & PR_RIGHTS) == 0)
			panic("receive 2");
		if (flags & MSG_PEEK) {
			if (rightsp)
				*rightsp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (rightsp) {
				*rightsp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	if (m && m->m_type == MT_CONTROL) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	if (m)
		m->m_nextpkt = nextrecord;
	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA)
			flags |= MSG_OOB;
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
		if (m->m_flags & M_EOR)
			flags |= MSG_EOR;
		len = uio->uio_resid;
		so->so_state &= ~SS_RCVATMARK;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		splx(s);
		error =
		    uiomove(mtod(m, caddr_t) + moff, (int)len, UIO_READ, uio);
		s = splnet();
		if (len == m->m_len - moff) {
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else
				offset += len;
		}
	}
	if (m && (flags & MSG_EOR)) {
		flags &= ~MSG_EOR;
		if ((flags & MSG_PEEK) == 0)
			m->m_flags |= M_EOR;
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		else if (pr->pr_flags & PR_ATOMIC) {
			flags |= MSG_TRUNC;
			(void) sbdroprecord(&so->so_rcv);
		}
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
			    (struct mbuf *)flags, (struct mbuf *)0,
			    (struct mbuf *)0);
		if (error == 0 && rightsp && *rightsp &&
		    pr->pr_domain->dom_externalize)
			error = (*pr->pr_domain->dom_externalize)(*rightsp);
	}
	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}
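
/*
 * (Added note.)  The "how" argument to soshutdown() below arrives as
 * 0 (no more receives), 1 (no more sends) or 2 (neither), as passed
 * to shutdown(2); incrementing it turns those values into the bit
 * masks FREAD (1), FWRITE (2) and FREAD|FWRITE (3), so the two tests
 * that follow simply check the relevant bit.
 */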
soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;

	how++;
	if (how & FREAD)
		sorflush(so);
	if (how & FWRITE)
		return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
		    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
	return (0);
}

sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sblock(sb);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb);
}
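
/*
 * (Added note on the option routines below.)  Socket-level options
 * that are simple on/off switches (SO_DEBUG, SO_KEEPALIVE,
 * SO_REUSEADDR, ...) use their option value as the bit they occupy
 * in so_options, which is why sosetopt() can turn them on and off
 * with
 *
 *	so->so_options |= optname;	(or &= ~optname to clear)
 *
 * and sogetopt() can report them with "so_options & optname".
 * Options at any level other than SOL_SOCKET are passed through to
 * the protocol's pr_ctloutput routine untouched.
 */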
sosetopt(so, level, optname, m0)
	register struct socket *so;
	int level, optname;
	struct mbuf *m0;
{
	int error = 0;
	register struct mbuf *m = m0;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger)) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* fall thru... */

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_OOBINLINE:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			switch (optname) {

			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(optname == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv,
				    (u_long) *mtod(m, int *)) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat = *mtod(m, int *);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat = *mtod(m, int *);
				break;
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = *mtod(m, int *);
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = *mtod(m, int *);
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}

sogetopt(so, level, optname, mp)
	register struct socket *so;
	int level, optname;
	struct mbuf **mp;
{
	register struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_BROADCAST:
		case SO_OOBINLINE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
			*mtod(m, int *) = so->so_snd.sb_timeo;
			break;

		case SO_RCVTIMEO:
			*mtod(m, int *) = so->so_rcv.sb_timeo;
			break;

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}

sohasoutofband(so)
	register struct socket *so;
{
	struct proc *p;

	if (so->so_pgid < 0)
		gsignal(-so->so_pgid, SIGURG);
	else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
		psignal(p, SIGURG);
	if (so->so_rcv.sb_sel) {
		selwakeup(so->so_rcv.sb_sel, so->so_rcv.sb_flags & SB_COLL);
		so->so_rcv.sb_sel = 0;
		so->so_rcv.sb_flags &= ~SB_COLL;
	}
}