/*
 * Copyright (c) 1982, 1986, 1988, 1990 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)uipc_socket.c	7.22 (Berkeley) 06/28/90
 */

#include "param.h"
#include "user.h"
#include "proc.h"
#include "file.h"
#include "malloc.h"
#include "mbuf.h"
#include "domain.h"
#include "protosw.h"
#include "socket.h"
#include "socketvar.h"

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 *
 * TODO:
 *	test socketpair
 *	clean up async
 *	out-of-band is a kludge
 */
/*ARGSUSED*/
socreate(dom, aso, type, proto)
        int dom;
        struct socket **aso;
        register int type;
        int proto;
{
        register struct protosw *prp;
        register struct socket *so;
        register int error;

        if (proto)
                prp = pffindproto(dom, proto, type);
        else
                prp = pffindtype(dom, type);
        if (prp == 0)
                return (EPROTONOSUPPORT);
        if (prp->pr_type != type)
                return (EPROTOTYPE);
        MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
        bzero((caddr_t)so, sizeof(*so));
        so->so_type = type;
        if (u.u_uid == 0)
                so->so_state = SS_PRIV;
        so->so_proto = prp;
        error =
            (*prp->pr_usrreq)(so, PRU_ATTACH,
                (struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
        if (error) {
                so->so_state |= SS_NOFDREF;
                sofree(so);
                return (error);
        }
        *aso = so;
        return (0);
}

sobind(so, nam)
        struct socket *so;
        struct mbuf *nam;
{
        int s = splnet();
        int error;

        error =
            (*so->so_proto->pr_usrreq)(so, PRU_BIND,
                (struct mbuf *)0, nam, (struct mbuf *)0);
        splx(s);
        return (error);
}

solisten(so, backlog)
        register struct socket *so;
        int backlog;
{
        int s = splnet(), error;

        error =
            (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
                (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
        if (error) {
                splx(s);
                return (error);
        }
        if (so->so_q == 0)
                so->so_options |= SO_ACCEPTCONN;
        if (backlog < 0)
                backlog = 0;
        so->so_qlimit = min(backlog, SOMAXCONN);
        splx(s);
        return (0);
}

sofree(so)
        register struct socket *so;
{

        if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
                return;
        if (so->so_head) {
                if (!soqremque(so, 0) && !soqremque(so, 1))
                        panic("sofree dq");
                so->so_head = 0;
        }
        sbrelease(&so->so_snd);
        sorflush(so);
        FREE(so, M_SOCKET);
}
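#ifdef notdef
/*
 * Illustrative sketch only, not part of the original file: the call
 * sequence a system-call layer (e.g. sys_socket.c) might use to set up
 * a listening stream socket with the routines above.  The function
 * name, the backlog of 5, and the AF_INET/SOCK_STREAM parameters are
 * assumptions made for this example.
 */
example_listen_setup(nam, aso)
        struct mbuf *nam;               /* mbuf holding a struct sockaddr */
        struct socket **aso;
{
        struct socket *so;
        int error;

        if (error = socreate(AF_INET, &so, SOCK_STREAM, 0))
                return (error);
        if (error = sobind(so, nam))
                goto bad;
        if (error = solisten(so, 5))
                goto bad;
        *aso = so;
        return (0);
bad:
        (void) soclose(so);
        return (error);
}
#endif /* notdef */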
extern char netcls[];           /* tsleep() wait message; assumed defined elsewhere in the kernel */

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
soclose(so)
        register struct socket *so;
{
        int s = splnet();               /* conservative */
        int error = 0;

        if (so->so_options & SO_ACCEPTCONN) {
                while (so->so_q0)
                        (void) soabort(so->so_q0);
                while (so->so_q)
                        (void) soabort(so->so_q);
        }
        if (so->so_pcb == 0)
                goto discard;
        if (so->so_state & SS_ISCONNECTED) {
                if ((so->so_state & SS_ISDISCONNECTING) == 0) {
                        error = sodisconnect(so);
                        if (error)
                                goto drop;
                }
                if (so->so_options & SO_LINGER) {
                        if ((so->so_state & SS_ISDISCONNECTING) &&
                            (so->so_state & SS_NBIO))
                                goto drop;
                        while (so->so_state & SS_ISCONNECTED)
                                if (error = tsleep((caddr_t)&so->so_timeo,
                                    PSOCK | PCATCH, netcls, so->so_linger))
                                        break;
                }
        }
drop:
        if (so->so_pcb) {
                int error2 =
                    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
                        (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
                if (error == 0)
                        error = error2;
        }
discard:
        if (so->so_state & SS_NOFDREF)
                panic("soclose: NOFDREF");
        so->so_state |= SS_NOFDREF;
        sofree(so);
        splx(s);
        return (error);
}

/*
 * Must be called at splnet...
 */
soabort(so)
        struct socket *so;
{

        return (
            (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
                (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}

soaccept(so, nam)
        register struct socket *so;
        struct mbuf *nam;
{
        int s = splnet();
        int error;

        if ((so->so_state & SS_NOFDREF) == 0)
                panic("soaccept: !NOFDREF");
        so->so_state &= ~SS_NOFDREF;
        error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
            (struct mbuf *)0, nam, (struct mbuf *)0);
        splx(s);
        return (error);
}

soconnect(so, nam)
        register struct socket *so;
        struct mbuf *nam;
{
        int s;
        int error;

        if (so->so_options & SO_ACCEPTCONN)
                return (EOPNOTSUPP);
        s = splnet();
        /*
         * If protocol is connection-based, can only connect once.
         * Otherwise, if connected, try to disconnect first.
         * This allows user to disconnect by connecting to, e.g.,
         * a null address.
         */
        if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
            ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
            (error = sodisconnect(so))))
                error = EISCONN;
        else
                error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
                    (struct mbuf *)0, nam, (struct mbuf *)0);
        splx(s);
        return (error);
}

soconnect2(so1, so2)
        register struct socket *so1;
        struct socket *so2;
{
        int s = splnet();
        int error;

        error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
            (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
        splx(s);
        return (error);
}

sodisconnect(so)
        register struct socket *so;
{
        int s = splnet();
        int error;

        if ((so->so_state & SS_ISCONNECTED) == 0) {
                error = ENOTCONN;
                goto bad;
        }
        if (so->so_state & SS_ISDISCONNECTING) {
                error = EALREADY;
                goto bad;
        }
        error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
            (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
        splx(s);
        return (error);
}
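#ifdef notdef
/*
 * Illustrative sketch only, not part of the original file: per the
 * comment in soconnect(), a datagram (non-PR_CONNREQUIRED) socket may
 * be connected repeatedly; each call dissolves any existing
 * association first, so a later soconnect() re-targets the socket and
 * connecting to a null address disconnects it.  "nam1" and "nam2" are
 * assumed to be mbufs holding sockaddrs.
 */
example_retarget(so, nam1, nam2)
        struct socket *so;
        struct mbuf *nam1, *nam2;
{
        int error;

        if (error = soconnect(so, nam1))        /* associate with first peer */
                return (error);
        return (soconnect(so, nam2));           /* implicitly disconnects,
                                                   then associates with the
                                                   second peer */
}
#endif /* notdef */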
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
sosend(so, addr, uio, top, control, flags)
        register struct socket *so;
        struct mbuf *addr;
        struct uio *uio;
        struct mbuf *top;
        struct mbuf *control;
        int flags;
{
        struct mbuf **mp;
        register struct mbuf *m;
        register long space, len, resid;
        int clen = 0, error, s, dontroute, mlen;
        int atomic = sosendallatonce(so) || top;

        if (uio)
                resid = uio->uio_resid;
        else
                resid = top->m_pkthdr.len;
        dontroute =
            (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
            (so->so_proto->pr_flags & PR_ATOMIC);
        u.u_ru.ru_msgsnd++;
        if (control)
                clen = control->m_len;
#define snderr(errno)   { error = errno; splx(s); goto release; }

restart:
        if (error = sblock(&so->so_snd))
                goto out;
        do {
                s = splnet();
                if (so->so_state & SS_CANTSENDMORE)
                        snderr(EPIPE);
                if (so->so_error)
                        snderr(so->so_error);
                if ((so->so_state & SS_ISCONNECTED) == 0) {
                        if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
                                if ((so->so_state & SS_ISCONFIRMING) == 0)
                                        snderr(ENOTCONN);
                        } else if (addr == 0)
                                snderr(EDESTADDRREQ);
                }
                space = sbspace(&so->so_snd);
                if (flags & MSG_OOB)
                        space += 1024;
                if (space < resid + clen &&
                    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
                        if ((atomic && resid > so->so_snd.sb_hiwat) ||
                            clen > so->so_snd.sb_hiwat)
                                snderr(EMSGSIZE);
                        if (so->so_state & SS_NBIO)
                                snderr(EWOULDBLOCK);
                        sbunlock(&so->so_snd);
                        error = sbwait(&so->so_snd);
                        splx(s);
                        if (error)
                                goto out;
                        goto restart;
                }
                splx(s);
                mp = &top;
                space -= clen;
                do {
                        if (uio == NULL) {
                                /*
                                 * Data is prepackaged in "top".
                                 */
                                resid = 0;
                                if (flags & MSG_EOR)
                                        top->m_flags |= M_EOR;
                        } else do {
                                if (top == 0) {
                                        MGETHDR(m, M_WAIT, MT_DATA);
                                        mlen = MHLEN;
                                        m->m_pkthdr.len = 0;
                                        m->m_pkthdr.rcvif = (struct ifnet *)0;
                                } else {
                                        MGET(m, M_WAIT, MT_DATA);
                                        mlen = MLEN;
                                }
                                if (resid >= MINCLSIZE && space >= MCLBYTES) {
                                        MCLGET(m, M_WAIT);
                                        if ((m->m_flags & M_EXT) == 0)
                                                goto nopages;
                                        mlen = MCLBYTES;
#ifdef MAPPED_MBUFS
                                        len = min(MCLBYTES, resid);
#else
                                        if (top == 0) {
                                                len = min(MCLBYTES - max_hdr, resid);
                                                m->m_data += max_hdr;
                                        } else
                                                len = min(MCLBYTES, resid);
#endif
                                        space -= MCLBYTES;
                                } else {
nopages:
                                        len = min(min(mlen, resid), space);
                                        space -= len;
                                        /*
                                         * For datagram protocols, leave room
                                         * for protocol headers in first mbuf.
                                         */
                                        if (atomic && top == 0 && len < mlen)
                                                MH_ALIGN(m, len);
                                }
                                error = uiomove(mtod(m, caddr_t), (int)len, uio);
                                resid = uio->uio_resid;
                                m->m_len = len;
                                *mp = m;
                                top->m_pkthdr.len += len;
                                if (error)
                                        goto release;
                                mp = &m->m_next;
                                if (resid <= 0) {
                                        if (flags & MSG_EOR)
                                                top->m_flags |= M_EOR;
                                        break;
                                }
                        } while (space > 0 && atomic);
                        if (dontroute)
                                so->so_options |= SO_DONTROUTE;
                        s = splnet();                           /* XXX */
                        error = (*so->so_proto->pr_usrreq)(so,
                            (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
                            top, addr, control);
                        splx(s);
                        if (dontroute)
                                so->so_options &= ~SO_DONTROUTE;
                        clen = 0;
                        control = 0;
                        top = 0;
                        mp = &top;
                        if (error)
                                goto release;
                } while (resid && space > 0);
        } while (resid);

release:
        sbunlock(&so->so_snd);
out:
        if (top)
                m_freem(top);
        if (control)
                m_freem(control);
        return (error);
}
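#ifdef notdef
/*
 * Illustrative sketch only, not part of the original file: how a
 * system-call layer might hand a user buffer to sosend() via a uio.
 * The function and its uio setup are assumptions for the example;
 * real callers (e.g. sendit()) build the uio from the user's iovec.
 */
example_send(so, buf, len)
        struct socket *so;
        caddr_t buf;
        int len;
{
        struct uio auio;
        struct iovec aiov;

        aiov.iov_base = buf;
        aiov.iov_len = len;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_segflg = UIO_USERSPACE;
        auio.uio_rw = UIO_WRITE;
        auio.uio_offset = 0;
        auio.uio_resid = len;
        /* no address, no prepackaged chain, no control data, no flags */
        return (sosend(so, (struct mbuf *)0, &auio, (struct mbuf *)0,
            (struct mbuf *)0, 0));
}
#endif /* notdef */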
/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
soreceive(so, paddr, uio, mp0, controlp, flagsp)
        register struct socket *so;
        struct mbuf **paddr;
        struct uio *uio;
        struct mbuf **mp0;
        struct mbuf **controlp;
        int *flagsp;
{
        register struct mbuf *m, **mp;
        register int flags, len, error, s, offset;
        struct protosw *pr = so->so_proto;
        struct mbuf *nextrecord;
        int moff, type;

        mp = mp0;
        if (paddr)
                *paddr = 0;
        if (controlp)
                *controlp = 0;
        if (flagsp)
                flags = *flagsp &~ MSG_EOR;
        else
                flags = 0;
        if (flags & MSG_OOB) {
                m = m_get(M_WAIT, MT_DATA);
                error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
                    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
                if (error)
                        goto bad;
                do {
                        error = uiomove(mtod(m, caddr_t),
                            (int) min(uio->uio_resid, m->m_len), uio);
                        m = m_free(m);
                } while (uio->uio_resid && error == 0 && m);
bad:
                if (m)
                        m_freem(m);
                return (error);
        }
        if (mp)
                *mp = (struct mbuf *)0;
        if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
                (*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
                    (struct mbuf *)0, (struct mbuf *)0);

restart:
        if (error = sblock(&so->so_rcv))
                return (error);
        s = splnet();

        m = so->so_rcv.sb_mb;
        /*
         * If we have less data than requested, block awaiting more
         * (subject to any timeout) if:
         *   1. the current count is less than the low water mark, or
         *   2. MSG_WAITALL is set, and it is possible to do the entire
         *      receive operation at once if we block (resid <= hiwat).
         * If MSG_WAITALL is set but resid is larger than the receive buffer,
         * we have to do the receive in sections, and thus risk returning
         * a short count if a timeout or signal occurs after we start.
         */
        if (m == 0 || (so->so_rcv.sb_cc < uio->uio_resid &&
            (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
            ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)))) {
#ifdef DIAGNOSTIC
                if (m == 0 && so->so_rcv.sb_cc)
                        panic("receive 1");
#endif
                if (so->so_error) {
                        error = so->so_error;
                        so->so_error = 0;
                        goto release;
                }
                if (so->so_state & SS_CANTRCVMORE)
                        goto release;
                if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
                    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
                        error = ENOTCONN;
                        goto release;
                }
                if (uio->uio_resid == 0)
                        goto release;
                if (so->so_state & SS_NBIO) {
                        error = EWOULDBLOCK;
                        goto release;
                }
                sbunlock(&so->so_rcv);
                error = sbwait(&so->so_rcv);
                splx(s);
                if (error)
                        return (error);
                goto restart;
        }
        u.u_ru.ru_msgrcv++;
        nextrecord = m->m_nextpkt;
        if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
                if (m->m_type != MT_SONAME)
                        panic("receive 1a");
#endif
                if (flags & MSG_PEEK) {
                        if (paddr)
                                *paddr = m_copy(m, 0, m->m_len);
                        m = m->m_next;
                } else {
                        sbfree(&so->so_rcv, m);
                        if (paddr) {
                                *paddr = m;
                                so->so_rcv.sb_mb = m->m_next;
                                m->m_next = 0;
                                m = so->so_rcv.sb_mb;
                        } else {
                                MFREE(m, so->so_rcv.sb_mb);
                                m = so->so_rcv.sb_mb;
                        }
                }
        }
        while (m && m->m_type == MT_CONTROL && error == 0) {
                if (flags & MSG_PEEK) {
                        if (controlp)
                                *controlp = m_copy(m, 0, m->m_len);
                        m = m->m_next;
                } else {
                        sbfree(&so->so_rcv, m);
                        if (controlp) {
                                if (pr->pr_domain->dom_externalize &&
                                    mtod(m, struct cmsghdr *)->cmsg_type ==
                                    SCM_RIGHTS)
                                        error = (*pr->pr_domain->dom_externalize)(m);
                                *controlp = m;
                                so->so_rcv.sb_mb = m->m_next;
                                m->m_next = 0;
                                m = so->so_rcv.sb_mb;
                        } else {
                                MFREE(m, so->so_rcv.sb_mb);
                                m = so->so_rcv.sb_mb;
                        }
                }
                if (controlp)
                        controlp = &(*controlp)->m_next;
        }
        if (m) {
                if ((flags & MSG_PEEK) == 0)
                        m->m_nextpkt = nextrecord;
                type = m->m_type;
        }
        moff = 0;
        offset = 0;
        while (m && m->m_type == type && uio->uio_resid > 0 && error == 0) {
                if (m->m_type == MT_OOBDATA)
                        flags |= MSG_OOB;
#ifdef DIAGNOSTIC
                else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
                        panic("receive 3");
#endif
                type = m->m_type;
                so->so_state &= ~SS_RCVATMARK;
                len = uio->uio_resid;
                if (so->so_oobmark && len > so->so_oobmark - offset)
                        len = so->so_oobmark - offset;
                if (len > m->m_len - moff)
                        len = m->m_len - moff;
                /*
                 * If mp is set, just pass back the mbufs.
                 * Otherwise copy them out via the uio, then free.
                 * Sockbuf must be consistent here (sb_mb points to the
                 * current record, m_nextpkt to the next record) when we
                 * drop priority; we must note any additions to the sockbuf
                 * when we block interrupts again.
                 */
                if (mp == 0) {
                        splx(s);
                        error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
                        s = splnet();
                } else
                        uio->uio_resid -= len;
                if (len == m->m_len - moff) {
                        if (m->m_flags & M_EOR)
                                flags |= MSG_EOR;
                        if (flags & MSG_PEEK) {
                                m = m->m_next;
                                moff = 0;
                        } else {
                                nextrecord = m->m_nextpkt;
                                sbfree(&so->so_rcv, m);
                                if (mp) {
                                        *mp = m;
                                        mp = &m->m_next;
                                        so->so_rcv.sb_mb = m = m->m_next;
                                        *mp = (struct mbuf *)0;
                                } else {
                                        MFREE(m, so->so_rcv.sb_mb);
                                        m = so->so_rcv.sb_mb;
                                }
                                if (m)
                                        m->m_nextpkt = nextrecord;
                        }
                } else {
                        if (flags & MSG_PEEK)
                                moff += len;
                        else {
                                if (mp)
                                        *mp = m_copym(m, 0, len, M_WAIT);
                                m->m_data += len;
                                m->m_len -= len;
                                so->so_rcv.sb_cc -= len;
                        }
                }
                if (so->so_oobmark) {
                        if ((flags & MSG_PEEK) == 0) {
                                so->so_oobmark -= len;
                                if (so->so_oobmark == 0) {
                                        so->so_state |= SS_RCVATMARK;
                                        break;
                                }
                        } else
                                offset += len;
                }
                if (flags & MSG_EOR)
                        break;
                /*
                 * If the MSG_WAITALL flag is set (for non-atomic socket),
                 * we must not quit until "uio->uio_resid == 0" or an error
                 * termination.  If a signal/timeout occurs, return
                 * with a short count but without error.
                 * Keep sockbuf locked against other readers.
                 */
                while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
                    !sosendallatonce(so)) {
                        error = sbwait(&so->so_rcv);
                        if (error) {
                                sbunlock(&so->so_rcv);
                                splx(s);
                                return (0);
                        }
                        if (m = so->so_rcv.sb_mb)
                                nextrecord = m->m_nextpkt;
                        if (so->so_error || so->so_state & SS_CANTRCVMORE)
                                break;
                }
        }
        if ((flags & MSG_PEEK) == 0) {
                if (m == 0)
                        so->so_rcv.sb_mb = nextrecord;
                else if (pr->pr_flags & PR_ATOMIC) {
                        flags |= MSG_TRUNC;
                        (void) sbdroprecord(&so->so_rcv);
                }
                if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
                        (*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
                            (struct mbuf *)flags, (struct mbuf *)0);
        }
        if (flagsp)
                *flagsp |= flags;
release:
        sbunlock(&so->so_rcv);
        splx(s);
        return (error);
}
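#ifdef notdef
/*
 * Illustrative sketch only, not part of the original file: receiving
 * a record as an mbuf chain by passing mp0, so the uio supplies only
 * the byte count (see the header comment above).  The function and
 * its uio setup are assumptions for the example.
 */
struct mbuf *
example_recv_chain(so, maxlen)
        struct socket *so;
        int maxlen;
{
        struct uio auio;
        struct mbuf *m = 0;
        int flags = 0;

        auio.uio_iov = (struct iovec *)0;       /* unused when mp0 is set */
        auio.uio_iovcnt = 0;
        auio.uio_rw = UIO_READ;
        auio.uio_offset = 0;
        auio.uio_resid = maxlen;                /* only the count is used */
        if (soreceive(so, (struct mbuf **)0, &auio, &m,
            (struct mbuf **)0, &flags))
                return ((struct mbuf *)0);
        return (m);
}
#endif /* notdef */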
soshutdown(so, how)
        register struct socket *so;
        register int how;
{
        register struct protosw *pr = so->so_proto;

        how++;          /* map how 0,1,2 onto FREAD, FWRITE, FREAD|FWRITE */
        if (how & FREAD)
                sorflush(so);
        if (how & FWRITE)
                return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
                    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
        return (0);
}
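#ifdef notdef
/*
 * Illustrative sketch only, not part of the original file: the
 * shutdown(2) "how" values as soshutdown() sees them.  After the
 * increment above, how maps onto this era's file flags (FREAD == 0x1,
 * FWRITE == 0x2):
 *
 *      how 0 -> FREAD          discard further receives
 *      how 1 -> FWRITE         send PRU_SHUTDOWN, no more sends
 *      how 2 -> FREAD|FWRITE   both of the above
 *
 * So shutting down both halves of a connected socket is simply:
 */
example_shutdown_both(so)
        struct socket *so;
{
        return (soshutdown(so, 2));
}
#endif /* notdef */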
sorflush(so)
        register struct socket *so;
{
        register struct sockbuf *sb = &so->so_rcv;
        register struct protosw *pr = so->so_proto;
        register int s;
        struct sockbuf asb;

        sb->sb_flags |= SB_NOINTR;
        (void) sblock(sb);
        s = splimp();
        socantrcvmore(so);
        sbunlock(sb);
        asb = *sb;
        bzero((caddr_t)sb, sizeof (*sb));
        splx(s);
        if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
                (*pr->pr_domain->dom_dispose)(asb.sb_mb);
        sbrelease(&asb);
}

sosetopt(so, level, optname, m0)
        register struct socket *so;
        int level, optname;
        struct mbuf *m0;
{
        int error = 0;
        register struct mbuf *m = m0;

        if (level != SOL_SOCKET) {
                if (so->so_proto && so->so_proto->pr_ctloutput)
                        return ((*so->so_proto->pr_ctloutput)
                            (PRCO_SETOPT, so, level, optname, &m0));
                error = ENOPROTOOPT;
        } else {
                switch (optname) {

                case SO_LINGER:
                        if (m == NULL || m->m_len != sizeof (struct linger)) {
                                error = EINVAL;
                                goto bad;
                        }
                        so->so_linger = mtod(m, struct linger *)->l_linger;
                        /* fall thru...; l_onoff sets or clears SO_LINGER below */

                case SO_DEBUG:
                case SO_KEEPALIVE:
                case SO_DONTROUTE:
                case SO_USELOOPBACK:
                case SO_BROADCAST:
                case SO_REUSEADDR:
                case SO_OOBINLINE:
                        if (m == NULL || m->m_len < sizeof (int)) {
                                error = EINVAL;
                                goto bad;
                        }
                        if (*mtod(m, int *))
                                so->so_options |= optname;
                        else
                                so->so_options &= ~optname;
                        break;

                case SO_SNDBUF:
                case SO_RCVBUF:
                case SO_SNDLOWAT:
                case SO_RCVLOWAT:
                case SO_SNDTIMEO:
                case SO_RCVTIMEO:
                        if (m == NULL || m->m_len < sizeof (int)) {
                                error = EINVAL;
                                goto bad;
                        }
                        switch (optname) {

                        case SO_SNDBUF:
                        case SO_RCVBUF:
                                if (sbreserve(optname == SO_SNDBUF ?
                                    &so->so_snd : &so->so_rcv,
                                    (u_long) *mtod(m, int *)) == 0) {
                                        error = ENOBUFS;
                                        goto bad;
                                }
                                break;

                        case SO_SNDLOWAT:
                                so->so_snd.sb_lowat = *mtod(m, int *);
                                break;
                        case SO_RCVLOWAT:
                                so->so_rcv.sb_lowat = *mtod(m, int *);
                                break;
                        case SO_SNDTIMEO:
                                so->so_snd.sb_timeo = *mtod(m, int *);
                                break;
                        case SO_RCVTIMEO:
                                so->so_rcv.sb_timeo = *mtod(m, int *);
                                break;
                        }
                        break;

                default:
                        error = ENOPROTOOPT;
                        break;
                }
        }
bad:
        if (m)
                (void) m_free(m);
        return (error);
}
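#ifdef notdef
/*
 * Illustrative sketch only, not part of the original file: option
 * values arrive in an mbuf, so a kernel caller wanting SO_LINGER must
 * build one first.  The option mbuf is freed by sosetopt() itself.
 */
example_set_linger(so, secs)
        struct socket *so;
        int secs;
{
        struct mbuf *m = m_get(M_WAIT, MT_SOOPTS);
        struct linger *l;

        m->m_len = sizeof (struct linger);
        l = mtod(m, struct linger *);
        l->l_onoff = 1;
        l->l_linger = secs;
        return (sosetopt(so, SOL_SOCKET, SO_LINGER, m));
}
#endif /* notdef */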
sogetopt(so, level, optname, mp)
        register struct socket *so;
        int level, optname;
        struct mbuf **mp;
{
        register struct mbuf *m;

        if (level != SOL_SOCKET) {
                if (so->so_proto && so->so_proto->pr_ctloutput) {
                        return ((*so->so_proto->pr_ctloutput)
                            (PRCO_GETOPT, so, level, optname, mp));
                } else
                        return (ENOPROTOOPT);
        } else {
                m = m_get(M_WAIT, MT_SOOPTS);
                m->m_len = sizeof (int);

                switch (optname) {

                case SO_LINGER:
                        m->m_len = sizeof (struct linger);
                        mtod(m, struct linger *)->l_onoff =
                            so->so_options & SO_LINGER;
                        mtod(m, struct linger *)->l_linger = so->so_linger;
                        break;

                case SO_USELOOPBACK:
                case SO_DONTROUTE:
                case SO_DEBUG:
                case SO_KEEPALIVE:
                case SO_REUSEADDR:
                case SO_BROADCAST:
                case SO_OOBINLINE:
                        *mtod(m, int *) = so->so_options & optname;
                        break;

                case SO_TYPE:
                        *mtod(m, int *) = so->so_type;
                        break;

                case SO_ERROR:
                        *mtod(m, int *) = so->so_error;
                        so->so_error = 0;
                        break;

                case SO_SNDBUF:
                        *mtod(m, int *) = so->so_snd.sb_hiwat;
                        break;

                case SO_RCVBUF:
                        *mtod(m, int *) = so->so_rcv.sb_hiwat;
                        break;

                case SO_SNDLOWAT:
                        *mtod(m, int *) = so->so_snd.sb_lowat;
                        break;

                case SO_RCVLOWAT:
                        *mtod(m, int *) = so->so_rcv.sb_lowat;
                        break;

                case SO_SNDTIMEO:
                        *mtod(m, int *) = so->so_snd.sb_timeo;
                        break;

                case SO_RCVTIMEO:
                        *mtod(m, int *) = so->so_rcv.sb_timeo;
                        break;

                default:
                        (void)m_free(m);
                        return (ENOPROTOOPT);
                }
                *mp = m;
                return (0);
        }
}

sohasoutofband(so)
        register struct socket *so;
{
        struct proc *p;

        if (so->so_pgid < 0)
                gsignal(-so->so_pgid, SIGURG);
        else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
                psignal(p, SIGURG);
        if (so->so_rcv.sb_sel) {
                selwakeup(so->so_rcv.sb_sel, so->so_rcv.sb_flags & SB_COLL);
                so->so_rcv.sb_sel = 0;
                so->so_rcv.sb_flags &= ~SB_COLL;
        }
}
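#ifdef notdef
/*
 * Illustrative sketch only, not part of the original file: how a
 * protocol that has just noticed urgent data (e.g. TCP on seeing an
 * urgent pointer) might mark the socket and notify readers via
 * sohasoutofband().  "oobofs", the offset of the urgent byte in the
 * incoming stream, is an assumption for the example.
 */
example_urgent(so, oobofs)
        struct socket *so;
        int oobofs;
{
        so->so_oobmark = so->so_rcv.sb_cc + oobofs;
        if (so->so_oobmark == 0)
                so->so_state |= SS_RCVATMARK;
        sohasoutofband(so);     /* SIGURG and select() wakeup */
}
#endif /* notdef */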