/*
 * Copyright (c) 1982, 1986, 1988, 1990 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)uipc_socket.c	7.29 (Berkeley) 08/30/91
 */

#include "param.h"
#include "proc.h"
#include "file.h"
#include "malloc.h"
#include "mbuf.h"
#include "domain.h"
#include "kernel.h"
#include "protosw.h"
#include "socket.h"
#include "socketvar.h"
#include "resourcevar.h"

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
/*ARGSUSED*/
socreate(dom, aso, type, proto)
	struct socket **aso;
	register int type;
	int proto;
{
	struct proc *p = curproc;		/* XXX */
	register struct protosw *prp;
	register struct socket *so;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
	bzero((caddr_t)so, sizeof(*so));
	so->so_type = type;
	if (p->p_ucred->cr_uid == 0)
		so->so_state = SS_PRIV;
	so->so_proto = prp;
	error =
	    (*prp->pr_usrreq)(so, PRU_ATTACH,
		(struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}

sobind(so, nam)
	struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_BIND,
		(struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

solisten(so, backlog)
	register struct socket *so;
	int backlog;
{
	int s = splnet(), error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
	if (error) {
		splx(s);
		return (error);
	}
	if (so->so_q == 0)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = min(backlog, SOMAXCONN);
	splx(s);
	return (0);
}

sofree(so)
	register struct socket *so;
{

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (so->so_head) {
		if (!soqremque(so, 0) && !soqremque(so, 1))
			panic("sofree dq");
		so->so_head = 0;
	}
	sbrelease(&so->so_snd);
	sorflush(so);
	FREE(so, M_SOCKET);
}
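
/*
 * Editor's example (not part of the original source): a sketch of how a
 * kernel-level caller might drive socreate(), sobind() and solisten() to
 * set up a listening TCP socket.  The address is passed in an MT_SONAME
 * mbuf, as sockargs() in uipc_syscalls.c would build it; the sockaddr_in
 * declarations are assumed to come from ../netinet/in.h.  Guarded by
 * "notdef" since it is illustrative only.
 */
#ifdef notdef
#include "../netinet/in.h"

example_listen(asop, port)
	struct socket **asop;
	u_short port;			/* port in host byte order */
{
	register struct sockaddr_in *sin;
	struct mbuf *nam;
	int error;

	if (error = socreate(AF_INET, asop, SOCK_STREAM, 0))
		return (error);
	nam = m_get(M_WAIT, MT_SONAME);
	nam->m_len = sizeof (struct sockaddr_in);
	sin = mtod(nam, struct sockaddr_in *);
	bzero((caddr_t)sin, sizeof (*sin));
	sin->sin_family = AF_INET;
	sin->sin_port = htons(port);
	sin->sin_addr.s_addr = INADDR_ANY;
	error = sobind(*asop, nam);	/* sobind does not consume nam */
	m_freem(nam);
	if (error == 0)
		error = solisten(*asop, 5);
	if (error)
		(void) soclose(*asop);
	return (error);
}
#endif /* notdef */
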
/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	if (so->so_options & SO_ACCEPTCONN) {
		while (so->so_q0)
			(void) soabort(so->so_q0);
		while (so->so_q)
			(void) soabort(so->so_q);
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED)
				/* so_linger is seconds; tsleep takes ticks */
				if (error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, netcls,
				    so->so_linger * hz))
					break;
		}
	}
drop:
	if (so->so_pcb) {
		int error2 =
		    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
			(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}

/*
 * Must be called at splnet...
 */
soabort(so)
	struct socket *so;
{

	return (
	    (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}

soaccept(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
	    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

soconnect(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
	    (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
	splx(s);
	return (error);
}

sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
	splx(s);
	return (error);
}
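
/*
 * Editor's example (not part of the original source): a blocking
 * kernel-level connect built on soconnect(), waiting for the protocol
 * to finish the handshake much as connect() in uipc_syscalls.c does.
 * "nam" is an MT_SONAME mbuf holding the foreign address; the netcon
 * wait-message string is assumed from uipc_socket2.c.
 */
#ifdef notdef
example_connect(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s, error;

	if (error = soconnect(so, nam))
		return (error);
	s = splnet();
	while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0)
		if (error = tsleep((caddr_t)&so->so_timeo, PSOCK | PCATCH,
		    netcon, 0))
			break;
	if (error == 0) {
		error = so->so_error;
		so->so_error = 0;
	}
	splx(s);
	return (error);
}
#endif /* notdef */
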
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
sosend(so, addr, uio, top, control, flags)
	register struct socket *so;
	struct mbuf *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
{
	struct proc *p = curproc;		/* XXX */
	struct mbuf **mp;
	register struct mbuf *m;
	register long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	p->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	if (error = sblock(&so->so_snd))
		goto out;
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error)
			snderr(so->so_error);
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((atomic && resid > so->so_snd.sb_hiwat) ||
			    clen > so->so_snd.sb_hiwat)
				snderr(EMSGSIZE);
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (top == 0) {
					MGETHDR(m, M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = (struct ifnet *)0;
				} else {
					MGET(m, M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				if (resid >= MINCLSIZE && space >= MCLBYTES) {
					MCLGET(m, M_WAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
#ifdef	MAPPED_MBUFS
					len = min(MCLBYTES, resid);
#else
					if (top == 0) {
						len = min(MCLBYTES - max_hdr, resid);
						m->m_data += max_hdr;
					} else
						len = min(MCLBYTES, resid);
#endif
					space -= MCLBYTES;
				} else {
nopages:
					len = min(min(mlen, resid), space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				error = uiomove(mtod(m, caddr_t), (int)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			s = splnet();				/* XXX */
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, addr, control);
			splx(s);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = 0;
			top = 0;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
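
/*
 * Editor's example (not part of the original source): sending a small
 * datagram from the kernel by prepackaging it in "top" instead of
 * supplying a uio.  sosend() takes the length from m_pkthdr.len and
 * frees the chain whether or not the send succeeds.
 */
#ifdef notdef
example_send(so, nam, buf, len)
	struct socket *so;
	struct mbuf *nam;
	caddr_t buf;
	register int len;
{
	register struct mbuf *top;

	if (len > MHLEN)	/* keep the sketch to a single mbuf */
		return (EMSGSIZE);
	MGETHDR(top, M_WAIT, MT_DATA);
	top->m_pkthdr.len = len;
	top->m_pkthdr.rcvif = (struct ifnet *)0;
	top->m_len = len;
	bcopy(buf, mtod(top, caddr_t), (unsigned)len);
	return (sosend(so, nam, (struct uio *)0, top, (struct mbuf *)0, 0));
}
#endif /* notdef */
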
/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
soreceive(so, paddr, uio, mp0, controlp, flagsp)
	register struct socket *so;
	struct mbuf **paddr;
	struct uio *uio;
	struct mbuf **mp0;
	struct mbuf **controlp;
	int *flagsp;
{
	struct proc *p = curproc;		/* XXX */
	register struct mbuf *m, **mp;
	register int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type;

	mp = mp0;
	if (paddr)
		*paddr = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
		    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
		    (struct mbuf *)0, (struct mbuf *)0);

restart:
	if (error = sblock(&so->so_rcv))
		return (error);
	s = splnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	while (m == 0 || so->so_rcv.sb_cc < uio->uio_resid &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0) {
#ifdef DIAGNOSTIC
		if (m == 0 && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m)
				break;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				break;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if (so->so_state & SS_NBIO) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
dontblock:
	p->p_stats->p_ru.ru_msgrcv++;
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (paddr) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
		if (controlp)
			controlp = &(*controlp)->m_next;
	}
	if (m) {
		if ((flags & MSG_PEEK) == 0)
			m->m_nextpkt = nextrecord;
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * The sockbuf must be kept consistent (sb_mb points to
		 * the current mbuf, its m_nextpkt to the next record)
		 * while we drop priority; we must note any additions
		 * to the sockbuf when we block interrupts again.
		 */
		if (mp == 0) {
			splx(s);
			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
			s = splnet();
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else
				offset += len;
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !sosendallatonce(so)) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			if (m = so->so_rcv.sb_mb)
				nextrecord = m->m_nextpkt;
		}
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		else if (pr->pr_flags & PR_ATOMIC) {
			flags |= MSG_TRUNC;
			(void) sbdroprecord(&so->so_rcv);
		}
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
			    (struct mbuf *)flags, (struct mbuf *)0);
	}
	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}
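
/*
 * Editor's example (not part of the original source): reading one
 * record into a kernel buffer through soreceive(), using a uio marked
 * UIO_SYSSPACE so uiomove() copies within the kernel.  The uio_procp
 * field is assumed present, as in this era's sys/uio.h.
 */
#ifdef notdef
example_recv(so, buf, buflen, lenp)
	register struct socket *so;
	caddr_t buf;
	int buflen, *lenp;
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_base = buf;
	aiov.iov_len = buflen;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;
	auio.uio_procp = (struct proc *)0;
	error = soreceive(so, (struct mbuf **)0, &auio, (struct mbuf **)0,
	    (struct mbuf **)0, (int *)0);
	if (error == 0)
		*lenp = buflen - auio.uio_resid;
	return (error);
}
#endif /* notdef */
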
soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;

	how++;
	if (how & FREAD)
		sorflush(so);
	if (how & FWRITE)
		return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
		    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
	return (0);
}

sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb);
}
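
/*
 * Editor's example (not part of the original source): soshutdown()'s
 * "how" is the 0/1/2 argument of shutdown(2); incrementing it maps it
 * onto the FREAD|FWRITE bits, so 0 flushes the receive side, 1 shuts
 * down sends, and 2 does both.
 */
#ifdef notdef
example_halfclose(so)
	struct socket *so;
{

	/* how = 1: 1 + 1 == FWRITE, so further sends are disallowed */
	return (soshutdown(so, 1));
}
#endif /* notdef */
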
sosetopt(so, level, optname, m0)
	register struct socket *so;
	int level, optname;
	struct mbuf *m0;
{
	int error = 0;
	register struct mbuf *m = m0;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger)) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* fall thru... */

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_OOBINLINE:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			switch (optname) {

			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(optname == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv,
				    (u_long) *mtod(m, int *)) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat = *mtod(m, int *);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat = *mtod(m, int *);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval *tv;
			short val;

			if (m == NULL || m->m_len < sizeof (*tv)) {
				error = EINVAL;
				goto bad;
			}
			tv = mtod(m, struct timeval *);
			if (tv->tv_sec > SHRT_MAX / hz - hz) {
				error = EDOM;
				goto bad;
			}
			val = tv->tv_sec * hz + tv->tv_usec / tick;

			switch (optname) {

			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		    }

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
			(void) ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}
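
/*
 * Editor's example (not part of the original source): installing a
 * five-second receive timeout from inside the kernel.  The timeval is
 * converted to clock ticks (tv_sec * hz + tv_usec / tick) for sb_timeo,
 * and sosetopt() consumes the option mbuf, so the caller must not free it.
 */
#ifdef notdef
example_rcvtimeo(so)
	struct socket *so;
{
	register struct mbuf *m;
	register struct timeval *tv;

	m = m_get(M_WAIT, MT_SOOPTS);
	m->m_len = sizeof (struct timeval);
	tv = mtod(m, struct timeval *);
	tv->tv_sec = 5;
	tv->tv_usec = 0;
	return (sosetopt(so, SOL_SOCKET, SO_RCVTIMEO, m));
}
#endif /* notdef */
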
sogetopt(so, level, optname, mp)
	register struct socket *so;
	int level, optname;
	struct mbuf **mp;
{
	register struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_BROADCAST:
		case SO_OOBINLINE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			int val = (optname == SO_SNDTIMEO ?
			     so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			m->m_len = sizeof(struct timeval);
			mtod(m, struct timeval *)->tv_sec = val / hz;
			mtod(m, struct timeval *)->tv_usec =
			    (val % hz) * tick;	/* residual ticks to usec */
			break;
		    }

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}
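
/*
 * Editor's example (not part of the original source): querying an
 * option.  sogetopt() returns a freshly allocated MT_SOOPTS mbuf
 * that the caller is responsible for freeing.
 */
#ifdef notdef
example_gettype(so, typep)
	struct socket *so;
	int *typep;
{
	struct mbuf *m;
	int error;

	if (error = sogetopt(so, SOL_SOCKET, SO_TYPE, &m))
		return (error);
	*typep = *mtod(m, int *);
	(void) m_free(m);
	return (0);
}
#endif /* notdef */
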
sohasoutofband(so)
	register struct socket *so;
{
	struct proc *p;

	if (so->so_pgid < 0)
		gsignal(-so->so_pgid, SIGURG);
	else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
		psignal(p, SIGURG);
	if (so->so_rcv.sb_sel) {
		selwakeup(so->so_rcv.sb_sel, so->so_rcv.sb_flags & SB_COLL);
		so->so_rcv.sb_sel = 0;
		so->so_rcv.sb_flags &= ~SB_COLL;
	}
}
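
/*
 * Editor's note (not part of the original source): sohasoutofband() is
 * called from a protocol's input path when urgent data arrives, after
 * the out-of-band mark has been recorded, roughly along these lines
 * (a paraphrase of what tcp_input.c does; "urgent_offset" is a
 * hypothetical name):
 *
 *	so->so_oobmark = so->so_rcv.sb_cc + urgent_offset;
 *	if (so->so_oobmark == 0)
 *		so->so_state |= SS_RCVATMARK;
 *	sohasoutofband(so);
 *
 * A process that has registered so_pgid (SIOCSPGRP/FIOSETOWN, handled
 * in sys_socket.c) then receives SIGURG, and selecting readers are woken.
 */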