1 /* 2 * Copyright (c) 1982, 1986, 1988, 1990 Regents of the University of California. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms are permitted 6 * provided that the above copyright notice and this paragraph are 7 * duplicated in all such forms and that any documentation, 8 * advertising materials, and other materials related to such 9 * distribution and use acknowledge that the software was developed 10 * by the University of California, Berkeley. The name of the 11 * University may not be used to endorse or promote products derived 12 * from this software without specific prior written permission. 13 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 14 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 15 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 16 * 17 * @(#)uipc_socket.c 7.21 (Berkeley) 06/28/90 18 */ 19 20 #include "param.h" 21 #include "user.h" 22 #include "proc.h" 23 #include "file.h" 24 #include "malloc.h" 25 #include "mbuf.h" 26 #include "domain.h" 27 #include "protosw.h" 28 #include "socket.h" 29 #include "socketvar.h" 30 31 /* 32 * Socket operation routines. 33 * These routines are called by the routines in 34 * sys_socket.c or from a system process, and 35 * implement the semantics of socket operations by 36 * switching out to the protocol specific routines. 37 * 38 * TODO: 39 * test socketpair 40 * clean up async 41 * out-of-band is a kludge 42 */ 43 /*ARGSUSED*/ 44 socreate(dom, aso, type, proto) 45 struct socket **aso; 46 register int type; 47 int proto; 48 { 49 register struct protosw *prp; 50 register struct socket *so; 51 register int error; 52 53 if (proto) 54 prp = pffindproto(dom, proto, type); 55 else 56 prp = pffindtype(dom, type); 57 if (prp == 0) 58 return (EPROTONOSUPPORT); 59 if (prp->pr_type != type) 60 return (EPROTOTYPE); 61 MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT); 62 bzero((caddr_t)so, sizeof(*so)); 63 so->so_type = type; 64 if (u.u_uid == 0) 65 so->so_state = SS_PRIV; 66 so->so_proto = prp; 67 error = 68 (*prp->pr_usrreq)(so, PRU_ATTACH, 69 (struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0); 70 if (error) { 71 so->so_state |= SS_NOFDREF; 72 sofree(so); 73 return (error); 74 } 75 *aso = so; 76 return (0); 77 } 78 79 sobind(so, nam) 80 struct socket *so; 81 struct mbuf *nam; 82 { 83 int s = splnet(); 84 int error; 85 86 error = 87 (*so->so_proto->pr_usrreq)(so, PRU_BIND, 88 (struct mbuf *)0, nam, (struct mbuf *)0); 89 splx(s); 90 return (error); 91 } 92 93 solisten(so, backlog) 94 register struct socket *so; 95 int backlog; 96 { 97 int s = splnet(), error; 98 99 error = 100 (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, 101 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0); 102 if (error) { 103 splx(s); 104 return (error); 105 } 106 if (so->so_q == 0) 107 so->so_options |= SO_ACCEPTCONN; 108 if (backlog < 0) 109 backlog = 0; 110 so->so_qlimit = min(backlog, SOMAXCONN); 111 splx(s); 112 return (0); 113 } 114 115 sofree(so) 116 register struct socket *so; 117 { 118 119 if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) 120 return; 121 if (so->so_head) { 122 if (!soqremque(so, 0) && !soqremque(so, 1)) 123 panic("sofree dq"); 124 so->so_head = 0; 125 } 126 sbrelease(&so->so_snd); 127 sorflush(so); 128 FREE(so, M_SOCKET); 129 } 130 131 /* 132 * Close a socket on last file table reference removal. 133 * Initiate disconnect if connected. 134 * Free socket when disconnect complete. 135 */ 136 soclose(so) 137 register struct socket *so; 138 { 139 int s = splnet(); /* conservative */ 140 int error = 0; 141 142 if (so->so_options & SO_ACCEPTCONN) { 143 while (so->so_q0) 144 (void) soabort(so->so_q0); 145 while (so->so_q) 146 (void) soabort(so->so_q); 147 } 148 if (so->so_pcb == 0) 149 goto discard; 150 if (so->so_state & SS_ISCONNECTED) { 151 if ((so->so_state & SS_ISDISCONNECTING) == 0) { 152 error = sodisconnect(so); 153 if (error) 154 goto drop; 155 } 156 if (so->so_options & SO_LINGER) { 157 if ((so->so_state & SS_ISDISCONNECTING) && 158 (so->so_state & SS_NBIO)) 159 goto drop; 160 while (so->so_state & SS_ISCONNECTED) 161 if (error = tsleep((caddr_t)&so->so_timeo, 162 PSOCK | PCATCH, netcls, so->so_linger)) 163 break; 164 } 165 } 166 drop: 167 if (so->so_pcb) { 168 int error2 = 169 (*so->so_proto->pr_usrreq)(so, PRU_DETACH, 170 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0); 171 if (error == 0) 172 error = error2; 173 } 174 discard: 175 if (so->so_state & SS_NOFDREF) 176 panic("soclose: NOFDREF"); 177 so->so_state |= SS_NOFDREF; 178 sofree(so); 179 splx(s); 180 return (error); 181 } 182 183 /* 184 * Must be called at splnet... 185 */ 186 soabort(so) 187 struct socket *so; 188 { 189 190 return ( 191 (*so->so_proto->pr_usrreq)(so, PRU_ABORT, 192 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0)); 193 } 194 195 soaccept(so, nam) 196 register struct socket *so; 197 struct mbuf *nam; 198 { 199 int s = splnet(); 200 int error; 201 202 if ((so->so_state & SS_NOFDREF) == 0) 203 panic("soaccept: !NOFDREF"); 204 so->so_state &= ~SS_NOFDREF; 205 error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, 206 (struct mbuf *)0, nam, (struct mbuf *)0); 207 splx(s); 208 return (error); 209 } 210 211 soconnect(so, nam) 212 register struct socket *so; 213 struct mbuf *nam; 214 { 215 int s; 216 int error; 217 218 if (so->so_options & SO_ACCEPTCONN) 219 return (EOPNOTSUPP); 220 s = splnet(); 221 /* 222 * If protocol is connection-based, can only connect once. 223 * Otherwise, if connected, try to disconnect first. 224 * This allows user to disconnect by connecting to, e.g., 225 * a null address. 226 */ 227 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && 228 ((so->so_proto->pr_flags & PR_CONNREQUIRED) || 229 (error = sodisconnect(so)))) 230 error = EISCONN; 231 else 232 error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT, 233 (struct mbuf *)0, nam, (struct mbuf *)0); 234 splx(s); 235 return (error); 236 } 237 238 soconnect2(so1, so2) 239 register struct socket *so1; 240 struct socket *so2; 241 { 242 int s = splnet(); 243 int error; 244 245 error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, 246 (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0); 247 splx(s); 248 return (error); 249 } 250 251 sodisconnect(so) 252 register struct socket *so; 253 { 254 int s = splnet(); 255 int error; 256 257 if ((so->so_state & SS_ISCONNECTED) == 0) { 258 error = ENOTCONN; 259 goto bad; 260 } 261 if (so->so_state & SS_ISDISCONNECTING) { 262 error = EALREADY; 263 goto bad; 264 } 265 error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, 266 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0); 267 bad: 268 splx(s); 269 return (error); 270 } 271 272 /* 273 * Send on a socket. 274 * If send must go all at once and message is larger than 275 * send buffering, then hard error. 276 * Lock against other senders. 277 * If must go all at once and not enough room now, then 278 * inform user that this would block and do nothing. 279 * Otherwise, if nonblocking, send as much as possible. 280 * The data to be sent is described by "uio" if nonzero, 281 * otherwise by the mbuf chain "top" (which must be null 282 * if uio is not). Data provided in mbuf chain must be small 283 * enough to send all at once. 284 * 285 * Returns nonzero on error, timeout or signal; callers 286 * must check for short counts if EINTR/ERESTART are returned. 287 * Data and control buffers are freed on return. 288 */ 289 sosend(so, addr, uio, top, control, flags) 290 register struct socket *so; 291 struct mbuf *addr; 292 struct uio *uio; 293 struct mbuf *top; 294 struct mbuf *control; 295 int flags; 296 { 297 struct mbuf **mp; 298 register struct mbuf *m; 299 register long space, len, resid; 300 int clen = 0, error, s, dontroute, mlen; 301 int atomic = sosendallatonce(so) || top; 302 303 if (uio) 304 resid = uio->uio_resid; 305 else 306 resid = top->m_pkthdr.len; 307 dontroute = 308 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && 309 (so->so_proto->pr_flags & PR_ATOMIC); 310 u.u_ru.ru_msgsnd++; 311 if (control) 312 clen = control->m_len; 313 #define snderr(errno) { error = errno; splx(s); goto release; } 314 315 restart: 316 if (error = sblock(&so->so_snd)) 317 goto out; 318 do { 319 s = splnet(); 320 if (so->so_state & SS_CANTSENDMORE) 321 snderr(EPIPE); 322 if (so->so_error) 323 snderr(so->so_error); 324 if ((so->so_state & SS_ISCONNECTED) == 0) { 325 if (so->so_proto->pr_flags & PR_CONNREQUIRED) { 326 if ((so->so_state & SS_ISCONFIRMING) == 0) 327 snderr(ENOTCONN); 328 } else if (addr == 0) 329 snderr(EDESTADDRREQ); 330 } 331 space = sbspace(&so->so_snd); 332 if (flags & MSG_OOB) 333 space += 1024; 334 if (space < resid + clen && 335 (atomic || space < so->so_snd.sb_lowat || space < clen)) { 336 if (atomic && resid > so->so_snd.sb_hiwat || 337 clen > so->so_snd.sb_hiwat) 338 snderr(EMSGSIZE); 339 if (so->so_state & SS_NBIO) 340 snderr(EWOULDBLOCK); 341 sbunlock(&so->so_snd); 342 error = sbwait(&so->so_snd); 343 splx(s); 344 if (error) 345 goto out; 346 goto restart; 347 } 348 splx(s); 349 mp = ⊤ 350 space -= clen; 351 do { 352 if (uio == NULL) { 353 /* 354 * Data is prepackaged in "top". 355 */ 356 resid = 0; 357 if (flags & MSG_EOR) 358 top->m_flags |= M_EOR; 359 } else do { 360 if (top == 0) { 361 MGETHDR(m, M_WAIT, MT_DATA); 362 mlen = MHLEN; 363 m->m_pkthdr.len = 0; 364 m->m_pkthdr.rcvif = (struct ifnet *)0; 365 } else { 366 MGET(m, M_WAIT, MT_DATA); 367 mlen = MLEN; 368 } 369 if (resid >= MINCLSIZE && space >= MCLBYTES) { 370 MCLGET(m, M_WAIT); 371 if ((m->m_flags & M_EXT) == 0) 372 goto nopages; 373 mlen = MCLBYTES; 374 #ifdef MAPPED_MBUFS 375 len = min(MCLBYTES, resid); 376 #else 377 if (top == 0) { 378 len = min(MCLBYTES - max_hdr, resid); 379 m->m_data += max_hdr; 380 } 381 #endif 382 space -= MCLBYTES; 383 } else { 384 nopages: 385 len = min(min(mlen, resid), space); 386 space -= len; 387 /* 388 * For datagram protocols, leave room 389 * for protocol headers in first mbuf. 390 */ 391 if (atomic && top == 0 && len < mlen) 392 MH_ALIGN(m, len); 393 } 394 error = uiomove(mtod(m, caddr_t), len, uio); 395 resid = uio->uio_resid; 396 m->m_len = len; 397 *mp = m; 398 top->m_pkthdr.len += len; 399 if (error) 400 goto release; 401 mp = &m->m_next; 402 if (resid <= 0) { 403 if (flags & MSG_EOR) 404 top->m_flags |= M_EOR; 405 break; 406 } 407 } while (space > 0 && atomic); 408 if (dontroute) 409 so->so_options |= SO_DONTROUTE; 410 s = splnet(); /* XXX */ 411 error = (*so->so_proto->pr_usrreq)(so, 412 (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND, 413 top, addr, control); 414 splx(s); 415 if (dontroute) 416 so->so_options &= ~SO_DONTROUTE; 417 clen = 0; 418 control = 0; 419 top = 0; 420 mp = ⊤ 421 if (error) 422 goto release; 423 } while (resid && space > 0); 424 } while (resid); 425 426 release: 427 sbunlock(&so->so_snd); 428 out: 429 if (top) 430 m_freem(top); 431 if (control) 432 m_freem(control); 433 return (error); 434 } 435 436 /* 437 * Implement receive operations on a socket. 438 * We depend on the way that records are added to the sockbuf 439 * by sbappend*. In particular, each record (mbufs linked through m_next) 440 * must begin with an address if the protocol so specifies, 441 * followed by an optional mbuf or mbufs containing ancillary data, 442 * and then zero or more mbufs of data. 443 * In order to avoid blocking network interrupts for the entire time here, 444 * we splx() while doing the actual copy to user space. 445 * Although the sockbuf is locked, new data may still be appended, 446 * and thus we must maintain consistency of the sockbuf during that time. 447 * 448 * The caller may receive the data as a single mbuf chain by supplying 449 * an mbuf **mp0 for use in returning the chain. The uio is then used 450 * only for the count in uio_resid. 451 */ 452 soreceive(so, paddr, uio, mp0, controlp, flagsp) 453 register struct socket *so; 454 struct mbuf **paddr; 455 struct uio *uio; 456 struct mbuf **mp0; 457 struct mbuf **controlp; 458 int *flagsp; 459 { 460 register struct mbuf *m, **mp; 461 register int flags, len, error, s, offset; 462 struct protosw *pr = so->so_proto; 463 struct mbuf *nextrecord; 464 int moff, type; 465 466 mp = mp0; 467 if (paddr) 468 *paddr = 0; 469 if (controlp) 470 *controlp = 0; 471 if (flagsp) 472 flags = *flagsp &~ MSG_EOR; 473 else 474 flags = 0; 475 if (flags & MSG_OOB) { 476 m = m_get(M_WAIT, MT_DATA); 477 error = (*pr->pr_usrreq)(so, PRU_RCVOOB, 478 m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0); 479 if (error) 480 goto bad; 481 do { 482 error = uiomove(mtod(m, caddr_t), 483 (int) min(uio->uio_resid, m->m_len), uio); 484 m = m_free(m); 485 } while (uio->uio_resid && error == 0 && m); 486 bad: 487 if (m) 488 m_freem(m); 489 return (error); 490 } 491 if (mp) 492 *mp = (struct mbuf *)0; 493 if (so->so_state & SS_ISCONFIRMING && uio->uio_resid) 494 (*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0, 495 (struct mbuf *)0, (struct mbuf *)0); 496 497 restart: 498 if (error = sblock(&so->so_rcv)) 499 return (error); 500 s = splnet(); 501 502 m = so->so_rcv.sb_mb; 503 /* 504 * If we have less data than requested, block awaiting more 505 * (subject to any timeout) if: 506 * 1. the current count is less than the low water mark, or 507 * 2. MSG_WAITALL is set, and it is possible to do the entire 508 * receive operation at once if we block (resid <= hiwat). 509 * If MSG_WAITALL is set but resid is larger than the receive buffer, 510 * we have to do the receive in sections, and thus risk returning 511 * a short count if a timeout or signal occurs after we start. 512 */ 513 if (m == 0 || so->so_rcv.sb_cc < uio->uio_resid && 514 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || 515 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat))) { 516 #ifdef DIAGNOSTIC 517 if (m == 0 && so->so_rcv.sb_cc) 518 panic("receive 1"); 519 #endif 520 if (so->so_error) { 521 error = so->so_error; 522 so->so_error = 0; 523 goto release; 524 } 525 if (so->so_state & SS_CANTRCVMORE) 526 goto release; 527 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 528 (so->so_proto->pr_flags & PR_CONNREQUIRED)) { 529 error = ENOTCONN; 530 goto release; 531 } 532 if (uio->uio_resid == 0) 533 goto release; 534 if (so->so_state & SS_NBIO) { 535 error = EWOULDBLOCK; 536 goto release; 537 } 538 sbunlock(&so->so_rcv); 539 error = sbwait(&so->so_rcv); 540 splx(s); 541 if (error) 542 return (error); 543 goto restart; 544 } 545 u.u_ru.ru_msgrcv++; 546 nextrecord = m->m_nextpkt; 547 if (pr->pr_flags & PR_ADDR) { 548 #ifdef DIAGNOSTIC 549 if (m->m_type != MT_SONAME) 550 panic("receive 1a"); 551 #endif 552 if (flags & MSG_PEEK) { 553 if (paddr) 554 *paddr = m_copy(m, 0, m->m_len); 555 m = m->m_next; 556 } else { 557 sbfree(&so->so_rcv, m); 558 if (paddr) { 559 *paddr = m; 560 so->so_rcv.sb_mb = m->m_next; 561 m->m_next = 0; 562 m = so->so_rcv.sb_mb; 563 } else { 564 MFREE(m, so->so_rcv.sb_mb); 565 m = so->so_rcv.sb_mb; 566 } 567 } 568 } 569 while (m && m->m_type == MT_CONTROL && error == 0) { 570 if (flags & MSG_PEEK) { 571 if (controlp) 572 *controlp = m_copy(m, 0, m->m_len); 573 m = m->m_next; 574 } else { 575 sbfree(&so->so_rcv, m); 576 if (controlp) { 577 if (pr->pr_domain->dom_externalize && 578 mtod(m, struct cmsghdr *)->cmsg_type == 579 SCM_RIGHTS) 580 error = (*pr->pr_domain->dom_externalize)(m); 581 *controlp = m; 582 so->so_rcv.sb_mb = m->m_next; 583 m->m_next = 0; 584 m = so->so_rcv.sb_mb; 585 } else { 586 MFREE(m, so->so_rcv.sb_mb); 587 m = so->so_rcv.sb_mb; 588 } 589 } 590 if (controlp) 591 controlp = &(*controlp)->m_next; 592 } 593 if (m) { 594 if ((flags & MSG_PEEK) == 0) 595 m->m_nextpkt = nextrecord; 596 type = m->m_type; 597 } 598 moff = 0; 599 offset = 0; 600 while (m && m->m_type == type && uio->uio_resid > 0 && error == 0) { 601 if (m->m_type == MT_OOBDATA) 602 flags |= MSG_OOB; 603 #ifdef DIAGNOSTIC 604 else if (m->m_type != MT_DATA && m->m_type != MT_HEADER) 605 panic("receive 3"); 606 #endif 607 type = m->m_type; 608 so->so_state &= ~SS_RCVATMARK; 609 len = uio->uio_resid; 610 if (so->so_oobmark && len > so->so_oobmark - offset) 611 len = so->so_oobmark - offset; 612 if (len > m->m_len - moff) 613 len = m->m_len - moff; 614 /* 615 * If mp is set, just pass back the mbufs. 616 * Otherwise copy them out via the uio, then free. 617 * Sockbuf must be consistent here (points to current mbuf, 618 * it points to next record) when we drop priority; 619 * we must note any additions to the sockbuf when we 620 * block interrupts again. 621 */ 622 if (mp == 0) { 623 splx(s); 624 error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio); 625 s = splnet(); 626 } else 627 uio->uio_resid -= len; 628 if (len == m->m_len - moff) { 629 if (m->m_flags & M_EOR) 630 flags |= MSG_EOR; 631 if (flags & MSG_PEEK) { 632 m = m->m_next; 633 moff = 0; 634 } else { 635 nextrecord = m->m_nextpkt; 636 sbfree(&so->so_rcv, m); 637 if (mp) { 638 *mp = m; 639 mp = &m->m_next; 640 so->so_rcv.sb_mb = m = m->m_next; 641 *mp = (struct mbuf *)0; 642 } else { 643 MFREE(m, so->so_rcv.sb_mb); 644 m = so->so_rcv.sb_mb; 645 } 646 if (m) 647 m->m_nextpkt = nextrecord; 648 } 649 } else { 650 if (flags & MSG_PEEK) 651 moff += len; 652 else { 653 if (mp) 654 *mp = m_copym(m, 0, len, M_WAIT); 655 m->m_data += len; 656 m->m_len -= len; 657 so->so_rcv.sb_cc -= len; 658 } 659 } 660 if (so->so_oobmark) { 661 if ((flags & MSG_PEEK) == 0) { 662 so->so_oobmark -= len; 663 if (so->so_oobmark == 0) { 664 so->so_state |= SS_RCVATMARK; 665 break; 666 } 667 } else 668 offset += len; 669 } 670 if (flags & MSG_EOR) 671 break; 672 /* 673 * If the MSG_WAITALL flag is set (for non-atomic socket), 674 * we must not quit until "uio->uio_resid == 0" or an error 675 * termination. If a signal/timeout occurs, return 676 * with a short count but without error. 677 * Keep sockbuf locked against other readers. 678 */ 679 while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 && 680 !sosendallatonce(so)) { 681 error = sbwait(&so->so_rcv); 682 if (error) { 683 sbunlock(&so->so_rcv); 684 splx(s); 685 return (0); 686 } 687 if (m = so->so_rcv.sb_mb) 688 nextrecord = m->m_nextpkt; 689 if (so->so_error || so->so_state & SS_CANTRCVMORE) 690 break; 691 continue; 692 } 693 } 694 if ((flags & MSG_PEEK) == 0) { 695 if (m == 0) 696 so->so_rcv.sb_mb = nextrecord; 697 else if (pr->pr_flags & PR_ATOMIC) { 698 flags |= MSG_TRUNC; 699 (void) sbdroprecord(&so->so_rcv); 700 } 701 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) 702 (*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0, 703 (struct mbuf *)flags, (struct mbuf *)0, 704 (struct mbuf *)0); 705 } 706 if (flagsp) 707 *flagsp |= flags; 708 release: 709 sbunlock(&so->so_rcv); 710 splx(s); 711 return (error); 712 } 713 714 soshutdown(so, how) 715 register struct socket *so; 716 register int how; 717 { 718 register struct protosw *pr = so->so_proto; 719 720 how++; 721 if (how & FREAD) 722 sorflush(so); 723 if (how & FWRITE) 724 return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN, 725 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0)); 726 return (0); 727 } 728 729 sorflush(so) 730 register struct socket *so; 731 { 732 register struct sockbuf *sb = &so->so_rcv; 733 register struct protosw *pr = so->so_proto; 734 register int s; 735 struct sockbuf asb; 736 737 sb->sb_flags |= SB_NOINTR; 738 (void) sblock(sb); 739 s = splimp(); 740 socantrcvmore(so); 741 sbunlock(sb); 742 asb = *sb; 743 bzero((caddr_t)sb, sizeof (*sb)); 744 splx(s); 745 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) 746 (*pr->pr_domain->dom_dispose)(asb.sb_mb); 747 sbrelease(&asb); 748 } 749 750 sosetopt(so, level, optname, m0) 751 register struct socket *so; 752 int level, optname; 753 struct mbuf *m0; 754 { 755 int error = 0; 756 register struct mbuf *m = m0; 757 758 if (level != SOL_SOCKET) { 759 if (so->so_proto && so->so_proto->pr_ctloutput) 760 return ((*so->so_proto->pr_ctloutput) 761 (PRCO_SETOPT, so, level, optname, &m0)); 762 error = ENOPROTOOPT; 763 } else { 764 switch (optname) { 765 766 case SO_LINGER: 767 if (m == NULL || m->m_len != sizeof (struct linger)) { 768 error = EINVAL; 769 goto bad; 770 } 771 so->so_linger = mtod(m, struct linger *)->l_linger; 772 /* fall thru... */ 773 774 case SO_DEBUG: 775 case SO_KEEPALIVE: 776 case SO_DONTROUTE: 777 case SO_USELOOPBACK: 778 case SO_BROADCAST: 779 case SO_REUSEADDR: 780 case SO_OOBINLINE: 781 if (m == NULL || m->m_len < sizeof (int)) { 782 error = EINVAL; 783 goto bad; 784 } 785 if (*mtod(m, int *)) 786 so->so_options |= optname; 787 else 788 so->so_options &= ~optname; 789 break; 790 791 case SO_SNDBUF: 792 case SO_RCVBUF: 793 case SO_SNDLOWAT: 794 case SO_RCVLOWAT: 795 case SO_SNDTIMEO: 796 case SO_RCVTIMEO: 797 if (m == NULL || m->m_len < sizeof (int)) { 798 error = EINVAL; 799 goto bad; 800 } 801 switch (optname) { 802 803 case SO_SNDBUF: 804 case SO_RCVBUF: 805 if (sbreserve(optname == SO_SNDBUF ? 806 &so->so_snd : &so->so_rcv, 807 (u_long) *mtod(m, int *)) == 0) { 808 error = ENOBUFS; 809 goto bad; 810 } 811 break; 812 813 case SO_SNDLOWAT: 814 so->so_snd.sb_lowat = *mtod(m, int *); 815 break; 816 case SO_RCVLOWAT: 817 so->so_rcv.sb_lowat = *mtod(m, int *); 818 break; 819 case SO_SNDTIMEO: 820 so->so_snd.sb_timeo = *mtod(m, int *); 821 break; 822 case SO_RCVTIMEO: 823 so->so_rcv.sb_timeo = *mtod(m, int *); 824 break; 825 } 826 break; 827 828 default: 829 error = ENOPROTOOPT; 830 break; 831 } 832 } 833 bad: 834 if (m) 835 (void) m_free(m); 836 return (error); 837 } 838 839 sogetopt(so, level, optname, mp) 840 register struct socket *so; 841 int level, optname; 842 struct mbuf **mp; 843 { 844 register struct mbuf *m; 845 846 if (level != SOL_SOCKET) { 847 if (so->so_proto && so->so_proto->pr_ctloutput) { 848 return ((*so->so_proto->pr_ctloutput) 849 (PRCO_GETOPT, so, level, optname, mp)); 850 } else 851 return (ENOPROTOOPT); 852 } else { 853 m = m_get(M_WAIT, MT_SOOPTS); 854 m->m_len = sizeof (int); 855 856 switch (optname) { 857 858 case SO_LINGER: 859 m->m_len = sizeof (struct linger); 860 mtod(m, struct linger *)->l_onoff = 861 so->so_options & SO_LINGER; 862 mtod(m, struct linger *)->l_linger = so->so_linger; 863 break; 864 865 case SO_USELOOPBACK: 866 case SO_DONTROUTE: 867 case SO_DEBUG: 868 case SO_KEEPALIVE: 869 case SO_REUSEADDR: 870 case SO_BROADCAST: 871 case SO_OOBINLINE: 872 *mtod(m, int *) = so->so_options & optname; 873 break; 874 875 case SO_TYPE: 876 *mtod(m, int *) = so->so_type; 877 break; 878 879 case SO_ERROR: 880 *mtod(m, int *) = so->so_error; 881 so->so_error = 0; 882 break; 883 884 case SO_SNDBUF: 885 *mtod(m, int *) = so->so_snd.sb_hiwat; 886 break; 887 888 case SO_RCVBUF: 889 *mtod(m, int *) = so->so_rcv.sb_hiwat; 890 break; 891 892 case SO_SNDLOWAT: 893 *mtod(m, int *) = so->so_snd.sb_lowat; 894 break; 895 896 case SO_RCVLOWAT: 897 *mtod(m, int *) = so->so_rcv.sb_lowat; 898 break; 899 900 case SO_SNDTIMEO: 901 *mtod(m, int *) = so->so_snd.sb_timeo; 902 break; 903 904 case SO_RCVTIMEO: 905 *mtod(m, int *) = so->so_rcv.sb_timeo; 906 break; 907 908 default: 909 (void)m_free(m); 910 return (ENOPROTOOPT); 911 } 912 *mp = m; 913 return (0); 914 } 915 } 916 917 sohasoutofband(so) 918 register struct socket *so; 919 { 920 struct proc *p; 921 922 if (so->so_pgid < 0) 923 gsignal(-so->so_pgid, SIGURG); 924 else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0) 925 psignal(p, SIGURG); 926 if (so->so_rcv.sb_sel) { 927 selwakeup(so->so_rcv.sb_sel, so->so_rcv.sb_flags & SB_COLL); 928 so->so_rcv.sb_sel = 0; 929 so->so_rcv.sb_flags &= ~SB_COLL; 930 } 931 } 932