/*
 * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)uipc_socket.c	7.17 (Berkeley) 04/03/90
 */

#include "param.h"
#include "user.h"
#include "proc.h"
#include "file.h"
#include "malloc.h"
#include "mbuf.h"
#include "domain.h"
#include "protosw.h"
#include "socket.h"
#include "socketvar.h"

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 *
 * TODO:
 *	test socketpair
 *	clean up async
 *	out-of-band is a kludge
 */
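
/*
 * Added sketch (not part of the original file): every routine below
 * hands its work to the protocol through the same user-request hook.
 * The calls all have the general shape
 *
 *	error = (*so->so_proto->pr_usrreq)(so, PRU_xxx,
 *	    m, nam, rights);
 *
 * where m carries data, nam an address, and the last argument a
 * rights/control mbuf; unused arguments are passed as (struct mbuf *)0.
 * sobind() and soaccept() below are minimal examples of the pattern.
 */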
/*ARGSUSED*/
socreate(dom, aso, type, proto)
	struct socket **aso;
	register int type;
	int proto;
{
	register struct protosw *prp;
	register struct socket *so;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
	bzero((caddr_t)so, sizeof(*so));
	so->so_type = type;
	if (u.u_uid == 0)
		so->so_state = SS_PRIV;
	so->so_proto = prp;
	error =
	    (*prp->pr_usrreq)(so, PRU_ATTACH,
		(struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}

sobind(so, nam)
	struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	error = (*so->so_proto->pr_usrreq)(so, PRU_BIND,
	    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

solisten(so, backlog)
	register struct socket *so;
	int backlog;
{
	int s = splnet(), error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
	if (error) {
		splx(s);
		return (error);
	}
	if (so->so_q == 0)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = min(backlog, SOMAXCONN);
	splx(s);
	return (0);
}

sofree(so)
	register struct socket *so;
{

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (so->so_head) {
		if (!soqremque(so, 0) && !soqremque(so, 1))
			panic("sofree dq");
		so->so_head = 0;
	}
	sbrelease(&so->so_snd);
	sorflush(so);
	FREE(so, M_SOCKET);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	if (so->so_options & SO_ACCEPTCONN) {
		while (so->so_q0)
			(void) soabort(so->so_q0);
		while (so->so_q)
			(void) soabort(so->so_q);
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED)
				if (error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, netcls, so->so_linger))
					break;
		}
	}
	/*
	 * If there is an error on the socket, disregard any
	 * error from tsleep and return the socket error.
	 */
	if (so->so_error)
		error = so->so_error;
drop:
	if (so->so_pcb) {
		int error2 =
		    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
			(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}

/*
 * Must be called at splnet...
 */
soabort(so)
	struct socket *so;
{

	return (
	    (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}

soaccept(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
	    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

soconnect(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
	    (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
	splx(s);
	return (error);
}

sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
	splx(s);
	return (error);
}

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 */
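/*
 * Added sketch of the chain-building done below (not part of the
 * original file).  Each pass allocates a packet header mbuf first,
 * then plain mbufs, attaching a cluster when the remaining transfer
 * is large enough to justify one:
 *
 *	MGETHDR(m, M_WAIT, MT_DATA);	-- first mbuf, owns m_pkthdr.len
 *	MGET(m, M_WAIT, MT_DATA);	-- later mbufs in the chain
 *	MCLGET(m, M_WAIT);		-- optionally add an MCLBYTES cluster
 *
 * The finished chain is handed to the protocol as "top" in a PRU_SEND
 * (or, for MSG_OOB, PRU_SENDOOB) request.
 */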
sosend(so, nam, uio, flags, control)
	register struct socket *so;
	struct mbuf *nam;
	register struct uio *uio;
	int flags;
	struct mbuf *control;
{
	struct mbuf *top = 0, **mp;
	register struct mbuf *m;
	register int space, len;
	int rlen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so);

	if (atomic && uio->uio_resid > so->so_snd.sb_hiwat)
		return (EMSGSIZE);
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	u.u_ru.ru_msgsnd++;
	if (control)
		rlen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	if (error = sblock(&so->so_snd))
		return (error);
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error)
			snderr(so->so_error);
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if ((so->so_state & SS_ISCONFIRMING) == 0)
					snderr(ENOTCONN);
			} else if (nam == 0)
				snderr(EDESTADDRREQ);
		}
		if (flags & MSG_OOB)
			space = 1024;
		else {
			space = sbspace(&so->so_snd);
			if (space <= rlen ||
			    (atomic && space < uio->uio_resid + rlen) ||
			    (uio->uio_resid >= MCLBYTES && space < MCLBYTES &&
			    so->so_snd.sb_cc >= MCLBYTES &&
			    (so->so_state & SS_NBIO) == 0)) {
				if (so->so_state & SS_NBIO)
					snderr(EWOULDBLOCK);
				sbunlock(&so->so_snd);
				if (error = sbwait(&so->so_snd))
					snderr(error);
				splx(s);
				goto restart;
			}
		}
		splx(s);
		mp = &top;
		space -= rlen;
		do {
			do {
				if (top == 0) {
					MGETHDR(m, M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = (struct ifnet *)0;
				} else {
					MGET(m, M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				if (uio->uio_resid >= MINCLSIZE &&
				    space >= MCLBYTES) {
					MCLGET(m, M_WAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
#ifdef	MAPPED_MBUFS
					len = min(MCLBYTES, uio->uio_resid);
					if (len < mlen - max_hdr)
						m->m_data += max_hdr;
#else
					len = min(MCLBYTES - max_hdr,
					    uio->uio_resid);
					m->m_data += max_hdr;
#endif
					space -= MCLBYTES;
				} else {
nopages:
					len = min(min(mlen, uio->uio_resid),
					    space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				error = uiomove(mtod(m, caddr_t), len, uio);
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (uio->uio_resid <= 0) {
					if ((flags & MSG_EOR) && top)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			s = splnet();				/* XXX */
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, (caddr_t)nam, control);
			splx(s);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			rlen = 0;
			top = 0;
			mp = &top;
			if (error)
				goto release;
		} while (uio->uio_resid && space > 0);
	} while (uio->uio_resid);

release:
	sbunlock(&so->so_snd);
	if (top)
		m_freem(top);
	return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf containing access rights if supported
 * by the protocol, and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 */
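/*
 * Added picture of the layout assumed here (not part of the original
 * file).  Records sit on so_rcv.sb_mb chained through m_nextpkt; the
 * mbufs within one record are chained through m_next:
 *
 *	sb_mb -> MT_SONAME -> MT_RIGHTS -> MT_CONTROL -> MT_DATA -> ...
 *	             |
 *	        (m_nextpkt)
 *	             |
 *	             v
 *	         next record
 *
 * The address, rights and control mbufs are each present only when
 * the protocol supplies them, as the checks below show.
 */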
soreceive(so, aname, uio, flagsp, rightsp, controlp)
	register struct socket *so;
	struct mbuf **aname;
	register struct uio *uio;
	int *flagsp;
	struct mbuf **rightsp, **controlp;
{
	register struct mbuf *m;
	register int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord, *m_with_eor;
	int moff;

	if (rightsp)
		*rightsp = 0;
	if (aname)
		*aname = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
		    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
		if (error)
			goto bad;
		do {
			len = uio->uio_resid;
			if (len > m->m_len)
				len = m->m_len;
			error = uiomove(mtod(m, caddr_t), (int)len, uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
		    (struct mbuf *)0, (struct mbuf *)0);

restart:
	if (error = sblock(&so->so_rcv))
		return (error);
	s = splnet();

	m = so->so_rcv.sb_mb;
	if (m == 0) {
		if (so->so_rcv.sb_cc)
			panic("receive 1");
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE)
			goto release;
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if (so->so_state & SS_NBIO) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		if (error = sbwait(&so->so_rcv))
			goto release;
		splx(s);
		goto restart;
	}
	u.u_ru.ru_msgrcv++;
	if (m->m_type == 0)
		panic("receive 3a");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
		if (flags & MSG_PEEK) {
			if (aname)
				*aname = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (aname) {
				*aname = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	if (m && m->m_type == MT_RIGHTS) {
		if ((pr->pr_flags & PR_RIGHTS) == 0)
			panic("receive 2");
		if (flags & MSG_PEEK) {
			if (rightsp)
				*rightsp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (rightsp) {
				*rightsp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	if (m && m->m_type == MT_CONTROL) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	if (m)
		m->m_nextpkt = nextrecord;
	moff = 0;
	offset = 0;
	m_with_eor = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA)
			flags |= MSG_OOB;
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
		if (m->m_flags & M_EOR)
			m_with_eor = m;
		len = uio->uio_resid;
		so->so_state &= ~SS_RCVATMARK;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		splx(s);
		error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
		s = splnet();
		if (len == m->m_len - moff) {
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else
				offset += len;
		}
		if (m_with_eor)
			break;
	}
	if (m_with_eor) {
		if (m != m_with_eor)
			flags |= MSG_EOR;
		/* else data not consumed from mbuf */
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		else if (pr->pr_flags & PR_ATOMIC) {
			flags |= MSG_TRUNC;
			(void) sbdroprecord(&so->so_rcv);
		}
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
			    (struct mbuf *)flags, (struct mbuf *)0,
			    (struct mbuf *)0);
		if (error == 0 && rightsp && *rightsp &&
		    pr->pr_domain->dom_externalize)
			error = (*pr->pr_domain->dom_externalize)(*rightsp);
	}
	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}

soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;

	how++;
	if (how & FREAD)
		sorflush(so);
	if (how & FWRITE)
		return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
		    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
	return (0);
}

sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb);
}
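
/*
 * Added note on the option handling below (not part of the original
 * file): for the boolean options the SO_xxx value is itself the bit
 * kept in so_options, so setting and clearing reduce to
 *
 *	if (*mtod(m, int *))
 *		so->so_options |= optname;
 *	else
 *		so->so_options &= ~optname;
 *
 * which only works because those options are defined as distinct bits.
 */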
sosetopt(so, level, optname, m0)
	register struct socket *so;
	int level, optname;
	struct mbuf *m0;
{
	int error = 0;
	u_long val;
	register struct mbuf *m = m0;
	register struct sockbuf *sb;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
			    (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger)) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* fall thru... */

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_OOBINLINE:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_SNDBUF:
		case SO_SNDLOWAT:
		case SO_SNDTIMEO:
			sb = &so->so_snd;
			goto bufopts;

		case SO_RCVBUF:
		case SO_RCVLOWAT:
		case SO_RCVTIMEO:
			sb = &so->so_rcv;
bufopts:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			switch (optname) {

			case SO_SNDBUF:
			case SO_RCVBUF:
				if ((val = (u_long) *mtod(m, int *)) == 0) {
					error = EINVAL;
					goto bad;
				}
				if (sbreserve(sb, val) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				if (sb->sb_lowat > sb->sb_hiwat)
					sb->sb_lowat = sb->sb_hiwat;
				break;

			case SO_SNDLOWAT:
			case SO_RCVLOWAT:
				if ((val = (u_long) *mtod(m, int *)) == 0 ||
				    val > sb->sb_hiwat) {
					error = EINVAL;
					goto bad;
				}
				sb->sb_lowat = val;
				break;

			case SO_SNDTIMEO:
			case SO_RCVTIMEO:
				sb->sb_timeo = *mtod(m, int *);
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}

sogetopt(so, level, optname, mp)
	register struct socket *so;
	int level, optname;
	struct mbuf **mp;
{
	register struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
			    (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
			    so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_BROADCAST:
		case SO_OOBINLINE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
			*mtod(m, int *) = so->so_snd.sb_timeo;
			break;

		case SO_RCVTIMEO:
			*mtod(m, int *) = so->so_rcv.sb_timeo;
			break;

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}
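
/*
 * Added note (not part of the original file): so_pgid holds a process
 * id when positive and the negative of a process group id when
 * negative, which is why SIGURG is delivered with gsignal() in one
 * case and psignal() in the other below.
 */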
sohasoutofband(so)
	register struct socket *so;
{
	struct proc *p;

	if (so->so_pgid < 0)
		gsignal(-so->so_pgid, SIGURG);
	else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
		psignal(p, SIGURG);
	if (so->so_rcv.sb_sel) {
		selwakeup(so->so_rcv.sb_sel, so->so_rcv.sb_flags & SB_COLL);
		so->so_rcv.sb_sel = 0;
		so->so_rcv.sb_flags &= ~SB_COLL;
	}
}