/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 * $DragonFly: src/sys/kern/uipc_socket.c,v 1.55 2008/09/02 16:17:52 dillon Exp $
 */

#include "opt_inet.h"
#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>
#include <vm/pmap.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>

#include <machine/limits.h>

#ifdef INET
static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ FILTEROP_ISFD, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD, NULL, filt_sordetach, filt_soread };

MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(int waitok)
{
	struct socket *so;
	unsigned waitmask;

	waitmask = waitok ? M_WAITOK : M_NOWAIT;
	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
	if (so) {
		/* XXX race condition for reentrant kernel */
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.ssb_kq.ki_mlist);
		TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist);
		lwkt_token_init(&so->so_rcv.ssb_token, 1, "rcvtok");
		lwkt_token_init(&so->so_snd.ssb_token, 1, "sndtok");
		so->so_state = SS_NOFDREF;
		so->so_refs = 1;
	}
	return so;
}

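/*
 * Illustrative sketch (not part of this file): callers in process context
 * may pass a non-zero waitok, allowing the allocation to sleep; callers
 * that cannot sleep must pass 0 and be prepared for a NULL return.
 */
#if 0
	struct socket *so = soalloc(0);		/* may not sleep */

	if (so == NULL)
		return (ENOBUFS);
#endif
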
int
socreate(int dom, struct socket **aso, int type,
	 int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != 0);
	if (so == NULL)
		return (ENOBUFS);

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot.  This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing.  No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 */
	so->so_port = cpu0_soport(so, NULL, NULL);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	so->so_proto = prp;
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	error = so_pru_attach(so, proto, &ai);
	if (error) {
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return error;
	}

	/*
	 * NOTE: Returns referenced socket.
	 */
	*aso = so;
	return (0);
}

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = so_pru_bind(so, nam, td);
	return (error);
}

static void
sodealloc(struct socket *so)
{
	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif /* INET */
	crfree(so->so_cred);
	kfree(so, M_SOCKET);
}

int
solisten(struct socket *so, int backlog, struct thread *td)
{
	int error;
#ifdef SCTP
	short oldopt, oldqlimit;
#endif /* SCTP */

	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
		return (EINVAL);

#ifdef SCTP
	oldopt = so->so_options;
	oldqlimit = so->so_qlimit;
#endif /* SCTP */

	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	/*
	 * SCTP needs to tweak both the inbound backlog parameter AND
	 * the so_options (in the UDP model it both connects and accepts
	 * inbound connections implicitly).
	 */
	error = so_pru_listen(so, td);
	if (error) {
#ifdef SCTP
		/* Restore the params */
		so->so_options = oldopt;
		so->so_qlimit = oldqlimit;
#endif /* SCTP */
		return (error);
	}
	return (0);
}

/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 *	so_pcb -	The protocol stack still has a reference
 *	SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
	struct socket *head = so->so_head;

	/*
	 * Arbitrage the last free.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1)
		return;

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	/*
	 * We're done, clean up
	 */
	if (head != NULL) {
		lwkt_gettoken(&head->so_rcv.ssb_token);
		if (so->so_state & SS_INCOMP) {
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			lwkt_reltoken(&head->so_rcv.ssb_token);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		so->so_head = NULL;
		lwkt_reltoken(&head->so_rcv.ssb_token);
	}
	ssb_release(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}

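/*
 * Illustrative sketch (not from this file): the typical in-kernel sequence
 * mirrors socket(2)/bind(2)/listen(2).  "sa" and "td" are hypothetical and
 * assume process context.
 */
#if 0
	struct socket *so = NULL;
	int error;

	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP, td);
	if (error == 0)
		error = sobind(so, sa, td);
	if (error == 0)
		error = solisten(so, SOMAXCONN, td);
	if (error && so != NULL)
		soclose(so, FNONBLOCK);
#endif
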
/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error = 0;

	funsetown(so->so_sigio);
	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
					       "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error == 0)
			error = error2;
	}
discard:
	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			soclrstate(sp, SS_INCOMP);
			sp->so_head = NULL;
			so->so_incqlen--;
			soaborta(sp);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			soclrstate(sp, SS_COMP);
			sp->so_head = NULL;
			so->so_qlen--;
			soaborta(sp);
		}
	}
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
	sofree(so);			/* dispose of ref */
	return (error);
}

/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort(struct socket *so)
{
	soreference(so);
	so_pru_abort(so);
}

void
soaborta(struct socket *so)
{
	soreference(so);
	so_pru_aborta(so);
}

void
soabort_oncpu(struct socket *so)
{
	soreference(so);
	so_pru_abort_oncpu(so);
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	soreference(so);		/* create ref */
	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
	error = so_pru_accept(so, nam);
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	     (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		error = so_pru_connect(so, nam, td);
	}
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	error = so_pru_connect2(so1, so2);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
       struct mbuf *top, struct mbuf *control, int flags,
       struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 *	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == 0)
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
					ENOTCONN : EDESTADDRREQ);
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				m = m_getl((int)resid, MB_WAIT, MT_DATA,
					   top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				if (resid < MINCLSIZE) {
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
				   (so->so_proto->pr_flags & PR_IMPLOPCL) &&
				   (resid == 0)) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag, and there is nothing
				 * left to send, then use PRU_SEND_EOF instead
				 * of PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control, td);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = 0;
			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}

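/*
 * Illustrative sketch (not from this file): a kernel caller handing
 * sosend() a prepackaged mbuf chain.  With top != NULL no uio is supplied
 * and the chain is consumed (sent or freed) on return.  "so" and "td" are
 * hypothetical.
 */
#if 0
	struct mbuf *m;
	int error;

	m = m_gethdr(MB_WAIT, MT_DATA);
	m->m_len = m->m_pkthdr.len = 0;
	/* ... fill m with data, keeping m_len and m_pkthdr.len in sync ... */
	error = sosend(so, NULL, NULL, m, NULL, 0, td);
#endif
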
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	boolean_t dontroute;		/* temporary SO_DONTROUTE setting */
	size_t resid;
	int error;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		top = m_uiomove(uio);
		if (top == NULL)
			goto release;
	}

	dontroute = (flags & MSG_DONTROUTE) && !(so->so_options & SO_DONTROUTE);
	if (dontroute)
		so->so_options |= SO_DONTROUTE;

	error = so_pru_send(so, 0, top, addr, NULL, td);
	top = NULL;		/* sent or freed in lower layer */

	if (dontroute)
		so->so_options &= ~SO_DONTROUTE;

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}

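/*
 * Illustrative sketch (not from this file): the invariants sosendudp()
 * relies on, expressed as assertions a protocol hookup could make before
 * electing this fast path.
 */
#if 0
	KKASSERT(so->so_proto->pr_flags & PR_ATOMIC);
	KKASSERT((so->so_proto->pr_flags &
		  (PR_CONNREQUIRED | PR_IMPLOPCL)) == 0);
#endif
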
/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * a struct sockbuf (sio) for use in returning the chain.  The uio is
 * then used only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(MB_WAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), and
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}

dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out of the
	 * critical section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
#ifdef SCTP
	if (pr->pr_flags & PR_ADDR_OPT) {
		/*
		 * For SCTP we may be getting a
		 * whole message OR a partial delivery.
		 */
		if (m && m->m_type == MT_SONAME) {
			orig_resid = 0;
			if (psa)
				*psa = dup_sockaddr(mtod(m, struct sockaddr *));
			if (flags & MSG_PEEK)
				m = m->m_next;
			else
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
		}
	}
#endif /* SCTP */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		}
		if (controlp && *controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * flag OOB data.
	 */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}

	/*
	 * Copy to the UIO or mbuf return chain (sio).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
				("receive 3"));
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
#ifdef SCTP
			if (m->m_flags & M_NOTIFICATION)
				flags |= MSG_NOTIFICATION;
#endif /* SCTP */
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				if (sio) {
					n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
					sbappend(sio, m);
					m = n;
				} else {
					m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
				}
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					n = m_copym(m, 0, len, MB_WAIT);
					if (n)
						sbappend(sio, n);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.ssb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep signalsockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		       resid > 0 && !sosendallatonce(so) &&
		       so->so_rcv.ssb_mb == NULL) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = ssb_wait(&so->so_rcv);
			if (error) {
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.ssb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;

	/*
	 * Cleanup.  If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	if (free_chain)
		m_freem(free_chain);
	return (error);
}

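/*
 * Illustrative sketch (not from this file): a kernel consumer draining up
 * to "len" bytes into a kernel buffer via a uio.  "so", "buf" and "len"
 * are hypothetical.
 */
#if 0
	struct iovec iov;
	struct uio auio;
	int flags = MSG_DONTWAIT;
	int error;

	iov.iov_base = buf;			/* hypothetical kernel buffer */
	iov.iov_len = len;
	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = curthread;
	error = soreceive(so, NULL, &auio, NULL, NULL, &flags);
#endif
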
int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR) {
		ssb_lock(&so->so_rcv, M_WAITOK);	/* frontend lock */
		sorflush(so);
		ssb_unlock(&so->so_rcv);
	}
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}

void
sorflush(struct socket *so)
{
	struct signalsockbuf *ssb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct signalsockbuf asb;

	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

	lwkt_gettoken(&ssb->ssb_token);
	socantrcvmore(so);
	asb = *ssb;

	/*
	 * Can't just blow up the ssb structure here
	 */
	bzero(&ssb->sb, sizeof(ssb->sb));
	ssb->ssb_timeo = 0;
	ssb->ssb_unused01 = 0;
	ssb->ssb_lowat = 0;
	ssb->ssb_hiwat = 0;
	ssb->ssb_mbmax = 0;
	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

	lwkt_reltoken(&ssb->ssb_token);

	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
	ssb_release(&asb, so);
}

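/*
 * Illustrative sketch (not from this file): SHUT_RD only flushes the
 * receive side locally via sorflush(); SHUT_WR and SHUT_RDWR also reach
 * the protocol through so_pru_shutdown().  "so" is hypothetical.
 */
#if 0
	error = soshutdown(so, SHUT_RDWR);	/* flush input, then shut
						 * down the write side */
#endif
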
#ifdef INET
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				FREE(af->so_accept_filter_str, M_ACCF);
			}
			FREE(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			MALLOC(af->so_accept_filter_str, char *, len, M_ACCF, M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			FREE(af->so_accept_filter_str, M_ACCF);
			FREE(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		FREE(afap, M_TEMP);
	return (error);
}
#endif /* INET */

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}

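/*
 * Illustrative sketch (not from this file): a protocol's pr_ctloutput
 * routine fetching a fixed-size integer option with sooptcopyin().
 * "sopt" and "error" are assumed to be in scope.
 */
#if 0
	int optval;

	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
	if (error)
		return (error);
	if (optval < 0)
		return (EINVAL);
#endif
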
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pru_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif /* INET */
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
					&so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
						 SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > SHRT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pru_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}

/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer
	 * must be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}

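/*
 * Illustrative sketch (not from this file): setting SO_LINGER from kernel
 * code by building a struct sockopt directly.  "so" and "error" are
 * hypothetical; sopt_val points at kernel memory, so the kbuf copy path
 * above applies.
 */
#if 0
	struct sockopt sopt;
	struct linger l = { 1, 5 };	/* linger up to 5 seconds */

	bzero(&sopt, sizeof(sopt));
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_LINGER;
	sopt.sopt_val = &l;
	sopt.sopt_valsize = sizeof(l);
	error = sosetopt(so, &sopt);
#endif
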
int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pru_ctloutput(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
			       M_TEMP, M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			FREE(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL) /* should have been allocated large enough at ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* a large enough soopt buffer should be supplied by user-land */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}

void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	KNOTE(&so->so_rcv.ssb_kq.ki_note, NOTE_OOB);
}

int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}

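/*
 * Illustrative sketch (not from this file, userland): how sokqfilter() is
 * reached.  An EVFILT_READ registration on a listening socket selects
 * solisten_filtops; on a connected socket it selects soread_filtops.
 * "kq" and "sock_fd" are hypothetical.
 */
#if 0
	struct kevent kev;

	EV_SET(&kev, sock_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);
#endif
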
/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	/*
	 * Only set EOF if all data has been exhausted.
	 */
	if ((so->so_state & SS_CANTRCVMORE) && kn->kn_data == 0) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = ssb_space(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (!TAILQ_EMPTY(&so->so_comp));
}