/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */

#include "opt_inet.h"
#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>
#include <vm/pmap.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>

#include <machine/limits.h>

extern int tcp_sosnd_agglim;

#ifdef INET
static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };

MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

/*
 * Socket operation routines.
 * These routines are called by the routines in sys_socket.c or from a
 * system process, and implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate the socket
 * and the PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(int waitok)
{
	struct socket *so;
	unsigned waitmask;

	waitmask = waitok ? M_WAITOK : M_NOWAIT;
	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
	if (so) {
		/* XXX race condition for reentrant kernel */
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.ssb_kq.ki_mlist);
		TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist);
		lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
		lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
		so->so_state = SS_NOFDREF;
		so->so_refs = 1;
	}
	return so;
}
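/*
 * Illustrative sketch (assumption, not part of the original code): the
 * reference taken in soalloc() (so_refs = 1) is the one that soclose() ->
 * sofree() eventually drops.  A minimal in-kernel lifecycle, given a
 * thread context td, would look like:
 *
 *	struct socket *so;
 *	int error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP, td);
 *	if (error == 0)
 *		error = soclose(so, 0);	-- releases the soalloc() reference
 */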
int
socreate(int dom, struct socket **aso, int type,
	 int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != NULL);
	if (so == NULL)
		return (ENOBUFS);

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot.  This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing.  No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 *
	 * We normally default the socket to the protocol thread on cpu 0.
	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
	 * thread and all pr_*()/pru_*() calls are executed synchronously.
	 */
	if (prp->pr_flags & PR_SYNC_PORT)
		so->so_port = &netisr_sync_port;
	else
		so->so_port = cpu_portfn(0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	so->so_proto = prp;
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	error = so_pru_attach(so, proto, &ai);
	if (error) {
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return error;
	}

	/*
	 * NOTE: Returns referenced socket.
	 */
	*aso = so;
	return (0);
}
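/*
 * Illustrative note (assumption, not original code): with proto == 0 the
 * lookup above falls back to the first protocol of the requested type in
 * the domain, so these two calls normally resolve to the same protosw:
 *
 *	socreate(AF_INET, &so, SOCK_STREAM, 0, td);
 *	socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP, td);
 */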
int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = so_pru_bind(so, nam, td);
	return (error);
}

static void
sodealloc(struct socket *so)
{
	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif /* INET */
	crfree(so->so_cred);
	kfree(so, M_SOCKET);
}

int
solisten(struct socket *so, int backlog, struct thread *td)
{
	int error;
#ifdef SCTP
	short oldopt, oldqlimit;
#endif /* SCTP */

	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
		return (EINVAL);

#ifdef SCTP
	oldopt = so->so_options;
	oldqlimit = so->so_qlimit;
#endif /* SCTP */

	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	/*
	 * SCTP needs to tweak both the inbound backlog parameter AND
	 * the so_options (its UDP model both connects and accepts
	 * inbound connections implicitly).
	 */
	error = so_pru_listen(so, td);
	if (error) {
#ifdef SCTP
		/* Restore the params */
		so->so_options = oldopt;
		so->so_qlimit = oldqlimit;
#endif /* SCTP */
		return (error);
	}
	return (0);
}
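/*
 * Illustrative note (assumption, not original code): the backlog is
 * clamped to the kern.ipc.somaxconn sysctl, so with the default
 * SOMAXCONN of 128 a listen(s, 1024) from userland ends up with
 * so_qlimit == 128, and listen(s, -1) likewise maps to the maximum.
 */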
/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 *	so_pcb -	The protocol stack still has a reference
 *	SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
	struct socket *head;

	/*
	 * This is a bit hackish at the moment.  We need to interlock
	 * any accept queue we are on before we potentially lose the
	 * last reference to avoid races against a re-reference from
	 * someone operating on the queue.
	 */
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	/*
	 * Arbitrate the last free.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
		if (head)
			lwkt_relpooltoken(head);
		return;
	}

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	/*
	 * We're done, remove ourselves from the accept queue we are
	 * on, if we are on one.
	 */
	if (head != NULL) {
		if (so->so_state & SS_INCOMP) {
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			lwkt_relpooltoken(head);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		so->so_head = NULL;
		lwkt_relpooltoken(head);
	}
	ssb_release(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error = 0;

	funsetown(&so->so_sigio);
	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
				    "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error == 0)
			error = error2;
	}
discard:
	lwkt_getpooltoken(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			soclrstate(sp, SS_INCOMP);
			sp->so_head = NULL;
			so->so_incqlen--;
			soaborta(sp);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			soclrstate(sp, SS_COMP);
			sp->so_head = NULL;
			so->so_qlen--;
			soaborta(sp);
		}
	}
	lwkt_relpooltoken(so);
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
	sofree(so);			/* dispose of ref */
	return (error);
}

/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort(struct socket *so)
{
	soreference(so);
	so_pru_abort(so);
}

void
soaborta(struct socket *so)
{
	soreference(so);
	so_pru_aborta(so);
}

void
soabort_oncpu(struct socket *so)
{
	soreference(so);
	so_pru_abort_oncpu(so);
}

/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
	error = so_pru_accept_direct(so, nam);
	return (error);
}
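/*
 * Illustrative note (assumption, not original code): with SO_LINGER set,
 * soclose() above sleeps for up to so_linger seconds waiting for the
 * disconnect to drain.  A userland setup that exercises that path:
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 5 };
 *	setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(s);	-- may block up to ~5 seconds while disconnecting
 */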
int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If the protocol is connection-based, we can only connect once.
	 * Otherwise, if connected, try to disconnect first.  This allows
	 * the user to disconnect by connecting to, e.g., a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	     (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from a previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		error = so_pru_connect(so, nam, td);
	}
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	error = so_pru_connect2(so1, so2);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if non-NULL,
 * otherwise by the mbuf chain "top" (which must be NULL
 * if uio is not).  Data provided in an mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
       struct mbuf *top, struct mbuf *control, int flags,
       struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 * can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == NULL) {
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
					ENOTCONN : EDESTADDRREQ);
			}
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				m = m_getl((int)resid, MB_WAIT, MT_DATA,
					   top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				if (resid < MINCLSIZE) {
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == NULL && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
				   (so->so_proto->pr_flags & PR_IMPLOPCL) &&
				   (resid == 0)) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag and there is nothing
				 * left to send, then use PRU_SEND_EOF instead
				 * of PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control, td);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
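/*
 * Illustrative note (assumption, not original code): MSG_DONTWAIT in
 * "flags" affects both the sockbuf lock (via SBLOCKWAIT()) and the
 * space check above, so a nonblocking send on a descriptor marked
 * FNONBLOCK reduces to roughly:
 *
 *	error = sosend(so, addr, uio, NULL, NULL, MSG_FNONBLOCKING, td);
 *	-- returns EWOULDBLOCK instead of sleeping in ssb_wait()
 */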
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	boolean_t dontroute;		/* temporary SO_DONTROUTE setting */
	size_t resid;
	int error;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		top = m_uiomove(uio);
		if (top == NULL)
			goto release;
	}

	dontroute = (flags & MSG_DONTROUTE) && !(so->so_options & SO_DONTROUTE);
	if (dontroute)
		so->so_options |= SO_DONTROUTE;

	error = so_pru_send(so, 0, top, addr, NULL, td);
	top = NULL;		/* sent or freed in lower layer */

	if (dontroute)
		so->so_options &= ~SO_DONTROUTE;

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}
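/*
 * Illustrative note (assumption, not original code): because PR_ATOMIC
 * guarantees the whole datagram passes the space check above, the copy
 * is a single m_uiomove() and exactly one so_pru_send() per call.  A
 * 1400-byte sendto() is therefore queued in full or, on a nonblocking
 * socket, fails with EWOULDBLOCK; an oversized one fails with EMSGSIZE.
 */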
int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags,
	  struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int error, mlen;
	int allatonce;
	int pru_flags;

	if (uio) {
		KKASSERT(top == NULL);
		allatonce = 0;
		resid = uio->uio_resid;
	} else {
		allatonce = 1;
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 * can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on TCP.
	 */
	if (flags & MSG_EOR) {
		error = EINVAL;
		goto out;
	}

	if (control) {
		/* TCP doesn't do control messages (rights, creds, etc) */
		if (control->m_len) {
			error = EINVAL;
			goto out;
		}
		m_freem(control);	/* empty control, just free it */
		control = NULL;
	}

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;

#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_state & SS_ISCONFIRMING) == 0)
			gotoerr(ENOTCONN);
		if (allatonce && resid > so->so_snd.ssb_hiwat)
			gotoerr(EMSGSIZE);

		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid) && !allatonce &&
		    space < so->so_snd.ssb_lowat) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		do {
			int cnt = 0;

			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				m = m_getl((int)resid, MB_WAIT, MT_DATA,
					   top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0)
					break;
				++cnt;
			} while (space > 0 && cnt < tcp_sosnd_agglim);

			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}

			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			if ((pru_flags & PRUS_OOB) ||
			    (pru_flags & PRUS_MORETOCOME) == 0) {
				error = so_pru_send(so, pru_flags, top,
				    NULL, NULL, td);
			} else {
				so_pru_send_async(so, pru_flags, top,
				    NULL, NULL, td);
				error = 0;
			}

			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
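/*
 * Illustrative note (assumption, not original code): the inner loop above
 * batches up to tcp_sosnd_agglim mbufs before handing them to the
 * protocol.  Intermediate batches, which still have PRUS_MORETOCOME set,
 * go through so_pru_send_async(); only the final batch uses the
 * synchronous so_pru_send().  A large write(2) therefore results in a
 * small number of protocol dispatches rather than one per mbuf.
 */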
/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * a struct sockbuf (sio) for use in returning the chain.  The uio is
 * then used only for the count in uio_resid.
 */
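/*
 * Illustrative sketch (assumption, not original code) of one record in
 * so_rcv for a PR_ADDR protocol such as UDP, as consumed below:
 *
 *	MT_SONAME (sender address)
 *	  -> MT_CONTROL (optional ancillary data)
 *	  -> MT_DATA -> MT_DATA ...	(payload, linked via m_next)
 *
 * Records themselves are linked through m_nextpkt.
 */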
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp & ~MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(MB_WAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
				    (int)szmin(resid, m->m_len), uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), and
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	     ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * Note: m should be == ssb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out of the
	 * critical section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
#ifdef SCTP
	if (pr->pr_flags & PR_ADDR_OPT) {
		/*
		 * For SCTP we may be getting a
		 * whole message OR a partial delivery.
		 */
		if (m && m->m_type == MT_SONAME) {
			orig_resid = 0;
			if (psa)
				*psa = dup_sockaddr(mtod(m, struct sockaddr *));
			if (flags & MSG_PEEK)
				m = m->m_next;
			else
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
		}
	}
#endif /* SCTP */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		}
		if (controlp && *controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * Flag OOB data.
	 */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
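	/*
	 * Illustrative note (assumption, not original code): the
	 * dom_externalize hook above is what turns an in-flight SCM_RIGHTS
	 * control message into real file descriptors in the receiving
	 * process, so a unix-domain recvmsg() that carries fds only
	 * materializes them when the control mbuf is consumed, not when
	 * MSG_PEEK merely copies it.
	 */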
	/*
	 * Copy to the UIO or to the mbuf return chain (sio).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA) {
			break;
		} else {
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
		}
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
#ifdef SCTP
			if (m->m_flags & M_NOTIFICATION)
				flags |= MSG_NOTIFICATION;
#endif /* SCTP */
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				if (sio) {
					n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
					sbappend(sio, m);
					m = n;
				} else {
					m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
				}
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					n = m_copym(m, 0, len, MB_WAIT);
					if (n)
						sbappend(sio, n);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.ssb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for a non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep the signalsockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		    resid > 0 && !sosendallatonce(so) &&
		    so->so_rcv.ssb_mb == NULL) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = ssb_wait(&so->so_rcv);
			if (error) {
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.ssb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;
	/*
	 * Cleanup.  If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (free_chain)
		m_freem(free_chain);
	return (error);
}

/*
 * Shut a socket down.  Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR) {
		/*ssb_lock(&so->so_rcv, M_WAITOK);*/
		sorflush(so);
		/*ssb_unlock(&so->so_rcv);*/
	}
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}

void
sorflush(struct socket *so)
{
	struct signalsockbuf *ssb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct signalsockbuf asb;

	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

	lwkt_gettoken(&ssb->ssb_token);
	socantrcvmore(so);
	asb = *ssb;

	/*
	 * Can't just blow up the ssb structure here
	 */
	bzero(&ssb->sb, sizeof(ssb->sb));
	ssb->ssb_timeo = 0;
	ssb->ssb_lowat = 0;
	ssb->ssb_hiwat = 0;
	ssb->ssb_mbmax = 0;
	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

	if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
	ssb_release(&asb, so);

	lwkt_reltoken(&ssb->ssb_token);
}
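/*
 * Illustrative note (assumption, not original code): accept filters are
 * armed from userland on a listening socket, e.g. to defer accept(2)
 * until data has arrived:
 *
 *	struct accept_filter_arg afa;
 *	bzero(&afa, sizeof(afa));
 *	strcpy(afa.af_name, "dataready");
 *	setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof(afa));
 *
 * which lands in do_setopt_accept_filter() below via sosetopt().
 */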
#ifdef INET
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non-listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				FREE(af->so_accept_filter_str, M_ACCF);
			}
			FREE(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP,
	    M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			MALLOC(af->so_accept_filter_str, char *, len, M_ACCF,
			    M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			FREE(af->so_accept_filter_str, M_ACCF);
			FREE(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		FREE(afap, M_TEMP);
	return (error);
}
#endif /* INET */

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}
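/*
 * Illustrative note (assumption, not original code): for a fixed-size
 * option the two size arguments are the same, so the common pattern in
 * this file,
 *
 *	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
 *
 * rejects a too-short user buffer with EINVAL and silently ignores any
 * excess bytes in a too-long one.
 */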
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif /* INET */
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
				    &so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
				    SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
				    SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
				    SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
			    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}

/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}
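/*
 * Illustrative arithmetic (assumption, not original code): the
 * SO_SNDTIMEO/SO_RCVTIMEO conversion above stores the timeout in ticks,
 * ustick being the number of microseconds per tick (1000000 / hz).
 * With hz = 100 (ustick = 10000), a timeval of { 2, 500000 } becomes
 *
 *	val = 2 * 100 + 500000 / 10000 = 250 ticks
 *
 * and any nonzero timeout that rounds down to 0 is bumped to 1 tick.
 */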
void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer
	 * must be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}

int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	long optval_l;
	struct linger l;
	struct timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		} else {
			return (ENOPROTOOPT);
		}
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
			    M_TEMP, M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name,
				    so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg,
					    so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			FREE(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
			    so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}
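/*
 * Illustrative note (assumption, not original code): soopt_getm() below
 * sizes an mbuf chain to sopt->sopt_valsize, so a 3000-byte option on a
 * system with 2048-byte clusters typically yields a two-mbuf chain
 * (2048 + 952) that soopt_mcopyin()/soopt_to_mbuf() then fills from the
 * sockopt buffer.
 */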
/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA,
	    0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT,
		    MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	/* the chain should have been allocated large enough at ip6_sooptmcopyin() */
	if (m != NULL)
		panic("ip6_sooptmcopyin");
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* the sockopt buffer supplied by userland should be large enough */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}

void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	KNOTE(&so->so_rcv.ssb_kq.ki_note, NOTE_OOB);
}

int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}
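/*
 * Illustrative note (assumption, not original code): the filters below
 * honor NOTE_LOWAT, so a userland registration such as
 *
 *	struct kevent kev;
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 4096, NULL);
 *
 * arranges for filt_soread() to report readiness only once at least
 * 4096 bytes are buffered (kn_data >= kn_sdata).
 */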
static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
	    !TAILQ_EMPTY(&so->so_comp));
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = ssb_space(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (!TAILQ_EMPTY(&so->so_comp));
}