/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>
#include <vm/pmap.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>

#ifdef INET
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int tcp_sosend_jcluster;
extern int udp_sosend_async;
extern int udp_sosend_prepend;

static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);

static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };

MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept prediction");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronous pru_send");

int use_soconnect_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW,
    &use_soconnect_async, 0, "soconnect uses asynchronous pru_connect");

static int use_socreate_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, socreate_fast, CTLFLAG_RW,
    &use_socreate_fast, 0, "Fast socket creation");

static int soavailconn = 32;
SYSCTL_INT(_kern_ipc, OID_AUTO, soavailconn, CTLFLAG_RW,
    &soavailconn, 0, "Maximum available socket connection queue size");
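
/*
 * Editorial note (not part of the original file): each SYSCTL_INT()
 * above exports its variable as a kern.ipc.* OID, so the knobs can be
 * inspected and tuned at runtime with sysctl(8), e.g.:
 *
 *	sysctl kern.ipc.somaxconn=256
 *	sysctl kern.ipc.soclose_fast=0
 */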

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(int waitok, struct protosw *pr)
{
	globaldata_t gd = mycpu;
	struct socket *so;
	unsigned waitmask;

	waitmask = waitok ? M_WAITOK : M_NOWAIT;
	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
	if (so) {
		/* XXX race condition for reentrant kernel */
		so->so_proto = pr;
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.ssb_mlist);
		TAILQ_INIT(&so->so_snd.ssb_mlist);
		lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
		lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
		spin_init(&so->so_rcvd_spin, "soalloc");
		netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
			    MSGF_DROPABLE | MSGF_PRIORITY,
			    so->so_proto->pr_usrreqs->pru_rcvd);
		so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
		so->so_state = SS_NOFDREF;
		so->so_refs = 1;
		so->so_inum = gd->gd_anoninum++ * ncpus + gd->gd_cpuid + 2;
	}
	return so;
}

int
socreate(int dom, struct socket **aso, int type,
	 int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != NULL, prp);
	if (so == NULL)
		return (ENOBUFS);

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot.  This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing.  No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 *
	 * We normally default the socket to the protocol thread on cpu 0,
	 * if protocol does not provide its own method to initialize the
	 * default port.
	 *
	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
	 * thread and all pr_*()/pru_*() calls are executed synchronously.
	 */
	if (prp->pr_flags & PR_SYNC_PORT)
		so->so_port = &netisr_sync_port;
	else if (prp->pr_initport != NULL)
		so->so_port = prp->pr_initport();
	else
		so->so_port = netisr_cpuport(0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	if (use_socreate_fast && prp->pr_usrreqs->pru_preattach)
		error = so_pru_attach_fast(so, proto, &ai);
	else
		error = so_pru_attach(so, proto, &ai);
	if (error) {
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return error;
	}

	/*
	 * NOTE: Returns referenced socket.
	 */
	*aso = so;
	return (0);
}

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = so_pru_bind(so, nam, td);
	return (error);
}

static void
sodealloc(struct socket *so)
{
	KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);

#ifdef INVARIANTS
	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT(TAILQ_EMPTY(&so->so_comp), ("so_comp is not empty"));
		KASSERT(TAILQ_EMPTY(&so->so_incomp),
			("so_incomp is not empty"));
	}
#endif

	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif /* INET */
	crfree(so->so_cred);
	if (so->so_faddr != NULL)
		kfree(so->so_faddr, M_SONAME);
	kfree(so, M_SOCKET);
}

int
solisten(struct socket *so, int backlog, struct thread *td)
{
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
		return (EINVAL);

	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	return so_pru_listen(so, td);
}

static void
soqflush(struct socket *so)
{
	lwkt_getpooltoken(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
				 SS_INCOMP);
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			soclrstate(sp, SS_INCOMP);
			soabort_async(sp, TRUE);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
				 SS_COMP);
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			soclrstate(sp, SS_COMP);
			soabort_async(sp, TRUE);
		}
	}
	lwkt_relpooltoken(so);
}
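
/*
 * Editorial sketch (not part of the original file): a typical in-kernel
 * consumer strings socreate()/sobind()/solisten() together roughly as
 * below.  The function and the IPv4 address are hypothetical, and
 * <netinet/in.h> would be needed for struct sockaddr_in; error unwinding
 * goes through soclose() as the socreate() comment prescribes.
 */
#if 0
static int
example_kernel_listen(struct thread *td, struct socket **sop)
{
	struct sockaddr_in sin;		/* hypothetical listen address */
	struct socket *so;
	int error;

	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP, td);
	if (error)
		return (error);

	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(8080);

	error = sobind(so, (struct sockaddr *)&sin, td);
	if (error == 0)
		error = solisten(so, SOMAXCONN, td); /* clamped to somaxconn */
	if (error)
		soclose(so, FNONBLOCK);
	else
		*sop = so;
	return (error);
}
#endif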

/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 *	so_pcb -	The protocol stack still has a reference
 *	SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
	struct socket *head;

	/*
	 * This is a bit hackish at the moment.  We need to interlock
	 * any accept queue we are on before we potentially lose the
	 * last reference to avoid races against a re-reference from
	 * someone operating on the queue.
	 */
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	/*
	 * Arbitrage the last free.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
		if (head)
			lwkt_relpooltoken(head);
		return;
	}

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	if (head != NULL) {
		/*
		 * We're done, remove ourselves from the accept queue we are
		 * on, if we are on one.
		 */
		if (so->so_state & SS_INCOMP) {
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
				 SS_INCOMP);
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
				 SS_COMP);
			lwkt_relpooltoken(head);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		so->so_head = NULL;
		lwkt_relpooltoken(head);
	} else {
		/* Flush accept queues, if we are accepting. */
		soqflush(so);
	}
	ssb_release(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error;

	funsetown(&so->so_sigio);
	sosetstate(so, SS_ISCLOSING);
	if (!use_soclose_fast ||
	    (so->so_proto->pr_flags & PR_SYNC_PORT) ||
	    ((so->so_state & SS_ISCONNECTED) &&
	     (so->so_options & SO_LINGER) &&
	     so->so_linger != 0)) {
		error = soclose_sync(so, fflag);
	} else {
		soclose_fast(so);
		error = 0;
	}
	return error;
}

void
sodiscard(struct socket *so)
{
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
}
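
/*
 * Editorial note (not part of the original file): the
 * atomic_fetchadd_int() test in sofree() above is the usual
 * "last-reference frees" idiom.  atomic_fetchadd_int() returns the
 * value *before* the add, so a return value of 1 means this caller just
 * dropped the final reference and may tear the socket down; any other
 * value means another holder remains and sofree() simply returns.
 */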

/*
 * Append the completed queue of head to head_inh (inheriting listen socket).
 */
void
soinherit(struct socket *head, struct socket *head_inh)
{
	boolean_t do_wakeup = FALSE;

	KASSERT(head->so_options & SO_ACCEPTCONN,
	    ("head does not accept connection"));
	KASSERT(head_inh->so_options & SO_ACCEPTCONN,
	    ("head_inh does not accept connection"));

	lwkt_getpooltoken(head);
	lwkt_getpooltoken(head_inh);

	if (head->so_qlen > 0)
		do_wakeup = TRUE;

	while (!TAILQ_EMPTY(&head->so_comp)) {
		struct ucred *old_cr;
		struct socket *sp;

		sp = TAILQ_FIRST(&head->so_comp);
		KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);

		/*
		 * Remove this socket from the current listen socket
		 * completed queue.
		 */
		TAILQ_REMOVE(&head->so_comp, sp, so_list);
		head->so_qlen--;

		/* Save the old ucred for later free. */
		old_cr = sp->so_cred;

		/*
		 * Install this socket to the inheriting listen socket
		 * completed queue.
		 */
		sp->so_cred = crhold(head_inh->so_cred); /* non-blocking */
		sp->so_head = head_inh;

		TAILQ_INSERT_TAIL(&head_inh->so_comp, sp, so_list);
		head_inh->so_qlen++;

		/*
		 * NOTE:
		 * crfree() may block and release the tokens temporarily.
		 * However, we are fine here, since the transition is done.
		 */
		crfree(old_cr);
	}

	lwkt_relpooltoken(head_inh);
	lwkt_relpooltoken(head);

	if (do_wakeup) {
		/*
		 * "New" connections have arrived
		 */
		sorwakeup(head_inh);
		wakeup(&head_inh->so_timeo);
	}
}

static int
soclose_sync(struct socket *so, int fflag)
{
	int error = 0;

	if ((so->so_proto->pr_flags & PR_SYNC_PORT) == 0)
		so_pru_sync(so);	/* unpend async prus */

	if (so->so_pcb == NULL)
		goto discard;

	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
					       "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error2 == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return error;
		}
		if (error == 0)
			error = error2;
	}
discard:
	sodiscard(so);
	sofree(so);		/* dispose of ref */

	return (error);
}

static void
soclose_fast_handler(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;

	if (so->so_pcb == NULL)
		goto discard;

	if ((so->so_state & SS_ISCONNECTED) &&
	    (so->so_state & SS_ISDISCONNECTING) == 0)
		so_pru_disconnect_direct(so);

	if (so->so_pcb) {
		int error;

		error = so_pru_detach_direct(so);
		if (error == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return;
		}
	}
discard:
	sodiscard(so);
	sofree(so);
}

static void
soclose_fast(struct socket *so)
{
	struct netmsg_base *base = &so->so_clomsg;

	netmsg_init(base, so, &netisr_apanic_rport, 0,
		    soclose_fast_handler);
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &base->lmsg);
	else
		lwkt_sendmsg(so->so_port, &base->lmsg);
}

/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort_async(struct socket *so, boolean_t clr_head)
{
	/*
	 * Keep a reference before clearing the so_head
	 * to avoid racing socket close in netisr.
	 */
	soreference(so);
	if (clr_head)
		so->so_head = NULL;
	so_pru_abort_async(so);
}

void
soabort_direct(struct socket *so)
{
	soreference(so);
	so_pru_abort_direct(so);
}
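
/*
 * Editorial note (not part of the original file): soclose_fast() above
 * shows the asynchronous dispatch shape used throughout this file --
 * initialize a netmsg bound to the socket and hand it to the socket's
 * protocol thread:
 *
 *	netmsg_init(base, so, &netisr_apanic_rport, 0, handler);
 *	lwkt_sendmsg(so->so_port, &base->lmsg);
 *
 * The handler then runs in the protocol thread, where the *_direct()
 * pru entry points may be called without further synchronization.
 */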

/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
void
soaccept_generic(struct socket *so)
{
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	soaccept_generic(so);
	error = so_pru_accept(so, nam);
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td,
	  boolean_t sync)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	     (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		if (!sync && so->so_proto->pr_usrreqs->pru_preconnect)
			error = so_pru_connect_async(so, nam, td);
		else
			error = so_pru_connect(so, nam, td);
	}
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	error = so_pru_connect2(so1, so2);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	return (error);
}
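
/*
 * Editorial example (not part of the original file): the "disconnect by
 * connecting to a null address" case described in soconnect() is what a
 * userland program triggers when it dissolves a datagram association:
 *
 *	struct sockaddr sa;
 *
 *	bzero(&sa, sizeof(sa));
 *	sa.sa_len = sizeof(sa);
 *	sa.sa_family = AF_UNSPEC;
 *	connect(s, &sa, sizeof(sa));	-- UDP socket s is now unconnected
 *
 * For PR_CONNREQUIRED protocols such as TCP the same path returns
 * EISCONN instead.
 */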

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
       struct mbuf *top, struct mbuf *control, int flags,
       struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 *	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == NULL)
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
					ENOTCONN : EDESTADDRREQ);
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				m = m_getl((int)resid, M_WAITOK, MT_DATA,
					   top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				if (resid < MINCLSIZE) {
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == NULL && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
				   (so->so_proto->pr_flags & PR_IMPLOPCL) &&
				   (resid == 0)) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag and nothing left to
				 * send then use PRU_SEND_EOF instead of PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control, td);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
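
/*
 * Editorial example (not part of the original file): for a PR_ATOMIC
 * (datagram) socket the EMSGSIZE check in sosend() fires immediately
 * when resid exceeds ssb_hiwat -- a datagram larger than the send
 * buffer's high-water mark could never fit even an empty buffer, so
 * failing fast beats blocking forever.
 */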

#ifdef INET
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	size_t resid;
	int error, pru_flags = 0;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		int hdrlen = max_hdr;

		/*
		 * We try to optimize out the additional mbuf
		 * allocations in M_PREPEND() on output path, e.g.
		 * - udp_output(), when it tries to prepend protocol
		 *   headers.
		 * - Link layer output function, when it tries to
		 *   prepend link layer header.
		 *
		 * This probably will not benefit any data that will
		 * be fragmented, so this optimization is only performed
		 * when the size of data and max size of protocol+link
		 * headers fit into one mbuf cluster.
		 */
		if (uio->uio_resid > MCLBYTES - hdrlen ||
		    !udp_sosend_prepend) {
			top = m_uiomove(uio);
			if (top == NULL)
				goto release;
		} else {
			int nsize;

			top = m_getl(uio->uio_resid + hdrlen, M_WAITOK,
				     MT_DATA, M_PKTHDR, &nsize);
			KASSERT(nsize >= uio->uio_resid + hdrlen,
			    ("sosendudp invalid nsize %d, "
			     "resid %zu, hdrlen %d",
			     nsize, uio->uio_resid, hdrlen));

			top->m_len = uio->uio_resid;
			top->m_pkthdr.len = uio->uio_resid;
			top->m_data += hdrlen;

			error = uiomove(mtod(top, caddr_t), top->m_len, uio);
			if (error)
				goto out;
		}
	}

	if (flags & MSG_DONTROUTE)
		pru_flags |= PRUS_DONTROUTE;

	if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
		so_pru_send_async(so, pru_flags, top, addr, NULL, td);
		error = 0;
	} else {
		error = so_pru_send(so, pru_flags, top, addr, NULL, td);
	}
	top = NULL;	/* sent or freed in lower layer */

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}
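
/*
 * Editorial example (not part of the original file): the prepend
 * optimization in sosendudp() reserves max_hdr bytes of leading space
 * inside a single cluster.  With MCLBYTES == 2048 and a max_hdr of,
 * say, 80 bytes, only datagrams up to 1968 bytes take that path;
 * anything larger goes through m_uiomove() and later pays for the
 * M_PREPEND() allocations in udp_output() and the link layer.
 */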

int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags,
	  struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int error, mlen;
	int allatonce;
	int pru_flags;

	if (uio) {
		KKASSERT(top == NULL);
		allatonce = 0;
		resid = uio->uio_resid;
	} else {
		allatonce = 1;
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 *	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on TCP
	 */
	if (flags & MSG_EOR) {
		error = EINVAL;
		goto out;
	}

	if (control) {
		/* TCP doesn't do control messages (rights, creds, etc) */
		if (control->m_len) {
			error = EINVAL;
			goto out;
		}
		m_freem(control);	/* empty control, just free it */
		control = NULL;
	}

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;

#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_state & SS_ISCONFIRMING) == 0)
			gotoerr(ENOTCONN);
		if (allatonce && resid > so->so_snd.ssb_hiwat)
			gotoerr(EMSGSIZE);

		space = ssb_space_prealloc(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid) && !allatonce &&
		    space < so->so_snd.ssb_lowat) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		do {
			int cnt = 0, async = 0;

			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				if (tcp_sosend_jcluster) {
					m = m_getlj((int)resid, M_WAITOK, MT_DATA,
						    top == NULL ? M_PKTHDR : 0, &mlen);
				} else {
					m = m_getl((int)resid, M_WAITOK, MT_DATA,
						   top == NULL ? M_PKTHDR : 0, &mlen);
				}
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0)
					break;
				++cnt;
			} while (space > 0 && cnt < tcp_sosend_agglim);

			if (tcp_sosend_async)
				async = 1;

			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
				async = 0;
			} else if ((flags & MSG_EOF) && resid == 0) {
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
				async = 1;
			} else {
				pru_flags = 0;
			}

			if (flags & MSG_SYNC)
				async = 0;

			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			for (m = top; m; m = m->m_next)
				ssb_preallocstream(&so->so_snd, m);
			if (!async) {
				error = so_pru_send(so, pru_flags, top,
				    NULL, NULL, td);
			} else {
				so_pru_send_async(so, pru_flags, top,
				    NULL, NULL, td);
				error = 0;
			}

			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
#endif
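
/*
 * Editorial note (not part of the original file): on the async path
 * above sosendtcp() hands the mbuf chain to the protocol thread via
 * so_pru_send_async() and reports success immediately; any eventual
 * transmit error is presumed to surface later through so_error rather
 * than through the sosendtcp() return value.  Passing MSG_SYNC, or
 * clearing the tcp_sosend_async tunable declared extern above, forces
 * the synchronous so_pru_send() path.
 */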

/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;
	boolean_t free_rights = FALSE;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), and
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	     ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error || so->so_rerror) {
			if (m)
				goto dontblock;
			if (so->so_error)
				error = so->so_error;
			else
				error = so->so_rerror;
			if ((flags & MSG_PEEK) == 0) {
				if (so->so_error)
					so->so_error = 0;
				else
					so->so_rerror = 0;
			}
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			const struct cmsghdr *cm = mtod(m, struct cmsghdr *);

			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS) {
					error = pr->pr_domain->dom_externalize
						    (m, flags);
				}
				*controlp = m;
				m = n;
			} else {
				if (cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS)
					free_rights = TRUE;
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		}
		if (controlp && *controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * flag OOB data.
	 */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA) {
			break;
		} else {
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
				("receive 3"));
		}
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				if (sio) {
					n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
					sbappend(sio, m);
					m = n;
				} else {
					m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
				}
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					n = m_copym(m, 0, len, M_WAITOK);
					if (n)
						sbappend(sio, n);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.ssb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep signalsockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		       resid > 0 && !sosendallatonce(so) &&
		       so->so_rcv.ssb_mb == NULL) {
			if (so->so_error || so->so_rerror ||
			    so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = ssb_wait(&so->so_rcv);
			if (error) {
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.ssb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;
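
	/*
	 * Editorial example (not part of the original file): for a
	 * datagram (PR_ATOMIC) socket, a recvfrom() with a buffer smaller
	 * than the pending datagram copies what fits, picks up MSG_TRUNC
	 * here, and the unread remainder of the record is dropped by the
	 * cleanup below.
	 */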

	/*
	 * Cleanup.  If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (free_chain) {
		if (free_rights && (pr->pr_flags & PR_RIGHTS) &&
		    pr->pr_domain->dom_dispose)
			pr->pr_domain->dom_dispose(free_chain);
		m_freem(free_chain);
	}
	return (error);
}

int
sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff;
	int didoob;
	size_t resid, orig_resid, restmp;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 *
	 * Lock a limited number of mbufs (not all, so sbcompress() still
	 * works well).  The token is used as an interlock for sbwait() so
	 * release it afterwards.
	 */
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	lwkt_gettoken(&so->so_rcv.ssb_token);
	m = so->so_rcv.ssb_mb;

	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), and
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	     ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		lwkt_reltoken(&so->so_rcv.ssb_token);
		if (error)
			goto done;
		goto restart;
	}

	/*
	 * Token still held
	 */
dontblock:
	n = m;
	restmp = 0;
	while (n && restmp < resid) {
		n->m_flags |= M_SOLOCKED;
		restmp += n->m_len;
		if (n->m_next == NULL)
			n = n->m_nextpkt;
		else
			n = n->m_next;
	}

	/*
	 * Release token for loop
	 */
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 *
	 * NOTE: Token is not held for loop
	 */
	moff = 0;
	offset = 0;
	didoob = 0;

	while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) {
		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			("receive 3"));

		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		offset += len;
		if (len == m->m_len - moff) {
			m = m->m_next;
			moff = 0;
		} else {
			moff += len;
		}

		/*
		 * Check oobmark
		 */
		if (so->so_oobmark && offset == so->so_oobmark) {
			didoob = 1;
			break;
		}
	}

	/*
	 * Synchronize sockbuf with data we read.
	 *
	 * NOTE: (m) is junk on entry (it could be left over from the
	 *	 previous loop).
	 */
	if ((flags & MSG_PEEK) == 0) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		m = so->so_rcv.ssb_mb;
		while (m && offset >= m->m_len) {
			if (so->so_oobmark) {
				so->so_oobmark -= m->m_len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset -= m->m_len;
			if (sio) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				sbappend(sio, m);
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb,
						 m, &free_chain);
			}
		}
		if (offset) {
			KKASSERT(m);
			if (sio) {
				n = m_copym(m, 0, offset, M_WAITOK);
				if (n)
					sbappend(sio, n);
			}
			m->m_data += offset;
			m->m_len -= offset;
			so->so_rcv.ssb_cc -= offset;
			if (so->so_oobmark) {
				so->so_oobmark -= offset;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset = 0;
		}
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * If the MSG_WAITALL flag is set (for non-atomic socket),
	 * we must not quit until resid == 0 or an error termination.
	 *
	 * If a signal/timeout occurs, return with a short count but without
	 * error.
	 *
	 * Keep signalsockbuf locked against other readers.
	 *
	 * XXX if MSG_PEEK we currently do quit.
	 */
	if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) &&
	    didoob == 0 && resid > 0 &&
	    !sosendallatonce(so)) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		error = 0;
		while ((m = so->so_rcv.ssb_mb) == NULL) {
			if (so->so_error || (so->so_state & SS_CANTRCVMORE)) {
				error = so->so_error;
				break;
			}
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (so->so_pcb)
				so_pru_rcvd_async(so);
			if (so->so_rcv.ssb_mb == NULL)
				error = ssb_wait(&so->so_rcv);
			if (error) {
				lwkt_reltoken(&so->so_rcv.ssb_token);
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
		}
		if (m && error == 0)
			goto dontblock;
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * Token not held here.
	 *
	 * Cleanup.  If an atomic read was requested drop any unread data XXX
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (so->so_pcb)
			so_pru_rcvd_async(so);
	}

	if (orig_resid == resid && orig_resid &&
	    (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	if (free_chain)
		m_freem(free_chain);
	return (error);
}
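
/*
 * Editorial note (not part of the original file): sorecvtcp() above
 * differs from soreceive() mainly in locking granularity.  It tags the
 * mbufs it intends to consume with M_SOLOCKED while holding the token,
 * drops the token across the uiomove() copy loop, then reacquires it to
 * unlink whatever was actually consumed -- keeping the protocol thread
 * from stalling behind a user-space page fault during the copy.
 */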

/*
 * Shut a socket down.  Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR) {
		/*ssb_lock(&so->so_rcv, M_WAITOK);*/
		sorflush(so);
		/*ssb_unlock(&so->so_rcv);*/
	}
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}

void
sorflush(struct socket *so)
{
	struct signalsockbuf *ssb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct signalsockbuf asb;

	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

	lwkt_gettoken(&ssb->ssb_token);
	socantrcvmore(so);
	asb = *ssb;

	/*
	 * Can't just blow up the ssb structure here
	 */
	bzero(&ssb->sb, sizeof(ssb->sb));
	ssb->ssb_timeo = 0;
	ssb->ssb_lowat = 0;
	ssb->ssb_hiwat = 0;
	ssb->ssb_mbmax = 0;
	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

	if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
	ssb_release(&asb, so);

	lwkt_reltoken(&ssb->ssb_token);
}

#ifdef INET
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				kfree(af->so_accept_filter_str, M_ACCF);
			}
			kfree(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			af->so_accept_filter_str = kmalloc(len, M_ACCF,
							   M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			kfree(af->so_accept_filter_str, M_ACCF);
			kfree(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		kfree(afap, M_TEMP);
	return (error);
}
#endif /* INET */
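
/*
 * Editorial example (not part of the original file): userland installs
 * an accept filter on a listening socket with setsockopt(2), e.g. the
 * "httpready" filter provided by the accf_http(9) module:
 *
 *	struct accept_filter_arg afa;
 *
 *	bzero(&afa, sizeof(afa));
 *	strcpy(afa.af_name, "httpready");
 *	setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof(afa));
 *
 * The sopt == NULL removal path in do_setopt_accept_filter() is driven
 * internally (see sodealloc()) rather than by this userland call.
 */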

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}
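
/*
 * A minimal sketch of the calling convention described above, the
 * way a protocol-level pr_ctloutput() routine might fetch a
 * fixed-size integer option (illustrative only, not compiled here):
 *
 *	int optval, error;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
 *	if (error)
 *		return (error);
 *
 * A user buffer shorter than the minimum fails with EINVAL; a longer
 * one is clamped, with sopt->sopt_valsize updated to what was taken.
 */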

int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif /* INET */
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
		case SO_RERROR:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
					&so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
						 SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv, sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}
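
/*
 * Userland view of the SO_SNDTIMEO/SO_RCVTIMEO conversion above
 * (illustrative only; assumes a connected socket 's').  The timeval
 * is converted to ticks, out-of-range values fail with EDOM, and a
 * nonzero timeout that would round down to 0 ticks is bumped to 1 so
 * it is not mistaken for "no timeout":
 *
 *	struct timeval tv;
 *
 *	tv.tv_sec = 5;
 *	tv.tv_usec = 0;
 *	setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * A recv() on 's' now fails with EWOULDBLOCK if no data arrives
 * within roughly five seconds.
 */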

/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available.
	 * Note that this interface is not idempotent; the entire
	 * answer must be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}

int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	long optval_l;
	struct linger l;
	struct timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		} else {
			return (ENOPROTOOPT);
		}
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			afap = kmalloc(sizeof(*afap), M_TEMP,
				       M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name,
				    so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg,
					    so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			kfree(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
		case SO_RERROR:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			if (so->so_error) {
				optval = so->so_error;
				so->so_error = 0;
			} else {
				optval = so->so_rerror;
				so->so_rerror = 0;
			}
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l,
					     sizeof(optval_l));
			break;

		case SO_CPUHINT:
			optval = -1; /* no hint */
			goto integer;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
			so_pr_ctloutput(so, sopt);
		return (error);
	}
}
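
/*
 * Illustrative use of the SO_ERROR case above: after a non-blocking
 * connect() completes, userland fetches the deferred error.  Note
 * the read-and-clear semantics implemented above; fetching SO_ERROR
 * resets so_error (or so_rerror) to 0 (sketch only, assumes a
 * socket 's'):
 *
 *	int err;
 *	socklen_t len = sizeof(err);
 *
 *	if (getsockopt(s, SOL_SOCKET, SO_ERROR, &err, &len) == 0 &&
 *	    err != 0)
 *		warnx("connect failed: %s", strerror(err));
 */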

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* soopt_getm() should have allocated enough space */
		panic("ip6_sooptmcopyin");
	}
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* the caller should have supplied a large enough buffer */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
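
/*
 * Hedged sketch of how the mbuf-based helpers above chain together
 * in an mbuf-oriented ctloutput path (historically the IPv6 option
 * code; illustrative only, with the protocol's own processing of
 * the chain elided between the copyin and copyout steps):
 *
 *	struct mbuf *m;
 *	int error;
 *
 *	error = soopt_getm(sopt, &m);	  // chain sized from sopt_valsize
 *	if (error)
 *		return (error);
 *	soopt_to_mbuf(sopt, m);		  // copy option data into chain
 *	error = soopt_from_mbuf(sopt, m); // copy reply back out and set
 *					  // sopt_valsize to bytes copied
 */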

void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	/*
	 * NOTE:
	 * There is no need to use NOTE_OOB as the KNOTE hint here:
	 * the soread filter depends on so_oobmark and the SS_RCVATMARK
	 * bit in so_state.  NOTE_OOB would only cause an unnecessary
	 * penalty in KNOTE if there was contention on knote processing.
	 */
	KNOTE(&so->so_rcv.ssb_kq.ki_note, 0);
}

int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error || so->so_rerror)
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (so->so_snd.ssb_flags & SSB_PREALLOC)
		kn->kn_data = ssb_space_prealloc(&so->so_snd);
	else
		kn->kn_data = ssb_space(&so->so_snd);

	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	int qlen = so->so_qlen;

	if (soavailconn > 0 && qlen > soavailconn)
		qlen = soavailconn;
	kn->kn_data = qlen;

	return (!TAILQ_EMPTY(&so->so_comp));
}
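
/*
 * Illustrative userland view of the kqueue filters above (a sketch,
 * not compiled as part of this file; assumes a socket 's').
 * EVFILT_READ honors NOTE_LOWAT via the registration's data field,
 * mirroring the kn_sdata comparison in filt_soread():
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * The event now fires only once at least 128 bytes are readable.  On
 * a listening socket the same registration is routed to
 * solisten_filtops instead, and kn_data reports the completed
 * connection queue length, clamped by soavailconn.
 */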