/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>
#include <vm/pmap.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>

#ifdef INET
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int tcp_sosend_jcluster;
extern int udp_sosend_async;
extern int udp_sosend_prepend;

static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);

static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };

MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept prediction");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronous pru_send");

int use_soconnect_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW,
    &use_soconnect_async, 0, "soconnect uses asynchronous pru_connect");

static int use_socreate_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, socreate_fast, CTLFLAG_RW,
    &use_socreate_fast, 0, "Fast socket creation");

static int soavailconn = 32;
SYSCTL_INT(_kern_ipc, OID_AUTO, soavailconn, CTLFLAG_RW,
    &soavailconn, 0, "Maximum available socket connection queue size");
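
/*
 * Illustrative note (not part of the original source): the knobs above
 * are runtime-tunable via sysctl(8) under kern.ipc, e.g.
 *
 *	sysctl kern.ipc.somaxconn=1024		# raise the listen backlog clamp
 *	sysctl kern.ipc.soclose_fast=0		# force the synchronous close path
 *
 * solisten() below clamps any user-supplied backlog to kern.ipc.somaxconn.
 */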
/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(int waitok, struct protosw *pr)
{
	globaldata_t gd = mycpu;
	struct socket *so;
	unsigned waitmask;

	waitmask = waitok ? M_WAITOK : M_NOWAIT;
	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
	if (so) {
		/* XXX race condition for reentrant kernel */
		so->so_proto = pr;
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.ssb_mlist);
		TAILQ_INIT(&so->so_snd.ssb_mlist);
		lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
		lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
		spin_init(&so->so_rcvd_spin, "soalloc");
		netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
			    MSGF_DROPABLE | MSGF_PRIORITY,
			    so->so_proto->pr_usrreqs->pru_rcvd);
		so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
		so->so_state = SS_NOFDREF;
		so->so_refs = 1;
		so->so_inum = gd->gd_anoninum++ * ncpus + gd->gd_cpuid + 2;
	}
	return so;
}

int
socreate(int dom, struct socket **aso, int type,
	 int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != NULL, prp);
	if (so == NULL)
		return (ENOBUFS);

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot.  This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing.  No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 *
	 * We normally default the socket to the protocol thread on cpu 0,
	 * if the protocol does not provide its own method to initialize the
	 * default port.
	 *
	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
	 * thread and all pr_*()/pru_*() calls are executed synchronously.
	 */
	if (prp->pr_flags & PR_SYNC_PORT)
		so->so_port = &netisr_sync_port;
	else if (prp->pr_initport != NULL)
		so->so_port = prp->pr_initport();
	else
		so->so_port = netisr_cpuport(0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	if (use_socreate_fast && prp->pr_usrreqs->pru_preattach)
		error = so_pru_attach_fast(so, proto, &ai);
	else
		error = so_pru_attach(so, proto, &ai);
	if (error) {
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return error;
	}

	/*
	 * NOTE: Returns referenced socket.
	 */
	*aso = so;
	return (0);
}
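
/*
 * Illustrative sketch (not part of the original source): per the comment
 * above, an in-kernel consumer of socreate() owns the returned reference
 * and must dispose of it with soclose() if it cannot hand the socket to
 * a descriptor.  Error handling and address setup are elided; "td" is
 * assumed to be the current thread of a process.
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP, td);
 *	if (error)
 *		return (error);
 *	... use so, e.g. sobind()/soconnect() ...
 *	soclose(so, FNONBLOCK);		// drops the socreate() reference
 */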
int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = so_pru_bind(so, nam, td);
	return (error);
}

static void
sodealloc(struct socket *so)
{
	KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);

#ifdef INVARIANTS
	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT(TAILQ_EMPTY(&so->so_comp), ("so_comp is not empty"));
		KASSERT(TAILQ_EMPTY(&so->so_incomp),
		    ("so_incomp is not empty"));
	}
#endif

	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif /* INET */
	crfree(so->so_cred);
	if (so->so_faddr != NULL)
		kfree(so->so_faddr, M_SONAME);
	kfree(so, M_SOCKET);
}

int
solisten(struct socket *so, int backlog, struct thread *td)
{
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
		return (EINVAL);

	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	return so_pru_listen(so, td);
}

static void
soqflush(struct socket *so)
{
	lwkt_getpooltoken(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			soclrstate(sp, SS_INCOMP);
			soabort_async(sp, TRUE);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			soclrstate(sp, SS_COMP);
			soabort_async(sp, TRUE);
		}
	}
	lwkt_relpooltoken(so);
}
/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 *	so_pcb -	The protocol stack still has a reference
 *	SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
	struct socket *head;

	/*
	 * This is a bit hackish at the moment.  We need to interlock
	 * any accept queue we are on before we potentially lose the
	 * last reference to avoid races against a re-reference from
	 * someone operating on the queue.
	 */
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	/*
	 * Arbitrate the last free.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
		if (head)
			lwkt_relpooltoken(head);
		return;
	}

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	if (head != NULL) {
		/*
		 * We're done, remove ourselves from the accept queue we are
		 * on, if we are on one.
		 */
		if (so->so_state & SS_INCOMP) {
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			lwkt_relpooltoken(head);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		so->so_head = NULL;
		lwkt_relpooltoken(head);
	} else {
		/* Flush accept queues, if we are accepting. */
		soqflush(so);
	}
	ssb_release(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error;

	funsetown(&so->so_sigio);
	sosetstate(so, SS_ISCLOSING);
	if (!use_soclose_fast ||
	    (so->so_proto->pr_flags & PR_SYNC_PORT) ||
	    ((so->so_state & SS_ISCONNECTED) &&
	     (so->so_options & SO_LINGER) &&
	     so->so_linger != 0)) {
		error = soclose_sync(so, fflag);
	} else {
		soclose_fast(so);
		error = 0;
	}
	return error;
}

void
sodiscard(struct socket *so)
{
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
}
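
/*
 * Illustrative note (not part of the original source): soclose() above
 * only takes the synchronous path when it may have to block, i.e. when
 * lingering is requested on a connected socket.  A userland sequence
 * such as
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 5 };
 *	setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(s);	// may sleep up to 5 seconds in soclose_sync()
 *
 * forces soclose_sync(), while a plain close() of a non-lingering socket
 * goes through soclose_fast() and completes asynchronously in the
 * protocol thread.
 */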
/*
 * Append the completed queue of head to head_inh (inheriting listen socket).
 */
void
soinherit(struct socket *head, struct socket *head_inh)
{
	boolean_t do_wakeup = FALSE;

	KASSERT(head->so_options & SO_ACCEPTCONN,
	    ("head does not accept connection"));
	KASSERT(head_inh->so_options & SO_ACCEPTCONN,
	    ("head_inh does not accept connection"));

	lwkt_getpooltoken(head);
	lwkt_getpooltoken(head_inh);

	if (head->so_qlen > 0)
		do_wakeup = TRUE;

	while (!TAILQ_EMPTY(&head->so_comp)) {
		struct ucred *old_cr;
		struct socket *sp;

		sp = TAILQ_FIRST(&head->so_comp);
		KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);

		/*
		 * Remove this socket from the current listen socket
		 * completed queue.
		 */
		TAILQ_REMOVE(&head->so_comp, sp, so_list);
		head->so_qlen--;

		/* Save the old ucred for later free. */
		old_cr = sp->so_cred;

		/*
		 * Install this socket to the inheriting listen socket
		 * completed queue.
		 */
		sp->so_cred = crhold(head_inh->so_cred); /* non-blocking */
		sp->so_head = head_inh;

		TAILQ_INSERT_TAIL(&head_inh->so_comp, sp, so_list);
		head_inh->so_qlen++;

		/*
		 * NOTE:
		 * crfree() may block and release the tokens temporarily.
		 * However, we are fine here, since the transition is done.
		 */
		crfree(old_cr);
	}

	lwkt_relpooltoken(head_inh);
	lwkt_relpooltoken(head);

	if (do_wakeup) {
		/*
		 * "New" connections have arrived
		 */
		sorwakeup(head_inh);
		wakeup(&head_inh->so_timeo);
	}
}

static int
soclose_sync(struct socket *so, int fflag)
{
	int error = 0;

	if ((so->so_proto->pr_flags & PR_SYNC_PORT) == 0)
		so_pru_sync(so);	/* unpend async prus */

	if (so->so_pcb == NULL)
		goto discard;

	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
				    "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error2 == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return error;
		}
		if (error == 0)
			error = error2;
	}
discard:
	sodiscard(so);
	sofree(so);	/* dispose of ref */

	return (error);
}

static void
soclose_fast_handler(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;

	if (so->so_pcb == NULL)
		goto discard;

	if ((so->so_state & SS_ISCONNECTED) &&
	    (so->so_state & SS_ISDISCONNECTING) == 0)
		so_pru_disconnect_direct(so);

	if (so->so_pcb) {
		int error;

		error = so_pru_detach_direct(so);
		if (error == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return;
		}
	}
discard:
	sodiscard(so);
	sofree(so);
}

static void
soclose_fast(struct socket *so)
{
	struct netmsg_base *base = &so->so_clomsg;

	netmsg_init(base, so, &netisr_apanic_rport, 0,
	    soclose_fast_handler);
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &base->lmsg);
	else
		lwkt_sendmsg(so->so_port, &base->lmsg);
}

/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort_async(struct socket *so, boolean_t clr_head)
{
	/*
	 * Keep a reference before clearing the so_head
	 * to avoid racing socket close in netisr.
	 */
	soreference(so);
	if (clr_head)
		so->so_head = NULL;
	so_pru_abort_async(so);
}

void
soabort_direct(struct socket *so)
{
	soreference(so);
	so_pru_abort_direct(so);
}
/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
void
soaccept_generic(struct socket *so)
{
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	soaccept_generic(so);
	error = so_pru_accept(so, nam);
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td,
    boolean_t sync)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If the protocol is connection-based, we can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows the user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	     (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from a previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		if (!sync && so->so_proto->pr_usrreqs->pru_preconnect)
			error = so_pru_connect_async(so, nam, td);
		else
			error = so_pru_connect(so, nam, td);
	}
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	error = so_pru_connect2(so1, so2);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Send on a socket.
 * If the send must go all at once and the message is larger than
 * the send buffering, then hard error.
 * Lock against other senders.
 * If it must go all at once and there is not enough room now, then
 * inform the user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
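
/*
 * Illustrative sketch (not part of the original source): as the comment
 * above warns, EINTR/ERESTART may indicate a partial send, so a caller
 * must consult the uio to learn how much actually moved.  Assuming a
 * previously built uio "auio" with auio.uio_resid == len:
 *
 *	error = sosend(so, NULL, &auio, NULL, NULL, 0, td);
 *	sent = len - auio.uio_resid;
 *	if ((error == EINTR || error == ERESTART) && sent > 0)
 *		error = 0;	// report the short count instead
 */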
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags,
    struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 * can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == NULL) {
				gotoerr(so->so_proto->pr_flags &
				    PR_CONNREQUIRED ? ENOTCONN : EDESTADDRREQ);
			}
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				m = m_getl((int)resid, M_WAITOK, MT_DATA,
				    top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				if (resid < MINCLSIZE) {
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == NULL && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len,
				    uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			    (resid == 0)) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag and nothing is left
				 * to send, then use PRU_SEND_EOF instead of
				 * PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control,
			    td);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}

#ifdef INET
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	size_t resid;
	int error, pru_flags = 0;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		int hdrlen = max_hdr;

		/*
		 * We try to optimize out the additional mbuf
		 * allocations in M_PREPEND() on the output path, e.g.
		 * - udp_output(), when it tries to prepend protocol
		 *   headers.
		 * - Link layer output function, when it tries to
		 *   prepend link layer header.
		 *
		 * This probably will not benefit any data that will
		 * be fragmented, so this optimization is only performed
		 * when the size of data and max size of protocol+link
		 * headers fit into one mbuf cluster.
		 */
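		/*
		 * Illustrative note (not part of the original source):
		 * with a typical 2KB cluster (MCLBYTES == 2048) and, say,
		 * a max_hdr of around 100 bytes on a given configuration,
		 * only datagrams up to roughly MCLBYTES - max_hdr bytes
		 * take the prepend-optimized path below; anything larger
		 * falls back to m_uiomove().
		 */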
		if (uio->uio_resid > MCLBYTES - hdrlen ||
		    !udp_sosend_prepend) {
			top = m_uiomove(uio);
			if (top == NULL)
				goto release;
		} else {
			int nsize;

			top = m_getl(uio->uio_resid + hdrlen, M_WAITOK,
			    MT_DATA, M_PKTHDR, &nsize);
			KASSERT(nsize >= uio->uio_resid + hdrlen,
			    ("sosendudp invalid nsize %d, "
			     "resid %zu, hdrlen %d",
			     nsize, uio->uio_resid, hdrlen));

			top->m_len = uio->uio_resid;
			top->m_pkthdr.len = uio->uio_resid;
			top->m_data += hdrlen;

			error = uiomove(mtod(top, caddr_t), top->m_len, uio);
			if (error)
				goto out;
		}
	}

	if (flags & MSG_DONTROUTE)
		pru_flags |= PRUS_DONTROUTE;

	if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
		so_pru_send_async(so, pru_flags, top, addr, NULL, td);
		error = 0;
	} else {
		error = so_pru_send(so, pru_flags, top, addr, NULL, td);
	}
	top = NULL;	/* sent or freed in lower layer */

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}

int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags,
    struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int error, mlen;
	int allatonce;
	int pru_flags;

	if (uio) {
		KKASSERT(top == NULL);
		allatonce = 0;
		resid = uio->uio_resid;
	} else {
		allatonce = 1;
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 * can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on TCP.
	 */
	if (flags & MSG_EOR) {
		error = EINVAL;
		goto out;
	}

	if (control) {
		/* TCP doesn't do control messages (rights, creds, etc) */
		if (control->m_len) {
			error = EINVAL;
			goto out;
		}
		m_freem(control);	/* empty control, just free it */
		control = NULL;
	}

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;

#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_state & SS_ISCONFIRMING) == 0)
			gotoerr(ENOTCONN);
		if (allatonce && resid > so->so_snd.ssb_hiwat)
			gotoerr(EMSGSIZE);

		space = ssb_space_prealloc(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid) && !allatonce &&
		    space < so->so_snd.ssb_lowat) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		do {
			int cnt = 0, async = 0;

			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				if (tcp_sosend_jcluster) {
					m = m_getlj((int)resid, M_WAITOK,
					    MT_DATA,
					    top == NULL ? M_PKTHDR : 0, &mlen);
				} else {
					m = m_getl((int)resid, M_WAITOK,
					    MT_DATA,
					    top == NULL ? M_PKTHDR : 0, &mlen);
				}
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len,
				    uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0)
					break;
				++cnt;
			} while (space > 0 && cnt < tcp_sosend_agglim);

			if (tcp_sosend_async)
				async = 1;

			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
				async = 0;
			} else if ((flags & MSG_EOF) && resid == 0) {
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
				async = 1;
			} else {
				pru_flags = 0;
			}

			if (flags & MSG_SYNC)
				async = 0;

			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			for (m = top; m; m = m->m_next)
				ssb_preallocstream(&so->so_snd, m);
			if (!async) {
				error = so_pru_send(so, pru_flags, top,
				    NULL, NULL, td);
			} else {
				so_pru_send_async(so, pru_flags, top,
				    NULL, NULL, td);
				error = 0;
			}

			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
#endif

/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
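
/*
 * Illustrative sketch (not part of the original source) of one such
 * record for a PR_ADDR protocol, as described by the comment above:
 *
 *	m (MT_SONAME) -> m (MT_CONTROL)  -> m (MT_DATA) -> ... -> NULL
 *	  sender addr	   e.g. SCM_RIGHTS    payload
 *
 * Records are chained through m_nextpkt; the mbufs within a record are
 * chained through m_next.
 */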
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;
	boolean_t free_rights = FALSE;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
				    (int)szmin(resid, m->m_len),
				    uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat),
	 * and 3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
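	/*
	 * Illustrative note (not part of the original source): the
	 * short-count risk above is visible from userland.  With, say, a
	 * 64KB receive buffer, recv(s, buf, 256*1024, MSG_WAITALL) cannot
	 * be satisfied from a single buffer fill, so the loop below drains
	 * and refills in sections; a signal arriving between sections
	 * returns whatever was copied so far rather than the full request.
	 */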
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	     ((flags & MSG_WAITALL) &&
	      resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			const struct cmsghdr *cm = mtod(m, struct cmsghdr *);

			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS) {
					error = pr->pr_domain->dom_externalize
					    (m, flags);
				}
				*controlp = m;
				m = n;
			} else {
				if (cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS)
					free_rights = TRUE;
				m = sbunlinkmbuf(&so->so_rcv.sb, m,
				    &free_chain);
			}
		}
		if (controlp && *controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * flag OOB data.
	 */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA) {
			break;
		} else {
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
		}
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				if (sio) {
					n = sbunlinkmbuf(&so->so_rcv.sb, m,
					    NULL);
					sbappend(sio, m);
					m = n;
				} else {
					m = sbunlinkmbuf(&so->so_rcv.sb, m,
					    &free_chain);
				}
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					n = m_copym(m, 0, len, M_WAITOK);
					if (n)
						sbappend(sio, n);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.ssb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep signalsockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		    resid > 0 && !sosendallatonce(so) &&
		    so->so_rcv.ssb_mb == NULL) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = ssb_wait(&so->so_rcv);
			if (error) {
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.ssb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;

	/*
	 * Cleanup.  If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (free_chain) {
		if (free_rights && (pr->pr_flags & PR_RIGHTS) &&
		    pr->pr_domain->dom_dispose)
			pr->pr_domain->dom_dispose(free_chain);
		m_freem(free_chain);
	}
	return (error);
}

int
sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff;
	int didoob;
	size_t resid, orig_resid, restmp;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
				    (int)szmin(resid, m->m_len),
				    uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 *
	 * Lock a limited number of mbufs (not all, so sbcompress() still
	 * works well).  The token is used as an interlock for sbwait() so
	 * release it afterwards.
	 */
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	lwkt_gettoken(&so->so_rcv.ssb_token);
	m = so->so_rcv.ssb_mb;

	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat),
	 * and 3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	     ((flags & MSG_WAITALL) &&
	      resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		lwkt_reltoken(&so->so_rcv.ssb_token);
		if (error)
			goto done;
		goto restart;
	}

	/*
	 * Token still held
	 */
dontblock:
	n = m;
	restmp = 0;
	while (n && restmp < resid) {
		n->m_flags |= M_SOLOCKED;
		restmp += n->m_len;
		if (n->m_next == NULL)
			n = n->m_nextpkt;
		else
			n = n->m_next;
	}

	/*
	 * Release token for loop
	 */
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 *
	 * NOTE: Token is not held for loop
	 */
	moff = 0;
	offset = 0;
	didoob = 0;

	while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) {
		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
		    ("receive 3"));

		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		offset += len;
		if (len == m->m_len - moff) {
			m = m->m_next;
			moff = 0;
		} else {
			moff += len;
		}

		/*
		 * Check oobmark
		 */
		if (so->so_oobmark && offset == so->so_oobmark) {
			didoob = 1;
			break;
		}
	}

	/*
	 * Synchronize sockbuf with data we read.
	 *
	 * NOTE: (m) is junk on entry (it could be left over from the
	 *	 previous loop).
	 */
	if ((flags & MSG_PEEK) == 0) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		m = so->so_rcv.ssb_mb;
		while (m && offset >= m->m_len) {
			if (so->so_oobmark) {
				so->so_oobmark -= m->m_len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset -= m->m_len;
			if (sio) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				sbappend(sio, m);
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb,
				    m, &free_chain);
			}
		}
		if (offset) {
			KKASSERT(m);
			if (sio) {
				n = m_copym(m, 0, offset, M_WAITOK);
				if (n)
					sbappend(sio, n);
			}
			m->m_data += offset;
			m->m_len -= offset;
			so->so_rcv.ssb_cc -= offset;
			if (so->so_oobmark) {
				so->so_oobmark -= offset;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset = 0;
		}
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * If the MSG_WAITALL flag is set (for non-atomic socket),
	 * we must not quit until resid == 0 or an error termination.
	 *
	 * If a signal/timeout occurs, return with a short count but without
	 * error.
	 *
	 * Keep signalsockbuf locked against other readers.
	 *
	 * XXX if MSG_PEEK we currently do quit.
	 */
	if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) &&
	    didoob == 0 && resid > 0 &&
	    !sosendallatonce(so)) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		error = 0;
		while ((m = so->so_rcv.ssb_mb) == NULL) {
			if (so->so_error || (so->so_state & SS_CANTRCVMORE)) {
				error = so->so_error;
				break;
			}
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (so->so_pcb)
				so_pru_rcvd_async(so);
			if (so->so_rcv.ssb_mb == NULL)
				error = ssb_wait(&so->so_rcv);
			if (error) {
				lwkt_reltoken(&so->so_rcv.ssb_token);
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
		}
		if (m && error == 0)
			goto dontblock;
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * Token not held here.
	 *
	 * Cleanup.  If an atomic read was requested drop any unread data XXX
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (so->so_pcb)
			so_pru_rcvd_async(so);
	}

	if (orig_resid == resid && orig_resid &&
	    (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	if (free_chain)
		m_freem(free_chain);
	return (error);
}

/*
 * Shut a socket down.  Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
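
/*
 * Illustrative note (not part of the original source): mapping of the
 * shutdown(2) "how" argument onto the two actions below:
 *
 *	shutdown(s, SHUT_RD);	// sorflush() only: discard receive data
 *	shutdown(s, SHUT_WR);	// so_pru_shutdown() only (FIN for TCP)
 *	shutdown(s, SHUT_RDWR);	// both of the above
 *
 * Because no frontend lock is taken, a thread sleeping in soreceive()
 * is woken by the socantrcvmore() performed inside sorflush().
 */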
int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR) {
		/*ssb_lock(&so->so_rcv, M_WAITOK);*/
		sorflush(so);
		/*ssb_unlock(&so->so_rcv);*/
	}
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}

void
sorflush(struct socket *so)
{
	struct signalsockbuf *ssb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct signalsockbuf asb;

	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

	lwkt_gettoken(&ssb->ssb_token);
	socantrcvmore(so);
	asb = *ssb;

	/*
	 * Can't just blow up the ssb structure here
	 */
	bzero(&ssb->sb, sizeof(ssb->sb));
	ssb->ssb_timeo = 0;
	ssb->ssb_lowat = 0;
	ssb->ssb_hiwat = 0;
	ssb->ssb_mbmax = 0;
	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

	if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
	ssb_release(&asb, so);

	lwkt_reltoken(&ssb->ssb_token);
}

#ifdef INET
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				kfree(af->so_accept_filter_str, M_ACCF);
			}
			kfree(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			af->so_accept_filter_str = kmalloc(len, M_ACCF,
			    M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			kfree(af->so_accept_filter_str, M_ACCF);
			kfree(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		kfree(afap, M_TEMP);
	return (error);
}
#endif /* INET */
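
/*
 * Illustrative sketch (not part of the original source): userland installs
 * an accept filter on a listening socket with SO_ACCEPTFILTER, naming a
 * loaded filter module such as "dataready":
 *
 *	struct accept_filter_arg afa;
 *
 *	bzero(&afa, sizeof(afa));
 *	strcpy(afa.af_name, "dataready");
 *	setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof(afa));
 *
 * The filter is removed automatically when the socket is torn down; see
 * the do_setopt_accept_filter(so, NULL) call in sodealloc() above.
 */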

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}

int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif /* INET */
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
					&so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
						 SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}
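
/*
 * Illustrative userland sketch (not part of the kernel build) of the
 * SO_RCVTIMEO handling in sosetopt() above: a 2.5 second timeout is
 * converted to ticks as tv_sec * hz + tv_usec / ustick, timeouts that
 * do not fit in an int worth of ticks are rejected with EDOM, and a
 * nonzero timeout shorter than one tick is rounded up to one tick.
 *
 *	#include <sys/socket.h>
 *	#include <sys/time.h>
 *
 *	int
 *	set_rcv_timeout(int s)
 *	{
 *		struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *		return (setsockopt(s, SOL_SOCKET, SO_RCVTIMEO,
 *		    &tv, sizeof(tv)));
 *	}
 */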

/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for them.
	 * Note that this interface is not idempotent; the entire answer
	 * must be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}
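
/*
 * Illustrative userland sketch (not part of the kernel build) of the
 * soopt_from_kbuf() semantics above: the returned value may be
 * truncated to the caller's buffer, and *optlen reports how many bytes
 * were actually copied, not how many were available.
 *
 *	#include <sys/socket.h>
 *
 *	int
 *	get_rcvbuf(int s)
 *	{
 *		int val = 0;
 *		socklen_t len = sizeof(val);
 *
 *		if (getsockopt(s, SOL_SOCKET, SO_RCVBUF, &val, &len) < 0)
 *			return (-1);
 *		// len now holds the number of bytes the kernel copied
 *		return (val);
 *	}
 */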

int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	long optval_l;
	struct linger l;
	struct timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		} else {
			return (ENOPROTOOPT);
		}
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			afap = kmalloc(sizeof(*afap), M_TEMP,
				       M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name,
				    so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg,
					    so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			kfree(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l,
					     sizeof(optval_l));
			break;

		case SO_CPUHINT:
			optval = -1; /* no hint */
			goto integer;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
			so_pr_ctloutput(so, sopt);
		return (error);
	}
}

/* XXX: prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}
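
/*
 * Hedged sketch of how a protocol's pr_ctloutput() might use the two
 * compatibility helpers above to stage option data in an mbuf chain.
 * This mirrors the historical ip6 option path but is illustrative
 * only, not a verbatim caller from this tree; frobnicate_options() is
 * a hypothetical consumer that takes over the chain.
 *
 *	struct mbuf *m;
 *	int error;
 *
 *	error = soopt_getm(sopt, &m);	// chain sized to sopt_valsize
 *	if (error == 0) {
 *		error = soopt_mcopyin(sopt, m);	// fill chain from sopt
 *		if (error == 0)
 *			error = frobnicate_options(m);	// hypothetical
 *	}
 */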

/* XXX: copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	/* chain should have been allocated large enough, see soopt_getm() */
	if (m != NULL)
		panic("ip6_sooptmcopyin");
}

/* XXX: copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* a large enough soopt buffer should be given by userland */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}

void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);

	/*
	 * NOTE:
	 * There is no need to use NOTE_OOB as the KNOTE hint here:
	 * the soread filter depends on so_oobmark and the SS_RCVATMARK
	 * bit in so_state.  NOTE_OOB would only incur an unnecessary
	 * penalty in KNOTE if there were contention on knote processing.
	 */
	KNOTE(&so->so_rcv.ssb_kq.ki_note, 0);
}
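
/*
 * Illustrative userland sketch (not part of the kernel build) of what
 * sohasoutofband() above services: a process that has claimed socket
 * signal ownership receives SIGURG when out-of-band data arrives.
 *
 *	#include <fcntl.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void
 *	on_urg(int sig)
 *	{
 *		// recv(s, &c, 1, MSG_OOB) would fetch the OOB byte here
 *	}
 *
 *	void
 *	arm_sigurg(int s)
 *	{
 *		signal(SIGURG, on_urg);
 *		fcntl(s, F_SETOWN, getpid());	// direct SIGURG to us
 *	}
 */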

int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (so->so_snd.ssb_flags & SSB_PREALLOC)
		kn->kn_data = ssb_space_prealloc(&so->so_snd);
	else
		kn->kn_data = ssb_space(&so->so_snd);

	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	int qlen = so->so_qlen;

	if (soavailconn > 0 && qlen > soavailconn)
		qlen = soavailconn;
	kn->kn_data = qlen;

	return (!TAILQ_EMPTY(&so->so_comp));
}
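
/*
 * Illustrative userland sketch (not part of the kernel build) of the
 * filters above: an EVFILT_READ knote with NOTE_LOWAT in fflags fires
 * once at least `data` bytes are buffered (filt_soread); on a listening
 * socket the same filter instead reports the completed-connection
 * backlog, capped by the kern.ipc.soavailconn sysctl (filt_solisten).
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *
 *	int
 *	wait_for_64_bytes(int kq, int s)
 *	{
 *		struct kevent kev;
 *
 *		EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 64, NULL);
 *		if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
 *			return (-1);
 *		return (kevent(kq, NULL, 0, &kev, 1, NULL));
 *	}
 */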