/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>
#include <vm/pmap.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>

#ifdef INET
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int tcp_sosend_jcluster;
extern int udp_sosend_async;
extern int udp_sosend_prepend;

static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);

static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };

MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept predication");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronized pru_send");

int use_soconnect_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW,
    &use_soconnect_async, 0, "soconnect uses asynchronized pru_connect");

static int use_socreate_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, socreate_fast, CTLFLAG_RW,
    &use_socreate_fast, 0, "Fast socket creation");
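
#if 0
/*
 * Editor's illustrative sketch, not part of the original file: the
 * SYSCTL_INT() knobs above are runtime-tunable.  A userland program
 * (this would not be compiled into the kernel) can read or set, e.g.,
 * kern.ipc.somaxconn via sysctlbyname(3).  Only the sysctl name comes
 * from this file; the program itself is an assumption.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int maxconn;
	int newval = 256;		/* example value, an assumption */
	size_t len = sizeof(maxconn);

	/* Read the current listen backlog limit. */
	if (sysctlbyname("kern.ipc.somaxconn", &maxconn, &len, NULL, 0) == 0)
		printf("kern.ipc.somaxconn = %d\n", maxconn);
	/* Raise it (requires sufficient privilege). */
	sysctlbyname("kern.ipc.somaxconn", NULL, NULL, &newval, sizeof(newval));
	return (0);
}
#endif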

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(int waitok, struct protosw *pr)
{
	struct socket *so;
	unsigned waitmask;

	waitmask = waitok ? M_WAITOK : M_NOWAIT;
	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
	if (so) {
		/* XXX race condition for reentrant kernel */
		so->so_proto = pr;
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.ssb_mlist);
		TAILQ_INIT(&so->so_snd.ssb_mlist);
		lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
		lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
		spin_init(&so->so_rcvd_spin, "soalloc");
		netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
			    MSGF_DROPABLE | MSGF_PRIORITY,
			    so->so_proto->pr_usrreqs->pru_rcvd);
		so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
		so->so_state = SS_NOFDREF;
		so->so_refs = 1;
	}
	return so;
}

int
socreate(int dom, struct socket **aso, int type,
	 int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != NULL, prp);
	if (so == NULL)
		return (ENOBUFS);

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot.  This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing.  No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 *
	 * We normally default the socket to the protocol thread on cpu 0,
	 * if protocol does not provide its own method to initialize the
	 * default port.
	 *
	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
	 * thread and all pr_*()/pru_*() calls are executed synchronously.
	 */
	if (prp->pr_flags & PR_SYNC_PORT)
		so->so_port = &netisr_sync_port;
	else if (prp->pr_initport != NULL)
		so->so_port = prp->pr_initport();
	else
		so->so_port = netisr_cpuport(0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	if (use_socreate_fast && prp->pr_usrreqs->pru_preattach)
		error = so_pru_attach_fast(so, proto, &ai);
	else
		error = so_pru_attach(so, proto, &ai);
	if (error) {
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return error;
	}

	/*
	 * NOTE: Returns referenced socket.
	 */
	*aso = so;
	return (0);
}

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = so_pru_bind(so, nam, td);
	return (error);
}

static void
sodealloc(struct socket *so)
{
	KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);

#ifdef INVARIANTS
	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT(TAILQ_EMPTY(&so->so_comp), ("so_comp is not empty"));
		KASSERT(TAILQ_EMPTY(&so->so_incomp),
		    ("so_incomp is not empty"));
	}
#endif

	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif /* INET */
	crfree(so->so_cred);
	if (so->so_faddr != NULL)
		kfree(so->so_faddr, M_SONAME);
	kfree(so, M_SOCKET);
}

int
solisten(struct socket *so, int backlog, struct thread *td)
{
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
		return (EINVAL);

	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	return so_pru_listen(so, td);
}
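
#if 0
/*
 * Editor's illustrative sketch, not part of the original file: how an
 * in-kernel consumer would combine socreate(), sobind() and solisten()
 * to set up a listening TCP socket.  The function name, port number and
 * sockaddr_in setup (from <netinet/in.h>) are assumptions; only the
 * socket routines themselves are defined in this file.
 */
static int
example_kernel_listen(struct thread *td, struct socket **sop)
{
	struct socket *so;
	struct sockaddr_in sin;
	int error;

	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP, td);
	if (error)
		return (error);

	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(8080);		/* example port */

	error = sobind(so, (struct sockaddr *)&sin, td);
	if (error == 0)
		error = solisten(so, SOMAXCONN, td);
	if (error) {
		soclose(so, FNONBLOCK);
		return (error);
	}
	*sop = so;
	return (0);
}
#endif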

static void
soqflush(struct socket *so)
{
	lwkt_getpooltoken(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			soclrstate(sp, SS_INCOMP);
			soabort_async(sp, TRUE);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			soclrstate(sp, SS_COMP);
			soabort_async(sp, TRUE);
		}
	}
	lwkt_relpooltoken(so);
}

/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 *	so_pcb -	The protocol stack still has a reference
 *	SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
	struct socket *head;

	/*
	 * This is a bit hackish at the moment.  We need to interlock
	 * any accept queue we are on before we potentially lose the
	 * last reference to avoid races against a re-reference from
	 * someone operating on the queue.
	 */
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	/*
	 * Arbitrage the last free.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
		if (head)
			lwkt_relpooltoken(head);
		return;
	}

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	if (head != NULL) {
		/*
		 * We're done, remove ourselves from the accept queue we are
		 * on, if we are on one.
		 */
		if (so->so_state & SS_INCOMP) {
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			lwkt_relpooltoken(head);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		so->so_head = NULL;
		lwkt_relpooltoken(head);
	} else {
		/* Flush accept queues, if we are accepting. */
		soqflush(so);
	}
	ssb_release(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error;

	funsetown(&so->so_sigio);
	sosetstate(so, SS_ISCLOSING);
	if (!use_soclose_fast ||
	    (so->so_proto->pr_flags & PR_SYNC_PORT) ||
	    ((so->so_state & SS_ISCONNECTED) &&
	     (so->so_options & SO_LINGER))) {
		error = soclose_sync(so, fflag);
	} else {
		soclose_fast(so);
		error = 0;
	}
	return error;
}

void
sodiscard(struct socket *so)
{
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
}

/*
 * Append the completed queue of head to head_inh (inheriting listen socket).
 */
void
soinherit(struct socket *head, struct socket *head_inh)
{
	boolean_t do_wakeup = FALSE;

	KASSERT(head->so_options & SO_ACCEPTCONN,
	    ("head does not accept connection"));
	KASSERT(head_inh->so_options & SO_ACCEPTCONN,
	    ("head_inh does not accept connection"));

	lwkt_getpooltoken(head);
	lwkt_getpooltoken(head_inh);

	if (head->so_qlen > 0)
		do_wakeup = TRUE;

	while (!TAILQ_EMPTY(&head->so_comp)) {
		struct ucred *old_cr;
		struct socket *sp;

		sp = TAILQ_FIRST(&head->so_comp);
		KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);

		/*
		 * Remove this socket from the current listen socket
		 * completed queue.
		 */
		TAILQ_REMOVE(&head->so_comp, sp, so_list);
		head->so_qlen--;

		/* Save the old ucred for later free. */
		old_cr = sp->so_cred;

		/*
		 * Install this socket to the inheriting listen socket
		 * completed queue.
		 */
		sp->so_cred = crhold(head_inh->so_cred); /* non-blocking */
		sp->so_head = head_inh;

		TAILQ_INSERT_TAIL(&head_inh->so_comp, sp, so_list);
		head_inh->so_qlen++;

		/*
		 * NOTE:
		 * crfree() may block and release the tokens temporarily.
		 * However, we are fine here, since the transition is done.
		 */
		crfree(old_cr);
	}

	lwkt_relpooltoken(head_inh);
	lwkt_relpooltoken(head);

	if (do_wakeup) {
		/*
		 * "New" connections have arrived
		 */
		sorwakeup(head_inh);
		wakeup(&head_inh->so_timeo);
	}
}

static int
soclose_sync(struct socket *so, int fflag)
{
	int error = 0;

	if ((so->so_proto->pr_flags & PR_SYNC_PORT) == 0)
		so_pru_sync(so);	/* unpend async prus */

	if (so->so_pcb == NULL)
		goto discard;

	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
				    "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error2 == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return error;
		}
		if (error == 0)
			error = error2;
	}
discard:
	sodiscard(so);
	sofree(so);		/* dispose of ref */

	return (error);
}

static void
soclose_fast_handler(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;

	if (so->so_pcb == NULL)
		goto discard;

	if ((so->so_state & SS_ISCONNECTED) &&
	    (so->so_state & SS_ISDISCONNECTING) == 0)
		so_pru_disconnect_direct(so);

	if (so->so_pcb) {
		int error;

		error = so_pru_detach_direct(so);
		if (error == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return;
		}
	}
discard:
	sodiscard(so);
	sofree(so);
}

static void
soclose_fast(struct socket *so)
{
	struct netmsg_base *base = &so->so_clomsg;

	netmsg_init(base, so, &netisr_apanic_rport, 0,
	    soclose_fast_handler);
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &base->lmsg);
	else
		lwkt_sendmsg(so->so_port, &base->lmsg);
}

/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort_async(struct socket *so, boolean_t clr_head)
{
	/*
	 * Keep a reference before clearing the so_head
	 * to avoid racing socket close in netisr.
	 */
	soreference(so);
	if (clr_head)
		so->so_head = NULL;
	so_pru_abort_async(so);
}

void
soabort_direct(struct socket *so)
{
	soreference(so);
	so_pru_abort_direct(so);
}

/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
void
soaccept_generic(struct socket *so)
{
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	soaccept_generic(so);
	error = so_pru_accept(so, nam);
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td,
	  boolean_t sync)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	     (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		if (!sync && so->so_proto->pr_usrreqs->pru_preconnect)
			error = so_pru_connect_async(so, nam, td);
		else
			error = so_pru_connect(so, nam, td);
	}
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	error = so_pru_connect2(so1, so2);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	return (error);
}
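
#if 0
/*
 * Editor's illustrative sketch, not part of the original file: the
 * classic pattern a caller uses around soconnect() -- kick off the
 * (possibly asynchronous) connect, then sleep on so_timeo until the
 * handshake resolves.  The function name and wmesg are assumptions;
 * the state flags and soconnect() signature come from this file.
 */
static int
example_soconnect_wait(struct socket *so, struct sockaddr *nam,
    struct thread *td)
{
	int error;

	error = soconnect(so, nam, td, FALSE);	/* async when supported */
	if (error)
		return (error);
	while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0)
		error = tsleep(&so->so_timeo, PCATCH, "conwt", 0);
	if (error == 0) {
		error = so->so_error;
		so->so_error = 0;
	}
	return (error);
}
#endif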

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
       struct mbuf *top, struct mbuf *control, int flags,
       struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 * can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' is allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == NULL)
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
					ENOTCONN : EDESTADDRREQ);
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				m = m_getl((int)resid, M_WAITOK, MT_DATA,
					   top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				if (resid < MINCLSIZE) {
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == NULL && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
				   (so->so_proto->pr_flags & PR_IMPLOPCL) &&
				   (resid == 0)) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag and nothing left to
				 * send then use PRU_SEND_EOF instead of PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control, td);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
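
#if 0
/*
 * Editor's illustrative sketch, not part of the original file: sending
 * a kernel buffer through sosend() by wrapping it in an iovec/uio, the
 * same shape the syscall layer hands down.  The function name is an
 * assumption; the sosend() signature is the one defined above.
 */
static int
example_sosend_buf(struct socket *so, void *buf, size_t len,
    struct thread *td)
{
	struct uio auio;
	struct iovec aiov;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;		/* kernel-space buffer */
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;

	/* addr/control NULL: connected socket, no ancillary data. */
	return (sosend(so, NULL, &auio, NULL, NULL, 0, td));
}
#endif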

#ifdef INET
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	size_t resid;
	int error, pru_flags = 0;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		int hdrlen = max_hdr;

		/*
		 * We try to optimize out the additional mbuf
		 * allocations in M_PREPEND() on output path, e.g.
		 * - udp_output(), when it tries to prepend protocol
		 *   headers.
		 * - Link layer output function, when it tries to
		 *   prepend link layer header.
		 *
		 * This probably will not benefit any data that will
		 * be fragmented, so this optimization is only performed
		 * when the size of data and max size of protocol+link
		 * headers fit into one mbuf cluster.
		 */
		if (uio->uio_resid > MCLBYTES - hdrlen ||
		    !udp_sosend_prepend) {
			top = m_uiomove(uio);
			if (top == NULL)
				goto release;
		} else {
			int nsize;

			top = m_getl(uio->uio_resid + hdrlen, M_WAITOK,
			    MT_DATA, M_PKTHDR, &nsize);
			KASSERT(nsize >= uio->uio_resid + hdrlen,
			    ("sosendudp invalid nsize %d, "
			     "resid %zu, hdrlen %d",
			     nsize, uio->uio_resid, hdrlen));

			top->m_len = uio->uio_resid;
			top->m_pkthdr.len = uio->uio_resid;
			top->m_data += hdrlen;

			error = uiomove(mtod(top, caddr_t), top->m_len, uio);
			if (error)
				goto out;
		}
	}

	if (flags & MSG_DONTROUTE)
		pru_flags |= PRUS_DONTROUTE;

	if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
		so_pru_send_async(so, pru_flags, top, addr, NULL, td);
		error = 0;
	} else {
		error = so_pru_send(so, pru_flags, top, addr, NULL, td);
	}
	top = NULL;	/* sent or freed in lower layer */

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}
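
#if 0
/*
 * Editor's illustrative sketch, not part of the original file: one UDP
 * datagram pushed through sosendudp().  Passing MSG_SYNC forces the
 * synchronous so_pru_send() path even when udp_sosend_async is enabled
 * (see above).  The function name is an assumption.
 */
static int
example_udp_send(struct socket *so, struct sockaddr *dst,
    void *buf, size_t len, struct thread *td)
{
	struct uio auio;
	struct iovec aiov;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;

	return (sosendudp(so, dst, &auio, NULL, NULL, MSG_SYNC, td));
}
#endif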

int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags,
	  struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int error, mlen;
	int allatonce;
	int pru_flags;

	if (uio) {
		KKASSERT(top == NULL);
		allatonce = 0;
		resid = uio->uio_resid;
	} else {
		allatonce = 1;
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 * can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on TCP
	 */
	if (flags & MSG_EOR) {
		error = EINVAL;
		goto out;
	}

	if (control) {
		/* TCP doesn't do control messages (rights, creds, etc) */
		if (control->m_len) {
			error = EINVAL;
			goto out;
		}
		m_freem(control);	/* empty control, just free it */
		control = NULL;
	}

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;

#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_state & SS_ISCONFIRMING) == 0)
			gotoerr(ENOTCONN);
		if (allatonce && resid > so->so_snd.ssb_hiwat)
			gotoerr(EMSGSIZE);

		space = ssb_space_prealloc(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid) && !allatonce &&
		    space < so->so_snd.ssb_lowat) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		do {
			int cnt = 0, async = 0;

			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				if (tcp_sosend_jcluster) {
					m = m_getlj((int)resid, M_WAITOK, MT_DATA,
						    top == NULL ? M_PKTHDR : 0, &mlen);
				} else {
					m = m_getl((int)resid, M_WAITOK, MT_DATA,
						   top == NULL ? M_PKTHDR : 0, &mlen);
				}
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0)
					break;
				++cnt;
			} while (space > 0 && cnt < tcp_sosend_agglim);

			if (tcp_sosend_async)
				async = 1;

			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
				async = 0;
			} else if ((flags & MSG_EOF) && resid == 0) {
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
				async = 1;
			} else {
				pru_flags = 0;
			}

			if (flags & MSG_SYNC)
				async = 0;

			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			for (m = top; m; m = m->m_next)
				ssb_preallocstream(&so->so_snd, m);
			if (!async) {
				error = so_pru_send(so, pru_flags, top,
				    NULL, NULL, td);
			} else {
				so_pru_send_async(so, pru_flags, top,
				    NULL, NULL, td);
				error = 0;
			}

			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
#endif

/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;
	boolean_t free_rights = FALSE;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			const struct cmsghdr *cm = mtod(m, struct cmsghdr *);

			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS) {
					error = pr->pr_domain->dom_externalize
					    (m, flags);
				}
				*controlp = m;
				m = n;
			} else {
				if (cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS)
					free_rights = TRUE;
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		}
		if (controlp && *controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * flag OOB data.
	 */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
				("receive 3"));
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				if (sio) {
					n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
					sbappend(sio, m);
					m = n;
				} else {
					m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
				}
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					n = m_copym(m, 0, len, M_WAITOK);
					if (n)
						sbappend(sio, n);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.ssb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep signalsockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		       resid > 0 && !sosendallatonce(so) &&
		       so->so_rcv.ssb_mb == NULL) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = ssb_wait(&so->so_rcv);
			if (error) {
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.ssb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;

	/*
	 * Cleanup.  If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (free_chain) {
		if (free_rights && (pr->pr_flags & PR_RIGHTS) &&
		    pr->pr_domain->dom_dispose)
			pr->pr_domain->dom_dispose(free_chain);
		m_freem(free_chain);
	}
	return (error);
}
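
#if 0
/*
 * Editor's illustrative sketch, not part of the original file: a
 * non-blocking kernel-side read via soreceive() into a local buffer.
 * MSG_DONTWAIT makes the call return EWOULDBLOCK instead of sleeping.
 * The function name is an assumption; the soreceive() signature is the
 * one defined above.
 */
static int
example_soreceive_buf(struct socket *so, void *buf, size_t len,
    struct thread *td)
{
	struct uio auio;
	struct iovec aiov;
	int flags = MSG_DONTWAIT;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;

	return (soreceive(so, NULL, &auio, NULL, NULL, &flags));
}
#endif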

int
sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff;
	int didoob;
	size_t resid, orig_resid, restmp;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 *
	 * Lock a limited number of mbufs (not all, so sbcompress() still
	 * works well).  The token is used as an interlock for sbwait() so
	 * release it afterwards.
	 */
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	lwkt_gettoken(&so->so_rcv.ssb_token);
	m = so->so_rcv.ssb_mb;

	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		lwkt_reltoken(&so->so_rcv.ssb_token);
		if (error)
			goto done;
		goto restart;
	}

	/*
	 * Token still held
	 */
dontblock:
	n = m;
	restmp = 0;
	while (n && restmp < resid) {
		n->m_flags |= M_SOLOCKED;
		restmp += n->m_len;
		if (n->m_next == NULL)
			n = n->m_nextpkt;
		else
			n = n->m_next;
	}

	/*
	 * Release token for loop
	 */
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 *
	 * NOTE: Token is not held for loop
	 */
	moff = 0;
	offset = 0;
	didoob = 0;

	while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) {
		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
		    ("receive 3"));

		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		offset += len;
		if (len == m->m_len - moff) {
			m = m->m_next;
			moff = 0;
		} else {
			moff += len;
		}

		/*
		 * Check oobmark
		 */
		if (so->so_oobmark && offset == so->so_oobmark) {
			didoob = 1;
			break;
		}
	}

	/*
	 * Synchronize sockbuf with data we read.
	 *
	 * NOTE: (m) is junk on entry (it could be left over from the
	 *	 previous loop).
	 */
	if ((flags & MSG_PEEK) == 0) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		m = so->so_rcv.ssb_mb;
		while (m && offset >= m->m_len) {
			if (so->so_oobmark) {
				so->so_oobmark -= m->m_len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset -= m->m_len;
			if (sio) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				sbappend(sio, m);
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb,
						 m, &free_chain);
			}
		}
		if (offset) {
			KKASSERT(m);
			if (sio) {
				n = m_copym(m, 0, offset, M_WAITOK);
				if (n)
					sbappend(sio, n);
			}
			m->m_data += offset;
			m->m_len -= offset;
			so->so_rcv.ssb_cc -= offset;
			if (so->so_oobmark) {
				so->so_oobmark -= offset;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset = 0;
		}
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * If the MSG_WAITALL flag is set (for non-atomic socket),
	 * we must not quit until resid == 0 or an error termination.
	 *
	 * If a signal/timeout occurs, return with a short count but without
	 * error.
	 *
	 * Keep signalsockbuf locked against other readers.
	 *
	 * XXX if MSG_PEEK we currently do quit.
	 */
	if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) &&
	    didoob == 0 && resid > 0 &&
	    !sosendallatonce(so)) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		error = 0;
		while ((m = so->so_rcv.ssb_mb) == NULL) {
			if (so->so_error || (so->so_state & SS_CANTRCVMORE)) {
				error = so->so_error;
				break;
			}
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (so->so_pcb)
				so_pru_rcvd_async(so);
			if (so->so_rcv.ssb_mb == NULL)
				error = ssb_wait(&so->so_rcv);
			if (error) {
				lwkt_reltoken(&so->so_rcv.ssb_token);
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
		}
		if (m && error == 0)
			goto dontblock;
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * Token not held here.
	 *
	 * Cleanup.  If an atomic read was requested drop any unread data XXX
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (so->so_pcb)
			so_pru_rcvd_async(so);
	}

	if (orig_resid == resid && orig_resid &&
	    (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	if (free_chain)
		m_freem(free_chain);
	return (error);
}

/*
 * Shut a socket down.  Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR) {
		/*ssb_lock(&so->so_rcv, M_WAITOK);*/
		sorflush(so);
		/*ssb_unlock(&so->so_rcv);*/
	}
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}

void
sorflush(struct socket *so)
{
	struct signalsockbuf *ssb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct signalsockbuf asb;

	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

	lwkt_gettoken(&ssb->ssb_token);
	socantrcvmore(so);
	asb = *ssb;

	/*
	 * Can't just blow up the ssb structure here
	 */
	bzero(&ssb->sb, sizeof(ssb->sb));
	ssb->ssb_timeo = 0;
	ssb->ssb_lowat = 0;
	ssb->ssb_hiwat = 0;
	ssb->ssb_mbmax = 0;
	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

	if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
	ssb_release(&asb, so);

	lwkt_reltoken(&ssb->ssb_token);
}

#ifdef INET
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				kfree(af->so_accept_filter_str, M_ACCF);
			}
			kfree(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			af->so_accept_filter_str = kmalloc(len, M_ACCF,
			    M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			kfree(af->so_accept_filter_str, M_ACCF);
			kfree(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		kfree(afap, M_TEMP);
	return (error);
}
#endif /* INET */

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}
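/*
 * Editor's note: a hypothetical sketch of how a protocol-level
 * pr_ctloutput() routine consumes option values through the same
 * helper; example_ctloutput() and example_apply() are invented names.
 *
 *	static int
 *	example_ctloutput(struct socket *xso, struct sockopt *xsopt)
 *	{
 *		int error, optval;
 *
 *		error = sooptcopyin(xsopt, &optval, sizeof(optval),
 *				    sizeof(optval));
 *		if (error)
 *			return (error);
 *		return (example_apply(xso, optval));
 *	}
 */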
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif /* INET */
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
					&so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
						 SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}

/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}
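/*
 * Editor's note: the matching SOPT_GET direction usually reduces to
 * handing a fixed-size kernel value back, e.g. (illustrative only,
 * some_kernel_state is an invented name):
 *
 *	int optval = some_kernel_state;
 *
 *	error = sooptcopyout(sopt, &optval, sizeof(optval));
 *
 * Because soopt_from_kbuf() clips to the user's buffer size, a short
 * buffer yields a truncated but successful result.
 */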
void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer
	 * must be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}

int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	long optval_l;
	struct linger l;
	struct timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			afap = kmalloc(sizeof(*afap), M_TEMP,
				       M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			kfree(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
			break;

		case SO_CPUHINT:
			optval = -1; /* no hint */
			goto integer;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
			so_pr_ctloutput(so, sopt);
		return (error);
	}
}

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}
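/*
 * Editor's note: an illustrative call sequence for these legacy
 * helpers; option paths that still want an mbuf chain size it with
 * soopt_getm() and then fill it from the sockopt:
 *
 *	struct mbuf *m;
 *	int error;
 *
 *	error = soopt_getm(sopt, &m);
 *	if (error == 0)
 *		error = soopt_mcopyin(sopt, m);
 *
 * The chain is sized from sopt->sopt_valsize up front, which is why a
 * size mismatch in soopt_to_mbuf() below is treated as a panic.
 */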
/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL) /* the caller should have allocated enough mbufs */
		panic("ip6_sooptmcopyin");
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* the user-supplied sockopt buffer must be large enough */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}

void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	/*
	 * NOTE:
	 * There is no need to use NOTE_OOB as the KNOTE hint here:
	 * the soread filter depends on so_oobmark and the SS_RCVATMARK
	 * bit of so_state.  NOTE_OOB would cause an unnecessary penalty
	 * in KNOTE if there were contention on knote processing.
	 */
	KNOTE(&so->so_rcv.ssb_kq.ki_note, 0);
}
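/*
 * Editor's note: an illustrative userland consumer (assumed, not part
 * of this file): urgent data can be waited for either via SIGURG
 * (pgsigio above) or through the except filter registered by
 * sokqfilter() below:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, fd, EVFILT_EXCEPT, EV_ADD, NOTE_OOB, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * filt_soread() then reports the event once so_oobmark is set or the
 * socket is at the mark (SS_RCVATMARK).
 */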
int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (so->so_snd.ssb_flags & SSB_PREALLOC)
		kn->kn_data = ssb_space_prealloc(&so->so_snd);
	else
		kn->kn_data = ssb_space(&so->so_snd);

	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (!TAILQ_EMPTY(&so->so_comp));
}
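/*
 * Editor's note: an illustrative userland sketch (assumed, not part of
 * this file) of the listen-socket path: an EVFILT_READ registration on
 * a listening socket selects filt_solisten() above, so a server can
 * block in kevent(2) and read the completed-connection count from the
 * returned data field (error handling omitted):
 *
 *	struct kevent kev, ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, lsock, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) > 0) {
 *		while (ev.data-- > 0)
 *			accept(lsock, NULL, NULL);
 *	}
 */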